	return err;
}
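+/*
+ * kfd_queue_work - queue the work to a CPU on the local NUMA node
+ *
+ * Starting from the CPU that took the interrupt, scan the online CPUs
+ * for the next one on the same NUMA node and queue the work there, so
+ * the work runs on a CPU other than the current one where possible
+ * while keeping NUMA locality. If the scan wraps all the way around,
+ * fall back to the current CPU.
+ */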
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+				  struct work_struct *work)
+{
+	int cpu, new_cpu;
+
+	cpu = new_cpu = smp_processor_id();
+	do {
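+		/*
+		 * cpumask_next() returns nr_cpu_ids once it runs past the
+		 * last online CPU; the modulo wraps the scan back to CPU 0.
+		 */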
+		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+		if (cpu_to_node(new_cpu) == numa_node_id())
+			break;
+	} while (cpu != new_cpu);
+
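+	/* new_cpu is a CPU on the local node, possibly the current one */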
+	queue_work_on(new_cpu, wq, work);
+}
+
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
-		queue_work(kfd->ih_wq, &kfd->interrupt_work);
+		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}