/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
threads to each one as cpus come/go. */
+static long migrate_sequence __read_mostly;
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);
* Probably keventd trying to flush its own queue. So simply run
* it by hand rather than deadlocking.
*/
- preempt_enable();
- /*
- * We can still touch *cwq here because we are keventd, and
- * hot-unplug will be waiting us to exit.
- */
run_workqueue(cwq);
- preempt_disable();
} else {
struct wq_barrier barr;
int active = 0;
}
spin_unlock_irq(&cwq->lock);
- if (active) {
- preempt_enable();
+ if (active)
wait_for_completion(&barr.done);
- preempt_disable();
- }
}
}
*/
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We flush every *possible* CPU's queue, not just the online ones, and
 * detect concurrent work migration (a dying CPU's pending work being
 * taken over) via migrate_sequence: if the counter changed while we were
 * iterating, some work may have moved to a queue we already flushed, so
 * the whole pass is retried.  This replaces the old preempt_disable()
 * based CPU-hotplug exclusion, which kept preemption off across the
 * entire (sleeping!) flush.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		long sequence;
		int cpu;
again:
		sequence = migrate_sequence;

		for_each_possible_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));

		/* Work was migrated off a dead CPU during the pass: redo it. */
		if (unlikely(sequence != migrate_sequence))
			goto again;
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
}
EXPORT_SYMBOL_GPL(flush_work);
-static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
- int cpu)
+static void init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
- struct task_struct *p;
- spin_lock_init(&cwq->lock);
cwq->wq = wq;
- cwq->thread = NULL;
+ spin_lock_init(&cwq->lock);
INIT_LIST_HEAD(&cwq->worklist);
init_waitqueue_head(&cwq->more_work);
+}
+
+static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
+ int cpu)
+{
+ struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+ struct task_struct *p;
if (is_single_threaded(wq))
p = kthread_create(worker_thread, cwq, "%s", wq->name);
mutex_lock(&workqueue_mutex);
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
+ init_cpu_workqueue(wq, singlethread_cpu);
p = create_workqueue_thread(wq, singlethread_cpu);
if (!p)
destroy = 1;
wake_up_process(p);
} else {
list_add(&wq->list, &workqueues);
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
+ init_cpu_workqueue(wq, cpu);
+ if (!cpu_online(cpu))
+ continue;
+
p = create_workqueue_thread(wq, cpu);
if (p) {
kthread_bind(p, cpu);
spin_lock_irq(&cwq->lock);
list_replace_init(&cwq->worklist, &list);
+ migrate_sequence++;
while (!list_empty(&list)) {
printk("Taking work for %s\n", wq->name);