From f5faa0774e07eada85b0c55ec789b3f337d01412 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan <laijs@cn.fujitsu.com>
Date: Tue, 19 Feb 2013 12:17:02 -0800
Subject: [PATCH] workqueue: use %current instead of worker->task in
 worker_maybe_bind_and_lock()

worker_maybe_bind_and_lock() uses both @worker->task and @current at
the same time.  As worker_maybe_bind_and_lock() can only be called by
the current worker task, they are always the same.

Update worker_maybe_bind_and_lock() to use %current consistently.
This doesn't introduce any functional change.

tj: Massaged the description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
 kernel/workqueue.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..f456433cf535 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1512,7 +1512,7 @@ static void worker_leave_idle(struct worker *worker)
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1537,7 +1537,6 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&pool->lock)
 {
 	struct worker_pool *pool = worker->pool;
-	struct task_struct *task = worker->task;
 
 	while (true) {
 		/*
@@ -1547,12 +1546,12 @@ __acquires(&pool->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&pool->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == pool->cpu &&
+		if (task_cpu(current) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
 				  get_cpu_mask(pool->cpu)))
 			return true;
-- 
2.30.2
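
For reference, this is how the changed region of
worker_maybe_bind_and_lock() reads once the patch is applied.  It is a
sketch reconstructed purely from the hunks above, not a standalone
listing: the comment text between the second and third hunks and the
retry path after "return true;" are not part of this diff and are
elided, and the surrounding definitions (struct worker_pool,
POOL_DISASSOCIATED, get_cpu_mask(), %current) are assumed from
kernel/workqueue.c of the same tree.

static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&pool->lock)
{
	struct worker_pool *pool = worker->pool;
	/*
	 * No more @task local: this function runs only in the worker's
	 * own context, so %current always names the same task that
	 * @worker->task did.
	 */

	while (true) {
		/*
		 * ... (comment elided between hunks) ...
		 * against POOL_DISASSOCIATED.
		 */
		if (!(pool->flags & POOL_DISASSOCIATED))
			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));

		spin_lock_irq(&pool->lock);
		if (pool->flags & POOL_DISASSOCIATED)
			return false;	/* pool lost its CPU */
		if (task_cpu(current) == pool->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(pool->cpu)))
			return true;	/* bound; pool->lock stays held */

		/* ... retry path elided (unchanged by this patch) ... */
	}
}

With the @task local gone, every task reference left in the function is
visibly %current, which makes the function's precondition -- that it is
only ever called from the worker's own task -- explicit at each use.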