return &pools[highpri];
}
-static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
+static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
{
- if (!(wq->flags & WQ_UNBOUND)) {
- if (likely(cpu < nr_cpu_ids))
- return per_cpu_ptr(wq->cpu_pwqs, cpu);
- } else if (likely(cpu == WORK_CPU_UNBOUND)) {
- return list_first_entry(&wq->pwqs, struct pool_workqueue,
- pwqs_node);
- }
- return NULL;
+ return list_first_entry(&wq->pwqs, struct pool_workqueue, pwqs_node);
}
static unsigned int work_color_to_flags(int color)
* work needs to be queued on that cpu to guarantee
* non-reentrancy.
*/
- pwq = get_pwq(cpu, wq);
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
last_pool = get_work_pool(work);
if (last_pool && last_pool != pwq->pool) {
worker = find_worker_executing_work(last_pool, work);
if (worker && worker->current_pwq->wq == wq) {
- pwq = get_pwq(last_pool->cpu, wq);
+ pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
} else {
/* meh... not running there, queue here */
spin_unlock(&last_pool->lock);
spin_lock(&pwq->pool->lock);
}
} else {
- pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+ pwq = first_pwq(wq);
spin_lock(&pwq->pool->lock);
}
else
wq = system_wq;
- insert_work(get_pwq(pool->cpu, wq), rebind_work,
+ insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
worker->scheduled.next,
work_color_to_flags(WORK_NO_COLOR));
}
return -ENOMEM;
for_each_possible_cpu(cpu) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ struct pool_workqueue *pwq =
+ per_cpu_ptr(wq->cpu_pwqs, cpu);
pwq->pool = get_std_worker_pool(cpu, highpri);
list_add_tail(&pwq->pwqs_node, &wq->pwqs);
*/
bool workqueue_congested(int cpu, struct workqueue_struct *wq)
{
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ struct pool_workqueue *pwq;
+
+ if (!(wq->flags & WQ_UNBOUND))
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ else
+ pwq = first_pwq(wq);
return !list_empty(&pwq->delayed_works);
}
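Note (not part of the patch, just an illustration): with get_pwq() removed, each caller open-codes the bound/unbound distinction, as workqueue_congested() above now does. A hypothetical helper restating that branch, purely as a sketch (the name pick_pwq is made up; per_cpu_ptr(), first_pwq(), wq->cpu_pwqs and WQ_UNBOUND are the identifiers the patch itself uses):

static struct pool_workqueue *pick_pwq(int cpu, struct workqueue_struct *wq)
{
	/* per-CPU workqueues keep one pool_workqueue per possible CPU */
	if (!(wq->flags & WQ_UNBOUND))
		return per_cpu_ptr(wq->cpu_pwqs, cpu);

	/* unbound workqueues keep their sole pool_workqueue on wq->pwqs */
	return first_pwq(wq);
}

Callers that already know which kind of workqueue they hold (the per-CPU paths in the queueing and rebind hunks above) skip the branch and use the matching accessor directly, which is the point of dropping get_pwq().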