lockdep_is_held(&workqueue_lock), \
"sched RCU or workqueue lock should be held")
-#define for_each_std_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0]; \
- (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+#define for_each_cpu_worker_pool(pool, cpu) \
+ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
+ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
(pool)++)
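The rename is purely mechanical: the iterator still walks the same fixed-size per-CPU array, it just drops the "std" (only the per-CPU pools are "standard" anyway, so the "cpu" prefix says everything the old name did). A minimal userspace sketch of the pattern it implements, with per_cpu() modeled as plain 2D-array indexing and NCPUS an invented size:

#include <stdio.h>

#define NCPUS			4	/* illustrative CPU count */
#define NR_STD_WORKER_POOLS	2	/* normal and highpri, as in the patch */

struct worker_pool { int nice; };

/* stand-in for the per-CPU cpu_worker_pools[] the patch renames below */
static struct worker_pool cpu_worker_pools[NCPUS][NR_STD_WORKER_POOLS];

/* same shape as the renamed for_each_cpu_worker_pool() above */
#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &cpu_worker_pools[cpu][0];			\
	     (pool) < &cpu_worker_pools[cpu][NR_STD_WORKER_POOLS];	\
	     (pool)++)

int main(void)
{
	struct worker_pool *pool;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		for_each_cpu_worker_pool(pool, cpu)
			printf("cpu %d pool %ld\n", cpu,
			       (long)(pool - cpu_worker_pools[cpu]));
	return 0;
}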
#define for_each_busy_worker(worker, i, pool) \
* POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
*/
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- cpu_std_worker_pools);
+ cpu_worker_pools);
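So each possible CPU carries a statically allocated block of NR_STD_WORKER_POOLS pools. A rough userspace analogue of what DEFINE_PER_CPU_SHARED_ALIGNED buys here, assuming a 64-byte cacheline, keeps each CPU's block on its own cachelines so CPUs don't false-share:

#include <stdio.h>

#define NCPUS			4	/* illustrative */
#define NR_STD_WORKER_POOLS	2

struct worker_pool { int cpu; int nice; };

/* one aligned block of pools per CPU; 64 is an assumed cacheline size */
struct cpu_pools {
	struct worker_pool pools[NR_STD_WORKER_POOLS];
} __attribute__((aligned(64)));

static struct cpu_pools cpu_worker_pools[NCPUS];

int main(void)
{
	printf("pool block %zu bytes, per-cpu stride %zu bytes\n",
	       sizeof(struct worker_pool [NR_STD_WORKER_POOLS]),
	       sizeof(struct cpu_pools));
	return 0;
}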
/*
* idr of all pools. Modifications are protected by workqueue_lock. Read
struct pool_workqueue *pwq =
per_cpu_ptr(wq->cpu_pwqs, cpu);
struct worker_pool *cpu_pools =
- per_cpu(cpu_std_worker_pools, cpu);
+ per_cpu(cpu_worker_pools, cpu);
pwq->pool = &cpu_pools[highpri];
list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
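Index 0 of that per-CPU array is the normal pool and index 1 the highpri one, so a workqueue picks its pool by using the highpri flag directly as an array index. A sketch of that selection; the -20 merely mirrors what HIGHPRI_NICE_LEVEL was at the time, and the struct layouts are trimmed to the fields used here:

#include <stdbool.h>
#include <stdio.h>

struct worker_pool { int nice; };
struct pool_workqueue { struct worker_pool *pool; };

int main(void)
{
	/* [0] = normal, [1] = highpri; -20 stands in for HIGHPRI_NICE_LEVEL */
	struct worker_pool cpu_pools[2] = { { .nice = 0 }, { .nice = -20 } };
	struct pool_workqueue pwq;
	bool highpri = true;		/* WQ_HIGHPRI set on the workqueue */

	pwq.pool = &cpu_pools[highpri];	/* bool indexes as 0 or 1 */
	printf("pwq bound to pool with nice %d\n", pwq.pool->nice);
	return 0;
}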
struct worker *worker;
int i;
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
WARN_ON_ONCE(cpu != smp_processor_id());
mutex_lock(&pool->assoc_mutex);
* unbound chain execution of pending work items if other workers
* didn't already.
*/
- for_each_std_worker_pool(pool, cpu)
+ for_each_cpu_worker_pool(pool, cpu)
atomic_set(&pool->nr_running, 0);
}
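After the workers of an offlined CPU are unbound, every pool's nr_running is zeroed so concurrency management treats the pools as idle and lets pending work chain to other workers. A C11-atomics sketch of that reset, with the pool simplified to the one field that matters here:

#include <stdatomic.h>
#include <stdio.h>

#define NR_STD_WORKER_POOLS	2

struct worker_pool { atomic_int nr_running; };

/* the pools of the CPU going down; layout is illustrative */
static struct worker_pool pools[NR_STD_WORKER_POOLS];

int main(void)
{
	struct worker_pool *pool;

	/* mirror of the loop above: zero each pool's concurrency count */
	for (pool = &pools[0]; pool < &pools[NR_STD_WORKER_POOLS]; pool++)
		atomic_store(&pool->nr_running, 0);

	printf("pools now look idle to concurrency management\n");
	return 0;
}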
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker;
if (pool->nr_workers)
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&pool->assoc_mutex);
spin_lock_irq(&pool->lock);
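Both hotplug hunks dispatch over the same per-CPU pools: CPU_UP_PREPARE guarantees each pool has a worker before the CPU comes up, while CPU_ONLINE and an aborted offline (CPU_DOWN_FAILED) share the re-association path. A toy model of that dispatch; the action codes and helper are invented stand-ins, not the kernel's notifier API:

#include <stdio.h>

/* stand-in action codes; the real ones come from <linux/cpu.h> */
enum cpu_action { CPU_UP_PREPARE, CPU_ONLINE, CPU_DOWN_FAILED };

static void hotplug_dispatch(enum cpu_action action, int cpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
		/* ensure every pool on @cpu has at least one worker */
		printf("cpu%d: spawn initial workers\n", cpu);
		break;
	case CPU_DOWN_FAILED:	/* aborted offline: same recovery path */
	case CPU_ONLINE:
		/* rebind workers and clear POOL_DISASSOCIATED */
		printf("cpu%d: re-associate pools\n", cpu);
		break;
	}
}

int main(void)
{
	hotplug_dispatch(CPU_UP_PREPARE, 1);
	hotplug_dispatch(CPU_ONLINE, 1);
	return 0;
}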
struct worker_pool *pool;
i = 0;
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
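Early init binds each of a CPU's pools to that CPU and hands out per-index nice levels. A simplified sketch of the loop; std_nice[] = { 0, -20 } assumes the era's { 0, HIGHPRI_NICE_LEVEL } values and everything else is plain userspace C:

#include <stdio.h>

#define NR_STD_WORKER_POOLS	2

struct worker_pool { int cpu; int nice; };

int main(void)
{
	/* assumed to match the kernel's std_nice[] of this era */
	static const int std_nice[NR_STD_WORKER_POOLS] = { 0, -20 };
	struct worker_pool pools[NR_STD_WORKER_POOLS];
	struct worker_pool *pool;
	int cpu = 0, i = 0;

	for (pool = &pools[0]; pool < &pools[NR_STD_WORKER_POOLS]; pool++) {
		pool->cpu = cpu;	/* pool serves exactly this CPU */
		pool->nice = std_nice[i++];
		printf("pool %d: cpu %d nice %d\n", i - 1,
		       pool->cpu, pool->nice);
	}
	return 0;
}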
for_each_online_cpu(cpu) {
struct worker_pool *pool;
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker;
pool->flags &= ~POOL_DISASSOCIATED;
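Re-association itself is just clearing POOL_DISASSOCIATED under the pool lock once the workers are rebound. A userspace sketch with a pthread mutex standing in for spin_lock_irq(&pool->lock); the bit value is illustrative:

#include <pthread.h>
#include <stdio.h>

#define POOL_DISASSOCIATED	(1 << 2)	/* bit value is illustrative */

struct worker_pool { pthread_mutex_t lock; unsigned int flags; };

static void reassociate(struct worker_pool *pool)
{
	/* the mutex stands in for spin_lock_irq(&pool->lock) */
	pthread_mutex_lock(&pool->lock);
	pool->flags &= ~POOL_DISASSOCIATED;	/* pool serves its CPU again */
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct worker_pool pool = {
		.lock	= PTHREAD_MUTEX_INITIALIZER,
		.flags	= POOL_DISASSOCIATED,
	};

	reassociate(&pool);
	printf("flags after re-association: %#x\n", pool.flags);
	return 0;
}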