min(pool->nr_free, pool->percpu_batch_size));
}
-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
- int tag = -ENOSPC;
-
- spin_lock(&tags->lock);
- if (tags->nr_free)
- tag = tags->freelist[--tags->nr_free];
- spin_unlock(&tags->lock);
-
- return tag;
-}
-
/**
* percpu_ida_alloc - allocate a tag
* @pool: pool to allocate from
DEFINE_WAIT(wait);
struct percpu_ida_cpu *tags;
unsigned long flags;
- int tag;
+ int tag = -ENOSPC;
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
+ tags = raw_cpu_ptr(pool->tag_cpu);
+ spin_lock_irqsave(&tags->lock, flags);
/* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
- local_irq_restore(flags);
+ if (likely(tags->nr_free)) {
+ tag = tags->freelist[--tags->nr_free];
+ spin_unlock_irqrestore(&tags->lock, flags);
return tag;
}
+ spin_unlock_irqrestore(&tags->lock, flags);
while (1) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
/*
* prepare_to_wait() must come before steal_tags(), in case
&pool->cpus_have_tags);
}
- spin_unlock(&pool->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&pool->lock, flags);
if (tag >= 0 || state == TASK_RUNNING)
break;
}
schedule();
-
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
}
if (state != TASK_RUNNING)
finish_wait(&pool->wait, &wait);
BUG_ON(tag >= pool->nr_tags);
- local_irq_save(flags);
- tags = this_cpu_ptr(pool->tag_cpu);
+ tags = raw_cpu_ptr(pool->tag_cpu);
- spin_lock(&tags->lock);
+ spin_lock_irqsave(&tags->lock, flags);
tags->freelist[tags->nr_free++] = tag;
nr_free = tags->nr_free;
- spin_unlock(&tags->lock);
if (nr_free == 1) {
cpumask_set_cpu(smp_processor_id(),
&pool->cpus_have_tags);
wake_up(&pool->wait);
}
+ spin_unlock_irqrestore(&tags->lock, flags);
if (nr_free == pool->percpu_max_size) {
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&tags->lock);
- /*
- * Global lock held and irqs disabled, don't need percpu
- * lock
- */
if (tags->nr_free == pool->percpu_max_size) {
move_tags(pool->freelist, &pool->nr_free,
tags->freelist, &tags->nr_free,
wake_up(&pool->wait);
}
- spin_unlock(&pool->lock);
+ spin_unlock(&tags->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
-
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
- local_irq_save(flags);
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
+ spin_lock_irqsave(&remote->lock, flags);
for (i = 0; i < remote->nr_free; i++) {
err = fn(remote->freelist[i], data);
if (err)
break;
}
- spin_unlock(&remote->lock);
+ spin_unlock_irqrestore(&remote->lock, flags);
if (err)
goto out;
}
- spin_lock(&pool->lock);
+ spin_lock_irqsave(&pool->lock, flags);
for (i = 0; i < pool->nr_free; i++) {
err = fn(pool->freelist[i], data);
if (err)
break;
}
- spin_unlock(&pool->lock);
+ spin_unlock_irqrestore(&pool->lock, flags);
out:
- local_irq_restore(flags);
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
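
For reference, a minimal usage sketch of the percpu_ida interface whose locking the patch above reworks. The pool name, tag count, and wrapper function are hypothetical, invented only for illustration; percpu_ida_alloc()/percpu_ida_free() are the calls the diff touches, and percpu_ida_init()/percpu_ida_destroy() are the setup/teardown helpers from the same API.

#include <linux/errno.h>
#include <linux/percpu_ida.h>
#include <linux/sched.h>

/* Hypothetical pool and tag count, for illustration only. */
static struct percpu_ida example_pool;

static int example_use_tags(void)
{
	int tag;

	if (percpu_ida_init(&example_pool, 128))
		return -ENOMEM;

	/*
	 * May sleep until a tag becomes free; with TASK_RUNNING the
	 * call would instead return -ENOSPC once the pool is exhausted.
	 */
	tag = percpu_ida_alloc(&example_pool, TASK_UNINTERRUPTIBLE);
	if (tag >= 0)
		percpu_ida_free(&example_pool, tag);

	percpu_ida_destroy(&example_pool);
	return 0;
}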