parent = bfqg_parent(bfqg);
- lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+ lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
if (unlikely(!parent))
return;
unsigned long flags;
struct bfq_io_cq *icq;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
icq = icq_to_bic(ioc_lookup_icq(ioc, q));
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
return icq;
}
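The pattern repeated in every hunk below follows from the struct request_queue change at the end of this patch: the queue lock is no longer a spinlock_t pointer (which could have been pointed at a driver-supplied lock), it is embedded in the queue itself, so every user now takes the member's address. A minimal illustrative sketch, not part of the patch, assuming the usual block-layer headers:

static void example_update_under_queue_lock(struct request_queue *q)
{
	unsigned long flags;

	/* new form: pass the address of the embedded lock */
	spin_lock_irqsave(&q->queue_lock, flags);
	/* touch state protected by the queue lock here */
	spin_unlock_irqrestore(&q->queue_lock, flags);
}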
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_dispatch_stats(struct request_queue *q,
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
if (idle_timer_disabled)
bfqg_stats_update_idle_time(bfqq_group(bfqq));
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_insert_stats(struct request_queue *q,
}
eq->elevator_data = bfqd;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/*
* Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
int i, ret;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
struct blkcg_gq *parent = blkg->parent;
int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
/* Something wrong if we are trying to remove same group twice */
{
struct blkcg_gq *blkg, *n;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
}
q->root_blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
/*
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(blkg->q->queue_lock);
+ spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
struct cgroup_subsys_state *pos_css;
u64 sum = 0;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
struct blkg_rwstat sum = { };
int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP);
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx)
- __acquires(rcu) __acquires(disk->queue->queue_lock)
+ __acquires(rcu) __acquires(&disk->queue->queue_lock)
{
struct gendisk *disk;
struct request_queue *q;
q = disk->queue;
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(blkcg, pol, q);
if (IS_ERR(blkg)) {
}
/* Drop locks to do new blkg allocation with GFP_KERNEL. */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
}
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
return 0;
fail_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
fail:
put_disk_and_module(disk);
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
- __releases(ctx->disk->queue->queue_lock) __releases(rcu)
+ __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
- spin_unlock_irq(ctx->disk->queue->queue_lock);
+ spin_unlock_irq(&ctx->disk->queue->queue_lock);
rcu_read_unlock();
put_disk_and_module(ctx->disk);
}
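For reference, blkg_conf_prep() and blkg_conf_finish() are used as a pair: between the two calls the caller holds both the RCU read lock and the queue's embedded queue_lock, per the sparse annotations above. A hedged sketch of a caller, loosely modeled on how the throttling policy's cgroup file handlers use the pair (the function name and return convention are illustrative):

static int example_set_limit(struct blkcg *blkcg,
			     const struct blkcg_policy *pol, char *buf)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, buf, &ctx);
	if (ret)
		return ret;

	/*
	 * ctx.blkg is valid here; rcu_read_lock() and
	 * &ctx.disk->queue->queue_lock are held until blkg_conf_finish().
	 */

	blkg_conf_finish(&ctx);
	return 0;
}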
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- spin_lock_irq(blkg->q->queue_lock);
+ spin_lock_irq(&blkg->q->queue_lock);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes));
wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
- spin_unlock_irq(blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->q->queue_lock);
if (rbytes || wbytes || rios || wios) {
has_stats = true;
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
blkg_destroy(blkg);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irq(&blkcg->lock);
cpu_relax();
/* Make sure the root blkg exists. */
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
blkg_destroy_all(q);
return ret;
err_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
*/
void blkcg_drain_queue(struct request_queue *q)
{
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
}
}
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
goto pd_prealloc;
}
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
out_bypass_end:
if (q->mq_ops)
blk_mq_unfreeze_queue(q);
if (q->mq_ops)
blk_mq_freeze_queue(q);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
__clear_bit(pol->plid, q->blkcg_pols);
}
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (q->mq_ops)
blk_mq_unfreeze_queue(q);
*/
void blk_cleanup_queue(struct request_queue *q)
{
- spinlock_t *lock = q->queue_lock;
-
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
percpu_ref_exit(&q->q_usage_counter);
- spin_lock_irq(lock);
- if (q->queue_lock != &q->__queue_lock)
- q->queue_lock = &q->__queue_lock;
- spin_unlock_irq(lock);
-
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
- spin_lock_init(&q->__queue_lock);
- q->queue_lock = &q->__queue_lock;
+ spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
struct io_cq, ioc_node);
struct request_queue *q = icq->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
ioc_destroy_icq(icq);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
cpu_relax();
{
LIST_HEAD(icq_list);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_splice_init(&q->icq_list, &icq_list);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
__ioc_clear_queue(&icq_list);
}
{
struct io_cq *icq;
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* icq's are indexed from @ioc using radix tree and hint pointer,
INIT_HLIST_NODE(&icq->ioc_node);
/* lock both q and ioc and try to link @icq */
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
spin_lock(&ioc->lock);
if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
}
spin_unlock(&ioc->lock);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
radix_tree_preload_end();
return icq;
}
bio_associate_blkcg(bio, &blkcg->css);
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
if (!blkg)
goto out;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!icq) {
icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
/* Switch q_usage_counter back to per-cpu mode. */
blk_mq_unfreeze_queue(q);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (ret < 0)
pm_runtime_mark_last_busy(q->dev);
else
q->rpm_status = RPM_SUSPENDING;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (ret)
blk_clear_pm_only(q);
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
} else {
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (err)
blk_clear_pm_only(q);
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_RESUMING;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
} else {
q->rpm_status = RPM_SUSPENDED;
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!err)
blk_clear_pm_only(q);
*/
void blk_set_runtime_active(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
pm_request_autosuspend(q->dev);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
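For context, the blk_{pre,post}_runtime_suspend() hooks above are meant to bracket the device's runtime-PM suspend callback, with blk_post_runtime_suspend() recording the outcome in q->rpm_status under the embedded queue_lock as shown. A hedged sketch of the suspend side, loosely modeled on the SCSI runtime-PM path (names and error handling are illustrative):

static int example_runtime_suspend(struct device *dev, struct request_queue *q,
				   int (*driver_suspend)(struct device *))
{
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;

	err = driver_suspend ? driver_suspend(dev) : 0;

	/* records RPM_SUSPENDED or RPM_ACTIVE under &q->queue_lock */
	blk_post_runtime_suspend(q, err);
	return err;
}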
static inline void blk_pm_requeue_request(struct request *rq)
{
- lockdep_assert_held(rq->q->queue_lock);
+ lockdep_assert_held(&rq->q->queue_lock);
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq->q->nr_pending--;
static inline void blk_pm_add_request(struct request_queue *q,
struct request *rq)
{
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
if (q->dev && !(rq->rq_flags & RQF_PM))
q->nr_pending++;
static inline void blk_pm_put_request(struct request *rq)
{
- lockdep_assert_held(rq->q->queue_lock);
+ lockdep_assert_held(&rq->q->queue_lock);
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
--rq->q->nr_pending;
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
return ret;
}
bool dispatched;
int ret;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (throtl_can_upgrade(td, NULL))
throtl_upgrade_state(td);
break;
/* this dispatch window is still open, relax and repeat */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
cpu_relax();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
if (!dispatched)
queue_work(kthrotld_workqueue, &td->dispatch_work);
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
/**
bio_list_init(&bio_list_on_stack);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
bio_list_add(&bio_list_on_stack, bio);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
goto out;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
throtl_update_latency_buckets(td);
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
out:
bio_set_flag(bio, BIO_THROTTLED);
* Dispatch all currently throttled bios on @q through ->make_request_fn().
*/
void blk_throtl_drain(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
+ __releases(&q->queue_lock) __acquires(&q->queue_lock)
{
struct throtl_data *td = q->td;
struct blkcg_gq *blkg;
tg_drain_bios(&td->service_queue);
rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/* all bios now should be in td->service_queue, issue them */
for (rw = READ; rw <= WRITE; rw++)
NULL)))
generic_make_request(bio);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
DRS->maxtrack = 1;
/* unlock chained buffers */
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
floppy_end_request(req, 0);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
floppy_end_request(req, BLK_STS_IOERR);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
}
}
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blk_queue_max_hw_sectors(q, pd->settings.size);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
{
struct request_queue *q = rq->q;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
rq->rq_flags |= RQF_QUIET;
scsi_req(rq)->result = -ENXIO;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
blk_mq_end_request(rq, BLK_STS_OK);
return -ENXIO;
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
blk_execute_rq(q, NULL, rq, true);
return scsi_req(rq)->result ? -EIO : 0;
printk("%s: completing PM request, %s\n", drive->name,
(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
blk_mq_stop_hw_queues(q);
else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
drive->hwif->rq = NULL;
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
throtl = blk_throtl_bio(q, blkg, bio);
*/
gfp_t bounce_gfp;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ spinlock_t queue_lock;
/*
* queue kobject