if (blkcg_policy_enabled(q, pol))
return 0;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
pd_prealloc:
if (!pd_prealloc) {
spin_unlock_irq(&q->queue_lock);
out_bypass_end:
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
if (!blkcg_policy_enabled(q, pol))
return;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
spin_lock_irq(&q->queue_lock);
spin_unlock_irq(&q->queue_lock);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
struct blk_mq_hw_ctx *hctx;
int i;
*/
blk_freeze_queue_start(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_wake_waiters(q);
/* Make blk_queue_enter() reexamine the DYING flag. */
* blk_freeze_queue() should be enough for cases of passthrough
* request.
*/
- if (q->mq_ops && blk_queue_init_done(q))
+ if (queue_is_mq(q) && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_exit_queue(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_free_queue(q);
percpu_ref_exit(&q->q_usage_counter);
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
goto not_supported;
if (should_fail_bio(bio))
*/
int blk_lld_busy(struct request_queue *q)
{
- if (q->mq_ops && q->mq_ops->busy)
+ if (queue_is_mq(q) && q->mq_ops->busy)
return q->mq_ops->busy(q);
return 0;
* assigned to empty flushes, and we deadlock if we are expecting
* other requests to make progress. Don't defer for that case.
*/
- if (!list_empty(&fq->flush_data_in_flight) &&
- !(q->mq_ops && q->elevator) &&
+ if (!list_empty(&fq->flush_data_in_flight) && !q->elevator &&
time_before(jiffies,
fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
return;
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_run_hw_queues(q, false);
}
}
unsigned long nr;
int ret, err;
- if (!q->mq_ops)
+ if (!queue_is_mq(q))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
blk_queue_free_zone_bitmaps(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_release(q);
blk_trace_shutdown(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_debugfs_unregister(q);
bioset_exit(&q->bio_split);
goto unlock;
}
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
}
blk_throtl_register_queue(q);
- if ((q->mq_ops && q->elevator)) {
+ if (q->elevator) {
ret = elv_register_queue(q);
if (ret) {
mutex_unlock(&q->sysfs_lock);
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
*/
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unregister_dev(disk_to_dev(disk), q);
mutex_unlock(&q->sysfs_lock);
blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock);
- if (q->mq_ops && q->elevator)
+ if (q->elevator)
elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock);
td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
- td->track_bio_latency = !queue_is_rq_based(q);
+ td->track_bio_latency = !queue_is_mq(q);
if (!td->track_bio_latency)
blk_stat_enable_accounting(q);
}
if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
return;
- if (q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ))
+ if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);
* BIO based queues do not use a scheduler so only q->nr_zones
* needs to be updated so that the sysfs exposed value is correct.
*/
- if (!queue_is_rq_based(q)) {
+ if (!queue_is_mq(q)) {
q->nr_zones = nr_zones;
return 0;
}
/*
* we need a proper transport to send commands, not a stacked device
*/
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return 0;
bcd = &q->bsg_dev;
/*
* Special case for mq, turn off scheduling
*/
- if (q->mq_ops && !strncmp(name, "none", 4))
+ if (!strncmp(name, "none", 4))
return elevator_switch(q, NULL);
strlcpy(elevator_name, name, sizeof(elevator_name));
static inline bool elv_support_iosched(struct request_queue *q)
{
- if (q->mq_ops && q->tag_set && (q->tag_set->flags &
- BLK_MQ_F_NO_SCHED))
+ if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
return false;
return true;
}
{
int ret;
- if (!q->mq_ops || !elv_support_iosched(q))
+ if (!queue_is_mq(q) || !elv_support_iosched(q))
return count;
ret = __elevator_change(q, name);
struct elevator_type *__e;
int len = 0;
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return sprintf(name, "none\n");
if (!q->elevator)
}
spin_unlock(&elv_list_lock);
- if (q->mq_ops && q->elevator)
+ if (q->elevator)
len += sprintf(name+len, "none");
len += sprintf(len+name, "\n");
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
atomic_inc(&part->in_flight[rw]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
atomic_dec(&part->in_flight[rw]);
void part_in_flight(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
blk_mq_in_flight(q, part, inflight);
return;
}
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
blk_mq_in_flight_rw(q, part, inflight);
return;
}
int dm_request_based(struct mapped_device *md)
{
- return queue_is_rq_based(md->queue);
+ return queue_is_mq(md->queue);
}
void dm_start_queue(struct request_queue *q)
struct request_queue *q = bdev_get_queue(dev->bdev);
struct verify_rq_based_data *v = data;
- if (q->mq_ops)
+ if (queue_is_mq(q))
v->mq_count++;
else
v->sq_count++;
- return queue_is_rq_based(q);
+ return queue_is_mq(q);
}
static int dm_table_determine_type(struct dm_table *t)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
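+/* Return true if @q is a blk-mq queue, i.e. ->mq_ops is set. */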
+static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
}
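
For illustration, a minimal sketch of the calling pattern the new helper supports, mirroring the blk_freeze_queue_start() hunk above: blk-mq-only work is gated on queue_is_mq() rather than on a raw q->mq_ops test. The example_run_queues() function below is hypothetical and not part of the patch.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical caller: only blk-mq queues have hardware contexts to run. */
static void example_run_queues(struct request_queue *q)
{
	/* Bio-based queues have no blk-mq state; nothing to do. */
	if (!queue_is_mq(q))
		return;

	/* Safe now: ->mq_ops is known to be set. */
	blk_mq_run_hw_queues(q, false);
}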