 	return atomic_read(&hctx->nr_active) < depth;
 }
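The two context lines above are the tail of hctx_may_queue(), the throttle this patch teaches __blk_mq_get_tag() to route around for internal tags. As a stand-alone sketch of the check it performs (the names, and possibly the exact rounding, are illustrative rather than the kernel's code): on a shared tag set, a hardware queue may only allocate while it holds fewer than its fair share of the tags.

    #include <stdbool.h>

    /* Illustrative sketch, not kernel API: each actively-queuing
     * hardware queue gets roughly nr_tags / active_users tags, so
     * one busy queue cannot starve the others on a shared tag set. */
    static bool may_queue_sketch(unsigned int nr_tags,
                                 unsigned int active_users,
                                 unsigned int nr_active)
    {
            unsigned int depth;

            if (active_users == 0)
                    return true;
            /* round up, and always allow a queue a few tags */
            depth = (nr_tags + active_users - 1) / active_users;
            if (depth < 4)
                    depth = 4;
            return nr_active < depth;   /* the comparison shown above */
    }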
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+			    struct sbitmap_queue *bt)
 {
-	if (!hctx_may_queue(hctx, bt))
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+	    !hctx_may_queue(data->hctx, bt))
 		return -1;
 	return __sbitmap_queue_get(bt);
 }
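This signature change is the core of the patch: __blk_mq_get_tag() now sees the whole blk_mq_alloc_data rather than just the hardware context, so it can check data->flags and skip hctx_may_queue() for BLK_MQ_REQ_INTERNAL allocations. Internal tags come from the I/O scheduler's own tag set, not the shared driver tags, so counting them against the driver-tag fair share would throttle the wrong resource.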
 		tag_offset = tags->nr_reserved_tags;
 	}
 
-	tag = __blk_mq_get_tag(data->hctx, bt);
+	tag = __blk_mq_get_tag(data, bt);
 	if (tag != -1)
 		goto found_tag;
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 		/*
 		 * Retry tag allocation after running the hardware queue,
 		 * as running the queue may also have found completions.
 		 */
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
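The three call sites in blk_mq_get_tag() — the fast path and both attempts inside the wait loop — are mechanical updates: each now forwards data, so an internal allocation is exempt from the throttle on every attempt. A hypothetical scheduler-side caller might look like the fragment below (q, hctx and ctx are assumed to exist; this illustrates the flag's effect and is not code from the patch):

    struct blk_mq_alloc_data data = {
            .q     = q,
            .flags = BLK_MQ_REQ_INTERNAL,  /* scheduler tag: skips hctx_may_queue() */
            .ctx   = ctx,
            .hctx  = hctx,
    };
    unsigned int tag = blk_mq_get_tag(&data);  /* BLK_MQ_TAG_FAIL if exhausted */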
 	rq = tags->static_rqs[tag];
 
-	if (blk_mq_tag_busy(data->hctx)) {
-		rq->rq_flags = RQF_MQ_INFLIGHT;
-		atomic_inc(&data->hctx->nr_active);
-	}
-
 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
 		rq->tag = -1;
 		rq->internal_tag = tag;
 	} else {
+		if (blk_mq_tag_busy(data->hctx)) {
+			rq->rq_flags = RQF_MQ_INFLIGHT;
+			atomic_inc(&data->hctx->nr_active);
+		}
 		rq->tag = tag;
 		rq->internal_tag = -1;
 	}
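With this hunk, a request initialized with an internal tag no longer sets RQF_MQ_INFLIGHT or bumps nr_active; the accounting is done only when the request actually owns a driver tag. The plain assignment to rq->rq_flags (rather than an OR) is unchanged from the code being moved, and is fine here because the request is being set up from scratch in this function.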
 	rq->tag = blk_mq_get_tag(&data);
 	if (rq->tag >= 0) {
+		if (blk_mq_tag_busy(data.hctx)) {
+			rq->rq_flags |= RQF_MQ_INFLIGHT;
+			atomic_inc(&data.hctx->nr_active);
+		}
 		data.hctx->tags->rqs[rq->tag] = rq;
 		goto done;
 	}
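This final hunk, in the driver-tag path (blk_mq_get_driver_tag()), is the other half of the move: when a request that was allocated with a scheduler tag later acquires a real driver tag, the busy accounting happens at that point instead. Note the |= rather than the plain assignment used during initialization; by now the request is live and rq->rq_flags may already carry other flags that must be preserved. The net effect of the patch is that nr_active tracks driver-tag ownership exactly, so active-queue throttling applies only to the resource the drivers actually share.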