Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Modified by me to also check at driver tag allocation time if the
original request was reserved, so we can be sure to allocate a
properly reserved tag at that point in time, too.
Signed-off-by: Jens Axboe <axboe@fb.com>
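For reference, here is a minimal standalone sketch of the tag-space split that
the reserved check relies on. It is not part of the patch: struct tag_map and
tag_is_reserved() are illustrative stand-ins for struct blk_mq_tags and the
blk_mq_tag_is_reserved() helper added below, and the pool sizes are made up.
Tags below nr_reserved_tags come from the reserved pool; regular tags are
offset by nr_reserved_tags, which is the real_tag arithmetic visible in
blk_mq_put_tag().

/* Illustration only, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

struct tag_map {
	unsigned int nr_reserved_tags;	/* stand-in for tags->nr_reserved_tags */
	unsigned int nr_tags;		/* total tags, reserved included */
};

/* Same test the patch adds as blk_mq_tag_is_reserved(). */
static bool tag_is_reserved(const struct tag_map *tags, unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

int main(void)
{
	struct tag_map tags = { .nr_reserved_tags = 2, .nr_tags = 8 };
	unsigned int tag;

	for (tag = 0; tag < tags.nr_tags; tag++) {
		if (tag_is_reserved(&tags, tag))
			printf("tag %u: reserved pool\n", tag);
		else
			printf("tag %u: regular pool, real_tag %u\n",
			       tag, tag - tags.nr_reserved_tags);
	}
	return 0;
}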
---

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
 	 */
 	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_tags = blk_mq_alloc_rq_map(set, i, q->nr_requests, 0);
+		hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
+						q->nr_requests, set->reserved_tags);
 		if (!hctx->sched_tags) {
 			ret = -ENOMEM;
 			break;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
 		    struct blk_mq_ctx *ctx, unsigned int tag)
 {
-	if (tag >= tags->nr_reserved_tags) {
+	if (!blk_mq_tag_is_reserved(tags, tag)) {
 		const int real_tag = tag - tags->nr_reserved_tags;

 		BUG_ON(real_tag >= tags->nr_tags);
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
 	hctx->tags->rqs[tag] = rq;
 }

+static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
+					  unsigned int tag)
+{
+	return tag < tags->nr_reserved_tags;
+}
+
 #endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
 		return true;
 	}

+	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
+		data.flags |= BLK_MQ_REQ_RESERVED;
+
 	rq->tag = blk_mq_get_tag(&data);
 	if (rq->tag >= 0) {
 		if (blk_mq_tag_busy(data.hctx)) {
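To show why the check at driver tag allocation time matters end to end, here
is a userspace model of the two-level tag scheme, again purely illustrative:
the pool layout, the naive bump allocators, and the REQ_RESERVED flag are
assumptions of the sketch, standing in for sched_tags, the driver tag set, and
BLK_MQ_REQ_RESERVED. A request whose internal (scheduler) tag came from the
reserved pool asks for a reserved driver tag as well, which is the check added
to blk_mq_get_driver_tag() above.

/* Illustration only, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_RESERVED	(1U << 0)	/* stands in for BLK_MQ_REQ_RESERVED */

struct tag_pool {
	unsigned int nr_reserved_tags;
	unsigned int next_reserved;	/* naive bump allocators, no bitmaps */
	unsigned int next_regular;
};

static bool tag_is_reserved(const struct tag_pool *pool, unsigned int tag)
{
	return tag < pool->nr_reserved_tags;
}

static unsigned int get_tag(struct tag_pool *pool, unsigned int flags)
{
	/* reserved tags occupy [0, nr_reserved_tags); regular tags follow */
	if (flags & REQ_RESERVED)
		return pool->next_reserved++;
	return pool->nr_reserved_tags + pool->next_regular++;
}

int main(void)
{
	struct tag_pool sched_tags = { .nr_reserved_tags = 1 };
	struct tag_pool driver_tags = { .nr_reserved_tags = 1 };
	unsigned int internal_tag, flags, driver_tag;

	/* scheduler hands out a reserved internal tag... */
	internal_tag = get_tag(&sched_tags, REQ_RESERVED);

	/* ...so the driver tag must come from the reserved pool, too */
	flags = tag_is_reserved(&sched_tags, internal_tag) ? REQ_RESERVED : 0;
	driver_tag = get_tag(&driver_tags, flags);

	printf("internal_tag=%u driver_tag=%u (reserved=%d)\n",
	       internal_tag, driver_tag,
	       tag_is_reserved(&driver_tags, driver_tag));
	return 0;
}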