blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
author	Jens Axboe <axboe@kernel.dk>
Tue, 18 Dec 2018 04:11:17 +0000 (21:11 -0700)
committer	Jens Axboe <axboe@kernel.dk>
Tue, 18 Dec 2018 04:31:42 +0000 (21:31 -0700)
There's a single user of this function, dm, and dm just wants to
check whether IO is actually inflight, not merely whether requests
have been allocated.

This fixes a hang with srp/002 in blktests with dm, where dm tries
to suspend and waits for inflight IO to finish first. Because the
check also matched requests that were merely allocated but never
started, that wait never completed.

Tested-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
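
For context (not part of the patch): the allocated-vs-inflight distinction
comes from the blk-mq request state machine. A request only moves to
MQ_RQ_IN_FLIGHT once the driver starts it with blk_mq_start_request();
requests that are allocated but never dispatched stay in MQ_RQ_IDLE. A
minimal, hypothetical ->queue_rq() sketch to show where that transition
happens (example_queue_rq is illustrative only, not from this patch):

/* Hypothetical driver callback, only to show where MQ_RQ_IN_FLIGHT is set. */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* Until this call the request is allocated but still MQ_RQ_IDLE. */
	blk_mq_start_request(rq);

	/* ... hand the request off to the hardware here ... */

	return BLK_STS_OK;
}
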
block/blk-mq.c
drivers/md/dm.c
include/linux/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6847f014606b1a3996d8acee1a8b141ecb598045..b0888a89fa662df768a1d0e88e744324592302b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -805,14 +805,14 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
-static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                             void *priv, bool reserved)
+static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                              void *priv, bool reserved)
 {
        /*
-        * If we find a request, we know the queue is busy. Return false
-        * to stop the iteration.
+        * If we find a request that is inflight and the queue matches,
+        * we know the queue is busy. Return false to stop the iteration.
         */
-       if (rq->q == hctx->queue) {
+       if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
                bool *busy = priv;
 
                *busy = true;
@@ -822,14 +822,14 @@ static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
        return true;
 }
 
-bool blk_mq_queue_busy(struct request_queue *q)
+bool blk_mq_queue_inflight(struct request_queue *q)
 {
        bool busy = false;
 
-       blk_mq_queue_tag_busy_iter(q, blk_mq_check_busy, &busy);
+       blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
        return busy;
 }
-EXPORT_SYMBOL_GPL(blk_mq_queue_busy);
+EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c414d40d645dcbb206d37802ed57c5ab63ae03f2..dddbca63e140e82c6110b8e8b92a2697a7cc25a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -663,7 +663,7 @@ static bool md_in_flight_bios(struct mapped_device *md)
 static bool md_in_flight(struct mapped_device *md)
 {
        if (queue_is_mq(md->queue))
-               return blk_mq_queue_busy(md->queue);
+               return blk_mq_queue_inflight(md->queue);
        else
                return md_in_flight_bios(md);
 }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 57eda7b2024311525fdbbcbd8f1ba534a76d347a..d3c0a0d2680b98ee61547102b4dd038a4671165d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -257,7 +257,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 
-bool blk_mq_queue_busy(struct request_queue *q);
+bool blk_mq_queue_inflight(struct request_queue *q);
 
 enum {
        /* return when out of requests */
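
For completeness, a simplified sketch of the dm suspend-side wait that
consumes md_in_flight(), based on the dm_wait_for_completion() pattern in
drivers/md/dm.c but condensed for illustration rather than copied verbatim:

/* Simplified illustration of the wait loop that relies on md_in_flight(). */
static int wait_for_inflight_io(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (true) {
		prepare_to_wait(&md->wait, &wait, task_state);

		/* With this patch, only truly inflight requests keep us here. */
		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

Before this change, requests that were allocated but never started (as in
the blocked srp/002 case) kept the busy check returning true, so a loop
like the one above never terminated.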