nvme: return BLK_EH_DONE from ->timeout
author    Christoph Hellwig <hch@lst.de>
          Tue, 29 May 2018 13:52:30 +0000 (15:52 +0200)
committer Jens Axboe <axboe@kernel.dk>
          Tue, 29 May 2018 14:59:21 +0000 (08:59 -0600)
NVMe always completes the request before returning from ->timeout, either
by polling for it, or by disabling the controller.  Return BLK_EH_DONE so
that the block layer doesn't even try to complete it again.
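
For reference, the ->timeout contract this series leaves behind is
sketched below (hypothetical driver; the foo_* helpers are illustrative
and not part of this patch): the handler either completes the timed-out
request itself and returns BLK_EH_DONE, or returns BLK_EH_RESET_TIMER to
re-arm the timer and keep waiting.

    static enum blk_eh_timer_return foo_timeout(struct request *req,
                                                bool reserved)
    {
            /* Did we miss an interrupt?  Reap the completion ourselves. */
            if (foo_poll_completion(req))
                    return BLK_EH_DONE;             /* already completed */

            /* Command may legitimately still be running?  Wait longer. */
            if (foo_still_running(req))
                    return BLK_EH_RESET_TIMER;

            /*
             * Disabling the controller completes every outstanding
             * request, so the block layer must not touch this one again.
             */
            foo_disable_controller(req->q->queuedata);
            return BLK_EH_DONE;
    }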

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 917e1714f7d970e267c9d0b6ff204a67f11c7910..31525324b79f92d46c413222e18493bed9cf2bb9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1205,7 +1205,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                nvme_warn_reset(dev, csts);
                nvme_dev_disable(dev, false);
                nvme_reset_ctrl(&dev->ctrl);
-               return BLK_EH_HANDLED;
+               return BLK_EH_DONE;
        }
 
        /*
@@ -1215,14 +1215,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, completion polled\n",
                         req->tag, nvmeq->qid);
-               return BLK_EH_HANDLED;
+               return BLK_EH_DONE;
        }
 
        /*
         * Shutdown immediately if controller times out while starting. The
         * reset work will see the pci device disabled when it gets the forced
         * cancellation error. All outstanding requests are completed on
-        * shutdown, so we return BLK_EH_HANDLED.
+        * shutdown, so we return BLK_EH_DONE.
         */
        switch (dev->ctrl.state) {
        case NVME_CTRL_CONNECTING:
@@ -1232,7 +1232,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-               return BLK_EH_HANDLED;
+               return BLK_EH_DONE;
        default:
                break;
        }
@@ -1249,12 +1249,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                nvme_dev_disable(dev, false);
                nvme_reset_ctrl(&dev->ctrl);
 
-               /*
-                * Mark the request as handled, since the inline shutdown
-                * forces all outstanding requests to complete.
-                */
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-               return BLK_EH_HANDLED;
+               return BLK_EH_DONE;
        }
 
        if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
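
For context (paraphrased from pci.c of this era, not part of the patch):
the "completion polled" branch in the second hunk sits behind a poll of
the completion queue, so BLK_EH_DONE is only returned there once the
driver has actually reaped the completion itself:

    /*
     * Did we miss an interrupt?
     */
    if (__nvme_poll(nvmeq, req->tag)) {
            dev_warn(dev->ctrl.device,
                     "I/O %d QID %d timeout, completion polled\n",
                     req->tag, nvmeq->qid);
            return BLK_EH_DONE;
    }
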
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 1eb4438a87637d89bcf0c24760863bfdf0fd395a..ac7462cd7f0f8ef87c9fc0b8af1914cfd03cec75 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1598,7 +1598,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
        /* fail with DNR on cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-       return BLK_EH_HANDLED;
+       return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
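
Both the RDMA and loop handlers complete the timed-out command with an
"abort requested" status and the DNR (Do Not Retry) bit set, so the NVMe
core fails the request instead of retrying it.  For reference, the status
values used above, from include/linux/nvme.h (per the NVMe spec, unchanged
by this patch):

    enum {
            NVME_SC_ABORT_REQ       = 0x7,          /* Command Abort Requested */
            NVME_SC_DNR             = 0x4000,       /* Do Not Retry bit */
    };
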
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 27a8561c0cb97596d25b53620493978040082124..22e3627bf16b4c9ec91c38a5150ac1df783c6e8c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -146,7 +146,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-       return BLK_EH_HANDLED;
+       return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
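
On the block layer side, BLK_EH_DONE tells the timeout machinery that the
driver has finished (or taken ownership of) the request, so blk-mq does
nothing further with it; only BLK_EH_RESET_TIMER re-arms the timer.
Roughly, paraphrasing blk-mq after this series (not part of this patch):

    static void blk_mq_rq_timed_out(struct request *req, bool reserved)
    {
            req->rq_flags |= RQF_TIMED_OUT;
            if (req->q->mq_ops->timeout) {
                    enum blk_eh_timer_return ret;

                    ret = req->q->mq_ops->timeout(req, reserved);
                    if (ret == BLK_EH_DONE)
                            return;
                    WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
            }

            blk_add_timer(req);
    }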