nvme: introduce nvme_is_aen_req function
authorIsrael Rukshin <israelr@mellanox.com>
Sun, 13 Oct 2019 16:57:31 +0000 (19:57 +0300)
committerJens Axboe <axboe@kernel.dk>
Mon, 4 Nov 2019 17:56:40 +0000 (10:56 -0700)
The new nvme_is_aen_req() helper centralizes the check for whether a completion on the admin queue belongs to an Asynchronous Event Request (qid == 0 and command_id >= NVME_AQ_BLK_MQ_DEPTH). Using it in the PCIe, RDMA, TCP and loop transports improves code readability and reduces code duplication.

Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index 38a83ef5bcd3513e13b8f00f567ed61efec1f8d6..912f9500ed11fa907cd49a80c505729b49894905 100644 (file)
@@ -445,6 +445,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
        put_device(ctrl->device);
 }
 
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+       return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
index bb88681f4dc35c386b47cbd571824f06d7cce2bf..7082116e9206befbe242fee368ce708fb66d3637 100644 (file)
@@ -967,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvmeq->qid == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+       if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
                nvme_complete_async_event(&nvmeq->dev->ctrl,
                                cqe->status, &cqe->result);
                return;
index 4d280160dd3fd32608b5d4e9a7b699646adcdaf8..154fa4e32ad8fc5f10739b2107791079bc557d39 100644 (file)
@@ -1501,8 +1501,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+       if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+                                    cqe->command_id)))
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        else
index 385a5212c10f199126cd2627a5264ed82b753b76..124fda67613a65b023e065bdc5680476e3f58010 100644 (file)
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
-           cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+       if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+                                    cqe->command_id)))
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        else
index 748a39fca771408717d18dc1c61e4723d62c6078..bd1f81f97ab76b199e4e74f4949d8d134d00e509 100644 (file)
@@ -102,8 +102,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
-                       cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+       if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
+                                    cqe->command_id))) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {