nvme-pci: Remap CMB SQ entries on every controller reset
author Keith Busch <keith.busch@intel.com>
Tue, 13 Feb 2018 12:44:44 +0000 (05:44 -0700)
committer Keith Busch <keith.busch@intel.com>
Wed, 14 Feb 2018 00:09:50 +0000 (17:09 -0700)
The controller memory buffer is remapped to a kernel address on each
reset, but the driver was setting the submission queue base address
only on the very first queue creation. The remapped address is likely to
change after a reset, so submitting commands through the stale address
will hit a kernel bug.

This patch fixes that by setting the queue's CMB base address each time
the queue is created.
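
The failure mode is the classic pattern of a pointer cached across an
unmap/remap cycle. Below is a minimal userspace sketch of that pattern
(purely illustrative, not driver code): mmap/munmap stand in for the CMB
mapping being torn down and recreated on reset, and the cached pointer
stands in for the stale sq_cmds_io.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* First bring-up: map the buffer and cache its address. */
	char *cached = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (cached == MAP_FAILED)
		return 1;

	/* "Reset": the old mapping is torn down and recreated. */
	munmap(cached, len);
	char *remapped = mmap(NULL, len, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (remapped == MAP_FAILED)
		return 1;

	/*
	 * Writing through "cached" here would be a use-after-unmap, the
	 * userspace analogue of submitting commands through the stale
	 * CMB address after a controller reset.
	 */
	printf("cached=%p remapped=%p%s\n", (void *)cached, (void *)remapped,
	       cached == remapped ? " (same, by luck)" : " (moved)");

	munmap(remapped, len);
	return 0;
}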

Fixes: f63572dff1421 ("nvme: unmap CMB and remove sysfs file in reset path")
Reported-by: Christian Black <christian.d.black@intel.com>
Cc: Jon Derrick <jonathan.derrick@intel.com>
Cc: <stable@vger.kernel.org> # 4.9+
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/pci.c

index ab9c19525fa80092efe44164843540b9e15fd0bb..b427157af74e06a48d3343eb17dfddc4c32ab0dc 100644
@@ -1364,18 +1364,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                int qid, int depth)
 {
-       if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-               unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-                                                     dev->ctrl.page_size);
-               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-               nvmeq->sq_cmds_io = dev->cmb + offset;
-       } else {
-               nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
-               if (!nvmeq->sq_cmds)
-                       return -ENOMEM;
-       }
+       /* CMB SQEs will be mapped before creation */
+       if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+               return 0;
 
+       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+                                           &nvmeq->sq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->sq_cmds)
+               return -ENOMEM;
        return 0;
 }
 
@@ -1449,6 +1445,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+               unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+                                                     dev->ctrl.page_size);
+               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+               nvmeq->sq_cmds_io = dev->cmb + offset;
+       }
+
        nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
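
As a side note on the second hunk: each I/O queue's submission queue gets
a page-aligned slice of the CMB, so the offset is deterministic per qid.
A small standalone sketch of that arithmetic follows; the queue depth and
page size are assumed example values, not taken from the patch.

#include <stdio.h>

/* sizeof(struct nvme_command) is 64 bytes, so SQ_SIZE(depth) = depth * 64 */
#define SQ_ENTRY_SIZE 64UL

static unsigned long roundup_to(unsigned long v, unsigned long a)
{
	return (v + a - 1) / a * a;	/* same result as the kernel's roundup() */
}

int main(void)
{
	unsigned long depth = 1024, page_size = 4096;	/* assumed example values */
	unsigned long sq_size = depth * SQ_ENTRY_SIZE;	/* SQ_SIZE(depth) */

	/* qid 0 is the admin queue; CMB slices start at I/O queue 1. */
	for (int qid = 1; qid <= 4; qid++) {
		unsigned long offset = (qid - 1) * roundup_to(sq_size, page_size);
		printf("qid %d -> CMB offset 0x%lx\n", qid, offset);
	}
	return 0;
}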