	NVME_RDMA_Q_CONNECTED = (1 << 0),
	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
	NVME_RDMA_Q_DELETING = (1 << 2),
+	NVME_RDMA_Q_LIVE = (1 << 3),
};
struct nvme_rdma_queue {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
+		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
	}
	return ret;
	if (ret)
		goto stop_admin_q;
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (ret)
		goto stop_admin_q;
	nvme_stop_keep_alive(&ctrl->ctrl);
-	for (i = 0; i < ctrl->queue_count; i++)
+	for (i = 0; i < ctrl->queue_count; i++) {
		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+	}
	if (ctrl->queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);
	return BLK_EH_HANDLED;
}
+/*
+ * We cannot accept any other command until the Connect command has completed.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+		struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+		if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+		    cmd->common.opcode != nvme_fabrics_command ||
+		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
+			return false;
+	}
+
+	return true;
+}
+
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	WARN_ON_ONCE(rq->tag < 0);
+	if (!nvme_rdma_queue_is_ready(queue, rq))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
	dev = queue->device->dev;
	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
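
As an aside, not part of the patch: the gate that nvme_rdma_queue_is_ready() adds can be boiled down to the small standalone model below. The struct and constant names (fake_queue, fake_cmd, FAKE_OPC_FABRICS, FAKE_FCTYPE_CONNECT) are invented for illustration, the bool field stands in for the NVME_RDMA_Q_LIVE bit, and the REQ_TYPE_DRV_PRIV cmd_type check is left out. It only demonstrates the rule stated in the comment above the helper: a queue that is not yet live admits nothing but a fabrics Connect command.

#include <stdbool.h>
#include <stdio.h>

enum { FAKE_OPC_FABRICS = 0x7f };	/* stand-in for nvme_fabrics_command */
enum { FAKE_FCTYPE_CONNECT = 0x01 };	/* stand-in for nvme_fabrics_type_connect */

struct fake_queue {
	bool live;			/* models the NVME_RDMA_Q_LIVE bit */
};

struct fake_cmd {
	unsigned char opcode;
	unsigned char fctype;
};

/* Same shape as nvme_rdma_queue_is_ready(): while the queue is not live,
 * only a fabrics Connect command may pass. */
static bool fake_queue_is_ready(const struct fake_queue *q,
				const struct fake_cmd *cmd)
{
	if (!q->live)
		return cmd->opcode == FAKE_OPC_FABRICS &&
		       cmd->fctype == FAKE_FCTYPE_CONNECT;
	return true;
}

int main(void)
{
	struct fake_queue q = { .live = false };
	struct fake_cmd connect = { .opcode = FAKE_OPC_FABRICS,
				    .fctype = FAKE_FCTYPE_CONNECT };
	struct fake_cmd io = { .opcode = 0x02, .fctype = 0 };

	printf("I/O before connect: %s\n",
	       fake_queue_is_ready(&q, &io) ? "admitted" : "requeued");
	printf("connect:            %s\n",
	       fake_queue_is_ready(&q, &connect) ? "admitted" : "requeued");

	q.live = true;	/* what set_bit(NVME_RDMA_Q_LIVE, ...) does once Connect completes */

	printf("I/O after connect:  %s\n",
	       fake_queue_is_ready(&q, &io) ? "admitted" : "requeued");
	return 0;
}

In the driver, the "requeued" case corresponds to queue_rq() returning BLK_MQ_RQ_QUEUE_BUSY so the block layer retries the request later, once the queue has been marked live.
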
	if (error)
		goto out_cleanup_queue;
+	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (error) {
		dev_err(ctrl->ctrl.device,