spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (ctxp->ts_cmd_nvme) {
- ctxp->ts_cmd_nvme = ktime_get_ns();
+ /* NOTE: isr time stamp is stale when context is re-assigned */
+ if (ctxp->ts_isr_cmd) {
+ ctxp->ts_cmd_nvme = 0;
ctxp->ts_nvme_data = 0;
ctxp->ts_data_wqput = 0;
ctxp->ts_isr_data = 0;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (ctxp->ts_isr_cmd)
+ ctxp->ts_cmd_nvme = ktime_get_ns();
+#endif
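For orientation, the ts_* fields touched in the hunks above form a per-I/O latency trace through the target path: ts_isr_cmd anchors the trace and the remaining checkpoints are filled in as the command progresses. A sketch of the relevant struct lpfc_nvmet_rcv_ctx members follows; the field names are taken from the assignments in this patch, but the annotations are illustrative, not from the source:

	/* DEBUG_FS latency checkpoints, nanoseconds from ktime_get_ns() */
	uint64_t ts_isr_cmd;      /* command seen in ISR/CQ context */
	uint64_t ts_cmd_nvme;     /* command handed up to the nvmet layer */
	uint64_t ts_nvme_data;    /* data request received from nvmet */
	uint64_t ts_data_wqput;   /* data WQE posted to the work queue */
	uint64_t ts_isr_data;     /* data completion seen in ISR */
	uint64_t ts_data_nvme;    /* data completion returned to nvmet */
	uint64_t ts_nvme_status;  /* status request received from nvmet */
	uint64_t ts_status_wqput; /* status WQE posted to the work queue */
	uint64_t ts_isr_status;   /* status completion seen in ISR */
	uint64_t ts_status_nvme;  /* status completion returned to nvmet */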
/*
* The calling sequence should be:
* nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
* @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @isr_timestamp: interrupt timestamp, in nanoseconds (from ktime_get_ns()).
+ * @cqflag: nonzero if the CQ is under load and processing should be deferred.
*
* This routine is used for processing the WQE associated with an unsolicited
* event. It first determines whether there is an existing ndlp that matches
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
uint32_t idx,
struct rqb_dmabuf *nvmebuf,
- uint64_t isr_timestamp)
+ uint64_t isr_timestamp,
+ uint8_t cqflag)
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (isr_timestamp) {
+ if (isr_timestamp)
ctxp->ts_isr_cmd = isr_timestamp;
- ctxp->ts_cmd_nvme = ktime_get_ns();
- ctxp->ts_nvme_data = 0;
- ctxp->ts_data_wqput = 0;
- ctxp->ts_isr_data = 0;
- ctxp->ts_data_nvme = 0;
- ctxp->ts_nvme_status = 0;
- ctxp->ts_status_wqput = 0;
- ctxp->ts_isr_status = 0;
- ctxp->ts_status_nvme = 0;
- } else {
- ctxp->ts_cmd_nvme = 0;
- }
+ ctxp->ts_cmd_nvme = 0;
+ ctxp->ts_nvme_data = 0;
+ ctxp->ts_data_wqput = 0;
+ ctxp->ts_isr_data = 0;
+ ctxp->ts_data_nvme = 0;
+ ctxp->ts_nvme_status = 0;
+ ctxp->ts_status_wqput = 0;
+ ctxp->ts_isr_status = 0;
+ ctxp->ts_status_nvme = 0;
#endif
atomic_inc(&tgtp->rcv_fcp_cmd_in);
- lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+ /* check for cq processing load */
+ if (!cqflag) {
+ lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
+ return;
+ }
+
+ if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6325 Unable to queue work for oxid x%x. "
+ "FCP Drop IO [x%x x%x x%x]\n",
+ ctxp->oxid,
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out),
+ atomic_read(&tgtp->xmt_fcp_release));
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_defer_release(phba, ctxp);
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+ }
}
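The queue_work() path above relies on ctx_buf->defer_work having been initialized with a handler that replays the receive processing from process context. A minimal sketch of such a handler, assuming defer_work is a struct work_struct embedded in struct lpfc_nvmet_ctxbuf and wired up once at I/O context setup via INIT_WORK() (the handler name here follows the pattern the hunk implies and is an assumption, not part of this patch):

	/* Sketch: work handler scheduled by the cqflag path above.  It
	 * recovers the ctxbuf from the work item and runs the same receive
	 * processing that the !cqflag fast path calls inline.
	 */
	static void
	lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
	{
		struct lpfc_nvmet_ctxbuf *ctx_buf =
			container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
	}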
/**
* @phba: pointer to lpfc hba data structure.
* @idx: relative index of MRQ vector
* @nvmebuf: pointer to received nvme data structure.
+ * @isr_timestamp: interrupt timestamp, in nanoseconds (from ktime_get_ns()).
+ * @cqflag: nonzero if the CQ is under load and processing should be deferred.
*
* This routine is used to process an unsolicited event received from a SLI
* (Service Level Interface) ring. The actual processing of the data buffer
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint32_t idx,
struct rqb_dmabuf *nvmebuf,
- uint64_t isr_timestamp)
+ uint64_t isr_timestamp,
+ uint8_t cqflag)
{
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
}
- lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
- isr_timestamp);
+ lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
/**
goto rearm_and_exit;
/* Process all the entries to the CQ */
+ cq->q_flag = 0;
cqe = lpfc_sli4_cq_get(cq);
while (cqe) {
-#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
- if (phba->ktime_on)
- cq->isr_timestamp = ktime_get_ns();
- else
- cq->isr_timestamp = 0;
-#endif
workposted |= handler(phba, cq, cqe);
__lpfc_sli4_consume_cqe(phba, cq, cqe);
consumed = 0;
}
+ if (count == LPFC_NVMET_CQ_NOTIFY)
+ cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
+
cqe = lpfc_sli4_cq_get(cq);
}
if (count >= phba->cfg_cq_poll_threshold) {
goto drop;
if (fc_hdr->fh_type == FC_TYPE_FCP) {
- dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
+ dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
- phba, idx, dma_buf,
- cq->isr_timestamp);
+ phba, idx, dma_buf, cq->isr_timestamp,
+ cq->q_flag & HBA_NVMET_CQ_NOTIFY);
return false;
}
drop:
}
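Taken together, the flow is: once the CQ processing loop has consumed LPFC_NVMET_CQ_NOTIFY entries in a single pass, it latches HBA_NVMET_CQ_NOTIFY in cq->q_flag; the RQ handler shown above then forwards that bit as cqflag, so lpfc_nvmet_unsol_fcp_buffer() defers the command to phba->wq instead of processing it in CQ context. A sketch of the assumed definitions; the names appear in this patch, but the values here are illustrative only:

	/* cq->q_flag bit: CQ marked as under heavy NVMET load */
	#define HBA_NVMET_CQ_NOTIFY	0x1	/* illustrative value */

	/* CQE count in one pass that marks the CQ as loaded */
	#define LPFC_NVMET_CQ_NOTIFY	4	/* illustrative value */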
work_cq:
+#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0363 Cannot schedule soft IRQ "