} else {
lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
LPFC_IOCB_STATUS_MASK);
- lpfc_ncmd->result = wcqe->parameter;
+ lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
* IO error is when the adapter rejects it. All other
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
+ case IOSTAT_LOCAL_REJECT:
+ /* Fall through to set the command's final state. */
+ if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_IOERR,
+ "6032 Delay Aborted cmd %p "
+ "nvme cmd %p, xri x%x, "
+ "xb %d\n",
+ lpfc_ncmd, nCmd,
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ bf_get(lpfc_wcqe_c_xb, wcqe));
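+ /* Fall through */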
default:
out_err:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
#endif
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
- nCmd->done(nCmd);
+
+ /* NVME targets need completion held off until the abort exchange
+ * completes.
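+ * (LPFC_SBUF_XBUSY is expected to mirror the XB bit from the
+ * WCQE, i.e. the adapter still owns the exchange.)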
+ */
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+ nCmd->done(nCmd);
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Call release with XB=1 to queue the IO into the abort list. */
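+ /* The buffer then sits on lpfc_abts_nvme_buf_list until the
+ * XRI_ABORTED CQE arrives and lpfc_sli4_nvme_xri_aborted()
+ * finishes the command (see below).
+ */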
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
spin_unlock(&phba->nvme_buf_list_put_lock);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(
- phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
- lpfc_ncmd->nvmeCmd = NULL;
list_add_tail(&lpfc_ncmd->list,
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
- int rrq_empty = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
spin_unlock(
&phba->sli4_hba.abts_nvme_buf_list_lock);
- rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
- if (ndlp) {
- lpfc_set_rrq_active(
- phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag,
- rxid, 1);
+ if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
- }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 XRI Aborted xri x%x tag x%x "
- "released\n",
- xri, lpfc_ncmd->cur_iocbq.iotag);
-
+ "6311 nvme_cmd %p xri x%x tag x%x "
+ "abort complete and xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once the abort completes, the buffer is returned via the put list.
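+ * Note: lpfc_release_nvme_buf() no longer clears nvmeCmd, so the
+ * pointer dereferenced below is still valid here.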
+ */
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
- if (rrq_empty)
- lpfc_worker_wake_up(phba);
return;
}
}