if (qhandle == NULL)
return -ENOMEM;
- qhandle->cpu_id = smp_processor_id();
+ qhandle->cpu_id = raw_smp_processor_id();
qhandle->qidx = qidx;
/*
* NVME qidx == 0 is the admin queue, so both admin queue
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
if (lpfc_ncmd->cpu != cpu)
lpfc_printf_vlog(vport,
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
idx = lpfc_queue_info->index;
} else {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
idx = phba->sli4_hba.cpu_map[cpu].hdwq;
}
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
lpfc_ncmd->cpu = cpu;
if (idx != cpu)
* Use the CPU context list, from the MRQ the IO was received on
* (ctxp->idx), to save context structure.
*/
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
+ id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (ctxp->cpu != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- int id = smp_processor_id();
+ int id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (rsp->hwqid != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
- ctxp->oxid, ctxp->size, smp_processor_id());
+ ctxp->oxid, ctxp->size, raw_smp_processor_id());
if (!nvmebuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
lpfc_nvmeio_data(phba,
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
- xri, smp_processor_id(), 0);
+ xri, raw_smp_processor_id(), 0);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
- xri, smp_processor_id(), 1);
+ xri, raw_smp_processor_id(), 1);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
* be empty, thus it would need to be replenished with the
* context list from another CPU for this MRQ.
*/
- current_cpu = smp_processor_id();
+ current_cpu = raw_smp_processor_id();
current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
if (current_infop->nvmet_ctx_list_cnt) {
#endif
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
- oxid, size, smp_processor_id());
+ oxid, size, raw_smp_processor_id());
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
uint32_t sgl_size, cpu, idx;
int tag;
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
tag = blk_mq_unique_tag(cmnd->request);
idx = blk_mq_unique_tag_to_hwq(tag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
- cpu = smp_processor_id();
+ cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0390 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, smp_processor_id());
+ cqid, cq->queue_id, raw_smp_processor_id());
}
/**
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0363 Cannot schedule soft IRQ "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id, smp_processor_id());
+ cqid, cq->queue_id, raw_smp_processor_id());
}
/**
eqi = phba->sli4_hba.eq_info;
icnt = this_cpu_inc_return(eqi->icnt);
- fpeq->last_cpu = smp_processor_id();
+ fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
phba->cfg_irq_chann == 1 &&
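
Every hunk above makes the same substitution, smp_processor_id() -> raw_smp_processor_id(). In each call site the CPU number is only a best-effort hint (hardware queue selection, per-CPU debug counters), so a value that goes stale after a migration is harmless, while the checked smp_processor_id() would trip the CONFIG_DEBUG_PREEMPT "using smp_processor_id() in preemptible code" warning when the caller runs with preemption enabled. A minimal sketch of that pattern follows; the names (DEMO_CHECK_CPU_CNT, demo_cmpl_io, demo_count_completion) are hypothetical and not taken from lpfc.

#include <linux/smp.h>

#define DEMO_CHECK_CPU_CNT 128	/* hypothetical, mirrors LPFC_CHECK_CPU_CNT */

static unsigned long demo_cmpl_io[DEMO_CHECK_CPU_CNT];	/* hypothetical per-CPU counter */

/* Hypothetical helper; may be called with preemption enabled. */
static void demo_count_completion(void)
{
	/*
	 * smp_processor_id() would trigger the DEBUG_PREEMPT check here.
	 * The CPU number only selects a statistics slot, so a stale value
	 * after migration is harmless: use the raw variant instead.
	 */
	unsigned int cpu = raw_smp_processor_id();

	if (cpu < DEMO_CHECK_CPU_CNT)
		demo_cmpl_io[cpu]++;
}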