This patch improves readability of the qla2xxx source code by leaving exactly one blank line between each block of local variable declarations and the statements that follow it, and by collapsing stray blank lines inside declaration blocks.
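
A minimal before/after sketch of the rule being applied (hypothetical
function and helper names, not taken verbatim from the driver; the real
hunks follow below):

	/* Before: declarations run straight into the first statement. */
	static int qla_example(struct scsi_qla_host *vha)
	{
		struct qla_hw_data *ha = vha->hw;
		int rval;
		rval = qla_do_work(ha);	/* hypothetical helper */
		return rval;
	}

	/* After: exactly one blank line follows the declarations. */
	static int qla_example(struct scsi_qla_host *vha)
	{
		struct qla_hw_data *ha = vha->hw;
		int rval;

		rval = qla_do_work(ha);	/* hypothetical helper */
		return rval;
	}
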
Cc: Himanshu Madhani <hmadhani@marvell.com>
Cc: Giridhar Malavali <gmalavali@marvell.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Acked-by: Himanshu Madhani <hmadhani@marvell.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
int type;
uint32_t idc_control;
uint8_t *tmp_data = NULL;
+
if (off != 0)
return -EINVAL;
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
}
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
return scnprintf(buf, PAGE_SIZE, "%d\n",
vha->qla_stats.total_isp_aborts);
}
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
int prot = 0, guard;
+
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
uint32_t count;
dma_addr_t sfp_dma;
void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
struct qla_status_reg *sr = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
struct qla_status_reg *sr = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
struct qla_i2c_access *i2c = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
struct qla_i2c_access *i2c = (void *)bsg;
dma_addr_t sfp_dma;
uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
if (!sfp) {
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
EXT_STATUS_NO_MEMORY;
qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
{
scsi_qla_host_t *vha = inode->i_private;
+
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
+
return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
}
qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
{
struct scsi_qla_host *vha = inode->i_private;
+
return single_open(file, qla_dfs_tgt_counters_show, vha);
}
int ret, rval;
uint16_t mb[MAILBOX_REGISTER_COUNT];
struct qla_hw_data *ha = vha->hw;
+
ret = QLA_SUCCESS;
if (vha->flags.management_server_logged_in)
return ret;
{
ms_iocb_entry_t *ms_pkt;
struct qla_hw_data *ha = vha->hw;
+
ms_pkt = ha->ms_iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
+
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
if (!e)
return QLA_FUNCTION_FAILED;
return;
{
unsigned long flags;
+
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
if (fcport) {
int rval;
unsigned long flags, save_flags;
struct qla_hw_data *ha = vha->hw;
+
rval = QLA_SUCCESS;
/* Get Initiator ID */
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
} else {
const char *state = qla83xx_dev_state_to_string(dev_state);
+
ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
/* SV: XXX: Is timeout required here? */
qla84xx_put_chip(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+
if (ha->cs84xx)
kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}
if (sp) {
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
sgl = scsi_prot_sglist(cmd);
vha = sp->vha;
difctx = sp->u.scmd.ctx;
cmd_pkt->entry_status = (uint8_t) rsp->id;
} else {
struct cmd_type_7 *cmd_pkt;
+
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED(
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
void *wwpn = ha->init_cb->port_name;
+
memcpy(vha->port_name, wwpn, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
struct fc_bsg_reply *bsg_reply;
sts_entry_t *sts;
struct sts_entry_24xx *sts24;
+
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
/* Adjust ring index */
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
} else {
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
int offset = 0, size = MINIDUMP_SIZE_36K;
+
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
"Entered %s.\n", __func__);
{
int rval;
unsigned long flags;
+
rval = QLA_SUCCESS;
flags = vha->dpc_flags;
fx_iocb.flags = fxio->u.fxiocb.flags;
} else {
struct scatterlist *sg;
+
bsg_job = sp->u.bsg_job;
bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
if (unlikely(!fd->sqid)) {
struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
QLA82XX_ADDR_OCM0_MAX)) {
unsigned int temp1;
+
if ((addr & 0x00ff800) == 0xff800) {
ql_log(ql_log_warn, vha, 0xb004,
"%s: QM access not handled.\n", __func__);
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
uint32_t val;
+
qla82xx_wait_rom_busy(ha);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
if (qla82xx_flash_set_write_enable(ha))
return -1;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
if (qla82xx_wait_rom_done(ha)) {
ql_log(ql_log_warn, vha, 0xb00f,
long memaddr = BOOTLD_START;
u64 data;
u32 high, low;
+
size = (IMAGE_START - BOOTLD_START) / 8;
for (i = 0; i < size; i++) {
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
ha->isp_ops->disable_intrs(ha);
return QLA_SUCCESS;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
/* Load return mailbox registers. */
qla82xx_enable_intrs(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_mbx_intr_enable(vha);
spin_lock_irq(&ha->hardware_lock);
if (IS_QLA8044(ha))
qla82xx_disable_intrs(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_mbx_intr_disable(vha);
spin_lock_irq(&ha->hardware_lock);
if (IS_QLA8044(ha))
int rval;
struct qla_hw_data *ha = vha->hw;
+
qla82xx_idc_lock(ha);
rval = qla82xx_mbx_beacon_ctl(vha, 1);
int rval;
struct qla_hw_data *ha = vha->hw;
+
qla82xx_idc_lock(ha);
rval = qla82xx_mbx_beacon_ctl(vha, 0);
uint16_t count;
uint32_t poll, mask, modify_mask;
uint32_t wait_count = 0;
-
uint32_t *data_ptr = *d_ptr;
-
struct qla8044_minidump_entry_rddfe *rddfe;
+
rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
addr1 = rddfe->addr_1;
struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
+
rsp->qpair = ha->base_qpair;
rsp->req = req;
ha->base_qpair->hw = ha;
struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
GFP_KERNEL);
if (!ha->req_q_map) {
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
int prot = 0, guard;
+
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
qla2xxx_wake_dpc(base_vha);
} else {
int now;
+
if (rport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
"%s %8phN. rport %p roles %x\n",
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+
ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
"Trying force recovery of the IDC lock.\n");
/* Send marker if required */
if (unlikely(vha->marker_needed != 0)) {
int rc = qla2x00_issue_marker(vha, vha_locked);
+
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt, vha, 0xe03d,
"qla_target(%d): issue_marker() failed\n",
struct imm_ntfy_from_isp *ntfy, int type)
{
struct qla_work_evt *e;
+
e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
if (!e)
return QLA_FUNCTION_FAILED;
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
+
own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
if (con) {
res = -ENOENT;
for (i = 0; i < entries; i++) {
struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+
if ((gid->al_pa == s_id[2]) &&
(gid->area == s_id[1]) &&
(gid->domain == s_id[0])) {
{
struct qla_hw_data *ha;
struct qla_qpair *qpair;
+
if (!cmd->sg_mapped)
return;
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+
term = !(c->flags &
cpu_to_le16(OF_TERM_EXCH));
} else
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
if (op_key == key) {
op->aborted = true;
count++;
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+
if (cmd_key == key) {
cmd->aborted = 1;
count++;
if (sess != NULL) {
bool delete = false;
int sec;
+
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
case ELS_ADISC:
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
&tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
case IMM_NTFY_LIP_LINK_REINIT:
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
"qla_target(%d): LINK REINIT (loop %#x, "
"subcode %x)\n", vha->vp_idx,
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
{
struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
int rc;
+
if (atio->u.isp2x.status !=
cpu_to_le16(ATIO_CDB_VALID)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05e,
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
case NOTIFY_ACK_TYPE:
if (tgt->notify_ack_expected > 0) {
struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
ql_dbg(ql_dbg_tgt, vha, 0xe036,
"NOTIFY_ACK seq %08x status %x\n",
le16_to_cpu(entry->u.isp2x.seq_id),
if (rc == -ENOENT) {
qlt_port_logo_t logo;
+
sid_to_portid(s_id, &logo.id);
logo.cmd_count = 1;
qlt_send_first_logo(vha, &logo);
unsigned long flags;
struct qla_qpair *qpair = ha->queue_pair_map[i];
+
h = &tgt->qphints[i + 1];
INIT_LIST_HEAD(&h->hint_elem);
if (qpair) {