cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
- spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
- spin_unlock(&cmd->cmd_lock);
cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

/* Memory Barrier */
wmb();

qlt_load_data_segments(&prm);

cmd->state = QLA_TGT_STATE_NEED_DATA;
- spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
- spin_unlock(&cmd->cmd_lock);
cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

/* Memory Barrier */
wmb();
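
Both hunks above (apparently qlt_xmit_response() and qlt_rdy_to_xfer() in qla_target.c) drop cmd_lock around the cmd_sent_to_fw store. The submission side does not need the lock for ordering: the wmb() that follows orders the bookkeeping stores before the request is handed to the firmware, and both call sites already hold the qpair hardware lock, which appears to be the rationale for the removal. As a self-contained illustration of that publish pattern, here is a userspace C11 sketch that uses a release/acquire pair where the driver uses wmb() plus the hardware doorbell; every name in it is illustrative and none comes from the driver:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the qla_tgt_cmd bookkeeping fields. */
struct demo_cmd {
	int state;            /* e.g. QLA_TGT_STATE_PROCESSED */
	int sent_to_fw;       /* analogue of cmd_sent_to_fw */
	_Atomic int doorbell; /* analogue of ringing the request queue */
};

/* Submission side: plain stores, then a release store standing in for
 * wmb() followed by the doorbell write. */
static void demo_submit(struct demo_cmd *c)
{
	c->state = 1;
	c->sent_to_fw = 1;
	atomic_store_explicit(&c->doorbell, 1, memory_order_release);
}

/* Completion side: the acquire load pairs with the release store, so
 * once the doorbell is seen, the earlier plain stores are visible
 * without any per-command lock. */
static int demo_poll(struct demo_cmd *c)
{
	if (atomic_load_explicit(&c->doorbell, memory_order_acquire))
		return c->sent_to_fw; /* reads 1, never a stale value */
	return -1; /* not submitted yet */
}

int main(void)
{
	struct demo_cmd c = { 0 };

	demo_submit(&c);
	printf("sent_to_fw = %d\n", demo_poll(&c));
	return 0;
}

The only property the sketch demonstrates is ordering: once the flag standing in for the doorbell is observed, the plain stores sequenced before it are visible, so those stores need no lock of their own. The remaining two hunks are the completion side, in tcm_qla2xxx.c.
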
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
cmd->cmd_in_wq = 0;
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
- spin_lock_irqsave(&cmd->cmd_lock, flags);
+ /* To do: protect all tgt_counters manipulations with proper locking. */
cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
cmd->cmd_sent_to_fw = 0;
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
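
With cmd_lock gone, the tgt_counters increment above runs unserialized; the newly added "To do" comment records exactly that. One conventional way to discharge the TODO without reintroducing a lock is to make the counters atomic (in kernel code that would be atomic64_t with atomic64_inc(); the driver does not do this as of this patch). The sketch below shows the idea in portable C11, with hypothetical names throughout:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical lock-free version of the two counters this patch
 * touches; concurrent completions cannot lose increments. */
struct demo_tgt_counters {
	atomic_ulong qla_core_ret_sta_ctio;
	atomic_ulong qla_core_ret_ctio;
};

static void demo_count_sta_ctio(struct demo_tgt_counters *t)
{
	/* Relaxed ordering suffices: these are statistics, and no
	 * other code synchronizes against their values. */
	atomic_fetch_add_explicit(&t->qla_core_ret_sta_ctio, 1,
				  memory_order_relaxed);
}

int main(void)
{
	struct demo_tgt_counters t = { 0 };

	demo_count_sta_ctio(&t);
	printf("qla_core_ret_sta_ctio = %lu\n",
	       atomic_load_explicit(&t.qla_core_ret_sta_ctio,
				    memory_order_relaxed));
	return 0;
}
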
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
cmd->cmd_in_wq = 0;
-
- spin_lock_irqsave(&cmd->cmd_lock, flags);
cmd->cmd_sent_to_fw = 0;
if (cmd->aborted) {
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
- spin_unlock_irqrestore(&cmd->cmd_lock, flags);
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {