* @meta: array of metadata for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
* @skbs: array of per-TFD driver data
+ * @lock: queue lock
* @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write index
* @sched_retry: indicates the queue has high-throughput aggregation (HT AGG) enabled
struct iwl_device_cmd **cmd;
struct iwl_cmd_meta *meta;
struct sk_buff **skbs;
+ spinlock_t lock;
unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
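/*
 * Illustrative sketch, not part of the patch: txq->lock now serializes
 * all ring-pointer updates for a single queue. A helper that advances
 * the read pointer (hypothetical function name, real fields) would be:
 *
 *	static inline void iwl_txq_advance_read(struct iwl_tx_queue *txq)
 *	{
 *		spin_lock_bh(&txq->lock);
 *		txq->q.read_ptr = iwl_queue_inc_wrap(txq->q.read_ptr,
 *						     txq->q.n_bd);
 *		spin_unlock_bh(&txq->lock);
 *	}
 */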
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ lockdep_assert_held(&txq->lock);
+
iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
/* free SKB */
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
- unsigned long flags;
u32 idx;
u16 copy_size, cmd_size;
bool is_ct_kill = false;
return -EIO;
}
- spin_lock_irqsave(&trans->hcmd_lock, flags);
+ spin_lock_bh(&txq->lock);
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
is_ct_kill = iwl_check_for_ct_kill(priv(trans));
iwl_txq_update_write_ptr(trans, txq);
out:
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock_bh(&txq->lock);
return idx;
}
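/*
 * Editorial note (assumption, not stated in the patch): replacing
 * spin_lock_irqsave(&trans->hcmd_lock, ...) with spin_lock_bh() is only
 * safe if txq->lock is never taken from hard-IRQ context. Here the other
 * acquirer is the RX tasklet (a softirq), so disabling bottom halves is
 * enough to avoid deadlock against this path.
 */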
struct iwl_queue *q = &txq->q;
int nfreed = 0;
+ lockdep_assert_held(&txq->lock);
+
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
"index %d is out of range [0-%d] %d %d.\n", __func__,
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
- unsigned long flags;
/* If a Tx command is being handled and it isn't in the actual
 * command queue then a command routing bug has been introduced
return;
}
+ spin_lock(&txq->lock);
+
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
rxb->page = NULL;
}
- spin_lock_irqsave(&trans->hcmd_lock, flags);
-
iwl_hcmd_queue_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
meta->flags = 0;
- spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+ spin_unlock(&txq->lock);
}
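/*
 * Editorial note: plain spin_lock()/spin_unlock() (no _bh, no _irqsave)
 * is used in the completion path on the assumption that it already runs
 * in tasklet (softirq) context, where bottom halves are disabled.
 */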
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
if (WARN_ON(txq_id == trans->shrd->cmd_queue))
return 0;
+ lockdep_assert_held(&txq->lock);
+
/* Since we free until 'index', _not_ inclusive, the entry just before
 * index is the last one we will free; the entry at index must still be in use. */
last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
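/*
 * Worked example (illustrative): with q->n_bd == 256 and index == 0,
 * iwl_queue_dec_wrap(0, 256) yields 255, so the last entry freed is the
 * one just before 'index', wrapping around the ring.
 */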
if (ret)
return ret;
+ spin_lock_init(&txq->lock);
+
/*
* Tell the NIC where to find the circular buffer of Tx Frame Descriptors
* for the given Tx queue, and enable the DMA channel used for that queue.
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
enum dma_data_direction dma_dir;
- unsigned long flags;
- spinlock_t *lock;
if (!q->n_bd)
return;
/* In the command queue, all the TBs are mapped as BIDI
* so unmap them as such.
*/
- if (txq_id == trans->shrd->cmd_queue) {
+ if (txq_id == trans->shrd->cmd_queue)
dma_dir = DMA_BIDIRECTIONAL;
- lock = &trans->hcmd_lock;
- } else {
+ else
dma_dir = DMA_TO_DEVICE;
- lock = &trans->shrd->sta_lock;
- }
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
/* The read_ptr needs to be bounded by q->n_window */
iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(&txq->lock);
}
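/*
 * Editorial note, hedged: the direction passed at unmap time must match
 * the one used at map time; command-queue buffers were mapped
 * DMA_BIDIRECTIONAL (on the assumption the device may write back into
 * them), while data frames are device-bound and use DMA_TO_DEVICE.
 */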
/**
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ spin_lock(&txq->lock);
+
/* In AGG mode, the index in the ring must correspond to the WiFi
 * sequence number. This is a HW requirement that helps the SCD
 * (scheduler) parse the block ack (BA).
&dev_cmd->hdr, firstlen,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
- return -1;
+ goto out_err;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
dma_unmap_addr(out_meta, mapping),
dma_unmap_len(out_meta, len),
DMA_BIDIRECTIONAL);
- return -1;
+ goto out_err;
}
}
iwl_stop_queue(trans, txq, "Queue is full");
}
}
+ spin_unlock(&txq->lock);
return 0;
+ out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
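/*
 * Design note: with the TX path now running under txq->lock, the DMA
 * mapping failure paths can no longer simply 'return -1'; routing them
 * through out_err guarantees the lock is dropped on every exit.
 */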
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
+ spin_lock(&txq->lock);
+
txq->time_stamp = jiffies;
if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
"agg_txq[sta_id[tid] %d", txq_id,
trans_pcie->agg_txq[sta_id][tid]);
+ spin_unlock(&txq->lock);
return 1;
}
status != TX_STATUS_FAIL_PASSIVE_NO_RX))
iwl_wake_queue(trans, txq, "Packets reclaimed");
}
+
+ spin_unlock(&txq->lock);
return 0;
}
trans->ops = &trans_ops_pcie;
trans->shrd = shrd;
trans_pcie->trans = trans;
- spin_lock_init(&trans->hcmd_lock);
spin_lock_init(&trans_pcie->irq_lock);
* W/A (workaround) - seems to solve weird behavior. We need to remove this if we
* @ops: pointer to iwl_trans_ops
* @op_mode: pointer to the op_mode
* @shrd: pointer to iwl_shared which holds shared data from the upper layer
- * @hcmd_lock: protects HCMD
* @reg_lock: protects hw register access
* @dev: pointer to struct device * that represents the device
* @irq: the irq number for the device
struct iwl_op_mode *op_mode;
struct iwl_shared *shrd;
enum iwl_trans_state state;
- spinlock_t hcmd_lock;
spinlock_t reg_lock;
struct device *dev;