struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
void *tfd;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
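+ /* the command queue is referenced on timeout and in the CMD_WANT_SKB cleanup below */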
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
int cmd_idx;
int ret;
&trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans_pcie->cmd_queue].
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
}
if (cmd->resp_pkt) {
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
unsigned int timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[cmd->scd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[cmd->scd_queue];
struct iwl_host_cmd hcmd = {
.id = cmd_id,
.len = { sizeof(*cmd) },
int i;
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = &trans_pcie->txq[i];
+ struct iwl_txq *txq = trans_pcie->txq[i];
spin_lock_bh(&txq->lock);
- if (trans_pcie->txq[i].need_update) {
+ if (txq->need_update) {
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- trans_pcie->txq[i].need_update = false;
+ txq->need_update = false;
}
spin_unlock_bh(&txq->lock);
}
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
struct device *dev = trans->dev;
int i;
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
/* This can happen: start_hw, stop_device */
- if (!trans_pcie->txq)
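+ /* txq_memory also serves as the "TX queues allocated" flag */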
+ if (!trans_pcie->txq_memory)
return 0;
/* Unmap DMA from host system and free skb's */
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
/* Tx queues */
- if (trans_pcie->txq) {
+ if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
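+ /* drop the stale per-queue pointer; its backing memory is freed below */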
+ trans_pcie->txq[txq_id] = NULL;
+ }
}
- kfree(trans_pcie->txq);
- trans_pcie->txq = NULL;
+ kfree(trans_pcie->txq_memory);
+ trans_pcie->txq_memory = NULL;
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(trans_pcie->txq)) {
+ if (WARN_ON(trans_pcie->txq_memory)) {
ret = -EINVAL;
goto error;
}
goto error;
}
- trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
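+ /* one contiguous block still backs every TX queue; the txq[] pointers below refer into it */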
- if (!trans_pcie->txq) {
+ trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
goto error;
txq_id++) {
slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
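+ /* map this queue's pointer to its slot in the txq_memory block */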
+ trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ slots_num, txq_id);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
int txq_id, slots_num;
bool alloc = false;
- if (!trans_pcie->txq) {
+ if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
goto error;
txq_id++) {
slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
+ ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ slots_num, txq_id);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans_pcie->txq[txq_id].dma_addr >> 8);
+ trans_pcie->txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
int txq_id, slots_num;
bool alloc = false;
- if (!trans_pcie->txq) {
+ if (!trans_pcie->txq_memory) {
/* TODO: change this when moving to new TX alloc model */
ret = iwl_pcie_tx_alloc(trans);
if (ret)
txq_id++) {
slots_num = (txq_id == trans_pcie->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
+ ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, txq_id);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
int last_to_free;
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
int fifo = -1;
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
bool shared_mode)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
txq->ampdu = !shared_mode;
}
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id].frozen = false;
+ trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id].ampdu = false;
+ trans_pcie->txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
int cmd_idx;
int ret;
&trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
- trans_pcie->txq[trans_pcie->cmd_queue].
- entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
}
if (cmd->resp_pkt) {
u16 wifi_seq;
bool amsdu;
- txq = &trans_pcie->txq[txq_id];
+ txq = trans_pcie->txq[txq_id];
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))