u32 agg_size = 0, mpdu_dens = 0;
if (!update || (flags & STA_MODIFY_QUEUES)) {
- add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
- if (flags & STA_MODIFY_QUEUES)
- add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ add_sta_cmd.tfd_queue_msk =
+ cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ } else {
+ WARN_ON(flags & STA_MODIFY_QUEUES);
+ }
}
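/*
 * Everything above branches on iwl_mvm_has_new_tx_api(). A minimal
 * sketch of the helper, assuming it keys off the transport's TFH
 * capability bit as it did around this time (verify against your
 * tree's mvm.h):
 */
static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
{
	/* a000 devices use the TFH, and with it the new TX API */
	return mvm->trans->cfg->use_tfh;
}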
switch (sta->bandwidth) {
u8 sta_id;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
int i;
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
unsigned long mq;
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
/* No free queue - we'll have to share */
if (queue <= 0) {
+ /* This shouldn't happen on new HW - it has 512 TX queues */
+ if (WARN(iwl_mvm_has_new_tx_api(mvm),
+ "No available queues for tid %d on sta_id %d\n",
+ tid, cfg.sta_id)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return queue;
+ }
+
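/*
 * The "512 queues" above is the TVQM queue space on a000 hardware
 * (IWL_MAX_TVQM_QUEUES, assuming that definition already exists in
 * this tree). Note that queue is <= 0 at this point, so the failed
 * allocation value itself is what gets returned to the caller.
 */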
queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
if (queue > 0) {
shared_queue = true;
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
spin_unlock_bh(&mvmsta->lock);
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return 0;
+
if (!shared_queue) {
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
if (ret)
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
int ssn;
int ret = true;
+ /* Queue sharing is disabled on the new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
- cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(0xffff);
if (addr)
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
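/*
 * Net effect, given that cmd starts out zeroed: the legacy DQA path
 * still sends STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX, while the
 * new TX API sends only STA_MODIFY_TID_DISABLE_TX, since per-queue
 * state is owned by the TVQM rather than by the ADD_STA command.
 */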
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+ /*
+ * On the new TX API, rate scaling (rs) and the BA manager are
+ * offloaded to firmware. For now, just don't support reconfiguring
+ * an existing queue.
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOTSUPP;
+
/*
* If reconfiguring an existing queue, it first must be
* drained
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
+
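/*
 * On the new TX path the transport allocates queues itself (via
 * iwl_trans_txq_alloc()), so when the free-queue scan above fails
 * there is nothing here worth reconfiguring; bail out with -ENOSPC
 * rather than falling through to the inactive-queue reuse below.
 */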
/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
};
int ret;
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
/* Send the enabling command if we need to */
if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
cfg->sta_id, cfg->tid)) {
- struct iwl_scd_txq_cfg_cmd cmd = {
+ struct iwl_tx_queue_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit,
.tid = cfg->tid,
};
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+ SCD_QUEUE_CFG, wdg_timeout);
+ return;
+ }
+
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
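/*
 * Why the legacy path now sends sizeof(struct iwl_scd_txq_cfg_cmd)
 * while cmd is declared as struct iwl_tx_queue_cfg_cmd: this relies on
 * the new command sharing its leading fields with the legacy SCD
 * command and only appending the TFD-queue DMA fields after them, so
 * truncating to the shorter size still yields a well-formed legacy
 * command. A sketch of the assumed common prefix (check fw-api in your
 * tree before relying on the exact layout):
 */
struct iwl_scd_txq_cfg_cmd {
	u8 token;
	u8 sta_id;
	u8 tid;
	u8 scd_queue;
	u8 action;
	u8 aggregate;
	u8 tx_fifo;
	u8 window;
	__le16 ssn;
	__le16 reserved;
} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */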
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags)
{
- struct iwl_scd_txq_cfg_cmd cmd = {
+ struct iwl_tx_queue_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
spin_unlock_bh(&mvm->queue_info_lock);
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(cmd), &cmd);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ iwl_trans_txq_free(mvm->trans, queue);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(cmd), &cmd);
+ } else {
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd),
+ &cmd);
+ }
+
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
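/*
 * For reference, the two transport entry points used above, roughly as
 * they appear in iwl-trans.h in this era (a sketch; the WARN/state
 * checks are abbreviated):
 */
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    struct iwl_tx_queue_cfg_cmd *cmd,
		    int cmd_id, unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
}

static inline void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}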
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
unsigned long now = jiffies;
int i;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)