iwlwifi: mvm: add traffic condition monitoring (TCM)
authorLuca Coelho <luciano.coelho@intel.com>
Thu, 12 Apr 2018 13:15:07 +0000 (16:15 +0300)
committerLuca Coelho <luciano.coelho@intel.com>
Fri, 20 Apr 2018 07:57:16 +0000 (10:57 +0300)
The traffic condition monitor (TCM) gathers data about the traffic load
and other conditions, and can be used to make decisions regarding
latency, throughput, etc.  This patch introduces the code and data
structures to collect this data for future use.

Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c

index 96b52a275ee399ba3677378df3335f0d98cd4fbb..2763fc69f04b8d427d429521317658082d7af420 100644 (file)
 #define IWL_MVM_PARSE_NVM                      0
 #define IWL_MVM_ADWELL_ENABLE                  1
 #define IWL_MVM_ADWELL_MAX_BUDGET              0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH         10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH           50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH       100 /* packets/10 seconds */
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index 2efe9b099556d862c946f2aeaff9746cfc3dded5..80a9a7cb83bebc26c62be6fc388192e5e3e7f587 100644 (file)
@@ -1097,6 +1097,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
        /* make sure the d0i3 exit work is not pending */
        flush_work(&mvm->d0i3_exit_work);
+       iwl_mvm_pause_tcm(mvm, true);
 
        iwl_fw_runtime_suspend(&mvm->fwrt);
 
@@ -2014,6 +2015,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 
        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
+       iwl_mvm_resume_tcm(mvm);
+
        iwl_fw_runtime_resume(&mvm->fwrt);
 
        return ret;
@@ -2042,6 +2045,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 
        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
 
+       iwl_mvm_pause_tcm(mvm, true);
+
        iwl_fw_runtime_suspend(&mvm->fwrt);
 
        /* start pseudo D3 */
@@ -2104,6 +2109,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
 
+       iwl_mvm_resume_tcm(mvm);
+
        iwl_fw_runtime_resume(&mvm->fwrt);
 
        mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
index f7fcf700196bc979ad235b049fef17f2decdf1da..798605c4f1227d7f4e34367456dcb58885cda2ae 100644 (file)
@@ -269,6 +269,8 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
                         mvmvif->id, mvmvif->color);
        pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
                         vif->bss_conf.bssid);
+       pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+                        mvm->tcm.result.load[mvmvif->id]);
        pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
        for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
                pos += scnprintf(buf+pos, bufsz-pos,
index d2cf751db68dcc8548919fbef9acc7d9b62e3fdb..72bab44082ea983f35e0b4e44a1ace9ab1329c03 100644 (file)
@@ -92,6 +92,8 @@
 #include "fw/acpi.h"
 #include "fw/debugfs.h"
 
+#include <linux/average.h>
+
 #define IWL_MVM_MAX_ADDRESSES          5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
@@ -595,6 +597,51 @@ enum iwl_mvm_tdls_cs_state {
        IWL_MVM_TDLS_SW_ACTIVE,
 };
 
+/*
+ * enum iwl_mvm_traffic_load - traffic-load bucket derived from airtime use
+ * (bucketed by the IWL_MVM_TCM_LOAD_MEDIUM/HIGH_THRESH percentages)
+ */
+enum iwl_mvm_traffic_load {
+       IWL_MVM_TRAFFIC_LOW,
+       IWL_MVM_TRAFFIC_MEDIUM,
+       IWL_MVM_TRAFFIC_HIGH,
+};
+
+/* EWMA over the RX rate, used for U-APSD non-aggregation detection */
+DECLARE_EWMA(rate, 16, 16)
+
+/* per-MAC traffic monitoring counters, cleared every sampling period */
+struct iwl_mvm_tcm_mac {
+       struct {
+               u32 pkts[IEEE80211_NUM_ACS]; /* TX packets queued, per AC */
+               u32 airtime; /* accumulated TX wireless_media_time */
+       } tx;
+       struct {
+               u32 pkts[IEEE80211_NUM_ACS]; /* RX data packets, per AC */
+               u32 airtime; /* from RX frame time and FW statistics */
+               u32 last_ampdu_ref; /* count A-MPDU airtime only once */
+       } rx;
+       struct {
+               /* track AP's transfer in client mode */
+               u64 rx_bytes;
+               struct ewma_rate rate;
+               bool detected;
+       } uapsd_nonagg_detect; /* declared here, not yet filled in */
+};
+
+/* traffic condition monitoring state, see iwl_mvm_recalc_tcm() */
+struct iwl_mvm_tcm {
+       struct delayed_work work;
+       spinlock_t lock; /* used when time elapsed */
+       unsigned long ts; /* timestamp when period ends */
+       unsigned long ll_ts; /* last low-latency (re-)evaluation */
+       unsigned long uapsd_nonagg_ts;
+       bool paused; /* no data gathering while set (suspend/scan/d0i3) */
+       struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+       /* latest calculation results, applied by the TCM work */
+       struct {
+               u32 elapsed; /* milliseconds for this TCM period */
+               u32 airtime[NUM_MAC_INDEX_DRIVER];
+               enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+               enum iwl_mvm_traffic_load global_load;
+               bool low_latency[NUM_MAC_INDEX_DRIVER];
+               bool change[NUM_MAC_INDEX_DRIVER];
+               bool global_change;
+       } result;
+};
+
 /**
  * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
  * @head_sn: reorder window head sn
@@ -978,6 +1025,9 @@ struct iwl_mvm {
         */
        bool temperature_test;  /* Debug test temperature is enabled */
 
+       unsigned long bt_coex_last_tcm_ts;
+       struct iwl_mvm_tcm tcm;
+
        struct iwl_time_quota_cmd last_quota_cmd;
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -1906,6 +1956,15 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
 
 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
 
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
index 224bfa1bcf53e8443e426b85bcdcd834a25e7b06..6a3f557543e4d4efb09ed406bd756d18659991ac 100644 (file)
@@ -667,6 +667,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
 
+       spin_lock_init(&mvm->tcm.lock);
+       INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+       mvm->tcm.ts = jiffies;
+       mvm->tcm.ll_ts = jiffies;
+       mvm->tcm.uapsd_nonagg_ts = jiffies;
+
        INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
 
        /*
@@ -859,6 +865,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
+       cancel_delayed_work_sync(&mvm->tcm.work);
+
        iwl_mvm_tof_clean(mvm);
 
        mutex_destroy(&mvm->mutex);
@@ -1432,6 +1440,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
                mvm->d0i3_offloading = false;
        }
 
+       iwl_mvm_pause_tcm(mvm, true);
        /* make sure we have no running tx while configuring the seqno */
        synchronize_net();
 
@@ -1615,6 +1624,7 @@ out:
        /* the FW might have updated the regdomain */
        iwl_mvm_update_changed_regdom(mvm);
 
+       iwl_mvm_resume_tcm(mvm);
        iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
        mutex_unlock(&mvm->mutex);
 }
index d26833c5ce1fc945a2060c44f83a34717330d1e7..be80294349c3143f4b6915290c3ab81217a79a73 100644 (file)
@@ -254,6 +254,39 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
        return 0;
 }
 
+/*
+ * Account a received data frame to the TCM counters of the station's MAC:
+ * bump the per-AC packet count and add the PHY frame time to the RX
+ * airtime (only once per A-MPDU, tracked via mvm->ampdu_ref).  Also kick
+ * the TCM work if a full sampling period has elapsed.
+ *
+ * Note: @len and @rate_n_flags are currently unused; they are kept in the
+ * signature for the upcoming U-APSD non-aggregation detection.
+ */
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+                                 struct ieee80211_sta *sta,
+                                 struct ieee80211_hdr *hdr, u32 len,
+                                 struct iwl_rx_phy_info *phy_info,
+                                 u32 rate_n_flags)
+{
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_mvm_tcm_mac *mdata;
+       int mac;
+       int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+       if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+               schedule_delayed_work(&mvm->tcm.work, 0);
+       mdata = &mvm->tcm.data[mac];
+       mdata->rx.pkts[ac]++;
+
+       /* count the airtime only once for each ampdu */
+       if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+               mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+               mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+       }
+}
+
 static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
                            struct sk_buff *skb,
                            u32 status)
@@ -408,6 +441,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                        NULL);
                }
 
+               if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+                   !is_multicast_ether_addr(hdr->addr1) &&
+                   ieee80211_is_data(hdr->frame_control))
+                       iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+                                             rate_n_flags);
+
                if (ieee80211_is_data(hdr->frame_control))
                        iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
        }
@@ -654,7 +693,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        int expected_size;
        int i;
        u8 *energy;
-       __le32 *bytes, *air_time;
+       __le32 *air_time;
        __le32 flags;
 
        if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -729,13 +768,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
 
                energy = (void *)&v11->load_stats.avg_energy;
-               bytes = (void *)&v11->load_stats.byte_count;
                air_time = (void *)&v11->load_stats.air_time;
        } else {
                struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
 
                energy = (void *)&stats->load_stats.avg_energy;
-               bytes = (void *)&stats->load_stats.byte_count;
                air_time = (void *)&stats->load_stats.air_time;
        }
 
@@ -752,6 +789,23 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                sta->avg_energy = energy[i];
        }
        rcu_read_unlock();
+
+       /*
+        * Don't update in case the statistics are not cleared, since
+        * we will end up counting twice the same airtime, once in TCM
+        * request and once in statistics notification.
+        */
+       if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
+               return;
+
+       spin_lock(&mvm->tcm.lock);
+       for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+               struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+               u32 airtime = le32_to_cpu(air_time[i]);
+
+               mdata->rx.airtime += airtime;
+       }
+       spin_unlock(&mvm->tcm.lock);
 }
 
 void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
index 4a4ccfd11e5b0c3702d2f32cd675877fc5a80455..f9cd7575b422e01118ddf35beb46f1899de6f376 100644 (file)
@@ -941,6 +941,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                               IWL_RX_MPDU_REORDER_BAID_MASK) >>
                               IWL_RX_MPDU_REORDER_BAID_SHIFT);
 
+               if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+                   !is_multicast_ether_addr(hdr->addr1) &&
+                   ieee80211_is_data(hdr->frame_control) &&
+                   time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+                       schedule_delayed_work(&mvm->tcm.work, 0);
+
                /*
                 * We have tx blocked stations (with CS bit). If we heard
                 * frames from a blocked station on a new channel we can
index b31f0ffbbbf00494a1ffe2855d69e95a43f028af..cd7ae6976935896990489aa1f747866c4746bc3d 100644 (file)
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-enum iwl_mvm_traffic_load {
-       IWL_MVM_TRAFFIC_LOW,
-       IWL_MVM_TRAFFIC_MEDIUM,
-       IWL_MVM_TRAFFIC_HIGH,
-};
-
 #define IWL_SCAN_DWELL_ACTIVE          10
 #define IWL_SCAN_DWELL_PASSIVE         110
 #define IWL_SCAN_DWELL_FRAGMENTED      44
@@ -437,6 +431,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
                ieee80211_scan_completed(mvm->hw, &info);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
                cancel_delayed_work(&mvm->scan_timeout_dwork);
+               iwl_mvm_resume_tcm(mvm);
        } else {
                IWL_ERR(mvm,
                        "got scan complete notification but no scan is running\n");
@@ -1568,6 +1563,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (ret)
                return ret;
 
+       iwl_mvm_pause_tcm(mvm, false);
+
        ret = iwl_mvm_send_cmd(mvm, &hcmd);
        if (ret) {
                /* If the scan failed, it usually means that the FW was unable
@@ -1575,6 +1572,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 * should try to send the command again with different params.
                 */
                IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+               iwl_mvm_resume_tcm(mvm);
                return ret;
        }
 
@@ -1711,6 +1709,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                mvm->scan_vif = NULL;
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
                cancel_delayed_work(&mvm->scan_timeout_dwork);
+               iwl_mvm_resume_tcm(mvm);
        } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
                ieee80211_sched_scan_stopped(mvm->hw);
                mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
index 795065974d780a512f31a091c8c5d755dfc9a36e..f06a0ee7be28d58cb58282c91a91bf4dbf69ea71 100644 (file)
@@ -930,6 +930,32 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
        return false;
 }
 
+/*
+ * Add TX airtime (the FW-reported wireless_media_time) to the station's
+ * MAC TCM counters, and kick the TCM work if a sampling period elapsed.
+ * Nothing is gathered while TCM is paused.
+ */
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+                              struct iwl_mvm_sta *mvmsta,
+                              int airtime)
+{
+       int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+       struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+       if (mvm->tcm.paused)
+               return;
+
+       if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+               schedule_delayed_work(&mvm->tcm.work, 0);
+
+       mdata->tx.airtime += airtime;
+}
+
+/*
+ * Count a queued TX data packet for TCM on the station's MAC, per AC.
+ * Mirror iwl_mvm_tx_airtime(): skip accounting while TCM is paused and
+ * kick the TCM work when a full sampling period has elapsed (the original
+ * version counted even while paused, unlike the RX path and the airtime
+ * accounting).
+ */
+static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+                                 struct iwl_mvm_sta *mvmsta, int tid)
+{
+       u32 ac = tid_to_mac80211_ac[tid];
+       int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+       struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+       if (mvm->tcm.paused)
+               return;
+
+       if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+               schedule_delayed_work(&mvm->tcm.work, 0);
+
+       mdata->tx.pkts[ac]++;
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -1067,6 +1093,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        spin_unlock(&mvmsta->lock);
 
+       iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+
        return 0;
 
 drop_unlock_sta:
@@ -1469,6 +1497,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        if (!IS_ERR(sta)) {
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
+               iwl_mvm_tx_airtime(mvm, mvmsta,
+                                  le16_to_cpu(tx_resp->wireless_media_time));
+
                if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
                        struct iwl_mvm_tid_data *tid_data =
                                &mvmsta->tid_data[tid];
@@ -1610,6 +1641,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                        le16_to_cpu(tx_resp->wireless_media_time);
                mvmsta->tid_data[tid].lq_color =
                        TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+               iwl_mvm_tx_airtime(mvm, mvmsta,
+                                  le16_to_cpu(tx_resp->wireless_media_time));
        }
 
        rcu_read_unlock();
@@ -1800,6 +1833,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                                           le32_to_cpu(ba_res->tx_rate));
                }
 
+               iwl_mvm_tx_airtime(mvm, mvmsta,
+                                  le32_to_cpu(ba_res->wireless_time));
 out_unlock:
                rcu_read_unlock();
 out:
index d99d9ea78e4cf82895a006b8bf9b6e86cd6ce083..27905f3fe3baa03758f67fd91e835071d8ea9104 100644 (file)
@@ -1429,6 +1429,237 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
                                sta->addr, tid);
 }
 
+/*
+ * Return the percentage of @elapsed spent on the air.  @airtime is in
+ * usec and @elapsed in msec, hence the USEC_PER_MSEC scaling; returns 0
+ * when no time has elapsed (avoids dividing by zero).
+ */
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+       if (!elapsed)
+               return 0;
+
+       return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
+
+/* map an airtime percentage to a low/medium/high traffic-load bucket */
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+       u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+       if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+               return IWL_MVM_TRAFFIC_HIGH;
+       if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+               return IWL_MVM_TRAFFIC_MEDIUM;
+
+       return IWL_MVM_TRAFFIC_LOW;
+}
+
+struct iwl_mvm_tcm_iter_data {
+       struct iwl_mvm *mvm;
+       bool any_sent; /* set once any interface was actually updated */
+};
+
+/*
+ * Apply the TCM result to one interface: if its traffic-based
+ * low-latency state changed, push a low-latency update (which also sends
+ * the traffic load and updates quotas); otherwise just refresh quotas.
+ */
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_tcm_iter_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+       if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+               return;
+
+       low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+       /* nothing changed for this interface - only refresh the quotas */
+       if (!mvm->tcm.result.change[mvmvif->id] &&
+           prev == low_latency) {
+               iwl_mvm_update_quotas(mvm, false, NULL);
+               return;
+       }
+
+       if (prev != low_latency) {
+               /* this sends traffic load and updates quota as well */
+               iwl_mvm_update_low_latency(mvm, vif, low_latency,
+                                          LOW_LATENCY_TRAFFIC);
+       } else {
+               iwl_mvm_update_quotas(mvm, false, NULL);
+       }
+
+       data->any_sent = true;
+}
+
+/*
+ * Apply the latest TCM results to all active interfaces and, when the FW
+ * supports UMAC scan, reconfigure the scan parameters.  Takes mvm->mutex.
+ */
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tcm_iter_data data = {
+               .mvm = mvm,
+               .any_sent = false,
+       };
+
+       mutex_lock(&mvm->mutex);
+
+       ieee80211_iterate_active_interfaces(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_tcm_iter, &data);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+               iwl_mvm_config_scan(mvm);
+
+       mutex_unlock(&mvm->mutex);
+}
+
+
+/*
+ * Calculate the per-MAC and global traffic load and the traffic-based
+ * low-latency state for the period ending at @ts, storing the results in
+ * mvm->tcm.result and clearing the gathered counters.  Returns the delay
+ * (in jiffies) after which the TCM work must be re-run to force a
+ * re-evaluation, or 0 when no forced re-run is needed.
+ * NOTE(review): called with mvm->tcm.lock held by iwl_mvm_recalc_tcm().
+ */
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+                                           unsigned long ts,
+                                           bool handle_uapsd)
+{
+       unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+       u32 total_airtime = 0;
+       int ac, mac;
+       bool low_latency = false;
+       enum iwl_mvm_traffic_load load;
+       /* low-latency is re-evaluated on the longer MVM_LL_PERIOD cadence */
+       bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+       if (handle_ll)
+               mvm->tcm.ll_ts = ts;
+       if (handle_uapsd)
+               mvm->tcm.uapsd_nonagg_ts = ts;
+
+       mvm->tcm.result.elapsed = elapsed;
+
+       for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+               struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+               u32 vo_vi_pkts = 0;
+               u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+               total_airtime += airtime;
+
+               load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+               mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+               mvm->tcm.result.load[mac] = load;
+               mvm->tcm.result.airtime[mac] = airtime;
+
+               /* low-latency is driven by voice/video packet counts */
+               for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+                       vo_vi_pkts += mdata->rx.pkts[ac] +
+                                     mdata->tx.pkts[ac];
+
+               /* enable immediately with enough packets but defer disabling */
+               if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+                       mvm->tcm.result.low_latency[mac] = true;
+               else if (handle_ll)
+                       mvm->tcm.result.low_latency[mac] = false;
+
+               if (handle_ll) {
+                       /* clear old data */
+                       memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+                       memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+               }
+               low_latency |= mvm->tcm.result.low_latency[mac];
+
+               /* airtime is cleared every period, pkts only per LL period */
+               memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+               memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+       }
+
+       load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+       mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
+       mvm->tcm.result.global_load = load;
+
+       /*
+        * If the current load isn't low we need to force re-evaluation
+        * in the TCM period, so that we can return to low load if there
+        * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+        * triggered by traffic).
+        */
+       if (load != IWL_MVM_TRAFFIC_LOW)
+               return MVM_TCM_PERIOD;
+       /*
+        * If low-latency is active we need to force re-evaluation after
+        * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+        * when there's no traffic at all.
+        */
+       if (low_latency)
+               return MVM_LL_PERIOD;
+       /*
+        * Otherwise, we don't need to run the work struct because we're
+        * in the default "idle" state - traffic indication is low (which
+        * also covers the "no traffic" case) and low-latency is disabled
+        * so there's no state that may need to be disabled when there's
+        * no traffic at all.
+        *
+        * Note that this has no impact on the regular scheduling of the
+        * updates triggered by traffic - those happen whenever one of the
+        * two timeouts expire (if there's traffic at all.)
+        */
+       return 0;
+}
+
+/*
+ * Recalculate the TCM state if a full sampling period has elapsed and
+ * TCM isn't paused, then apply the results.  The original code dropped
+ * and immediately re-took tcm.lock with nothing in between ("recheck
+ * race" handling for a gap that doesn't exist here), so the two critical
+ * sections are collapsed into one.
+ */
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+       unsigned long ts = jiffies;
+       unsigned long work_delay;
+       bool handle_uapsd = false; /* U-APSD detection not implemented yet */
+
+       spin_lock(&mvm->tcm.lock);
+       if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+               spin_unlock(&mvm->tcm.lock);
+               return;
+       }
+
+       /* calculate statistics */
+       work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, handle_uapsd);
+
+       /* the memset needs to be visible before the timestamp */
+       smp_mb();
+       mvm->tcm.ts = ts;
+       if (work_delay)
+               schedule_delayed_work(&mvm->tcm.work, work_delay);
+       spin_unlock(&mvm->tcm.lock);
+
+       iwl_mvm_tcm_results(mvm);
+}
+
+/* delayed-work entry point: just re-runs the TCM calculation */
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+       struct delayed_work *delayed_work = to_delayed_work(work);
+       struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+                                          tcm.work);
+
+       iwl_mvm_recalc_tcm(mvm);
+}
+
+/*
+ * Stop TCM data gathering (e.g. around suspend, d0i3 or scan); with
+ * @with_cancel, also synchronously cancel any pending TCM work.
+ */
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+       spin_lock_bh(&mvm->tcm.lock);
+       mvm->tcm.paused = true;
+       spin_unlock_bh(&mvm->tcm.lock);
+       if (with_cancel)
+               cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+/*
+ * Restart TCM data gathering: reset the period timestamps and all
+ * per-MAC counters so the paused interval isn't accounted.
+ */
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+       int mac;
+
+       spin_lock_bh(&mvm->tcm.lock);
+       mvm->tcm.ts = jiffies;
+       mvm->tcm.ll_ts = jiffies;
+       for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+               struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+               memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+               memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+               memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+               memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+       }
+       /* The TCM data needs to be reset before "paused" flag changes */
+       smp_mb();
+       mvm->tcm.paused = false;
+       spin_unlock_bh(&mvm->tcm.lock);
+}
+
+
 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
 {
        bool ps_disabled;