#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
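+/*
+ * Example: with the 500 ms TCM period (MVM_TCM_PERIOD_MSEC), 250 ms of
+ * combined TX+RX airtime in one period is a 50% load; classification
+ * requires the load to strictly exceed a threshold, so 50% still
+ * counts as medium, not high.
+ */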
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
/* make sure the d0i3 exit work is not pending */
flush_work(&mvm->d0i3_exit_work);
+ iwl_mvm_pause_tcm(mvm, true);
iwl_fw_runtime_suspend(&mvm->fwrt);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mvm_resume_tcm(mvm);
+
iwl_fw_runtime_resume(&mvm->fwrt);
return ret;
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ iwl_mvm_pause_tcm(mvm, true);
+
iwl_fw_runtime_suspend(&mvm->fwrt);
/* start pseudo D3 */
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
+ iwl_mvm_resume_tcm(mvm);
+
iwl_fw_runtime_resume(&mvm->fwrt);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+ mvm->tcm.result.load[mvmvif->id]);
pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
pos += scnprintf(buf+pos, bufsz-pos,
#include "fw/acpi.h"
#include "fw/debugfs.h"
+#include <linux/average.h>
+
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
IWL_MVM_TDLS_SW_ACTIVE,
};
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
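+/*
+ * Generates struct ewma_rate and the ewma_rate_init/add/read helpers
+ * (16 fractional bits of precision, new samples weighted 1/16).
+ */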
+DECLARE_EWMA(rate, 16, 16)
+
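+/* per-MAC traffic accounting, accumulated over one TCM period */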
+struct iwl_mvm_tcm_mac {
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ } tx;
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ u32 last_ampdu_ref;
+ } rx;
+ struct {
+ /* track AP's transfer in client mode */
+ u64 rx_bytes;
+ struct ewma_rate rate;
+ bool detected;
+ } uapsd_nonagg_detect;
+};
+
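+/*
+ * Traffic condition monitor state: the TX/RX paths accumulate counters
+ * into @data, and iwl_mvm_calc_tcm_stats() folds them into @result
+ * once per MVM_TCM_PERIOD.
+ */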
+struct iwl_mvm_tcm {
+ struct delayed_work work;
+	spinlock_t lock; /* protects the timestamps and accumulated data */
+	unsigned long ts; /* timestamp of the start of the current period */
+ unsigned long ll_ts;
+ unsigned long uapsd_nonagg_ts;
+ bool paused;
+ struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+ struct {
+ u32 elapsed; /* milliseconds for this TCM period */
+ u32 airtime[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load global_load;
+ bool low_latency[NUM_MAC_INDEX_DRIVER];
+ bool change[NUM_MAC_INDEX_DRIVER];
+ bool global_change;
+ } result;
+};
+
/**
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
*/
bool temperature_test; /* Debug test temperature is enabled */
+ unsigned long bt_coex_last_tcm_ts;
+ struct iwl_mvm_tcm tcm;
+
struct iwl_time_quota_cmd last_quota_cmd;
#ifdef CONFIG_NL80211_TESTMODE
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
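+/*
+ * MVM_LL_PERIOD is the 10 second window that the "packets/10 seconds"
+ * comment on IWL_MVM_TCM_LOWLAT_ENABLE_THRESH refers to: the VO/VI
+ * packet counts are only cleared, and low-latency only disabled, once
+ * per this longer period.
+ */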
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+ spin_lock_init(&mvm->tcm.lock);
+ INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ mvm->tcm.uapsd_nonagg_ts = jiffies;
+
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
/*
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ cancel_delayed_work_sync(&mvm->tcm.work);
+
iwl_mvm_tof_clean(mvm);
mutex_destroy(&mvm->mutex);
mvm->d0i3_offloading = false;
}
+ iwl_mvm_pause_tcm(mvm, true);
/* make sure we have no running tx while configuring the seqno */
synchronize_net();
/* the FW might have updated the regdomain */
iwl_mvm_update_changed_regdom(mvm);
+ iwl_mvm_resume_tcm(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
return 0;
}
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr, u32 len,
+ struct iwl_rx_phy_info *phy_info,
+ u32 rate_n_flags)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_tcm_mac *mdata;
+ int mac;
+ int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
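+	/*
+	 * If the TCM period elapsed, let the work evaluate it: the
+	 * results handling (iwl_mvm_tcm_results) takes mvm->mutex and
+	 * so cannot run directly in the RX path.
+	 */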
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+ mdata = &mvm->tcm.data[mac];
+ mdata->rx.pkts[ac]++;
+
+ /* count the airtime only once for each ampdu */
+ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+ mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+ mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+ }
+
+	if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
+		return;
+}
+
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
struct sk_buff *skb,
u32 status)
NULL);
}
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+ rate_n_flags);
+
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
}
int expected_size;
int i;
u8 *energy;
- __le32 *bytes, *air_time;
+ __le32 *air_time;
__le32 flags;
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
energy = (void *)&v11->load_stats.avg_energy;
- bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
energy = (void *)&stats->load_stats.avg_energy;
- bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}
sta->avg_energy = energy[i];
}
rcu_read_unlock();
+
+ /*
+ * Don't update in case the statistics are not cleared, since
+ * we will end up counting twice the same airtime, once in TCM
+ * request and once in statistics notification.
+ */
+ if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
+ return;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 airtime = le32_to_cpu(air_time[i]);
+
+ mdata->rx.airtime += airtime;
+ }
+ spin_unlock(&mvm->tcm.lock);
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
/*
* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
-enum iwl_mvm_traffic_load {
- IWL_MVM_TRAFFIC_LOW,
- IWL_MVM_TRAFFIC_MEDIUM,
- IWL_MVM_TRAFFIC_HIGH,
-};
-
#define IWL_SCAN_DWELL_ACTIVE 10
#define IWL_SCAN_DWELL_PASSIVE 110
#define IWL_SCAN_DWELL_FRAGMENTED 44
ieee80211_scan_completed(mvm->hw, &info);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
} else {
IWL_ERR(mvm,
"got scan complete notification but no scan is running\n");
if (ret)
return ret;
+ iwl_mvm_pause_tcm(mvm, false);
+
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret) {
/* If the scan failed, it usually means that the FW was unable
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ iwl_mvm_resume_tcm(mvm);
return ret;
}
mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
return false;
}
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ int airtime)
+{
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ if (mvm->tcm.paused)
+ return;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ mdata->tx.airtime += airtime;
+}
+
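+/*
+ * Count a queued packet per AC; the VO/VI counts feed the low-latency
+ * detection in iwl_mvm_calc_tcm_stats().
+ */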
+static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
+{
+ u32 ac = tid_to_mac80211_ac[tid];
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ mdata->tx.pkts[ac]++;
+}
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
spin_unlock(&mvmsta->lock);
+ iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+
return 0;
drop_unlock_sta:
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+
if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
}
rcu_read_unlock();
le32_to_cpu(ba_res->tx_rate));
}
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
out_unlock:
rcu_read_unlock();
out:
sta->addr, tid);
}
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+ if (!elapsed)
+ return 0;
+
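+	/*
+	 * airtime is in usec (accumulated from frame_time and
+	 * wireless_media_time), elapsed is in msec, hence the extra
+	 * division by USEC_PER_MSEC: e.g. 250000 usec of airtime over
+	 * 500 msec elapsed yields 50 (%).
+	 */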
+ return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+ u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+ if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+ return IWL_MVM_TRAFFIC_HIGH;
+ if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+ return IWL_MVM_TRAFFIC_MEDIUM;
+
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+struct iwl_mvm_tcm_iter_data {
+ struct iwl_mvm *mvm;
+ bool any_sent;
+};
+
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_tcm_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+ if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+ if (!mvm->tcm.result.change[mvmvif->id] &&
+ prev == low_latency) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ return;
+ }
+
+ if (prev != low_latency) {
+ /* this sends traffic load and updates quota as well */
+ iwl_mvm_update_low_latency(mvm, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+ } else {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+
+ data->any_sent = true;
+}
+
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tcm_iter_data data = {
+ .mvm = mvm,
+ .any_sent = false,
+ };
+
+ mutex_lock(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iter, &data);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+ unsigned long ts,
+ bool handle_uapsd)
+{
+ unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+ u32 total_airtime = 0;
+ int ac, mac;
+ bool low_latency = false;
+ enum iwl_mvm_traffic_load load;
+ bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+ if (handle_ll)
+ mvm->tcm.ll_ts = ts;
+ if (handle_uapsd)
+ mvm->tcm.uapsd_nonagg_ts = ts;
+
+ mvm->tcm.result.elapsed = elapsed;
+
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ u32 vo_vi_pkts = 0;
+ u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+ total_airtime += airtime;
+
+ load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+ mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+ mvm->tcm.result.load[mac] = load;
+ mvm->tcm.result.airtime[mac] = airtime;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+ vo_vi_pkts += mdata->rx.pkts[ac] +
+ mdata->tx.pkts[ac];
+
+ /* enable immediately with enough packets but defer disabling */
+ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+ mvm->tcm.result.low_latency[mac] = true;
+ else if (handle_ll)
+ mvm->tcm.result.low_latency[mac] = false;
+
+ if (handle_ll) {
+ /* clear old data */
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ }
+ low_latency |= mvm->tcm.result.low_latency[mac];
+
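+		/* airtime is cleared every period, unlike the packet
+		 * counts which persist until the longer MVM_LL_PERIOD
+		 * expires
+		 */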
+		mdata->rx.airtime = 0;
+		mdata->tx.airtime = 0;
+ }
+
+ load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+ mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
+ mvm->tcm.result.global_load = load;
+
+ /*
+ * If the current load isn't low we need to force re-evaluation
+ * in the TCM period, so that we can return to low load if there
+ * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+ * triggered by traffic).
+ */
+ if (load != IWL_MVM_TRAFFIC_LOW)
+ return MVM_TCM_PERIOD;
+ /*
+ * If low-latency is active we need to force re-evaluation after
+ * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+ * when there's no traffic at all.
+ */
+ if (low_latency)
+ return MVM_LL_PERIOD;
+ /*
+ * Otherwise, we don't need to run the work struct because we're
+ * in the default "idle" state - traffic indication is low (which
+ * also covers the "no traffic" case) and low-latency is disabled
+ * so there's no state that may need to be disabled when there's
+ * no traffic at all.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever one of the
+ * two timeouts expire (if there's traffic at all.)
+ */
+ return 0;
+}
+
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+ unsigned long ts = jiffies;
+	bool handle_uapsd = false;
+
+ spin_lock(&mvm->tcm.lock);
+ if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ spin_unlock(&mvm->tcm.lock);
+ return;
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ spin_lock(&mvm->tcm.lock);
+ /* re-check if somebody else won the recheck race */
+ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ /* calculate statistics */
+ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
+ handle_uapsd);
+
+ /* the memset needs to be visible before the timestamp */
+ smp_mb();
+ mvm->tcm.ts = ts;
+ if (work_delay)
+ schedule_delayed_work(&mvm->tcm.work, work_delay);
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ iwl_mvm_tcm_results(mvm);
+}
+
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ tcm.work);
+
+ iwl_mvm_recalc_tcm(mvm);
+}
+
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.paused = true;
+ spin_unlock_bh(&mvm->tcm.lock);
+ if (with_cancel)
+ cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+ int mac;
+
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+		mdata->rx.airtime = 0;
+		mdata->tx.airtime = 0;
+ }
+ /* The TCM data needs to be reset before "paused" flag changes */
+ smp_mb();
+ mvm->tcm.paused = false;
+ spin_unlock_bh(&mvm->tcm.lock);
+}
+
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
bool ps_disabled;