--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 28 Sep 2019 15:44:06 +0200
+Subject: [PATCH] mac80211: minstrel: remove divisions in tx status path
+
+Use a slightly different threshold for downgrading spatial streams to
+make it easier to calculate without divisions.
+Slightly reduces CPU overhead.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -289,8 +289,7 @@ minstrel_tx_status(void *priv, struct ie
+ if (mi->sample_deferred > 0)
+ mi->sample_deferred--;
+
+- if (time_after(jiffies, mi->last_stats_update +
+- (mp->update_interval * HZ) / 1000))
++ if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
+ minstrel_update_stats(mp, mi);
+ }
+
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -970,23 +970,21 @@ minstrel_ht_tx_status(void *priv, struct
+ */
+ rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
+ if (rate->attempts > 30 &&
+- MINSTREL_FRAC(rate->success, rate->attempts) <
+- MINSTREL_FRAC(20, 100)) {
++ rate->success < rate->attempts / 4) {
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
+ update = true;
+ }
+
+ rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
+ if (rate2->attempts > 30 &&
+- MINSTREL_FRAC(rate2->success, rate2->attempts) <
+- MINSTREL_FRAC(20, 100)) {
++ rate2->success < rate2->attempts / 4) {
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
+ update = true;
+ }
+ }
+
+ if (time_after(jiffies, mi->last_stats_update +
+- (mp->update_interval / 2 * HZ) / 1000)) {
++ mp->update_interval / 2)) {
+ update = true;
+ minstrel_ht_update_stats(mp, mi, true);
+ }
+@@ -1666,7 +1664,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
+ mp->has_mrr = true;
+
+ mp->hw = hw;
+- mp->update_interval = 100;
++ mp->update_interval = HZ / 10;
+
+ #ifdef CPTCFG_MAC80211_DEBUGFS
+ mp->fixed_rate_idx = (u32) -1;
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 28 Sep 2019 15:46:06 +0200
+Subject: [PATCH] mac80211: minstrel_ht: replace rate stats ewma with a
+ better moving average
+
+Rate success probability usually fluctuates a lot under normal conditions.
+With a simple EWMA, noise and fluctuation can be reduced by increasing the
+window length, but that comes at the cost of introducing lag on sudden
+changes.
+
+This change replaces the EWMA implementation with a moving average that's
+designed to significantly reduce lag while keeping a bigger window size
+by being better at filtering out noise.
+
+It is only slightly more expensive than the simple EWMA and still avoids
+divisions in its calculation.
+
+The algorithm is adapted from an implementation intended for a completely
+different field (stock market trading), where the tradeoff of lag vs
+noise filtering is equally important.
+
+The algorithm works in the same way as the "smoothing filter" from
+http://www.stockspotter.com/files/PredictiveIndicators.pdf adapted for
+fixed-point math with some constants, using only addition, bit shifts
+and multiplication.
+
+To better make use of the filtering and bigger window size, the update
+interval is cut in half.
+
+For testing, the algorithm can be reverted to the older one via debugfs.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -157,14 +157,18 @@ minstrel_update_rates(struct minstrel_pr
+ * Recalculate statistics and counters of a given rate
+ */
+ void
+-minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
++minstrel_calc_rate_stats(struct minstrel_priv *mp,
++ struct minstrel_rate_stats *mrs)
+ {
+ unsigned int cur_prob;
+
+ if (unlikely(mrs->attempts > 0)) {
+ mrs->sample_skipped = 0;
+ cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+- if (unlikely(!mrs->att_hist)) {
++ if (mp->new_avg) {
++ mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
++ cur_prob);
++ } else if (unlikely(!mrs->att_hist)) {
+ mrs->prob_ewma = cur_prob;
+ } else {
+ /*update exponential weighted moving avarage */
+@@ -200,7 +204,7 @@ minstrel_update_stats(struct minstrel_pr
+ struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;
+
+ /* Update statistics of success probability per rate */
+- minstrel_calc_rate_stats(mrs);
++ minstrel_calc_rate_stats(mp, mrs);
+
+ /* Sample less often below the 10% chance of success.
+ * Sample less often above the 95% chance of success. */
+@@ -289,7 +293,8 @@ minstrel_tx_status(void *priv, struct ie
+ if (mi->sample_deferred > 0)
+ mi->sample_deferred--;
+
+- if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
++ if (time_after(jiffies, mi->last_stats_update +
++ mp->update_interval / (mp->new_avg ? 2 : 1)))
+ minstrel_update_stats(mp, mi);
+ }
+
+--- a/net/mac80211/rc80211_minstrel.h
++++ b/net/mac80211/rc80211_minstrel.h
+@@ -19,6 +19,21 @@
+ #define MAX_THR_RATES 4
+
+ /*
++ * Coefficients for moving average with noise filter (period=16),
++ * scaled by 10 bits
++ *
++ * a1 = exp(-pi * sqrt(2) / period)
++ * coeff2 = 2 * a1 * cos(sqrt(2) * 2 * pi / period)
++ * coeff3 = -sqr(a1)
++ * coeff1 = 1 - coeff2 - coeff3
++ */
++#define MINSTREL_AVG_COEFF1 (MINSTREL_FRAC(1, 1) - \
++ MINSTREL_AVG_COEFF2 - \
++ MINSTREL_AVG_COEFF3)
++#define MINSTREL_AVG_COEFF2 0x00001499
++#define MINSTREL_AVG_COEFF3 -0x0000092e
++
++/*
+ * Perform EWMA (Exponentially Weighted Moving Average) calculation
+ */
+ static inline int
+@@ -32,6 +47,41 @@ minstrel_ewma(int old, int new, int weig
+ return old + incr;
+ }
+
++struct minstrel_avg_ctx {
++ s32 prev[2];
++};
++
++static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
++{
++ s32 out_1 = ctx->prev[0];
++ s32 out_2 = ctx->prev[1];
++ s32 val;
++
++ if (!in)
++ in += 1;
++
++ if (!out_1) {
++ val = out_1 = in;
++ goto out;
++ }
++
++ val = MINSTREL_AVG_COEFF1 * in;
++ val += MINSTREL_AVG_COEFF2 * out_1;
++ val += MINSTREL_AVG_COEFF3 * out_2;
++ val >>= MINSTREL_SCALE;
++
++ if (val > 1 << MINSTREL_SCALE)
++ val = 1 << MINSTREL_SCALE;
++ if (val < 0)
++ val = 1;
++
++out:
++ ctx->prev[1] = out_1;
++ ctx->prev[0] = val;
++
++ return val;
++}
++
+ struct minstrel_rate_stats {
+ /* current / last sampling period attempts/success counters */
+ u16 attempts, last_attempts;
+@@ -40,6 +90,8 @@ struct minstrel_rate_stats {
+ /* total attempts/success counters */
+ u32 att_hist, succ_hist;
+
++ struct minstrel_avg_ctx avg;
++
+ /* prob_ewma - exponential weighted moving average of prob */
+ u16 prob_ewma;
+
+@@ -95,6 +147,7 @@ struct minstrel_sta_info {
+ struct minstrel_priv {
+ struct ieee80211_hw *hw;
+ bool has_mrr;
++ bool new_avg;
+ u32 sample_switch;
+ unsigned int cw_min;
+ unsigned int cw_max;
+@@ -126,7 +179,8 @@ extern const struct rate_control_ops mac
+ void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
+
+ /* Recalculate success probabilities and counters for a given rate using EWMA */
+-void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
++void minstrel_calc_rate_stats(struct minstrel_priv *mp,
++ struct minstrel_rate_stats *mrs);
+ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
+
+ /* debugfs */
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -737,7 +737,7 @@ minstrel_ht_update_stats(struct minstrel
+
+ mrs = &mg->rates[i];
+ mrs->retry_updated = false;
+- minstrel_calc_rate_stats(mrs);
++ minstrel_calc_rate_stats(mp, mrs);
+ cur_prob = mrs->prob_ewma;
+
+ if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
+@@ -773,6 +773,8 @@ minstrel_ht_update_stats(struct minstrel
+
+ /* try to sample all available rates during each interval */
+ mi->sample_count *= 8;
++ if (mp->new_avg)
++ mi->sample_count /= 2;
+
+ if (sample)
+ minstrel_ht_rate_sample_switch(mp, mi);
+@@ -889,6 +891,7 @@ minstrel_ht_tx_status(void *priv, struct
+ struct ieee80211_tx_rate *ar = info->status.rates;
+ struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
+ struct minstrel_priv *mp = priv;
++ u32 update_interval = mp->update_interval / 2;
+ bool last, update = false;
+ bool sample_status = false;
+ int i;
+@@ -943,6 +946,10 @@ minstrel_ht_tx_status(void *priv, struct
+
+ switch (mi->sample_mode) {
+ case MINSTREL_SAMPLE_IDLE:
++ if (mp->new_avg &&
++ (mp->hw->max_rates > 1 ||
++ mi->total_packets_cur < SAMPLE_SWITCH_THR))
++ update_interval /= 2;
+ break;
+
+ case MINSTREL_SAMPLE_ACTIVE:
+@@ -983,8 +990,7 @@ minstrel_ht_tx_status(void *priv, struct
+ }
+ }
+
+- if (time_after(jiffies, mi->last_stats_update +
+- mp->update_interval / 2)) {
++ if (time_after(jiffies, mi->last_stats_update + update_interval)) {
+ update = true;
+ minstrel_ht_update_stats(mp, mi, true);
+ }
+@@ -1665,6 +1671,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
+
+ mp->hw = hw;
+ mp->update_interval = HZ / 10;
++ mp->new_avg = true;
+
+ #ifdef CPTCFG_MAC80211_DEBUGFS
+ mp->fixed_rate_idx = (u32) -1;
+@@ -1672,6 +1679,8 @@ minstrel_ht_alloc(struct ieee80211_hw *h
+ &mp->fixed_rate_idx);
+ debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
+ &mp->sample_switch);
++ debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
++ &mp->new_avg);
+ #endif
+
+ minstrel_ht_init_cck_rates(mp);
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Tue, 8 Oct 2019 18:54:46 +0200
+Subject: [PATCH] mac80211: minstrel_ht: rename prob_ewma to prob_avg, use it
+ for the new average
+
+Reduces per-rate data structure size
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -70,7 +70,7 @@ rix_to_ndx(struct minstrel_sta_info *mi,
+ }
+
+ /* return current EMWA throughput */
+-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
++int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg)
+ {
+ int usecs;
+
+@@ -79,13 +79,13 @@ int minstrel_get_tp_avg(struct minstrel_
+ usecs = 1000000;
+
+ /* reset thr. below 10% success */
+- if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
++ if (mr->stats.prob_avg < MINSTREL_FRAC(10, 100))
+ return 0;
+
+- if (prob_ewma > MINSTREL_FRAC(90, 100))
++ if (prob_avg > MINSTREL_FRAC(90, 100))
+ return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
+ else
+- return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
++ return MINSTREL_TRUNC(100000 * (prob_avg / usecs));
+ }
+
+ /* find & sort topmost throughput rates */
+@@ -98,8 +98,8 @@ minstrel_sort_best_tp_rates(struct minst
+
+ for (j = MAX_THR_RATES; j > 0; --j) {
+ tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+- if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
++ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_avg) <=
++ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_avg))
+ break;
+ }
+
+@@ -166,15 +166,15 @@ minstrel_calc_rate_stats(struct minstrel
+ mrs->sample_skipped = 0;
+ cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+ if (mp->new_avg) {
+- mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
+- cur_prob);
++ minstrel_filter_avg_add(&mrs->prob_avg,
++ &mrs->prob_avg_1, cur_prob);
+ } else if (unlikely(!mrs->att_hist)) {
+- mrs->prob_ewma = cur_prob;
++ mrs->prob_avg = cur_prob;
+ } else {
+ /*update exponential weighted moving avarage */
+- mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
+- cur_prob,
+- EWMA_LEVEL);
++ mrs->prob_avg = minstrel_ewma(mrs->prob_avg,
++ cur_prob,
++ EWMA_LEVEL);
+ }
+ mrs->att_hist += mrs->attempts;
+ mrs->succ_hist += mrs->success;
+@@ -208,8 +208,8 @@ minstrel_update_stats(struct minstrel_pr
+
+ /* Sample less often below the 10% chance of success.
+ * Sample less often above the 95% chance of success. */
+- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+- mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
++ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
++ mrs->prob_avg < MINSTREL_FRAC(10, 100)) {
+ mr->adjusted_retry_count = mrs->retry_count >> 1;
+ if (mr->adjusted_retry_count > 2)
+ mr->adjusted_retry_count = 2;
+@@ -229,14 +229,14 @@ minstrel_update_stats(struct minstrel_pr
+ * choose the maximum throughput rate as max_prob_rate
+ * (2) if all success probabilities < 95%, the rate with
+ * highest success probability is chosen as max_prob_rate */
+- if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
+- tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
++ if (mrs->prob_avg >= MINSTREL_FRAC(95, 100)) {
++ tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_avg);
+ tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
+- tmp_mrs->prob_ewma);
++ tmp_mrs->prob_avg);
+ if (tmp_cur_tp >= tmp_prob_tp)
+ tmp_prob_rate = i;
+ } else {
+- if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
++ if (mrs->prob_avg >= tmp_mrs->prob_avg)
+ tmp_prob_rate = i;
+ }
+ }
+@@ -426,7 +426,7 @@ minstrel_get_rate(void *priv, struct iee
+ * has a probability of >95%, we shouldn't be attempting
+ * to use it, as this only wastes precious airtime */
+ if (!mrr_capable &&
+- (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
++ (mi->r[ndx].stats.prob_avg > MINSTREL_FRAC(95, 100)))
+ return;
+
+ mi->prev_sample = true;
+@@ -577,7 +577,7 @@ static u32 minstrel_get_expected_through
+ * computing cur_tp
+ */
+ tmp_mrs = &mi->r[idx].stats;
+- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
++ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_avg) * 10;
+ tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
+
+ return tmp_cur_tp;
+--- a/net/mac80211/rc80211_minstrel.h
++++ b/net/mac80211/rc80211_minstrel.h
+@@ -47,14 +47,10 @@ minstrel_ewma(int old, int new, int weig
+ return old + incr;
+ }
+
+-struct minstrel_avg_ctx {
+- s32 prev[2];
+-};
+-
+-static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
++static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
+ {
+- s32 out_1 = ctx->prev[0];
+- s32 out_2 = ctx->prev[1];
++ s32 out_1 = *prev_1;
++ s32 out_2 = *prev_2;
+ s32 val;
+
+ if (!in)
+@@ -76,8 +72,8 @@ static inline int minstrel_filter_avg_ad
+ val = 1;
+
+ out:
+- ctx->prev[1] = out_1;
+- ctx->prev[0] = val;
++ *prev_2 = out_1;
++ *prev_1 = val;
+
+ return val;
+ }
+@@ -90,10 +86,9 @@ struct minstrel_rate_stats {
+ /* total attempts/success counters */
+ u32 att_hist, succ_hist;
+
+- struct minstrel_avg_ctx avg;
+-
+- /* prob_ewma - exponential weighted moving average of prob */
+- u16 prob_ewma;
++ /* prob_avg - moving average of prob */
++ u16 prob_avg;
++ u16 prob_avg_1;
+
+ /* maximum retry counts */
+ u8 retry_count;
+@@ -181,7 +176,7 @@ void minstrel_add_sta_debugfs(void *priv
+ /* Recalculate success probabilities and counters for a given rate using EWMA */
+ void minstrel_calc_rate_stats(struct minstrel_priv *mp,
+ struct minstrel_rate_stats *mrs);
+-int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
++int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg);
+
+ /* debugfs */
+ int minstrel_stats_open(struct inode *inode, struct file *file);
+--- a/net/mac80211/rc80211_minstrel_debugfs.c
++++ b/net/mac80211/rc80211_minstrel_debugfs.c
+@@ -90,8 +90,8 @@ minstrel_stats_open(struct inode *inode,
+ p += sprintf(p, "%6u ", mr->perfect_tx_time);
+
+ tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
+- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
++ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
++ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
+
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
+ " %3u %3u %-3u "
+@@ -147,8 +147,8 @@ minstrel_stats_csv_open(struct inode *in
+ p += sprintf(p, "%u,",mr->perfect_tx_time);
+
+ tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
+- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
+- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
++ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
++ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
+
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+ "%llu,%llu,%d,%d\n",
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -346,12 +346,12 @@ minstrel_ht_avg_ampdu_len(struct minstre
+ */
+ int
+ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
+- int prob_ewma)
++ int prob_avg)
+ {
+ unsigned int nsecs = 0;
+
+ /* do not account throughput if sucess prob is below 10% */
+- if (prob_ewma < MINSTREL_FRAC(10, 100))
++ if (prob_avg < MINSTREL_FRAC(10, 100))
+ return 0;
+
+ if (group != MINSTREL_CCK_GROUP)
+@@ -365,11 +365,11 @@ minstrel_ht_get_tp_avg(struct minstrel_h
+ * account for collision related packet error rate fluctuation
+ * (prob is scaled - see MINSTREL_FRAC above)
+ */
+- if (prob_ewma > MINSTREL_FRAC(90, 100))
++ if (prob_avg > MINSTREL_FRAC(90, 100))
+ return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
+ / nsecs));
+ else
+- return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
++ return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
+ }
+
+ /*
+@@ -389,13 +389,13 @@ minstrel_ht_sort_best_tp_rates(struct mi
+
+ cur_group = index / MCS_GROUP_RATES;
+ cur_idx = index % MCS_GROUP_RATES;
+- cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
++ cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
+ cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
+
+ do {
+ tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+ tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
+- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
++ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
+ tmp_prob);
+ if (cur_tp_avg < tmp_tp_avg ||
+@@ -432,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct mi
+
+ tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
+ tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
+- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
++ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+
+ /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
+@@ -444,11 +444,11 @@ minstrel_ht_set_best_prob_rate(struct mi
+
+ max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+ max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+- max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
++ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
+
+- if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
++ if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
+ cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
+- mrs->prob_ewma);
++ mrs->prob_avg);
+ if (cur_tp_avg > tmp_tp_avg)
+ mi->max_prob_rate = index;
+
+@@ -458,9 +458,9 @@ minstrel_ht_set_best_prob_rate(struct mi
+ if (cur_tp_avg > max_gpr_tp_avg)
+ mg->max_group_prob_rate = index;
+ } else {
+- if (mrs->prob_ewma > tmp_prob)
++ if (mrs->prob_avg > tmp_prob)
+ mi->max_prob_rate = index;
+- if (mrs->prob_ewma > max_gpr_prob)
++ if (mrs->prob_avg > max_gpr_prob)
+ mg->max_group_prob_rate = index;
+ }
+ }
+@@ -482,12 +482,12 @@ minstrel_ht_assign_best_tp_rates(struct
+
+ tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
+- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
++ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+
+ tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
+- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
++ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+
+ if (tmp_cck_tp_rate && tmp_cck_tp > tmp_mcs_tp) {
+@@ -518,7 +518,7 @@ minstrel_ht_prob_rate_reduce_streams(str
+ continue;
+
+ tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+- tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
++ tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
+
+ if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
+ (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
+@@ -623,7 +623,7 @@ minstrel_ht_rate_sample_switch(struct mi
+ * If that fails, look again for a rate that is at least as fast
+ */
+ mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
+- faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
++ faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
+ minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
+ if (!n_rates && faster_rate)
+ minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
+@@ -738,7 +738,7 @@ minstrel_ht_update_stats(struct minstrel
+ mrs = &mg->rates[i];
+ mrs->retry_updated = false;
+ minstrel_calc_rate_stats(mp, mrs);
+- cur_prob = mrs->prob_ewma;
++ cur_prob = mrs->prob_avg;
+
+ if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
+ continue;
+@@ -1012,7 +1012,7 @@ minstrel_calc_retransmit(struct minstrel
+ unsigned int overhead = 0, overhead_rtscts = 0;
+
+ mrs = minstrel_get_ratestats(mi, index);
+- if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
++ if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
+ mrs->retry_count = 1;
+ mrs->retry_count_rtscts = 1;
+ return;
+@@ -1069,7 +1069,7 @@ minstrel_ht_set_rate(struct minstrel_pri
+ if (!mrs->retry_updated)
+ minstrel_calc_retransmit(mp, mi, index);
+
+- if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
++ if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
+ ratetbl->rate[offset].count = 2;
+ ratetbl->rate[offset].count_rts = 2;
+ ratetbl->rate[offset].count_cts = 2;
+@@ -1103,11 +1103,11 @@ minstrel_ht_set_rate(struct minstrel_pri
+ }
+
+ static inline int
+-minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
++minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
+ {
+ int group = rate / MCS_GROUP_RATES;
+ rate %= MCS_GROUP_RATES;
+- return mi->groups[group].rates[rate].prob_ewma;
++ return mi->groups[group].rates[rate].prob_avg;
+ }
+
+ static int
+@@ -1119,7 +1119,7 @@ minstrel_ht_get_max_amsdu_len(struct min
+ unsigned int duration;
+
+ /* Disable A-MSDU if max_prob_rate is bad */
+- if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
++ if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
+ return 1;
+
+ duration = g->duration[rate];
+@@ -1142,7 +1142,7 @@ minstrel_ht_get_max_amsdu_len(struct min
+ * data packet size
+ */
+ if (duration > MCS_DURATION(1, 0, 260) ||
+- (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
++ (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
+ MINSTREL_FRAC(75, 100)))
+ return 3200;
+
+@@ -1247,7 +1247,7 @@ minstrel_get_sample_rate(struct minstrel
+ * rate, to avoid wasting airtime.
+ */
+ sample_dur = minstrel_get_duration(sample_idx);
+- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
++ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
+ minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
+ return -1;
+
+@@ -1705,7 +1705,7 @@ static u32 minstrel_ht_get_expected_thro
+
+ i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+ j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
+- prob = mi->groups[i].rates[j].prob_ewma;
++ prob = mi->groups[i].rates[j].prob_avg;
+
+ /* convert tp_avg from pkt per second in kbps */
+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+--- a/net/mac80211/rc80211_minstrel_ht.h
++++ b/net/mac80211/rc80211_minstrel_ht.h
+@@ -119,6 +119,6 @@ struct minstrel_ht_sta_priv {
+
+ void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
+ int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
+- int prob_ewma);
++ int prob_avg);
+
+ #endif
+--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
++++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
+@@ -98,8 +98,8 @@ minstrel_ht_stats_dump(struct minstrel_h
+ p += sprintf(p, "%6u ", tx_time);
+
+ tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
+- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
+- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
++ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
++ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
+
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
+ " %3u %3u %-3u "
+@@ -243,8 +243,8 @@ minstrel_ht_stats_csv_dump(struct minstr
+ p += sprintf(p, "%u,", tx_time);
+
+ tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
+- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
+- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
++ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
++ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
+
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
+ "%u,%llu,%llu,",
--- /dev/null
+From b478e06a16a8baa00c5ecc87c1d636981f2206d5 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 29 Oct 2019 10:25:25 +0100
+Subject: [PATCH] mac80211: sta: randomize BA session dialog token allocator
+
+We currently always start the dialog token generator at zero,
+so the first dialog token we use is always 1. This would be
+OK if we had a perfect guarantee that we always do a proper
+deauth/re-auth handshake, but in IBSS mode this doesn't always
+happen properly.
+
+To make problems with block ack (aggregation) sessions getting
+stuck less likely, randomize the dialog token so if we start a
+new session but the peer still has old state for us, it can
+better detect this.
+
+This is really just a workaround to make things a bit more
+robust than they are now - a better fix would be to do a full
+authentication handshake in IBSS mode upon having discovered a
+new station, and on the receiver resetting the state (removing
+and re-adding the station) on receiving the authentication
+packet.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+---
+ net/mac80211/sta_info.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -324,6 +324,7 @@ struct sta_info *sta_info_alloc(struct i
+ INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
+ INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ mutex_init(&sta->ampdu_mlme.mtx);
++ sta->ampdu_mlme.dialog_token_allocator = prandom_u32_max(U8_MAX);
+ #ifdef CPTCFG_MAC80211_MESH
+ if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sat, 28 Sep 2019 15:44:06 +0200
-Subject: [PATCH] mac80211: minstrel: remove divisions in tx status path
-
-Use a slightly different threshold for downgrading spatial streams to
-make it easier to calculate without divisions.
-Slightly reduces CPU overhead.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/mac80211/rc80211_minstrel.c
-+++ b/net/mac80211/rc80211_minstrel.c
-@@ -289,8 +289,7 @@ minstrel_tx_status(void *priv, struct ie
- if (mi->sample_deferred > 0)
- mi->sample_deferred--;
-
-- if (time_after(jiffies, mi->last_stats_update +
-- (mp->update_interval * HZ) / 1000))
-+ if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
- minstrel_update_stats(mp, mi);
- }
-
---- a/net/mac80211/rc80211_minstrel_ht.c
-+++ b/net/mac80211/rc80211_minstrel_ht.c
-@@ -970,23 +970,21 @@ minstrel_ht_tx_status(void *priv, struct
- */
- rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
- if (rate->attempts > 30 &&
-- MINSTREL_FRAC(rate->success, rate->attempts) <
-- MINSTREL_FRAC(20, 100)) {
-+ rate->success < rate->attempts / 4) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
- update = true;
- }
-
- rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
- if (rate2->attempts > 30 &&
-- MINSTREL_FRAC(rate2->success, rate2->attempts) <
-- MINSTREL_FRAC(20, 100)) {
-+ rate2->success < rate2->attempts / 4) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
- update = true;
- }
- }
-
- if (time_after(jiffies, mi->last_stats_update +
-- (mp->update_interval / 2 * HZ) / 1000)) {
-+ mp->update_interval / 2)) {
- update = true;
- minstrel_ht_update_stats(mp, mi, true);
- }
-@@ -1666,7 +1664,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
- mp->has_mrr = true;
-
- mp->hw = hw;
-- mp->update_interval = 100;
-+ mp->update_interval = HZ / 10;
-
- #ifdef CPTCFG_MAC80211_DEBUGFS
- mp->fixed_rate_idx = (u32) -1;
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sat, 28 Sep 2019 15:46:06 +0200
-Subject: [PATCH] mac80211: minstrel_ht: replace rate stats ewma with a
- better moving average
-
-Rate success probability usually fluctuates a lot under normal conditions.
-With a simple EWMA, noise and fluctuation can be reduced by increasing the
-window length, but that comes at the cost of introducing lag on sudden
-changes.
-
-This change replaces the EWMA implementation with a moving average that's
-designed to significantly reduce lag while keeping a bigger window size
-by being better at filtering out noise.
-
-It is only slightly more expensive than the simple EWMA and still avoids
-divisions in its calculation.
-
-The algorithm is adapted from an implementation intended for a completely
-different field (stock market trading), where the tradeoff of lag vs
-noise filtering is equally important.
-
-The algorithm works in the same way as the "smoothing filter" from
-http://www.stockspotter.com/files/PredictiveIndicators.pdf adapted for
-fixed-point math with some constants, using only addition, bit shifts
-and multiplication
-
-To better make use of the filtering and bigger window size, the update
-interval is cut in half.
-
-For testing, the algorithm can be reverted to the older one via debugfs
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/mac80211/rc80211_minstrel.c
-+++ b/net/mac80211/rc80211_minstrel.c
-@@ -157,14 +157,18 @@ minstrel_update_rates(struct minstrel_pr
- * Recalculate statistics and counters of a given rate
- */
- void
--minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
-+minstrel_calc_rate_stats(struct minstrel_priv *mp,
-+ struct minstrel_rate_stats *mrs)
- {
- unsigned int cur_prob;
-
- if (unlikely(mrs->attempts > 0)) {
- mrs->sample_skipped = 0;
- cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
-- if (unlikely(!mrs->att_hist)) {
-+ if (mp->new_avg) {
-+ mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
-+ cur_prob);
-+ } else if (unlikely(!mrs->att_hist)) {
- mrs->prob_ewma = cur_prob;
- } else {
- /*update exponential weighted moving avarage */
-@@ -200,7 +204,7 @@ minstrel_update_stats(struct minstrel_pr
- struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;
-
- /* Update statistics of success probability per rate */
-- minstrel_calc_rate_stats(mrs);
-+ minstrel_calc_rate_stats(mp, mrs);
-
- /* Sample less often below the 10% chance of success.
- * Sample less often above the 95% chance of success. */
-@@ -289,7 +293,8 @@ minstrel_tx_status(void *priv, struct ie
- if (mi->sample_deferred > 0)
- mi->sample_deferred--;
-
-- if (time_after(jiffies, mi->last_stats_update + mp->update_interval))
-+ if (time_after(jiffies, mi->last_stats_update +
-+ mp->update_interval / (mp->new_avg ? 2 : 1)))
- minstrel_update_stats(mp, mi);
- }
-
---- a/net/mac80211/rc80211_minstrel.h
-+++ b/net/mac80211/rc80211_minstrel.h
-@@ -19,6 +19,21 @@
- #define MAX_THR_RATES 4
-
- /*
-+ * Coefficients for moving average with noise filter (period=16),
-+ * scaled by 10 bits
-+ *
-+ * a1 = exp(-pi * sqrt(2) / period)
-+ * coeff2 = 2 * a1 * cos(sqrt(2) * 2 * pi / period)
-+ * coeff3 = -sqr(a1)
-+ * coeff1 = 1 - coeff2 - coeff3
-+ */
-+#define MINSTREL_AVG_COEFF1 (MINSTREL_FRAC(1, 1) - \
-+ MINSTREL_AVG_COEFF2 - \
-+ MINSTREL_AVG_COEFF3)
-+#define MINSTREL_AVG_COEFF2 0x00001499
-+#define MINSTREL_AVG_COEFF3 -0x0000092e
-+
-+/*
- * Perform EWMA (Exponentially Weighted Moving Average) calculation
- */
- static inline int
-@@ -32,6 +47,41 @@ minstrel_ewma(int old, int new, int weig
- return old + incr;
- }
-
-+struct minstrel_avg_ctx {
-+ s32 prev[2];
-+};
-+
-+static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
-+{
-+ s32 out_1 = ctx->prev[0];
-+ s32 out_2 = ctx->prev[1];
-+ s32 val;
-+
-+ if (!in)
-+ in += 1;
-+
-+ if (!out_1) {
-+ val = out_1 = in;
-+ goto out;
-+ }
-+
-+ val = MINSTREL_AVG_COEFF1 * in;
-+ val += MINSTREL_AVG_COEFF2 * out_1;
-+ val += MINSTREL_AVG_COEFF3 * out_2;
-+ val >>= MINSTREL_SCALE;
-+
-+ if (val > 1 << MINSTREL_SCALE)
-+ val = 1 << MINSTREL_SCALE;
-+ if (val < 0)
-+ val = 1;
-+
-+out:
-+ ctx->prev[1] = out_1;
-+ ctx->prev[0] = val;
-+
-+ return val;
-+}
-+
- struct minstrel_rate_stats {
- /* current / last sampling period attempts/success counters */
- u16 attempts, last_attempts;
-@@ -40,6 +90,8 @@ struct minstrel_rate_stats {
- /* total attempts/success counters */
- u32 att_hist, succ_hist;
-
-+ struct minstrel_avg_ctx avg;
-+
- /* prob_ewma - exponential weighted moving average of prob */
- u16 prob_ewma;
-
-@@ -95,6 +147,7 @@ struct minstrel_sta_info {
- struct minstrel_priv {
- struct ieee80211_hw *hw;
- bool has_mrr;
-+ bool new_avg;
- u32 sample_switch;
- unsigned int cw_min;
- unsigned int cw_max;
-@@ -126,7 +179,8 @@ extern const struct rate_control_ops mac
- void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
-
- /* Recalculate success probabilities and counters for a given rate using EWMA */
--void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
-+void minstrel_calc_rate_stats(struct minstrel_priv *mp,
-+ struct minstrel_rate_stats *mrs);
- int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
-
- /* debugfs */
---- a/net/mac80211/rc80211_minstrel_ht.c
-+++ b/net/mac80211/rc80211_minstrel_ht.c
-@@ -737,7 +737,7 @@ minstrel_ht_update_stats(struct minstrel
-
- mrs = &mg->rates[i];
- mrs->retry_updated = false;
-- minstrel_calc_rate_stats(mrs);
-+ minstrel_calc_rate_stats(mp, mrs);
- cur_prob = mrs->prob_ewma;
-
- if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
-@@ -773,6 +773,8 @@ minstrel_ht_update_stats(struct minstrel
-
- /* try to sample all available rates during each interval */
- mi->sample_count *= 8;
-+ if (mp->new_avg)
-+ mi->sample_count /= 2;
-
- if (sample)
- minstrel_ht_rate_sample_switch(mp, mi);
-@@ -889,6 +891,7 @@ minstrel_ht_tx_status(void *priv, struct
- struct ieee80211_tx_rate *ar = info->status.rates;
- struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
- struct minstrel_priv *mp = priv;
-+ u32 update_interval = mp->update_interval / 2;
- bool last, update = false;
- bool sample_status = false;
- int i;
-@@ -943,6 +946,10 @@ minstrel_ht_tx_status(void *priv, struct
-
- switch (mi->sample_mode) {
- case MINSTREL_SAMPLE_IDLE:
-+ if (mp->new_avg &&
-+ (mp->hw->max_rates > 1 ||
-+ mi->total_packets_cur < SAMPLE_SWITCH_THR))
-+ update_interval /= 2;
- break;
-
- case MINSTREL_SAMPLE_ACTIVE:
-@@ -983,8 +990,7 @@ minstrel_ht_tx_status(void *priv, struct
- }
- }
-
-- if (time_after(jiffies, mi->last_stats_update +
-- mp->update_interval / 2)) {
-+ if (time_after(jiffies, mi->last_stats_update + update_interval)) {
- update = true;
- minstrel_ht_update_stats(mp, mi, true);
- }
-@@ -1665,6 +1671,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
-
- mp->hw = hw;
- mp->update_interval = HZ / 10;
-+ mp->new_avg = true;
-
- #ifdef CPTCFG_MAC80211_DEBUGFS
- mp->fixed_rate_idx = (u32) -1;
-@@ -1672,6 +1679,8 @@ minstrel_ht_alloc(struct ieee80211_hw *h
- &mp->fixed_rate_idx);
- debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
- &mp->sample_switch);
-+ debugfs_create_bool("new_avg", S_IRUGO | S_IWUSR, debugfsdir,
-+ &mp->new_avg);
- #endif
-
- minstrel_ht_init_cck_rates(mp);
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Tue, 8 Oct 2019 18:54:46 +0200
-Subject: [PATCH] mac80211: minstrel_ht: rename prob_ewma to prob_avg, use it
- for the new average
-
-Reduces per-rate data structure size
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/mac80211/rc80211_minstrel.c
-+++ b/net/mac80211/rc80211_minstrel.c
-@@ -70,7 +70,7 @@ rix_to_ndx(struct minstrel_sta_info *mi,
- }
-
- /* return current EMWA throughput */
--int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
-+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg)
- {
- int usecs;
-
-@@ -79,13 +79,13 @@ int minstrel_get_tp_avg(struct minstrel_
- usecs = 1000000;
-
- /* reset thr. below 10% success */
-- if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
-+ if (mr->stats.prob_avg < MINSTREL_FRAC(10, 100))
- return 0;
-
-- if (prob_ewma > MINSTREL_FRAC(90, 100))
-+ if (prob_avg > MINSTREL_FRAC(90, 100))
- return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
- else
-- return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
-+ return MINSTREL_TRUNC(100000 * (prob_avg / usecs));
- }
-
- /* find & sort topmost throughput rates */
-@@ -98,8 +98,8 @@ minstrel_sort_best_tp_rates(struct minst
-
- for (j = MAX_THR_RATES; j > 0; --j) {
- tmp_mrs = &mi->r[tp_list[j - 1]].stats;
-- if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
-- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
-+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_avg) <=
-+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_avg))
- break;
- }
-
-@@ -166,15 +166,15 @@ minstrel_calc_rate_stats(struct minstrel
- mrs->sample_skipped = 0;
- cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
- if (mp->new_avg) {
-- mrs->prob_ewma = minstrel_filter_avg_add(&mrs->avg,
-- cur_prob);
-+ minstrel_filter_avg_add(&mrs->prob_avg,
-+ &mrs->prob_avg_1, cur_prob);
- } else if (unlikely(!mrs->att_hist)) {
-- mrs->prob_ewma = cur_prob;
-+ mrs->prob_avg = cur_prob;
- } else {
- /*update exponential weighted moving avarage */
-- mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
-- cur_prob,
-- EWMA_LEVEL);
-+ mrs->prob_avg = minstrel_ewma(mrs->prob_avg,
-+ cur_prob,
-+ EWMA_LEVEL);
- }
- mrs->att_hist += mrs->attempts;
- mrs->succ_hist += mrs->success;
-@@ -208,8 +208,8 @@ minstrel_update_stats(struct minstrel_pr
-
- /* Sample less often below the 10% chance of success.
- * Sample less often above the 95% chance of success. */
-- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
-- mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
-+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
-+ mrs->prob_avg < MINSTREL_FRAC(10, 100)) {
- mr->adjusted_retry_count = mrs->retry_count >> 1;
- if (mr->adjusted_retry_count > 2)
- mr->adjusted_retry_count = 2;
-@@ -229,14 +229,14 @@ minstrel_update_stats(struct minstrel_pr
- * choose the maximum throughput rate as max_prob_rate
- * (2) if all success probabilities < 95%, the rate with
- * highest success probability is chosen as max_prob_rate */
-- if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
-- tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-+ if (mrs->prob_avg >= MINSTREL_FRAC(95, 100)) {
-+ tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_avg);
- tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
-- tmp_mrs->prob_ewma);
-+ tmp_mrs->prob_avg);
- if (tmp_cur_tp >= tmp_prob_tp)
- tmp_prob_rate = i;
- } else {
-- if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
-+ if (mrs->prob_avg >= tmp_mrs->prob_avg)
- tmp_prob_rate = i;
- }
- }
-@@ -426,7 +426,7 @@ minstrel_get_rate(void *priv, struct iee
- * has a probability of >95%, we shouldn't be attempting
- * to use it, as this only wastes precious airtime */
- if (!mrr_capable &&
-- (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
-+ (mi->r[ndx].stats.prob_avg > MINSTREL_FRAC(95, 100)))
- return;
-
- mi->prev_sample = true;
-@@ -577,7 +577,7 @@ static u32 minstrel_get_expected_through
- * computing cur_tp
- */
- tmp_mrs = &mi->r[idx].stats;
-- tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
-+ tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_avg) * 10;
- tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
-
- return tmp_cur_tp;
---- a/net/mac80211/rc80211_minstrel.h
-+++ b/net/mac80211/rc80211_minstrel.h
-@@ -47,14 +47,10 @@ minstrel_ewma(int old, int new, int weig
- return old + incr;
- }
-
--struct minstrel_avg_ctx {
-- s32 prev[2];
--};
--
--static inline int minstrel_filter_avg_add(struct minstrel_avg_ctx *ctx, s32 in)
-+static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
- {
-- s32 out_1 = ctx->prev[0];
-- s32 out_2 = ctx->prev[1];
-+ s32 out_1 = *prev_1;
-+ s32 out_2 = *prev_2;
- s32 val;
-
- if (!in)
-@@ -76,8 +72,8 @@ static inline int minstrel_filter_avg_ad
- val = 1;
-
- out:
-- ctx->prev[1] = out_1;
-- ctx->prev[0] = val;
-+ *prev_2 = out_1;
-+ *prev_1 = val;
-
- return val;
- }
-@@ -90,10 +86,9 @@ struct minstrel_rate_stats {
- /* total attempts/success counters */
- u32 att_hist, succ_hist;
-
-- struct minstrel_avg_ctx avg;
--
-- /* prob_ewma - exponential weighted moving average of prob */
-- u16 prob_ewma;
-+ /* prob_avg - moving average of prob */
-+ u16 prob_avg;
-+ u16 prob_avg_1;
-
- /* maximum retry counts */
- u8 retry_count;
-@@ -181,7 +176,7 @@ void minstrel_add_sta_debugfs(void *priv
- /* Recalculate success probabilities and counters for a given rate using EWMA */
- void minstrel_calc_rate_stats(struct minstrel_priv *mp,
- struct minstrel_rate_stats *mrs);
--int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
-+int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_avg);
-
- /* debugfs */
- int minstrel_stats_open(struct inode *inode, struct file *file);
---- a/net/mac80211/rc80211_minstrel_debugfs.c
-+++ b/net/mac80211/rc80211_minstrel_debugfs.c
-@@ -90,8 +90,8 @@ minstrel_stats_open(struct inode *inode,
- p += sprintf(p, "%6u ", mr->perfect_tx_time);
-
- tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
-- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
-+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
-+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
-
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
- " %3u %3u %-3u "
-@@ -147,8 +147,8 @@ minstrel_stats_csv_open(struct inode *in
- p += sprintf(p, "%u,",mr->perfect_tx_time);
-
- tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
-- tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
-+ tp_avg = minstrel_get_tp_avg(mr, mrs->prob_avg);
-+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
-
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
- "%llu,%llu,%d,%d\n",
---- a/net/mac80211/rc80211_minstrel_ht.c
-+++ b/net/mac80211/rc80211_minstrel_ht.c
-@@ -346,12 +346,12 @@ minstrel_ht_avg_ampdu_len(struct minstre
- */
- int
- minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
-- int prob_ewma)
-+ int prob_avg)
- {
- unsigned int nsecs = 0;
-
- /* do not account throughput if sucess prob is below 10% */
-- if (prob_ewma < MINSTREL_FRAC(10, 100))
-+ if (prob_avg < MINSTREL_FRAC(10, 100))
- return 0;
-
- if (group != MINSTREL_CCK_GROUP)
-@@ -365,11 +365,11 @@ minstrel_ht_get_tp_avg(struct minstrel_h
- * account for collision related packet error rate fluctuation
- * (prob is scaled - see MINSTREL_FRAC above)
- */
-- if (prob_ewma > MINSTREL_FRAC(90, 100))
-+ if (prob_avg > MINSTREL_FRAC(90, 100))
- return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
- / nsecs));
- else
-- return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
-+ return MINSTREL_TRUNC(100000 * ((prob_avg * 1000) / nsecs));
- }
-
- /*
-@@ -389,13 +389,13 @@ minstrel_ht_sort_best_tp_rates(struct mi
-
- cur_group = index / MCS_GROUP_RATES;
- cur_idx = index % MCS_GROUP_RATES;
-- cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
-+ cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
- cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
-
- do {
- tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
- tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
-- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
-+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
- tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
- tmp_prob);
- if (cur_tp_avg < tmp_tp_avg ||
-@@ -432,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct mi
-
- tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
- tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
-- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
-+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
- tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
-
- /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
-@@ -444,11 +444,11 @@ minstrel_ht_set_best_prob_rate(struct mi
-
- max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
- max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-- max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
-+ max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
-
-- if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
-+ if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
- cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
-- mrs->prob_ewma);
-+ mrs->prob_avg);
- if (cur_tp_avg > tmp_tp_avg)
- mi->max_prob_rate = index;
-
-@@ -458,9 +458,9 @@ minstrel_ht_set_best_prob_rate(struct mi
- if (cur_tp_avg > max_gpr_tp_avg)
- mg->max_group_prob_rate = index;
- } else {
-- if (mrs->prob_ewma > tmp_prob)
-+ if (mrs->prob_avg > tmp_prob)
- mi->max_prob_rate = index;
-- if (mrs->prob_ewma > max_gpr_prob)
-+ if (mrs->prob_avg > max_gpr_prob)
- mg->max_group_prob_rate = index;
- }
- }
-@@ -482,12 +482,12 @@ minstrel_ht_assign_best_tp_rates(struct
-
- tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
- tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
-- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
-+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
- tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
-
- tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
- tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
-- tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
-+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
- tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
-
- if (tmp_cck_tp_rate && tmp_cck_tp > tmp_mcs_tp) {
-@@ -518,7 +518,7 @@ minstrel_ht_prob_rate_reduce_streams(str
- continue;
-
- tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-- tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;
-+ tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
-
- if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
- (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
-@@ -623,7 +623,7 @@ minstrel_ht_rate_sample_switch(struct mi
- * If that fails, look again for a rate that is at least as fast
- */
- mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
-- faster_rate = mrs->prob_ewma > MINSTREL_FRAC(75, 100);
-+ faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
- minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
- if (!n_rates && faster_rate)
- minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
-@@ -738,7 +738,7 @@ minstrel_ht_update_stats(struct minstrel
- mrs = &mg->rates[i];
- mrs->retry_updated = false;
- minstrel_calc_rate_stats(mp, mrs);
-- cur_prob = mrs->prob_ewma;
-+ cur_prob = mrs->prob_avg;
-
- if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
- continue;
-@@ -1012,7 +1012,7 @@ minstrel_calc_retransmit(struct minstrel
- unsigned int overhead = 0, overhead_rtscts = 0;
-
- mrs = minstrel_get_ratestats(mi, index);
-- if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
-+ if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
- mrs->retry_count = 1;
- mrs->retry_count_rtscts = 1;
- return;
-@@ -1069,7 +1069,7 @@ minstrel_ht_set_rate(struct minstrel_pri
- if (!mrs->retry_updated)
- minstrel_calc_retransmit(mp, mi, index);
-
-- if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
-+ if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
- ratetbl->rate[offset].count = 2;
- ratetbl->rate[offset].count_rts = 2;
- ratetbl->rate[offset].count_cts = 2;
-@@ -1103,11 +1103,11 @@ minstrel_ht_set_rate(struct minstrel_pri
- }
-
- static inline int
--minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
-+minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
- {
- int group = rate / MCS_GROUP_RATES;
- rate %= MCS_GROUP_RATES;
-- return mi->groups[group].rates[rate].prob_ewma;
-+ return mi->groups[group].rates[rate].prob_avg;
- }
-
- static int
-@@ -1119,7 +1119,7 @@ minstrel_ht_get_max_amsdu_len(struct min
- unsigned int duration;
-
- /* Disable A-MSDU if max_prob_rate is bad */
-- if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
-+ if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
- return 1;
-
- duration = g->duration[rate];
-@@ -1142,7 +1142,7 @@ minstrel_ht_get_max_amsdu_len(struct min
- * data packet size
- */
- if (duration > MCS_DURATION(1, 0, 260) ||
-- (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
-+ (minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
- MINSTREL_FRAC(75, 100)))
- return 3200;
-
-@@ -1247,7 +1247,7 @@ minstrel_get_sample_rate(struct minstrel
- * rate, to avoid wasting airtime.
- */
- sample_dur = minstrel_get_duration(sample_idx);
-- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
-+ if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
- minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
- return -1;
-
-@@ -1705,7 +1705,7 @@ static u32 minstrel_ht_get_expected_thro
-
- i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
- j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
-- prob = mi->groups[i].rates[j].prob_ewma;
-+ prob = mi->groups[i].rates[j].prob_avg;
-
- /* convert tp_avg from pkt per second in kbps */
- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
---- a/net/mac80211/rc80211_minstrel_ht.h
-+++ b/net/mac80211/rc80211_minstrel_ht.h
-@@ -119,6 +119,6 @@ struct minstrel_ht_sta_priv {
-
- void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
- int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
-- int prob_ewma);
-+ int prob_avg);
-
- #endif
---- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
-+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
-@@ -98,8 +98,8 @@ minstrel_ht_stats_dump(struct minstrel_h
- p += sprintf(p, "%6u ", tx_time);
-
- tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
-- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
-+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
-+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
-
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
- " %3u %3u %-3u "
-@@ -243,8 +243,8 @@ minstrel_ht_stats_csv_dump(struct minstr
- p += sprintf(p, "%u,", tx_time);
-
- tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
-- tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-- eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
-+ tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_avg);
-+ eprob = MINSTREL_TRUNC(mrs->prob_avg * 1000);
-
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
- "%u,%llu,%llu,",
+++ /dev/null
-From b478e06a16a8baa00c5ecc87c1d636981f2206d5 Mon Sep 17 00:00:00 2001
-From: Johannes Berg <johannes.berg@intel.com>
-Date: Tue, 29 Oct 2019 10:25:25 +0100
-Subject: [PATCH] mac80211: sta: randomize BA session dialog token allocator
-
-We currently always start the dialog token generator at zero,
-so the first dialog token we use is always 1. This would be
-OK if we had a perfect guarantee that we always do a proper
-deauth/re-auth handshake, but in IBSS mode this doesn't always
-happen properly.
-
-To make problems with block ack (aggregation) sessions getting
-stuck less likely, randomize the dialog token so if we start a
-new session but the peer still has old state for us, it can
-better detect this.
-
-This is really just a workaround to make things a bit more
-robust than they are now - a better fix would be to do a full
-authentication handshake in IBSS mode upon having discovered a
-new station, and on the receiver resetting the state (removing
-and re-adding the station) on receiving the authentication
-packet.
-
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
----
- net/mac80211/sta_info.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/net/mac80211/sta_info.c
-+++ b/net/mac80211/sta_info.c
-@@ -324,6 +324,7 @@ struct sta_info *sta_info_alloc(struct i
- INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
- INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
- mutex_init(&sta->ampdu_mlme.mtx);
-+ sta->ampdu_mlme.dialog_token_allocator = prandom_u32_max(U8_MAX);
- #ifdef CPTCFG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
- sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);