From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 22 Jan 2021 23:57:50 +0100
Subject: [PATCH] mac80211: minstrel_ht: significantly redesign the rate
 probing strategy

The biggest flaw in the current minstrel_ht code is that it needs far too
many probing packets to quickly find the best rate. Depending on the wifi
hardware and operating mode, this can significantly reduce throughput
when not operating at the highest available data rate.

In order to significantly reduce the amount of rate sampling, we need a
much smarter selection of probing rates.

The new approach introduced by this patch maintains a limited set of
available rates to be tested during a statistics window.

They are split into distinct categories:
- MINSTREL_SAMPLE_TYPE_INC - incremental rate upgrade:
  Pick the next rate group and find the first rate that is faster than
  the current max throughput rate
- MINSTREL_SAMPLE_TYPE_JUMP - random testing of higher rates:
  Pick a random rate from the next group that is faster than the current
  max throughput rate. This allows faster adaptation when the link
  changes significantly
- MINSTREL_SAMPLE_TYPE_SLOW - test a rate between max_prob, max_tp2 and
  max_tp in order to reduce the gap between them (see the sketch after
  this list)

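To make the category boundaries concrete: a rate qualifies for INC/JUMP
only if its airtime duration is shorter than that of every current top
rate, and for SLOW only if it falls between the fastest and the slowest
of them. The following minimal standalone sketch (not part of the patch;
the *_example names are hypothetical) mirrors the classification done by
minstrel_ht_move_sample_rates() in the diff below:

    #include <stdbool.h>

    enum sample_type_example { SAMPLE_INC, SAMPLE_JUMP, SAMPLE_SLOW };

    /* fast_rate_dur is the shortest airtime duration among max_tp,
     * max_tp2 and max_prob; slow_rate_dur is the longest. A shorter
     * duration means a faster rate. */
    static bool rate_fits_type_example(enum sample_type_example type,
                                       unsigned int duration,
                                       unsigned int fast_rate_dur,
                                       unsigned int slow_rate_dur)
    {
            switch (type) {
            case SAMPLE_SLOW:
                    /* between the fastest and the slowest top rate */
                    return duration > fast_rate_dur &&
                           duration < slow_rate_dur;
            case SAMPLE_INC:
            case SAMPLE_JUMP:
                    /* strictly faster than all current top rates */
                    return duration < fast_rate_dur;
            }
            return false;
    }
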
In order to prioritize sampling, every 6 attempts are split into
3x INC, 2x JUMP, 1x SLOW (sketched below).

Available rates are checked and refilled on every stats window update.

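As a quick illustration of that 3x INC / 2x JUMP / 1x SLOW split, this
minimal standalone sketch (not part of the patch; seq_example is a
hypothetical stand-in) cycles through a fixed table the same way
minstrel_sample_seq[] and minstrel_ht_get_sample_rate() do in the diff
below:

    #include <stdio.h>

    enum { INC, JUMP, SLOW };

    static const int seq_example[] = { INC, JUMP, INC, JUMP, INC, SLOW };
    #define SEQ_LEN (sizeof(seq_example) / sizeof(seq_example[0]))

    int main(void)
    {
            static const char * const names[] = { "INC", "JUMP", "SLOW" };
            unsigned int pos = 0;
            int i;

            /* twelve attempts: two full 3x INC, 2x JUMP, 1x SLOW rounds */
            for (i = 0; i < 12; i++) {
                    printf("attempt %2d -> %s\n", i, names[seq_example[pos]]);
                    pos = (pos + 1) % SEQ_LEN;
            }
            return 0;
    }
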
With this approach, we finally get a very small delta in throughput when
comparing setting the optimal data rate as a fixed rate vs. normal rate
selection.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -266,6 +266,14 @@ const struct mcs_group minstrel_mcs_grou
 const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 };
 const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
+static const u8 minstrel_sample_seq[] = {
+	MINSTREL_SAMPLE_TYPE_INC,
+	MINSTREL_SAMPLE_TYPE_JUMP,
+	MINSTREL_SAMPLE_TYPE_INC,
+	MINSTREL_SAMPLE_TYPE_JUMP,
+	MINSTREL_SAMPLE_TYPE_INC,
+	MINSTREL_SAMPLE_TYPE_SLOW,
+};
 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
@@ -620,77 +628,31 @@ minstrel_ht_prob_rate_reduce_streams(str
-minstrel_ht_probe_group(struct minstrel_ht_sta *mi, const struct mcs_group *tp_group,
-			int tp_idx, const struct mcs_group *group)
-	if (group->bw < tp_group->bw)
-	if (group->streams == tp_group->streams)
-	if (tp_idx < 4 && group->streams == tp_group->streams - 1)
-	return group->streams == tp_group->streams + 1;
-minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rates,
+__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi,
+			      enum minstrel_sample_type type)
-	const struct mcs_group *group, *tp_group;
-	tp_group = &minstrel_mcs_groups[MI_RATE_GROUP(mi->max_tp_rate[0])];
-	tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
-	max_dur = minstrel_get_duration(mi->max_tp_rate[0]);
-	max_dur -= max_dur / 16;
-	for (g = 0; g < MINSTREL_GROUPS_NB; g++) {
-		u16 supported = mi->supported[g];
+	u16 *rates = mi->sample[type].sample_rates;
-		group = &minstrel_mcs_groups[g];
-		if (!minstrel_ht_probe_group(mi, tp_group, tp_idx, group))
+	for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
-		for (i = 0; supported; supported >>= 1, i++) {
-			if (!(supported & 1))
-			if ((group->duration[i] << group->shift) > max_dur)
-			idx = MI_RATE(g, i);
-			if (idx == mi->max_tp_rate[0])
-			rates[(*n_rates)++] = idx;
 minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
 			       struct minstrel_ht_sta *mi)
-	struct minstrel_rate_stats *mrs;
-	u16 rates[MINSTREL_GROUPS_NB];
-	int probe_rate = 0;
 	 * Use rate switching instead of probing packets for devices with
@@ -699,43 +661,11 @@ minstrel_ht_rate_sample_switch(struct mi
 	if (mp->hw->max_rates > 1)
-	 * If the current EWMA prob is >75%, look for a rate that's 6.25%
-	 * faster than the max tp rate.
-	 * If that fails, look again for a rate that is at least as fast
-	mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
-	faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
-	minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
-	if (!n_rates && faster_rate)
-		minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
-	/* If no suitable rate was found, try to pick the next one in the group */
-		int g_idx = MI_RATE_GROUP(mi->max_tp_rate[0]);
-		u16 supported = mi->supported[g_idx];
-		supported >>= MI_RATE_IDX(mi->max_tp_rate[0]);
-		for (i = 0; supported; supported >>= 1, i++) {
-			if (!(supported & 1))
-			probe_rate = mi->max_tp_rate[0] + i;
+	rate = __minstrel_ht_get_sample_rate(mi, MINSTREL_SAMPLE_TYPE_INC);
-	random = prandom_u32();
-	i = random % n_rates;
-	probe_rate = rates[i];
-	mi->sample_rate = probe_rate;
+	mi->sample_rate = rate;
 	mi->sample_mode = MINSTREL_SAMPLE_ACTIVE;
@@ -804,6 +734,274 @@ minstrel_ht_calc_rate_stats(struct minst
+minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx)
+	for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
+		u16 cur = mi->sample[type].sample_rates[i];
+minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type,
+			      u32 fast_rate_dur, u32 slow_rate_dur)
+	u16 *rates = mi->sample[type].sample_rates;
+	for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) {
+		bool valid = false;
+		duration = minstrel_get_duration(cur);
+		case MINSTREL_SAMPLE_TYPE_SLOW:
+			valid = duration > fast_rate_dur &&
+				duration < slow_rate_dur;
+		case MINSTREL_SAMPLE_TYPE_INC:
+		case MINSTREL_SAMPLE_TYPE_JUMP:
+			valid = duration < fast_rate_dur;
+minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
+	u16 supported = mi->supported[group];
+	for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) {
+		if (!(supported & BIT(0)))
+		if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration)
+ * Incremental update rates:
+ * Flip through groups and pick the first group rate that is faster than the
+ * highest currently selected rate
+minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
+	struct minstrel_mcs_group_data *mg;
+	u8 type = MINSTREL_SAMPLE_TYPE_INC;
+	group = mi->sample[type].sample_group;
+	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
+		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
+		mg = &mi->groups[group];
+		index = minstrel_ht_group_min_rate_offset(mi, group,
+		index = MI_RATE(group, index & 0xf);
+		if (!minstrel_ht_find_sample_rate(mi, type, index))
+	mi->sample[type].sample_group = group;
+minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group,
+				   u16 supported, int offset)
+	struct minstrel_mcs_group_data *mg = &mi->groups[group];
+	for (i = 0; i < MCS_GROUP_RATES; i++) {
+		idx = sample_table[mg->column][mg->index];
+		if (++mg->index >= MCS_GROUP_RATES) {
+			if (++mg->column >= ARRAY_SIZE(sample_table))
+		if (!(supported & BIT(idx)))
+	return MI_RATE(group, idx);
+ * Sample random rates, use those that are faster than the highest
+ * currently selected rate. Rates between the fastest and the slowest
+ * get sorted into the slow sample bucket, but only if it has room
+minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
+			   u32 slow_rate_dur, int *slow_rate_ofs)
+	struct minstrel_mcs_group_data *mg;
+	struct minstrel_rate_stats *mrs;
+	u32 max_duration = slow_rate_dur;
+	int i, index, offset;
+	if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+		max_duration = fast_rate_dur;
+	slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
+	group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
+	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
+		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
+		mg = &mi->groups[group];
+		supported = mi->supported[group];
+		offset = minstrel_ht_group_min_rate_offset(mi, group,
+		index = minstrel_ht_next_group_sample_rate(mi, group, supported,
+		duration = minstrel_get_duration(index);
+		if (duration < fast_rate_dur)
+			type = MINSTREL_SAMPLE_TYPE_JUMP;
+			type = MINSTREL_SAMPLE_TYPE_SLOW;
+		if (minstrel_ht_find_sample_rate(mi, type, index))
+		if (type == MINSTREL_SAMPLE_TYPE_JUMP)
+		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+		if (duration >= slow_rate_dur)
+		/* skip slow rates with high success probability */
+		mrs = minstrel_get_ratestats(mi, index);
+		if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
+		slow_rates[(*slow_rate_ofs)++] = index;
+		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
+			max_duration = fast_rate_dur;
+	mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = group;
+minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
+	u32 prob_dur = minstrel_get_duration(mi->max_prob_rate);
+	u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]);
+	u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]);
+	u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur);
+	u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur);
+	rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
+	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
+					  fast_rate_dur, slow_rate_dur);
+	while (i < MINSTREL_SAMPLE_RATES) {
+		rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
+	rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
+	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
+					  fast_rate_dur, slow_rate_dur);
+	j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
+					  fast_rate_dur, slow_rate_dur);
+	while (i < MINSTREL_SAMPLE_RATES) {
+		rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
+						      slow_rate_dur, &j);
+	for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
+		memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
+		       sizeof(mi->sample[i].cur_sample_rates));
  * Update rate statistics and select new primary rates
@@ -848,8 +1046,6 @@ minstrel_ht_update_stats(struct minstrel
 	mi->ampdu_packets = 0;
-	mi->sample_count = 0;
 	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
 	memset(tmp_legacy_tp_rate, 0, sizeof(tmp_legacy_tp_rate));
@@ -885,8 +1081,6 @@ minstrel_ht_update_stats(struct minstrel
 		if (!mi->supported[group])
-		mi->sample_count++;
 		/* (re)Initialize group rate indexes */
 		for(j = 0; j < MAX_THR_RATES; j++)
 			tmp_group_tp_rate[j] = MI_RATE(group, 0);
@@ -953,9 +1147,7 @@ minstrel_ht_update_stats(struct minstrel
 	/* Try to increase robustness of max_prob_rate*/
 	minstrel_ht_prob_rate_reduce_streams(mi);
-	/* try to sample half of all available rates during each interval */
-	mi->sample_count *= 4;
+	minstrel_ht_refill_sample_rates(mi);
 		minstrel_ht_rate_sample_switch(mp, mi);
@@ -972,6 +1164,7 @@ minstrel_ht_update_stats(struct minstrel
 	/* Reset update timer */
 	mi->last_stats_update = jiffies;
+	mi->sample_time = jiffies;
@@ -1002,28 +1195,6 @@ minstrel_ht_txstat_valid(struct minstrel
-minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
-	struct minstrel_mcs_group_data *mg;
-	mi->sample_group++;
-	mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
-	mg = &mi->groups[mi->sample_group];
-	if (!mi->supported[mi->sample_group])
-	if (++mg->index >= MCS_GROUP_RATES) {
-		if (++mg->column >= ARRAY_SIZE(sample_table))
 minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
 	int group, orig_group;
@@ -1108,14 +1279,6 @@ minstrel_ht_tx_status(void *priv, struct
 	mi->ampdu_len += info->status.ampdu_len;
-	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
-		int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi);
-		mi->sample_wait = 16 + 2 * avg_ampdu_len;
-		mi->sample_tries = 1;
-		mi->sample_count--;
 	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
 		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);
@@ -1387,97 +1550,20 @@ minstrel_ht_update_rates(struct minstrel
 	rate_control_set_rates(mp->hw, mi->sta, rates);
-minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
-	struct minstrel_rate_stats *mrs;
-	struct minstrel_mcs_group_data *mg;
-	unsigned int sample_dur, sample_group, cur_max_tp_streams;
-	int tp_rate1, tp_rate2;
-	int sample_idx = 0;
-	if (mp->hw->max_rates == 1 && mp->sample_switch &&
-	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
-	     mp->sample_switch == 1))
-	if (mi->sample_wait > 0) {
-	if (!mi->sample_tries)
-	sample_group = mi->sample_group;
-	mg = &mi->groups[sample_group];
-	sample_idx = sample_table[mg->column][mg->index];
-	minstrel_set_next_sample_idx(mi);
-	if (!(mi->supported[sample_group] & BIT(sample_idx)))
-	mrs = &mg->rates[sample_idx];
-	sample_idx += MI_RATE(sample_group, 0);
-	tp_rate1 = mi->max_tp_rate[0];
-	/* Set tp_rate2 to the second highest max_tp_rate */
-	if (minstrel_get_duration(mi->max_tp_rate[0]) >
-	    minstrel_get_duration(mi->max_tp_rate[1])) {
-		tp_rate2 = mi->max_tp_rate[0];
+	if (mp->hw->max_rates > 1) {
+		seq = mi->sample_seq;
+		mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq);
+		seq = minstrel_sample_seq[seq];
-		tp_rate2 = mi->max_tp_rate[1];
+		seq = MINSTREL_SAMPLE_TYPE_INC;
-	 * Sampling might add some overhead (RTS, no aggregation)
-	 * to the frame. Hence, don't use sampling for the highest currently
-	 * used highest throughput or probability rate.
-	if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
-	 * Do not sample if the probability is already higher than 95%,
-	 * or if the rate is 3 times slower than the current max probability
-	 * rate, to avoid wasting airtime.
-	sample_dur = minstrel_get_duration(sample_idx);
-	if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
-	    minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
-	 * For devices with no configurable multi-rate retry, skip sampling
-	 * below the per-group max throughput rate, and only use one sampling
-	if (mp->hw->max_rates == 1 &&
-	    (minstrel_get_duration(mg->max_group_tp_rate[0]) < sample_dur ||
-	/* Skip already sampled slow rates */
-	if (sample_dur >= minstrel_get_duration(tp_rate1) && mrs->attempts)
-	 * Make sure that lower rates get sampled only occasionally,
-	 * if the link is working perfectly.
-	cur_max_tp_streams = minstrel_mcs_groups[MI_RATE_GROUP(tp_rate1)].streams;
-	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
-	    (cur_max_tp_streams - 1 <
-	     minstrel_mcs_groups[sample_group].streams ||
-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
-	mi->sample_tries--;
+	return __minstrel_ht_get_sample_rate(mi, seq);
@@ -1489,7 +1575,7 @@ minstrel_ht_get_rate(void *priv, struct
 	struct ieee80211_tx_rate *rate = &info->status.rates[0];
 	struct minstrel_ht_sta *mi = priv_sta;
 	struct minstrel_priv *mp = priv;
 	if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
 	    !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate)))
@@ -1505,11 +1591,19 @@ minstrel_ht_get_rate(void *priv, struct
 	/* Don't use EAPOL frames for sampling on non-mrr hw */
 	if (mp->hw->max_rates == 1 &&
 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
-	sample_idx = minstrel_get_sample_rate(mp, mi);
-	if (sample_idx < 0)
+	if (mp->hw->max_rates == 1 && mp->sample_switch &&
+	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
+	     mp->sample_switch == 1))
+	if (time_is_before_jiffies(mi->sample_time))
+	mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
+	sample_idx = minstrel_ht_get_sample_rate(mp, mi);
 	sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)];
@@ -1630,16 +1724,6 @@ minstrel_ht_update_caps(void *priv, stru
 	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
-	/* When using MRR, sample more on the first attempt, without delay */
-		mi->sample_count = 16;
-		mi->sample_wait = 0;
-		mi->sample_count = 8;
-		mi->sample_wait = 8;
-	mi->sample_tries = 4;
 	stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
 		IEEE80211_HT_CAP_RX_STBC_SHIFT;
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
 #define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate)
 #define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate)
+#define MINSTREL_SAMPLE_RATES		5 /* rates per sample type */
+#define MINSTREL_SAMPLE_INTERVAL	(HZ / 50)
 struct minstrel_priv {
 	struct ieee80211_hw *hw;
@@ -126,6 +128,13 @@ struct minstrel_rate_stats {
+enum minstrel_sample_type {
+	MINSTREL_SAMPLE_TYPE_INC,
+	MINSTREL_SAMPLE_TYPE_JUMP,
+	MINSTREL_SAMPLE_TYPE_SLOW,
+	__MINSTREL_SAMPLE_TYPE_MAX
+};
 struct minstrel_mcs_group_data {
@@ -144,6 +153,12 @@ enum minstrel_sample_mode {
 	MINSTREL_SAMPLE_PENDING,
+struct minstrel_sample_category {
+	u8 sample_group;
+	u16 sample_rates[MINSTREL_SAMPLE_RATES];
+	u16 cur_sample_rates[MINSTREL_SAMPLE_RATES];
+};
 struct minstrel_ht_sta {
 	struct ieee80211_sta *sta;
@@ -175,16 +190,14 @@ struct minstrel_ht_sta {
 	/* tx flags to add for frames for this sta */
+	unsigned long sample_time;
+	struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX];
 	enum minstrel_sample_mode sample_mode;
-	/* current MCS group to be sampled */
 	/* Bitfield of supported MCS rates of all groups */