1 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@redhat.com>
2 Date: Wed, 23 Jun 2021 15:47:55 +0200
3 Subject: [PATCH] mac80211: Switch to a virtual time-based airtime scheduler
5 Content-Type: text/plain; charset=UTF-8
6 Content-Transfer-Encoding: 8bit
8 This switches the airtime scheduler in mac80211 to use a virtual
9 time-based scheduler instead of the round-robin scheduler used before.
10 This has a couple of advantages:
12 - No need to sync up the round-robin scheduler in firmware/hardware with
13 the round-robin airtime scheduler.
15 - If several stations are eligible for transmission we can schedule both
16 of them; no need to hard-block the scheduling rotation until the head
17 of the queue has used up its quantum.
19 - The check of whether a station is eligible for transmission becomes
20 simpler (in ieee80211_txq_may_transmit()).
22 The drawback is that scheduling becomes slightly more expensive, as we
23 need to maintain an rbtree of TXQs sorted by virtual time. This means
24 that ieee80211_register_airtime() becomes O(logN) in the number of
25 currently scheduled TXQs because it can change the order of the
26 scheduled stations. We mitigate this overhead by only resorting when a
27 station changes position in the tree, and hopefully N rarely grows too
28 big (it's only TXQs currently backlogged, not all associated stations),
29 so it shouldn't be too big of an issue.
31 To prevent divisions in the fast path, we maintain both station sums and
32 pre-computed reciprocals of the sums. This turns the fast-path operation
33 into a multiplication, with divisions only happening as the number of
34 active stations change (to re-compute the current sum of all active
35 station weights). To prevent this re-computation of the reciprocal from
36 happening too frequently, we use a time-based notion of station
37 activity, instead of updating the weight every time a station gets
38 scheduled or de-scheduled. As queues can oscillate between empty and
39 occupied quite frequently, this can significantly cut down on the number
40 of re-computations. It also has the added benefit of making the station
41 airtime calculation independent of whether the queue happened to have
42 drained at the time an airtime value was accounted.
44 Co-developed-by: Yibo Zhao <yiboz@codeaurora.org>
45 Signed-off-by: Yibo Zhao <yiboz@codeaurora.org>
46 Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
47 Link: https://lore.kernel.org/r/20210623134755.235545-1-toke@redhat.com
48 Signed-off-by: Johannes Berg <johannes.berg@intel.com>
51 --- a/include/net/mac80211.h
52 +++ b/include/net/mac80211.h
53 @@ -6552,9 +6552,6 @@ static inline void ieee80211_txq_schedul
57 -void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
58 - struct ieee80211_txq *txq, bool force);
61 * ieee80211_schedule_txq - schedule a TXQ for transmission
63 @@ -6567,11 +6564,7 @@ void __ieee80211_schedule_txq(struct iee
64 * The driver may call this function if it has buffered packets for
65 * this TXQ internally.
68 -ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
70 - __ieee80211_schedule_txq(hw, txq, true);
72 +void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
75 * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
76 @@ -6583,12 +6576,8 @@ ieee80211_schedule_txq(struct ieee80211_
77 * The driver may set force=true if it has buffered packets for this TXQ
81 -ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
84 - __ieee80211_schedule_txq(hw, txq, force);
86 +void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
90 * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
91 --- a/net/mac80211/cfg.c
92 +++ b/net/mac80211/cfg.c
93 @@ -1442,6 +1442,38 @@ static void sta_apply_mesh_params(struct
97 +static void sta_apply_airtime_params(struct ieee80211_local *local,
98 + struct sta_info *sta,
99 + struct station_parameters *params)
103 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
104 + struct airtime_sched_info *air_sched = &local->airtime[ac];
105 + struct airtime_info *air_info = &sta->airtime[ac];
106 + struct txq_info *txqi;
109 + spin_lock_bh(&air_sched->lock);
110 + for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
111 + if (air_info->weight == params->airtime_weight ||
112 + !sta->sta.txq[tid] ||
113 + ac != ieee80211_ac_from_tid(tid))
116 + airtime_weight_set(air_info, params->airtime_weight);
118 + txqi = to_txq_info(sta->sta.txq[tid]);
119 + if (RB_EMPTY_NODE(&txqi->schedule_order))
122 + ieee80211_update_airtime_weight(local, air_sched,
125 + spin_unlock_bh(&air_sched->lock);
129 static int sta_apply_parameters(struct ieee80211_local *local,
130 struct sta_info *sta,
131 struct station_parameters *params)
132 @@ -1629,7 +1661,8 @@ static int sta_apply_parameters(struct i
133 sta_apply_mesh_params(local, sta, params);
135 if (params->airtime_weight)
136 - sta->airtime_weight = params->airtime_weight;
137 + sta_apply_airtime_params(local, sta, params);
140 /* set the STA state after all sta info from usermode has been set */
141 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
142 --- a/net/mac80211/debugfs.c
143 +++ b/net/mac80211/debugfs.c
144 @@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct
148 - local->aql_txq_limit_low[IEEE80211_AC_VO],
149 - local->aql_txq_limit_high[IEEE80211_AC_VO],
150 - local->aql_txq_limit_low[IEEE80211_AC_VI],
151 - local->aql_txq_limit_high[IEEE80211_AC_VI],
152 - local->aql_txq_limit_low[IEEE80211_AC_BE],
153 - local->aql_txq_limit_high[IEEE80211_AC_BE],
154 - local->aql_txq_limit_low[IEEE80211_AC_BK],
155 - local->aql_txq_limit_high[IEEE80211_AC_BK]);
156 + local->airtime[IEEE80211_AC_VO].aql_txq_limit_low,
157 + local->airtime[IEEE80211_AC_VO].aql_txq_limit_high,
158 + local->airtime[IEEE80211_AC_VI].aql_txq_limit_low,
159 + local->airtime[IEEE80211_AC_VI].aql_txq_limit_high,
160 + local->airtime[IEEE80211_AC_BE].aql_txq_limit_low,
161 + local->airtime[IEEE80211_AC_BE].aql_txq_limit_high,
162 + local->airtime[IEEE80211_AC_BK].aql_txq_limit_low,
163 + local->airtime[IEEE80211_AC_BK].aql_txq_limit_high);
164 return simple_read_from_buffer(user_buf, count, ppos,
167 @@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struc
168 if (ac >= IEEE80211_NUM_ACS)
171 - q_limit_low_old = local->aql_txq_limit_low[ac];
172 - q_limit_high_old = local->aql_txq_limit_high[ac];
173 + q_limit_low_old = local->airtime[ac].aql_txq_limit_low;
174 + q_limit_high_old = local->airtime[ac].aql_txq_limit_high;
176 - local->aql_txq_limit_low[ac] = q_limit_low;
177 - local->aql_txq_limit_high[ac] = q_limit_high;
178 + local->airtime[ac].aql_txq_limit_low = q_limit_low;
179 + local->airtime[ac].aql_txq_limit_high = q_limit_high;
181 mutex_lock(&local->sta_mtx);
182 list_for_each_entry(sta, &local->sta_list, list) {
183 @@ -382,6 +382,46 @@ static const struct file_operations forc
184 .llseek = default_llseek,
187 +static ssize_t airtime_read(struct file *file,
188 + char __user *user_buf,
192 + struct ieee80211_local *local = file->private_data;
194 + u64 v_t[IEEE80211_NUM_ACS];
195 + u64 wt[IEEE80211_NUM_ACS];
198 + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
199 + spin_lock_bh(&local->airtime[ac].lock);
200 + v_t[ac] = local->airtime[ac].v_t;
201 + wt[ac] = local->airtime[ac].weight_sum;
202 + spin_unlock_bh(&local->airtime[ac].lock);
204 + len = scnprintf(buf, sizeof(buf),
206 + "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n"
207 + "Weight\t%-10llu %-10llu %-10llu %-10llu\n",
217 + return simple_read_from_buffer(user_buf, count, ppos,
221 +static const struct file_operations airtime_ops = {
222 + .read = airtime_read,
223 + .open = simple_open,
224 + .llseek = default_llseek,
228 static ssize_t reset_write(struct file *file, const char __user *user_buf,
229 size_t count, loff_t *ppos)
230 @@ -624,7 +664,11 @@ void debugfs_hw_add(struct ieee80211_loc
231 if (local->ops->wake_tx_queue)
232 DEBUGFS_ADD_MODE(aqm, 0600);
234 - DEBUGFS_ADD_MODE(airtime_flags, 0600);
235 + if (wiphy_ext_feature_isset(local->hw.wiphy,
236 + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
237 + DEBUGFS_ADD_MODE(airtime, 0600);
238 + DEBUGFS_ADD_MODE(airtime_flags, 0600);
241 DEBUGFS_ADD(aql_txq_limit);
242 debugfs_create_u32("aql_threshold", 0600,
243 --- a/net/mac80211/debugfs_netdev.c
244 +++ b/net/mac80211/debugfs_netdev.c
245 @@ -513,6 +513,34 @@ static ssize_t ieee80211_if_fmt_aqm(
247 IEEE80211_IF_FILE_R(aqm);
249 +static ssize_t ieee80211_if_fmt_airtime(
250 + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
252 + struct ieee80211_local *local = sdata->local;
253 + struct ieee80211_txq *txq = sdata->vif.txq;
254 + struct airtime_info *air_info;
260 + spin_lock_bh(&local->airtime[txq->ac].lock);
261 + air_info = to_airtime_info(txq);
262 + len = scnprintf(buf,
264 + "RX: %llu us\nTX: %llu us\nWeight: %u\n"
265 + "Virt-T: %lld us\n",
266 + air_info->rx_airtime,
267 + air_info->tx_airtime,
270 + spin_unlock_bh(&local->airtime[txq->ac].lock);
275 +IEEE80211_IF_FILE_R(airtime);
277 IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
279 /* IBSS attributes */
280 @@ -661,8 +689,10 @@ static void add_common_files(struct ieee
282 if (sdata->local->ops->wake_tx_queue &&
283 sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
284 - sdata->vif.type != NL80211_IFTYPE_NAN)
285 + sdata->vif.type != NL80211_IFTYPE_NAN) {
287 + DEBUGFS_ADD(airtime);
291 static void add_sta_files(struct ieee80211_sub_if_data *sdata)
292 --- a/net/mac80211/debugfs_sta.c
293 +++ b/net/mac80211/debugfs_sta.c
294 @@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f
296 char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
297 u64 rx_airtime = 0, tx_airtime = 0;
298 - s64 deficit[IEEE80211_NUM_ACS];
299 + u64 v_t[IEEE80211_NUM_ACS];
303 @@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct f
306 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
307 - spin_lock_bh(&local->active_txq_lock[ac]);
308 + spin_lock_bh(&local->airtime[ac].lock);
309 rx_airtime += sta->airtime[ac].rx_airtime;
310 tx_airtime += sta->airtime[ac].tx_airtime;
311 - deficit[ac] = sta->airtime[ac].deficit;
312 - spin_unlock_bh(&local->active_txq_lock[ac]);
313 + v_t[ac] = sta->airtime[ac].v_t;
314 + spin_unlock_bh(&local->airtime[ac].lock);
317 p += scnprintf(p, bufsz + buf - p,
318 "RX: %llu us\nTX: %llu us\nWeight: %u\n"
319 - "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
320 - rx_airtime, tx_airtime, sta->airtime_weight,
321 - deficit[0], deficit[1], deficit[2], deficit[3]);
322 + "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n",
323 + rx_airtime, tx_airtime, sta->airtime[0].weight,
324 + v_t[0], v_t[1], v_t[2], v_t[3]);
326 rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
328 @@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct
331 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
332 - spin_lock_bh(&local->active_txq_lock[ac]);
333 + spin_lock_bh(&local->airtime[ac].lock);
334 sta->airtime[ac].rx_airtime = 0;
335 sta->airtime[ac].tx_airtime = 0;
336 - sta->airtime[ac].deficit = sta->airtime_weight;
337 - spin_unlock_bh(&local->active_txq_lock[ac]);
338 + sta->airtime[ac].v_t = 0;
339 + spin_unlock_bh(&local->airtime[ac].lock);
343 @@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file
346 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
347 - spin_lock_bh(&local->active_txq_lock[ac]);
348 + spin_lock_bh(&local->airtime[ac].lock);
349 q_limit_l[ac] = sta->airtime[ac].aql_limit_low;
350 q_limit_h[ac] = sta->airtime[ac].aql_limit_high;
351 - spin_unlock_bh(&local->active_txq_lock[ac]);
352 + spin_unlock_bh(&local->airtime[ac].lock);
353 q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
356 --- a/net/mac80211/ieee80211_i.h
357 +++ b/net/mac80211/ieee80211_i.h
358 @@ -840,20 +840,16 @@ enum txq_info_flags {
359 * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
360 * a fq_flow which is already owned by a different tin
361 * @def_cvars: codel vars for @def_flow
362 - * @frags: used to keep fragments created after dequeue
363 * @schedule_order: used with ieee80211_local->active_txqs
364 - * @schedule_round: counter to prevent infinite loops on TXQ scheduling
365 + * @frags: used to keep fragments created after dequeue
369 struct codel_vars def_cvars;
370 struct codel_stats cstats;
372 - u16 schedule_round;
373 - struct list_head schedule_order;
374 + struct rb_node schedule_order;
376 struct sk_buff_head frags;
381 @@ -930,6 +926,8 @@ struct ieee80211_sub_if_data {
382 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
383 struct mac80211_qos_map __rcu *qos_map;
385 + struct airtime_info airtime[IEEE80211_NUM_ACS];
387 struct work_struct csa_finalize_work;
388 bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
389 struct cfg80211_chan_def csa_chandef;
390 @@ -1143,6 +1141,44 @@ enum mac80211_scan_state {
395 + * struct airtime_sched_info - state used for airtime scheduling and AQL
397 + * @lock: spinlock that protects all the fields in this struct
398 + * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time
399 + * @schedule_pos: the current position maintained while a driver walks the tree
400 + * with ieee80211_next_txq()
401 + * @active_list: list of struct airtime_info structs that were active within
402 + * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute
404 + * @last_weight_update: used for rate limiting walking active_list
405 + * @last_schedule_time: tracks the last time a transmission was scheduled; used
406 + * for catching up v_t if no stations are eligible for
408 + * @v_t: global virtual time; queues with v_t < this are eligible for
410 + * @weight_sum: total sum of all active stations used for dividing airtime
411 + * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast
412 + * path - see comment above
413 + * IEEE80211_RECIPROCAL_DIVISOR_64)
414 + * @aql_txq_limit_low: AQL limit when total outstanding airtime
415 + * is < IEEE80211_AQL_THRESHOLD
416 + * @aql_txq_limit_high: AQL limit when total outstanding airtime
417 + * is > IEEE80211_AQL_THRESHOLD
419 +struct airtime_sched_info {
421 + struct rb_root_cached active_txqs;
422 + struct rb_node *schedule_pos;
423 + struct list_head active_list;
424 + u64 last_weight_update;
425 + u64 last_schedule_activity;
428 + u64 weight_sum_reciprocal;
429 + u32 aql_txq_limit_low;
430 + u32 aql_txq_limit_high;
432 DECLARE_STATIC_KEY_FALSE(aql_disable);
434 struct ieee80211_local {
435 @@ -1156,13 +1192,8 @@ struct ieee80211_local {
436 struct codel_params cparams;
438 /* protects active_txqs and txqi->schedule_order */
439 - spinlock_t active_txq_lock[IEEE80211_NUM_ACS];
440 - struct list_head active_txqs[IEEE80211_NUM_ACS];
441 - u16 schedule_round[IEEE80211_NUM_ACS];
443 + struct airtime_sched_info airtime[IEEE80211_NUM_ACS];
445 - u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
446 - u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
448 atomic_t aql_total_pending_airtime;
450 @@ -1581,6 +1612,125 @@ static inline bool txq_has_queue(struct
451 return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
454 +static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq)
456 + struct ieee80211_sub_if_data *sdata;
457 + struct sta_info *sta;
460 + sta = container_of(txq->sta, struct sta_info, sta);
461 + return &sta->airtime[txq->ac];
464 + sdata = vif_to_sdata(txq->vif);
465 + return &sdata->airtime[txq->ac];
468 +/* To avoid divisions in the fast path, we keep pre-computed reciprocals for
469 + * airtime weight calculations. There are two different weights to keep track
470 + * of: The per-station weight and the sum of weights per phy.
472 + * For the per-station weights (kept in airtime_info below), we use 32-bit
473 + * reciprocals with a divisor of 2^19. This lets us keep the multiplications and
474 + * divisions for the station weights as 32-bit operations at the cost of a bit
475 + * of rounding error for high weights; but the choice of divisor keeps rounding
476 + * errors <10% for weights <2^15, assuming no more than 8ms of airtime is
477 + * reported at a time.
479 + * For the per-phy sum of weights the values can get higher, so we use 64-bit
480 + * operations for those with a 32-bit divisor, which should avoid any
481 + * significant rounding errors.
483 +#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL
484 +#define IEEE80211_RECIPROCAL_SHIFT_64 32
485 +#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U
486 +#define IEEE80211_RECIPROCAL_SHIFT_32 19
488 +static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight)
490 + if (air_info->weight == weight)
493 + air_info->weight = weight;
495 + air_info->weight_reciprocal =
496 + IEEE80211_RECIPROCAL_DIVISOR_32 / weight;
498 + air_info->weight_reciprocal = 0;
502 +static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched,
505 + if (air_sched->weight_sum == weight_sum)
508 + air_sched->weight_sum = weight_sum;
509 + if (air_sched->weight_sum) {
510 + air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64;
511 + do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum);
513 + air_sched->weight_sum_reciprocal = 0;
517 +/* A problem when trying to enforce airtime fairness is that we want to divide
518 + * the airtime between the currently *active* stations. However, basing this on
519 + * the instantaneous queue state of stations doesn't work, as queues tend to
520 + * oscillate very quickly between empty and occupied, leading to the scheduler
521 + * thinking only a single station is active when deciding whether to allow
522 + * transmission (and thus not throttling correctly).
524 + * To fix this we use a timer-based notion of activity: a station is considered
525 + * active if it has been scheduled within the last 100 ms; we keep a separate
526 + * list of all the stations considered active in this manner, and lazily update
527 + * the total weight of active stations from this list (filtering the stations in
528 + * the list by their 'last active' time).
530 + * We add one additional safeguard to guard against stations that manage to get
531 + * scheduled every 100 ms but don't transmit a lot of data, and thus don't use
532 + * up any airtime. Such stations would be able to get priority for an extended
533 + * period of time if they do start transmitting at full capacity again, and so
534 + * we add an explicit maximum for how far behind a station is allowed to fall in
535 + * the virtual airtime domain. This limit is set to a relatively high value of
536 + * 20 ms because the main mechanism for catching up idle stations is the active
537 + * state as described above; i.e., the hard limit should only be hit in
538 + * pathological cases.
540 +#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC)
541 +#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */
543 +static inline bool airtime_is_active(struct airtime_info *air_info, u64 now)
545 + return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION;
548 +static inline void airtime_set_active(struct airtime_sched_info *air_sched,
549 + struct airtime_info *air_info, u64 now)
551 + air_info->last_scheduled = now;
552 + air_sched->last_schedule_activity = now;
553 + list_move_tail(&air_info->list, &air_sched->active_list);
556 +static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched,
559 + air_sched->v_t = v_t;
563 +static inline void init_airtime_info(struct airtime_info *air_info,
564 + struct airtime_sched_info *air_sched)
566 + atomic_set(&air_info->aql_tx_pending, 0);
567 + air_info->aql_limit_low = air_sched->aql_txq_limit_low;
568 + air_info->aql_limit_high = air_sched->aql_txq_limit_high;
569 + airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT);
570 + INIT_LIST_HEAD(&air_info->list);
573 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
575 return ether_addr_equal(raddr, addr) ||
576 @@ -1821,6 +1971,14 @@ int ieee80211_tx_control_port(struct wip
578 int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
579 const u8 *buf, size_t len);
580 +void ieee80211_resort_txq(struct ieee80211_hw *hw,
581 + struct ieee80211_txq *txq);
582 +void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
583 + struct ieee80211_txq *txq,
585 +void ieee80211_update_airtime_weight(struct ieee80211_local *local,
586 + struct airtime_sched_info *air_sched,
587 + u64 now, bool force);
590 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
591 --- a/net/mac80211/iface.c
592 +++ b/net/mac80211/iface.c
593 @@ -2067,6 +2067,9 @@ int ieee80211_if_add(struct ieee80211_lo
597 + for (i = 0; i < IEEE80211_NUM_ACS; i++)
598 + init_airtime_info(&sdata->airtime[i], &local->airtime[i]);
600 ieee80211_set_default_queues(sdata);
602 sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
603 --- a/net/mac80211/main.c
604 +++ b/net/mac80211/main.c
605 @@ -693,10 +693,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_
606 spin_lock_init(&local->queue_stop_reason_lock);
608 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
609 - INIT_LIST_HEAD(&local->active_txqs[i]);
610 - spin_lock_init(&local->active_txq_lock[i]);
611 - local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
612 - local->aql_txq_limit_high[i] =
613 + struct airtime_sched_info *air_sched = &local->airtime[i];
615 + air_sched->active_txqs = RB_ROOT_CACHED;
616 + INIT_LIST_HEAD(&air_sched->active_list);
617 + spin_lock_init(&air_sched->lock);
618 + air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L;
619 + air_sched->aql_txq_limit_high =
620 IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H;
623 --- a/net/mac80211/rx.c
624 +++ b/net/mac80211/rx.c
625 @@ -1573,12 +1573,8 @@ static void sta_ps_start(struct sta_info
627 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
628 struct ieee80211_txq *txq = sta->sta.txq[tid];
629 - struct txq_info *txqi = to_txq_info(txq);
631 - spin_lock(&local->active_txq_lock[txq->ac]);
632 - if (!list_empty(&txqi->schedule_order))
633 - list_del_init(&txqi->schedule_order);
634 - spin_unlock(&local->active_txq_lock[txq->ac]);
635 + ieee80211_unschedule_txq(&local->hw, txq, false);
637 if (txq_has_queue(txq))
638 set_bit(tid, &sta->txq_buffered_tids);
639 --- a/net/mac80211/sta_info.c
640 +++ b/net/mac80211/sta_info.c
641 @@ -426,15 +426,11 @@ struct sta_info *sta_info_alloc(struct i
642 if (sta_prepare_rate_control(local, sta, gfp))
645 - sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
647 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
648 skb_queue_head_init(&sta->ps_tx_buf[i]);
649 skb_queue_head_init(&sta->tx_filtered[i]);
650 - sta->airtime[i].deficit = sta->airtime_weight;
651 - atomic_set(&sta->airtime[i].aql_tx_pending, 0);
652 - sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
653 - sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
654 + init_airtime_info(&sta->airtime[i], &local->airtime[i]);
657 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
658 @@ -1898,24 +1894,59 @@ void ieee80211_sta_set_buffered(struct i
660 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
662 -void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
663 - u32 tx_airtime, u32 rx_airtime)
664 +void ieee80211_register_airtime(struct ieee80211_txq *txq,
665 + u32 tx_airtime, u32 rx_airtime)
667 - struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
668 - struct ieee80211_local *local = sta->sdata->local;
669 - u8 ac = ieee80211_ac_from_tid(tid);
670 + struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
671 + struct ieee80211_local *local = sdata->local;
672 + u64 weight_sum, weight_sum_reciprocal;
673 + struct airtime_sched_info *air_sched;
674 + struct airtime_info *air_info;
677 - if (sta->local->airtime_flags & AIRTIME_USE_TX)
678 + air_sched = &local->airtime[txq->ac];
679 + air_info = to_airtime_info(txq);
681 + if (local->airtime_flags & AIRTIME_USE_TX)
682 airtime += tx_airtime;
683 - if (sta->local->airtime_flags & AIRTIME_USE_RX)
684 + if (local->airtime_flags & AIRTIME_USE_RX)
685 airtime += rx_airtime;
687 - spin_lock_bh(&local->active_txq_lock[ac]);
688 - sta->airtime[ac].tx_airtime += tx_airtime;
689 - sta->airtime[ac].rx_airtime += rx_airtime;
690 - sta->airtime[ac].deficit -= airtime;
691 - spin_unlock_bh(&local->active_txq_lock[ac]);
692 + /* Weights scale so the unit weight is 256 */
695 + spin_lock_bh(&air_sched->lock);
697 + air_info->tx_airtime += tx_airtime;
698 + air_info->rx_airtime += rx_airtime;
700 + if (air_sched->weight_sum) {
701 + weight_sum = air_sched->weight_sum;
702 + weight_sum_reciprocal = air_sched->weight_sum_reciprocal;
704 + weight_sum = air_info->weight;
705 + weight_sum_reciprocal = air_info->weight_reciprocal;
708 + /* Round the calculation of global vt */
709 + air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) *
710 + weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64;
711 + air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) *
712 + air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32;
713 + ieee80211_resort_txq(&local->hw, txq);
715 + spin_unlock_bh(&air_sched->lock);
718 +void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
719 + u32 tx_airtime, u32 rx_airtime)
721 + struct ieee80211_txq *txq = pubsta->txq[tid];
726 + ieee80211_register_airtime(txq, tx_airtime, rx_airtime);
728 EXPORT_SYMBOL(ieee80211_sta_register_airtime);
730 @@ -2364,7 +2395,7 @@ void sta_set_sinfo(struct sta_info *sta,
733 if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
734 - sinfo->airtime_weight = sta->airtime_weight;
735 + sinfo->airtime_weight = sta->airtime[0].weight;
736 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
739 --- a/net/mac80211/sta_info.h
740 +++ b/net/mac80211/sta_info.h
741 @@ -135,18 +135,25 @@ enum ieee80211_agg_stop_reason {
742 #define AIRTIME_USE_TX BIT(0)
743 #define AIRTIME_USE_RX BIT(1)
746 struct airtime_info {
751 + u64 last_scheduled;
752 + struct list_head list;
753 atomic_t aql_tx_pending; /* Estimated airtime for frames pending */
756 + u32 weight_reciprocal;
760 void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
761 struct sta_info *sta, u8 ac,
762 u16 tx_airtime, bool tx_completed);
763 +void ieee80211_register_airtime(struct ieee80211_txq *txq,
764 + u32 tx_airtime, u32 rx_airtime);
768 @@ -515,7 +522,6 @@ struct ieee80211_fragment_cache {
769 * @tid_seq: per-TID sequence numbers for sending to this STA
770 * @airtime: per-AC struct airtime_info describing airtime statistics for this
772 - * @airtime_weight: station weight for airtime fairness calculation purposes
773 * @ampdu_mlme: A-MPDU state machine state
774 * @mesh: mesh STA information
775 * @debugfs_dir: debug filesystem directory dentry
776 @@ -646,7 +652,6 @@ struct sta_info {
777 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
779 struct airtime_info airtime[IEEE80211_NUM_ACS];
780 - u16 airtime_weight;
783 * Aggregation information, locked with lock.
784 --- a/net/mac80211/status.c
785 +++ b/net/mac80211/status.c
786 @@ -972,6 +972,25 @@ static void __ieee80211_tx_status(struct
787 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
788 ieee80211_frame_acked(sta, skb);
790 + } else if (wiphy_ext_feature_isset(local->hw.wiphy,
791 + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) {
792 + struct ieee80211_sub_if_data *sdata;
793 + struct ieee80211_txq *txq;
796 + /* Account airtime to multicast queue */
797 + sdata = ieee80211_sdata_from_skb(local, skb);
799 + if (sdata && (txq = sdata->vif.txq)) {
800 + airtime = info->status.tx_time ?:
801 + ieee80211_calc_expected_tx_airtime(hw,
807 + ieee80211_register_airtime(txq, airtime, 0);
812 --- a/net/mac80211/tx.c
813 +++ b/net/mac80211/tx.c
815 #include <linux/bitmap.h>
816 #include <linux/rcupdate.h>
817 #include <linux/export.h>
818 +#include <linux/timekeeping.h>
819 #include <net/net_namespace.h>
820 #include <net/ieee80211_radiotap.h>
821 #include <net/cfg80211.h>
822 @@ -1489,7 +1490,7 @@ void ieee80211_txq_init(struct ieee80211
823 codel_vars_init(&txqi->def_cvars);
824 codel_stats_init(&txqi->cstats);
825 __skb_queue_head_init(&txqi->frags);
826 - INIT_LIST_HEAD(&txqi->schedule_order);
827 + RB_CLEAR_NODE(&txqi->schedule_order);
829 txqi->txq.vif = &sdata->vif;
831 @@ -1533,9 +1534,7 @@ void ieee80211_txq_purge(struct ieee8021
832 ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
833 spin_unlock_bh(&fq->lock);
835 - spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
836 - list_del_init(&txqi->schedule_order);
837 - spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
838 + ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
841 void ieee80211_txq_set_params(struct ieee80211_local *local)
842 @@ -3797,102 +3796,259 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
843 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
845 struct ieee80211_local *local = hw_to_local(hw);
846 + struct airtime_sched_info *air_sched;
847 + u64 now = ktime_get_boottime_ns();
848 struct ieee80211_txq *ret = NULL;
849 - struct txq_info *txqi = NULL, *head = NULL;
850 - bool found_eligible_txq = false;
851 + struct airtime_info *air_info;
852 + struct txq_info *txqi = NULL;
853 + struct rb_node *node;
854 + bool first = false;
856 - spin_lock_bh(&local->active_txq_lock[ac]);
857 + air_sched = &local->airtime[ac];
858 + spin_lock_bh(&air_sched->lock);
861 - txqi = list_first_entry_or_null(&local->active_txqs[ac],
865 + node = air_sched->schedule_pos;
869 + node = rb_first_cached(&air_sched->active_txqs);
872 + node = rb_next(node);
878 - if (txqi == head) {
879 - if (!found_eligible_txq)
882 - found_eligible_txq = false;
883 + txqi = container_of(node, struct txq_info, schedule_order);
884 + air_info = to_airtime_info(&txqi->txq);
886 + if (air_info->v_t > air_sched->v_t &&
887 + (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
890 + if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
897 + air_sched->schedule_pos = node;
898 + air_sched->last_schedule_activity = now;
901 + spin_unlock_bh(&air_sched->lock);
904 +EXPORT_SYMBOL(ieee80211_next_txq);
906 - if (txqi->txq.sta) {
907 - struct sta_info *sta = container_of(txqi->txq.sta,
908 - struct sta_info, sta);
909 - bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq);
910 - s64 deficit = sta->airtime[txqi->txq.ac].deficit;
911 +static void __ieee80211_insert_txq(struct rb_root_cached *root,
912 + struct txq_info *txqi)
914 + struct rb_node **new = &root->rb_root.rb_node;
915 + struct airtime_info *old_air, *new_air;
916 + struct rb_node *parent = NULL;
917 + struct txq_info *__txqi;
918 + bool leftmost = true;
922 + __txqi = rb_entry(parent, struct txq_info, schedule_order);
923 + old_air = to_airtime_info(&__txqi->txq);
924 + new_air = to_airtime_info(&txqi->txq);
927 - found_eligible_txq = true;
928 + if (new_air->v_t <= old_air->v_t) {
929 + new = &parent->rb_left;
931 + new = &parent->rb_right;
937 - sta->airtime[txqi->txq.ac].deficit +=
938 - sta->airtime_weight;
940 - if (deficit < 0 || !aql_check) {
941 - list_move_tail(&txqi->schedule_order,
942 - &local->active_txqs[txqi->txq.ac]);
944 + rb_link_node(&txqi->schedule_order, parent, new);
945 + rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
948 +void ieee80211_resort_txq(struct ieee80211_hw *hw,
949 + struct ieee80211_txq *txq)
951 + struct airtime_info *air_info = to_airtime_info(txq);
952 + struct ieee80211_local *local = hw_to_local(hw);
953 + struct txq_info *txqi = to_txq_info(txq);
954 + struct airtime_sched_info *air_sched;
956 + air_sched = &local->airtime[txq->ac];
958 + lockdep_assert_held(&air_sched->lock);
960 + if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
961 + struct airtime_info *a_prev = NULL, *a_next = NULL;
962 + struct txq_info *t_prev, *t_next;
963 + struct rb_node *n_prev, *n_next;
965 + /* Erasing a node can cause an expensive rebalancing operation,
966 + * so we check the previous and next nodes first and only remove
967 + * and re-insert if the current node is not already in the
968 + * correct position.
970 + if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
971 + t_prev = container_of(n_prev, struct txq_info,
973 + a_prev = to_airtime_info(&t_prev->txq);
976 + if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
977 + t_next = container_of(n_next, struct txq_info,
979 + a_next = to_airtime_info(&t_next->txq);
982 + if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
983 + (!a_next || a_next->v_t > air_info->v_t))
986 + if (air_sched->schedule_pos == &txqi->schedule_order)
987 + air_sched->schedule_pos = n_prev;
989 + rb_erase_cached(&txqi->schedule_order,
990 + &air_sched->active_txqs);
991 + RB_CLEAR_NODE(&txqi->schedule_order);
992 + __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
996 +void ieee80211_update_airtime_weight(struct ieee80211_local *local,
997 + struct airtime_sched_info *air_sched,
998 + u64 now, bool force)
1000 + struct airtime_info *air_info, *tmp;
1001 + u64 weight_sum = 0;
1003 + if (unlikely(!now))
1004 + now = ktime_get_boottime_ns();
1006 + lockdep_assert_held(&air_sched->lock);
1008 + if (!force && (air_sched->last_weight_update <
1009 + now - AIRTIME_ACTIVE_DURATION))
1012 + list_for_each_entry_safe(air_info, tmp,
1013 + &air_sched->active_list, list) {
1014 + if (airtime_is_active(air_info, now))
1015 + weight_sum += air_info->weight;
1017 + list_del_init(&air_info->list);
1019 + airtime_weight_sum_set(air_sched, weight_sum);
1020 + air_sched->last_weight_update = now;
1023 +void ieee80211_schedule_txq(struct ieee80211_hw *hw,
1024 + struct ieee80211_txq *txq)
1025 + __acquires(txq_lock) __releases(txq_lock)
1027 + struct ieee80211_local *local = hw_to_local(hw);
1028 + struct txq_info *txqi = to_txq_info(txq);
1029 + struct airtime_sched_info *air_sched;
1030 + u64 now = ktime_get_boottime_ns();
1031 + struct airtime_info *air_info;
1035 - if (txqi->schedule_round == local->schedule_round[ac])
1036 + air_sched = &local->airtime[ac];
1037 + air_info = to_airtime_info(txq);
1039 + spin_lock_bh(&air_sched->lock);
1040 + was_active = airtime_is_active(air_info, now);
1041 + airtime_set_active(air_sched, air_info, now);
1043 + if (!RB_EMPTY_NODE(&txqi->schedule_order))
1046 - list_del_init(&txqi->schedule_order);
1047 - txqi->schedule_round = local->schedule_round[ac];
1049 + /* If the station has been inactive for a while, catch up its v_t so it
1050 + * doesn't get indefinite priority; see comment above the definition of
1051 + * AIRTIME_MAX_BEHIND.
1053 + if ((!was_active && air_info->v_t < air_sched->v_t) ||
1054 + air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
1055 + air_info->v_t = air_sched->v_t;
1057 + ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
1058 + __ieee80211_insert_txq(&air_sched->active_txqs, txqi);
1061 - spin_unlock_bh(&local->active_txq_lock[ac]);
1063 + spin_unlock_bh(&air_sched->lock);
1065 -EXPORT_SYMBOL(ieee80211_next_txq);
1066 +EXPORT_SYMBOL(ieee80211_schedule_txq);
1068 -void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
1069 - struct ieee80211_txq *txq,
1071 +static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1072 + struct ieee80211_txq *txq,
1075 struct ieee80211_local *local = hw_to_local(hw);
1076 struct txq_info *txqi = to_txq_info(txq);
1077 + struct airtime_sched_info *air_sched;
1078 + struct airtime_info *air_info;
1080 - spin_lock_bh(&local->active_txq_lock[txq->ac]);
1081 + air_sched = &local->airtime[txq->ac];
1082 + air_info = to_airtime_info(&txqi->txq);
1084 - if (list_empty(&txqi->schedule_order) &&
1085 - (force || !skb_queue_empty(&txqi->frags) ||
1086 - txqi->tin.backlog_packets)) {
1087 - /* If airtime accounting is active, always enqueue STAs at the
1088 - * head of the list to ensure that they only get moved to the
1089 - * back by the airtime DRR scheduler once they have a negative
1090 - * deficit. A station that already has a negative deficit will
1091 - * get immediately moved to the back of the list on the next
1092 - * call to ieee80211_next_txq().
1094 - if (txqi->txq.sta && local->airtime_flags &&
1095 - wiphy_ext_feature_isset(local->hw.wiphy,
1096 - NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
1097 - list_add(&txqi->schedule_order,
1098 - &local->active_txqs[txq->ac]);
1100 - list_add_tail(&txqi->schedule_order,
1101 - &local->active_txqs[txq->ac]);
1102 + lockdep_assert_held(&air_sched->lock);
1105 + list_del_init(&air_info->list);
1106 + ieee80211_update_airtime_weight(local, air_sched, 0, true);
1109 - spin_unlock_bh(&local->active_txq_lock[txq->ac]);
1110 + if (RB_EMPTY_NODE(&txqi->schedule_order))
1113 + if (air_sched->schedule_pos == &txqi->schedule_order)
1114 + air_sched->schedule_pos = rb_prev(&txqi->schedule_order);
1117 + airtime_set_active(air_sched, air_info,
1118 + ktime_get_boottime_ns());
1120 + rb_erase_cached(&txqi->schedule_order,
1121 + &air_sched->active_txqs);
1122 + RB_CLEAR_NODE(&txqi->schedule_order);
1125 +void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
1126 + struct ieee80211_txq *txq,
1128 + __acquires(txq_lock) __releases(txq_lock)
1130 + struct ieee80211_local *local = hw_to_local(hw);
1132 + spin_lock_bh(&local->airtime[txq->ac].lock);
1133 + __ieee80211_unschedule_txq(hw, txq, purge);
1134 + spin_unlock_bh(&local->airtime[txq->ac].lock);
1137 +void ieee80211_return_txq(struct ieee80211_hw *hw,
1138 + struct ieee80211_txq *txq, bool force)
1140 + struct ieee80211_local *local = hw_to_local(hw);
1141 + struct txq_info *txqi = to_txq_info(txq);
1143 + spin_lock_bh(&local->airtime[txq->ac].lock);
1145 + if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
1146 + !txq_has_queue(txq))
1147 + __ieee80211_unschedule_txq(hw, txq, false);
1149 + spin_unlock_bh(&local->airtime[txq->ac].lock);
1151 -EXPORT_SYMBOL(__ieee80211_schedule_txq);
1152 +EXPORT_SYMBOL(ieee80211_return_txq);
1154 DEFINE_STATIC_KEY_FALSE(aql_disable);
1156 bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
1157 struct ieee80211_txq *txq)
1159 - struct sta_info *sta;
1160 + struct airtime_info *air_info = to_airtime_info(txq);
1161 struct ieee80211_local *local = hw_to_local(hw);
1163 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
1164 @@ -3907,15 +4063,12 @@ bool ieee80211_txq_airtime_check(struct
1165 if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
1168 - sta = container_of(txq->sta, struct sta_info, sta);
1169 - if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1170 - sta->airtime[txq->ac].aql_limit_low)
1171 + if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
1174 if (atomic_read(&local->aql_total_pending_airtime) <
1175 local->aql_threshold &&
1176 - atomic_read(&sta->airtime[txq->ac].aql_tx_pending) <
1177 - sta->airtime[txq->ac].aql_limit_high)
1178 + atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
1182 @@ -3925,60 +4078,59 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec
1183 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
1184 struct ieee80211_txq *txq)
1186 + struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
1187 struct ieee80211_local *local = hw_to_local(hw);
1188 - struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
1189 - struct sta_info *sta;
1191 + struct airtime_sched_info *air_sched;
1192 + struct airtime_info *air_info;
1193 + struct rb_node *node = NULL;
1197 - spin_lock_bh(&local->active_txq_lock[ac]);
1199 - if (!txqi->txq.sta)
1201 + if (!ieee80211_txq_airtime_check(hw, txq))
1204 + air_sched = &local->airtime[txq->ac];
1205 + spin_lock_bh(&air_sched->lock);
1207 - if (list_empty(&txqi->schedule_order))
1208 + if (RB_EMPTY_NODE(&txqi->schedule_order))
1211 - list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
1215 + now = ktime_get_boottime_ns();
1217 - if (!iter->txq.sta) {
1218 - list_move_tail(&iter->schedule_order,
1219 - &local->active_txqs[ac]);
1222 - sta = container_of(iter->txq.sta, struct sta_info, sta);
1223 - if (sta->airtime[ac].deficit < 0)
1224 - sta->airtime[ac].deficit += sta->airtime_weight;
1225 - list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
1226 + /* Like in ieee80211_next_txq(), make sure the first station in the
1227 + * scheduling order is eligible for transmission to avoid starvation.
1229 + node = rb_first_cached(&air_sched->active_txqs);
1231 + first_txqi = container_of(node, struct txq_info,
1233 + air_info = to_airtime_info(&first_txqi->txq);
1235 + if (air_sched->v_t < air_info->v_t)
1236 + airtime_catchup_v_t(air_sched, air_info->v_t, now);
1239 - sta = container_of(txqi->txq.sta, struct sta_info, sta);
1240 - if (sta->airtime[ac].deficit >= 0)
1243 - sta->airtime[ac].deficit += sta->airtime_weight;
1244 - list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
1245 - spin_unlock_bh(&local->active_txq_lock[ac]);
1246 + air_info = to_airtime_info(&txqi->txq);
1247 + if (air_info->v_t <= air_sched->v_t) {
1248 + air_sched->last_schedule_activity = now;
1254 - if (!list_empty(&txqi->schedule_order))
1255 - list_del_init(&txqi->schedule_order);
1256 - spin_unlock_bh(&local->active_txq_lock[ac]);
1259 + spin_unlock_bh(&air_sched->lock);
1262 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
1264 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
1266 struct ieee80211_local *local = hw_to_local(hw);
1267 + struct airtime_sched_info *air_sched = &local->airtime[ac];
1269 - spin_lock_bh(&local->active_txq_lock[ac]);
1270 - local->schedule_round[ac]++;
1271 - spin_unlock_bh(&local->active_txq_lock[ac]);
1272 + spin_lock_bh(&air_sched->lock);
1273 + air_sched->schedule_pos = NULL;
1274 + spin_unlock_bh(&air_sched->lock);
1276 EXPORT_SYMBOL(ieee80211_txq_schedule_start);