1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Wed, 25 Nov 2020 18:10:34 +0100
3 Subject: [PATCH] net/fq_impl: do not maintain a backlog-sorted list of
6 A sorted flow list is only needed to drop packets in the biggest flow when
7 hitting the overmemory condition.
8 By scanning flows only when needed, we can avoid paying the cost of
9 maintaining the list under normal conditions.
10 In order to avoid scanning lots of empty flows and touching too many cold
11 cache lines, a bitmap of flows with backlog is maintained.
13 Signed-off-by: Felix Fietkau <nbd@nbd.name>
16 --- a/include/net/fq.h
17 +++ b/include/net/fq.h
18 @@ -19,8 +19,6 @@ struct fq_tin;
19 * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
20 * (deficit round robin) based round robin queuing similar to the one
21 * found in net/sched/sch_fq_codel.c
22 - * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
23 - * fat flows and efficient head-dropping if packet limit is reached
24 * @queue: sk_buff queue to hold packets
25 * @backlog: number of bytes pending in the queue. The number of packets can be
26 * found in @queue.qlen
27 @@ -29,7 +27,6 @@ struct fq_tin;
30 struct list_head flowchain;
31 - struct list_head backlogchain;
32 struct sk_buff_head queue;
35 @@ -47,6 +44,7 @@ struct fq_flow {
37 struct list_head new_flows;
38 struct list_head old_flows;
39 + struct list_head tin_list;
40 struct fq_flow default_flow;
43 @@ -60,14 +58,14 @@ struct fq_tin {
45 * struct fq - main container for fair queuing purposes
47 - * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
48 - * head-dropping when @backlog reaches @limit
49 * @limit: max number of packets that can be queued across all flows
50 * @backlog: number of packets queued across all flows
53 struct fq_flow *flows;
54 - struct list_head backlogs;
55 + unsigned long *flows_bitmap;
57 + struct list_head tin_backlog;
61 --- a/include/net/fq_impl.h
62 +++ b/include/net/fq_impl.h
63 @@ -17,12 +17,24 @@ __fq_adjust_removal(struct fq *fq, struc
64 unsigned int bytes, unsigned int truesize)
66 struct fq_tin *tin = flow->tin;
69 tin->backlog_bytes -= bytes;
70 tin->backlog_packets -= packets;
71 flow->backlog -= bytes;
72 fq->backlog -= packets;
73 fq->memory_usage -= truesize;
78 + if (flow == &tin->default_flow) {
79 + list_del_init(&tin->tin_list);
83 + idx = flow - fq->flows;
84 + __clear_bit(idx, fq->flows_bitmap);
87 static void fq_adjust_removal(struct fq *fq,
88 @@ -32,24 +44,6 @@ static void fq_adjust_removal(struct fq
89 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
92 -static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
96 - if (flow->backlog == 0) {
97 - list_del_init(&flow->backlogchain);
101 - list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
102 - if (i->backlog < flow->backlog)
105 - list_move_tail(&flow->backlogchain,
110 static struct sk_buff *fq_flow_dequeue(struct fq *fq,
111 struct fq_flow *flow)
113 @@ -62,7 +56,6 @@ static struct sk_buff *fq_flow_dequeue(s
116 fq_adjust_removal(fq, flow, skb);
117 - fq_rejigger_backlog(fq, flow);
121 @@ -90,7 +83,6 @@ static int fq_flow_drop(struct fq *fq, s
122 } while (packets < pending);
124 __fq_adjust_removal(fq, flow, packets, bytes, truesize);
125 - fq_rejigger_backlog(fq, flow);
129 @@ -170,22 +162,36 @@ static struct fq_flow *fq_flow_classify(
133 -static void fq_recalc_backlog(struct fq *fq,
134 - struct fq_tin *tin,
135 - struct fq_flow *flow)
139 - if (list_empty(&flow->backlogchain))
140 - list_add_tail(&flow->backlogchain, &fq->backlogs);
143 - list_for_each_entry_continue_reverse(i, &fq->backlogs,
145 - if (i->backlog > flow->backlog)
147 +static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
149 + struct fq_tin *tin;
150 + struct fq_flow *flow = NULL;
154 + for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
155 + struct fq_flow *cur = &fq->flows[i];
156 + unsigned int cur_len;
158 + cur_len = cur->backlog;
159 + if (cur_len <= len)
166 - list_move(&flow->backlogchain, &i->backlogchain);
167 + list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
168 + unsigned int cur_len = tin->default_flow.backlog;
170 + if (cur_len <= len)
173 + flow = &tin->default_flow;
180 static void fq_tin_enqueue(struct fq *fq,
181 @@ -200,6 +206,13 @@ static void fq_tin_enqueue(struct fq *fq
183 flow = fq_flow_classify(fq, tin, idx, skb);
185 + if (!flow->backlog) {
186 + if (flow != &tin->default_flow)
187 + __set_bit(idx, fq->flows_bitmap);
188 + else if (list_empty(&tin->tin_list))
189 + list_add(&tin->tin_list, &fq->tin_backlog);
193 flow->backlog += skb->len;
194 tin->backlog_bytes += skb->len;
195 @@ -207,8 +220,6 @@ static void fq_tin_enqueue(struct fq *fq
196 fq->memory_usage += skb->truesize;
199 - fq_recalc_backlog(fq, tin, flow);
201 if (list_empty(&flow->flowchain)) {
202 flow->deficit = fq->quantum;
203 list_add_tail(&flow->flowchain,
204 @@ -218,9 +229,7 @@ static void fq_tin_enqueue(struct fq *fq
205 __skb_queue_tail(&flow->queue, skb);
206 oom = (fq->memory_usage > fq->memory_limit);
207 while (fq->backlog > fq->limit || oom) {
208 - flow = list_first_entry_or_null(&fq->backlogs,
211 + flow = fq_find_fattest_flow(fq);
215 @@ -255,8 +264,6 @@ static void fq_flow_filter(struct fq *fq
216 fq_adjust_removal(fq, flow, skb);
217 free_func(fq, tin, flow, skb);
220 - fq_rejigger_backlog(fq, flow);
223 static void fq_tin_filter(struct fq *fq,
224 @@ -279,16 +286,18 @@ static void fq_flow_reset(struct fq *fq,
225 struct fq_flow *flow,
226 fq_skb_free_t free_func)
228 + struct fq_tin *tin = flow->tin;
231 while ((skb = fq_flow_dequeue(fq, flow)))
232 - free_func(fq, flow->tin, flow, skb);
233 + free_func(fq, tin, flow, skb);
235 - if (!list_empty(&flow->flowchain))
236 + if (!list_empty(&flow->flowchain)) {
237 list_del_init(&flow->flowchain);
239 - if (!list_empty(&flow->backlogchain))
240 - list_del_init(&flow->backlogchain);
241 + if (list_empty(&tin->new_flows) &&
242 + list_empty(&tin->old_flows))
243 + list_del_init(&tin->tin_list);
248 @@ -314,6 +323,7 @@ static void fq_tin_reset(struct fq *fq,
249 fq_flow_reset(fq, flow, free_func);
252 + WARN_ON_ONCE(!list_empty(&tin->tin_list));
253 WARN_ON_ONCE(tin->backlog_bytes);
254 WARN_ON_ONCE(tin->backlog_packets);
256 @@ -321,7 +331,6 @@ static void fq_tin_reset(struct fq *fq,
257 static void fq_flow_init(struct fq_flow *flow)
259 INIT_LIST_HEAD(&flow->flowchain);
260 - INIT_LIST_HEAD(&flow->backlogchain);
261 __skb_queue_head_init(&flow->queue);
264 @@ -329,6 +338,7 @@ static void fq_tin_init(struct fq_tin *t
266 INIT_LIST_HEAD(&tin->new_flows);
267 INIT_LIST_HEAD(&tin->old_flows);
268 + INIT_LIST_HEAD(&tin->tin_list);
269 fq_flow_init(&tin->default_flow);
272 @@ -337,8 +347,8 @@ static int fq_init(struct fq *fq, int fl
275 memset(fq, 0, sizeof(fq[0]));
276 - INIT_LIST_HEAD(&fq->backlogs);
277 spin_lock_init(&fq->lock);
278 + INIT_LIST_HEAD(&fq->tin_backlog);
279 fq->flows_cnt = max_t(u32, flows_cnt, 1);
282 @@ -348,6 +358,14 @@ static int fq_init(struct fq *fq, int fl
286 + fq->flows_bitmap = kcalloc(BITS_TO_LONGS(fq->flows_cnt), sizeof(long),
288 + if (!fq->flows_bitmap) {
294 for (i = 0; i < fq->flows_cnt; i++)
295 fq_flow_init(&fq->flows[i]);
297 @@ -364,6 +382,9 @@ static void fq_reset(struct fq *fq,
302 + kfree(fq->flows_bitmap);
303 + fq->flows_bitmap = NULL;
307 --- a/net/mac80211/tx.c
308 +++ b/net/mac80211/tx.c
309 @@ -3335,8 +3335,6 @@ out_recalc:
310 if (head->len != orig_len) {
311 flow->backlog += head->len - orig_len;
312 tin->backlog_bytes += head->len - orig_len;
314 - fq_recalc_backlog(fq, tin, flow);
317 spin_unlock_bh(&fq->lock);