182c6afb78d3a846e7cf440de114ac8593b5abec
[openwrt/staging/xback.git] /
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Mon, 21 Feb 2022 15:39:18 +0100
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: rework hardware flow table
4 management
5
6 The hardware was designed to handle flow detection and creation of flow entries
7 by itself, relying on the software primarily for filling in egress routing
8 information.
9 When there is a hash collision between multiple flows, this allows the hardware
10 to maintain the entry for the most active flow.
11 Additionally, the hardware only keeps offloading active for entries with at
12 least 30 packets per second.
13
14 With this rework, the code no longer creates hardware entries directly.
15 Instead, the hardware entry is only created when the PPE reports a matching
16 unbound flow with the minimum target rate.
17 In order to reduce CPU overhead, looking for flows belonging to a hash entry
18 is rate limited to once every 100ms.
19
20 This rework is also used as preparation for emulating bridge offload by
21 managing L4 offload entries on demand.
22
23 Signed-off-by: Felix Fietkau <nbd@nbd.name>
24 ---
25
26 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
27 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
28 @@ -21,6 +21,7 @@
29 #include <linux/pinctrl/devinfo.h>
30 #include <linux/phylink.h>
31 #include <linux/jhash.h>
32 +#include <linux/bitfield.h>
33 #include <net/dsa.h>
34
35 #include "mtk_eth_soc.h"
36 @@ -1303,7 +1304,7 @@ static int mtk_poll_rx(struct napi_struc
37 struct net_device *netdev;
38 unsigned int pktlen;
39 dma_addr_t dma_addr;
40 - u32 hash;
41 + u32 hash, reason;
42 int mac;
43
44 ring = mtk_get_rx_ring(eth);
45 @@ -1382,6 +1383,11 @@ static int mtk_poll_rx(struct napi_struc
46 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
47 }
48
49 + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
50 + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
51 + mtk_ppe_check_skb(eth->ppe, skb,
52 + trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
53 +
54 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
55 (trxd.rxd2 & RX_DMA_VTAG))
56 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
57 @@ -3307,7 +3313,7 @@ static int mtk_probe(struct platform_dev
58 }
59
60 if (eth->soc->offload_version) {
61 - eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2);
62 + eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
63 if (!eth->ppe) {
64 err = -ENOMEM;
65 goto err_free_dev;
66 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
67 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
68 @@ -6,9 +6,12 @@
69 #include <linux/iopoll.h>
70 #include <linux/etherdevice.h>
71 #include <linux/platform_device.h>
72 +#include "mtk_eth_soc.h"
73 #include "mtk_ppe.h"
74 #include "mtk_ppe_regs.h"
75
76 +static DEFINE_SPINLOCK(ppe_lock);
77 +
78 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
79 {
80 writel(val, ppe->base + reg);
81 @@ -41,6 +44,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe
82 return ppe_m32(ppe, reg, val, 0);
83 }
84
85 +static u32 mtk_eth_timestamp(struct mtk_eth *eth)
86 +{
87 + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
88 +}
89 +
90 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
91 {
92 int ret;
93 @@ -353,26 +361,59 @@ static inline bool mtk_foe_entry_usable(
94 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
95 }
96
97 -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
98 - u16 timestamp)
99 +static bool
100 +mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
101 +{
102 + int type, len;
103 +
104 + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
105 + return false;
106 +
107 + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
108 + if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
109 + len = offsetof(struct mtk_foe_entry, ipv6._rsv);
110 + else
111 + len = offsetof(struct mtk_foe_entry, ipv4.ib2);
112 +
113 + return !memcmp(&entry->data.data, &data->data, len - 4);
114 +}
115 +
116 +static void
117 +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
118 {
119 struct mtk_foe_entry *hwe;
120 - u32 hash;
121 + struct mtk_foe_entry foe;
122
123 + spin_lock_bh(&ppe_lock);
124 + if (entry->hash == 0xffff)
125 + goto out;
126 +
127 + hwe = &ppe->foe_table[entry->hash];
128 + memcpy(&foe, hwe, sizeof(foe));
129 + if (!mtk_flow_entry_match(entry, &foe)) {
130 + entry->hash = 0xffff;
131 + goto out;
132 + }
133 +
134 + entry->data.ib1 = foe.ib1;
135 +
136 +out:
137 + spin_unlock_bh(&ppe_lock);
138 +}
139 +
140 +static void
141 +__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
142 + u16 hash)
143 +{
144 + struct mtk_foe_entry *hwe;
145 + u16 timestamp;
146 +
147 + timestamp = mtk_eth_timestamp(ppe->eth);
148 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
149 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
150 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
151
152 - hash = mtk_ppe_hash_entry(entry);
153 hwe = &ppe->foe_table[hash];
154 - if (!mtk_foe_entry_usable(hwe)) {
155 - hwe++;
156 - hash++;
157 -
158 - if (!mtk_foe_entry_usable(hwe))
159 - return -ENOSPC;
160 - }
161 -
162 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
163 wmb();
164 hwe->ib1 = entry->ib1;
165 @@ -380,13 +421,77 @@ int mtk_foe_entry_commit(struct mtk_ppe
166 dma_wmb();
167
168 mtk_ppe_cache_clear(ppe);
169 +}
170
171 - return hash;
172 +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
173 +{
174 + spin_lock_bh(&ppe_lock);
175 + hlist_del_init(&entry->list);
176 + if (entry->hash != 0xffff) {
177 + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
178 + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
179 + MTK_FOE_STATE_BIND);
180 + dma_wmb();
181 + }
182 + entry->hash = 0xffff;
183 + spin_unlock_bh(&ppe_lock);
184 +}
185 +
186 +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
187 +{
188 + u32 hash = mtk_ppe_hash_entry(&entry->data);
189 +
190 + entry->hash = 0xffff;
191 + spin_lock_bh(&ppe_lock);
192 + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
193 + spin_unlock_bh(&ppe_lock);
194 +
195 + return 0;
196 +}
197 +
198 +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
199 +{
200 + struct hlist_head *head = &ppe->foe_flow[hash / 2];
201 + struct mtk_flow_entry *entry;
202 + struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
203 + bool found = false;
204 +
205 + if (hlist_empty(head))
206 + return;
207 +
208 + spin_lock_bh(&ppe_lock);
209 + hlist_for_each_entry(entry, head, list) {
210 + if (found || !mtk_flow_entry_match(entry, hwe)) {
211 + if (entry->hash != 0xffff)
212 + entry->hash = 0xffff;
213 + continue;
214 + }
215 +
216 + entry->hash = hash;
217 + __mtk_foe_entry_commit(ppe, &entry->data, hash);
218 + found = true;
219 + }
220 + spin_unlock_bh(&ppe_lock);
221 +}
222 +
223 +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
224 +{
225 + u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
226 + u16 timestamp;
227 +
228 + mtk_flow_entry_update(ppe, entry);
229 + timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
230 +
231 + if (timestamp > now)
232 + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
233 + else
234 + return now - timestamp;
235 }
236
237 -struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base,
238 +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
239 int version)
240 {
241 + struct device *dev = eth->dev;
242 struct mtk_foe_entry *foe;
243 struct mtk_ppe *ppe;
244
245 @@ -398,6 +503,7 @@ struct mtk_ppe *mtk_ppe_init(struct devi
246 * not coherent.
247 */
248 ppe->base = base;
249 + ppe->eth = eth;
250 ppe->dev = dev;
251 ppe->version = version;
252
253 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h
254 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
255 @@ -235,7 +235,17 @@ enum {
256 MTK_PPE_CPU_REASON_INVALID = 0x1f,
257 };
258
259 +struct mtk_flow_entry {
260 + struct rhash_head node;
261 + struct hlist_node list;
262 + unsigned long cookie;
263 + struct mtk_foe_entry data;
264 + u16 hash;
265 + s8 wed_index;
266 +};
267 +
268 struct mtk_ppe {
269 + struct mtk_eth *eth;
270 struct device *dev;
271 void __iomem *base;
272 int version;
273 @@ -243,18 +253,33 @@ struct mtk_ppe {
274 struct mtk_foe_entry *foe_table;
275 dma_addr_t foe_phys;
276
277 + u16 foe_check_time[MTK_PPE_ENTRIES];
278 + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
279 +
280 void *acct_table;
281 };
282
283 -struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version);
284 +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
285 int mtk_ppe_start(struct mtk_ppe *ppe);
286 int mtk_ppe_stop(struct mtk_ppe *ppe);
287
288 +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
289 +
290 static inline void
291 -mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
292 +mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
293 {
294 - ppe->foe_table[hash].ib1 = 0;
295 - dma_wmb();
296 + u16 now, diff;
297 +
298 + if (!ppe)
299 + return;
300 +
301 + now = (u16)jiffies;
302 + diff = now - ppe->foe_check_time[hash];
303 + if (diff < HZ / 10)
304 + return;
305 +
306 + ppe->foe_check_time[hash] = now;
307 + __mtk_ppe_check_skb(ppe, skb, hash);
308 }
309
310 static inline int
311 @@ -282,8 +307,9 @@ int mtk_foe_entry_set_vlan(struct mtk_fo
312 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
313 int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
314 int bss, int wcid);
315 -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
316 - u16 timestamp);
317 +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
318 +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
319 +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
320 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
321
322 #endif
323 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
324 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
325 @@ -42,13 +42,6 @@ struct mtk_flow_data {
326 } pppoe;
327 };
328
329 -struct mtk_flow_entry {
330 - struct rhash_head node;
331 - unsigned long cookie;
332 - u16 hash;
333 - s8 wed_index;
334 -};
335 -
336 static const struct rhashtable_params mtk_flow_ht_params = {
337 .head_offset = offsetof(struct mtk_flow_entry, node),
338 .key_offset = offsetof(struct mtk_flow_entry, cookie),
339 @@ -56,12 +49,6 @@ static const struct rhashtable_params mt
340 .automatic_shrinking = true,
341 };
342
343 -static u32
344 -mtk_eth_timestamp(struct mtk_eth *eth)
345 -{
346 - return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
347 -}
348 -
349 static int
350 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
351 bool egress)
352 @@ -237,10 +224,8 @@ mtk_flow_offload_replace(struct mtk_eth
353 int offload_type = 0;
354 int wed_index = -1;
355 u16 addr_type = 0;
356 - u32 timestamp;
357 u8 l4proto = 0;
358 int err = 0;
359 - int hash;
360 int i;
361
362 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
363 @@ -410,23 +395,21 @@ mtk_flow_offload_replace(struct mtk_eth
364 return -ENOMEM;
365
366 entry->cookie = f->cookie;
367 - timestamp = mtk_eth_timestamp(eth);
368 - hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp);
369 - if (hash < 0) {
370 - err = hash;
371 + memcpy(&entry->data, &foe, sizeof(entry->data));
372 + entry->wed_index = wed_index;
373 +
374 + if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
375 goto free;
376 - }
377
378 - entry->hash = hash;
379 - entry->wed_index = wed_index;
380 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
381 mtk_flow_ht_params);
382 if (err < 0)
383 - goto clear_flow;
384 + goto clear;
385
386 return 0;
387 -clear_flow:
388 - mtk_foe_entry_clear(eth->ppe, hash);
389 +
390 +clear:
391 + mtk_foe_entry_clear(eth->ppe, entry);
392 free:
393 kfree(entry);
394 if (wed_index >= 0)
395 @@ -444,7 +427,7 @@ mtk_flow_offload_destroy(struct mtk_eth
396 if (!entry)
397 return -ENOENT;
398
399 - mtk_foe_entry_clear(eth->ppe, entry->hash);
400 + mtk_foe_entry_clear(eth->ppe, entry);
401 rhashtable_remove_fast(&eth->flow_table, &entry->node,
402 mtk_flow_ht_params);
403 if (entry->wed_index >= 0)
404 @@ -458,7 +441,6 @@ static int
405 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
406 {
407 struct mtk_flow_entry *entry;
408 - int timestamp;
409 u32 idle;
410
411 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
412 @@ -466,11 +448,7 @@ mtk_flow_offload_stats(struct mtk_eth *e
413 if (!entry)
414 return -ENOENT;
415
416 - timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash);
417 - if (timestamp < 0)
418 - return -ETIMEDOUT;
419 -
420 - idle = mtk_eth_timestamp(eth) - timestamp;
421 + idle = mtk_foe_entry_idle_time(eth->ppe, entry);
422 f->stats.lastused = jiffies - idle * HZ;
423
424 return 0;