209c65e66aa039d04df6de8639549f27846e8027
[openwrt/staging/blogic.git] /
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Wed, 23 Feb 2022 10:56:34 +0100
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: support creating mac
4 address based offload entries
5
6 This will be used to implement a limited form of bridge offloading.
7 Since the hardware does not support flow table entries with just source
8 and destination MAC address, the driver has to emulate it.
9
10 The hardware automatically creates entries for incoming flows, even
11 when they are bridged instead of routed, and reports when packets for these
12 flows have reached the minimum PPS rate for offloading.
13
14 After this happens, we look up the L2 flow offload entry based on the MAC
15 header and fill in the output routing information in the flow table.
16 The dynamically created per-flow entries are automatically removed when
17 either the hardware flowtable entry expires, is replaced, or the offload
18 rule they belong to is removed.
19
20 Signed-off-by: Felix Fietkau <nbd@nbd.name>
21 ---
22
23 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
24 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
25 @@ -6,12 +6,22 @@
26 #include <linux/iopoll.h>
27 #include <linux/etherdevice.h>
28 #include <linux/platform_device.h>
29 +#include <linux/if_ether.h>
30 +#include <linux/if_vlan.h>
31 +#include <net/dsa.h>
32 #include "mtk_eth_soc.h"
33 #include "mtk_ppe.h"
34 #include "mtk_ppe_regs.h"
35
36 static DEFINE_SPINLOCK(ppe_lock);
37
38 +static const struct rhashtable_params mtk_flow_l2_ht_params = {
39 + .head_offset = offsetof(struct mtk_flow_entry, l2_node),
40 + .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
41 + .key_len = offsetof(struct mtk_foe_bridge, key_end),
42 + .automatic_shrinking = true,
43 +};
44 +
45 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
46 {
47 writel(val, ppe->base + reg);
48 @@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e
49 {
50 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
51
52 + if (type == MTK_PPE_PKT_TYPE_BRIDGE)
53 + return &entry->bridge.l2;
54 +
55 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
56 return &entry->ipv6.l2;
57
58 @@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *
59 {
60 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
61
62 + if (type == MTK_PPE_PKT_TYPE_BRIDGE)
63 + return &entry->bridge.ib2;
64 +
65 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
66 return &entry->ipv6.ib2;
67
68 @@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe
69 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
70 entry->ipv6.ports = ports_pad;
71
72 - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
73 + if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
74 + ether_addr_copy(entry->bridge.src_mac, src_mac);
75 + ether_addr_copy(entry->bridge.dest_mac, dest_mac);
76 + entry->bridge.ib2 = val;
77 + l2 = &entry->bridge.l2;
78 + } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
79 entry->ipv6.ib2 = val;
80 l2 = &entry->ipv6.l2;
81 } else {
82 @@ -372,12 +393,96 @@ mtk_flow_entry_match(struct mtk_flow_ent
83 }
84
85 static void
86 +__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
87 +{
88 + struct hlist_head *head;
89 + struct hlist_node *tmp;
90 +
91 + if (entry->type == MTK_FLOW_TYPE_L2) {
92 + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
93 + mtk_flow_l2_ht_params);
94 +
95 + head = &entry->l2_flows;
96 + hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
97 + __mtk_foe_entry_clear(ppe, entry);
98 + return;
99 + }
100 +
101 + hlist_del_init(&entry->list);
102 + if (entry->hash != 0xffff) {
103 + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
104 + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
105 + MTK_FOE_STATE_BIND);
106 + dma_wmb();
107 + }
108 + entry->hash = 0xffff;
109 +
110 + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
111 + return;
112 +
113 + hlist_del_init(&entry->l2_data.list);
114 + kfree(entry);
115 +}
116 +
117 +static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
118 +{
119 + u16 timestamp;
120 + u16 now;
121 +
122 + now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
123 + timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
124 +
125 + if (timestamp > now)
126 + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
127 + else
128 + return now - timestamp;
129 +}
130 +
131 +static void
132 +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
133 +{
134 + struct mtk_flow_entry *cur;
135 + struct mtk_foe_entry *hwe;
136 + struct hlist_node *tmp;
137 + int idle;
138 +
139 + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
140 + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
141 + int cur_idle;
142 + u32 ib1;
143 +
144 + hwe = &ppe->foe_table[cur->hash];
145 + ib1 = READ_ONCE(hwe->ib1);
146 +
147 + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
148 + cur->hash = 0xffff;
149 + __mtk_foe_entry_clear(ppe, cur);
150 + continue;
151 + }
152 +
153 + cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
154 + if (cur_idle >= idle)
155 + continue;
156 +
157 + idle = cur_idle;
158 + entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
159 + entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
160 + }
161 +}
162 +
163 +static void
164 mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
165 {
166 struct mtk_foe_entry *hwe;
167 struct mtk_foe_entry foe;
168
169 spin_lock_bh(&ppe_lock);
170 +
171 + if (entry->type == MTK_FLOW_TYPE_L2) {
172 + mtk_flow_entry_update_l2(ppe, entry);
173 + goto out;
174 + }
175 +
176 if (entry->hash == 0xffff)
177 goto out;
178
179 @@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
180 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
181 {
182 spin_lock_bh(&ppe_lock);
183 - hlist_del_init(&entry->list);
184 - if (entry->hash != 0xffff) {
185 - ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
186 - ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
187 - MTK_FOE_STATE_BIND);
188 - dma_wmb();
189 - }
190 - entry->hash = 0xffff;
191 + __mtk_foe_entry_clear(ppe, entry);
192 spin_unlock_bh(&ppe_lock);
193 }
194
195 +static int
196 +mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
197 +{
198 + entry->type = MTK_FLOW_TYPE_L2;
199 +
200 + return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
201 + mtk_flow_l2_ht_params);
202 +}
203 +
204 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
205 {
206 - u32 hash = mtk_ppe_hash_entry(&entry->data);
207 + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
208 + u32 hash;
209 +
210 + if (type == MTK_PPE_PKT_TYPE_BRIDGE)
211 + return mtk_foe_entry_commit_l2(ppe, entry);
212
213 + hash = mtk_ppe_hash_entry(&entry->data);
214 entry->hash = 0xffff;
215 spin_lock_bh(&ppe_lock);
216 hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
217 @@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe
218 return 0;
219 }
220
221 +static void
222 +mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
223 + u16 hash)
224 +{
225 + struct mtk_flow_entry *flow_info;
226 + struct mtk_foe_entry foe, *hwe;
227 + struct mtk_foe_mac_info *l2;
228 + u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
229 + int type;
230 +
231 + flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
232 + GFP_ATOMIC);
233 + if (!flow_info)
234 + return;
235 +
236 + flow_info->l2_data.base_flow = entry;
237 + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
238 + flow_info->hash = hash;
239 + hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
240 + hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
241 +
242 + hwe = &ppe->foe_table[hash];
243 + memcpy(&foe, hwe, sizeof(foe));
244 + foe.ib1 &= ib1_mask;
245 + foe.ib1 |= entry->data.ib1 & ~ib1_mask;
246 +
247 + l2 = mtk_foe_entry_l2(&foe);
248 + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
249 +
250 + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
251 + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
252 + memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
253 + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
254 + l2->etype = ETH_P_IPV6;
255 +
256 + *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
257 +
258 + __mtk_foe_entry_commit(ppe, &foe, hash);
259 +}
260 +
261 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
262 {
263 struct hlist_head *head = &ppe->foe_flow[hash / 2];
264 - struct mtk_flow_entry *entry;
265 struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
266 + struct mtk_flow_entry *entry;
267 + struct mtk_foe_bridge key = {};
268 + struct ethhdr *eh;
269 bool found = false;
270 -
271 - if (hlist_empty(head))
272 - return;
273 + u8 *tag;
274
275 spin_lock_bh(&ppe_lock);
276 +
277 + if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
278 + goto out;
279 +
280 hlist_for_each_entry(entry, head, list) {
281 + if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
282 + if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
283 + MTK_FOE_STATE_BIND))
284 + continue;
285 +
286 + entry->hash = 0xffff;
287 + __mtk_foe_entry_clear(ppe, entry);
288 + continue;
289 + }
290 +
291 if (found || !mtk_flow_entry_match(entry, hwe)) {
292 if (entry->hash != 0xffff)
293 entry->hash = 0xffff;
294 @@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe
295 __mtk_foe_entry_commit(ppe, &entry->data, hash);
296 found = true;
297 }
298 +
299 + if (found)
300 + goto out;
301 +
302 + eh = eth_hdr(skb);
303 + ether_addr_copy(key.dest_mac, eh->h_dest);
304 + ether_addr_copy(key.src_mac, eh->h_source);
305 + tag = skb->data - 2;
306 + key.vlan = 0;
307 + switch (skb->protocol) {
308 +#if IS_ENABLED(CONFIG_NET_DSA)
309 + case htons(ETH_P_XDSA):
310 + if (!netdev_uses_dsa(skb->dev) ||
311 + skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
312 + goto out;
313 +
314 + tag += 4;
315 + if (get_unaligned_be16(tag) != ETH_P_8021Q)
316 + break;
317 +
318 + fallthrough;
319 +#endif
320 + case htons(ETH_P_8021Q):
321 + key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
322 + break;
323 + default:
324 + break;
325 + }
326 +
327 + entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
328 + if (!entry)
329 + goto out;
330 +
331 + mtk_foe_entry_commit_subflow(ppe, entry, hash);
332 +
333 +out:
334 spin_unlock_bh(&ppe_lock);
335 }
336
337 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
338 {
339 - u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
340 - u16 timestamp;
341 -
342 mtk_flow_entry_update(ppe, entry);
343 - timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
344
345 - if (timestamp > now)
346 - return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
347 - else
348 - return now - timestamp;
349 + return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
350 }
351
352 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
353 @@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
354 if (!ppe)
355 return NULL;
356
357 + rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
358 +
359 /* need to allocate a separate device, since it PPE DMA access is
360 * not coherent.
361 */
362 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h
363 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
364 @@ -6,6 +6,7 @@
365
366 #include <linux/kernel.h>
367 #include <linux/bitfield.h>
368 +#include <linux/rhashtable.h>
369
370 #define MTK_ETH_PPE_BASE 0xc00
371
372 @@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
373 u16 src_mac_lo;
374 };
375
376 +/* software-only entry type */
377 struct mtk_foe_bridge {
378 - u32 dest_mac_hi;
379 -
380 - u16 src_mac_lo;
381 - u16 dest_mac_lo;
382 + u8 dest_mac[ETH_ALEN];
383 + u8 src_mac[ETH_ALEN];
384 + u16 vlan;
385
386 - u32 src_mac_hi;
387 + struct {} key_end;
388
389 u32 ib2;
390
391 - u32 _rsv[5];
392 -
393 - u32 udf_tsid;
394 struct mtk_foe_mac_info l2;
395 };
396
397 @@ -235,13 +233,33 @@ enum {
398 MTK_PPE_CPU_REASON_INVALID = 0x1f,
399 };
400
401 +enum {
402 + MTK_FLOW_TYPE_L4,
403 + MTK_FLOW_TYPE_L2,
404 + MTK_FLOW_TYPE_L2_SUBFLOW,
405 +};
406 +
407 struct mtk_flow_entry {
408 + union {
409 + struct hlist_node list;
410 + struct {
411 + struct rhash_head l2_node;
412 + struct hlist_head l2_flows;
413 + };
414 + };
415 + u8 type;
416 + s8 wed_index;
417 + u16 hash;
418 + union {
419 + struct mtk_foe_entry data;
420 + struct {
421 + struct mtk_flow_entry *base_flow;
422 + struct hlist_node list;
423 + struct {} end;
424 + } l2_data;
425 + };
426 struct rhash_head node;
427 - struct hlist_node list;
428 unsigned long cookie;
429 - struct mtk_foe_entry data;
430 - u16 hash;
431 - s8 wed_index;
432 };
433
434 struct mtk_ppe {
435 @@ -256,6 +274,8 @@ struct mtk_ppe {
436 u16 foe_check_time[MTK_PPE_ENTRIES];
437 struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
438
439 + struct rhashtable l2_flows;
440 +
441 void *acct_table;
442 };
443
444 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
445 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
446 @@ -31,6 +31,8 @@ struct mtk_flow_data {
447 __be16 src_port;
448 __be16 dst_port;
449
450 + u16 vlan_in;
451 +
452 struct {
453 u16 id;
454 __be16 proto;
455 @@ -257,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth
456 return -EOPNOTSUPP;
457 }
458
459 + switch (addr_type) {
460 + case 0:
461 + offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
462 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
463 + struct flow_match_eth_addrs match;
464 +
465 + flow_rule_match_eth_addrs(rule, &match);
466 + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
467 + memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
468 + } else {
469 + return -EOPNOTSUPP;
470 + }
471 +
472 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
473 + struct flow_match_vlan match;
474 +
475 + flow_rule_match_vlan(rule, &match);
476 +
477 + if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
478 + return -EOPNOTSUPP;
479 +
480 + data.vlan_in = match.key->vlan_id;
481 + }
482 + break;
483 + case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
484 + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
485 + break;
486 + case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
487 + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
488 + break;
489 + default:
490 + return -EOPNOTSUPP;
491 + }
492 +
493 flow_action_for_each(i, act, &rule->action) {
494 switch (act->id) {
495 case FLOW_ACTION_MANGLE:
496 + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
497 + return -EOPNOTSUPP;
498 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
499 mtk_flow_offload_mangle_eth(act, &data.eth);
500 break;
501 @@ -291,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth
502 }
503 }
504
505 - switch (addr_type) {
506 - case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
507 - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
508 - break;
509 - case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
510 - offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
511 - break;
512 - default:
513 - return -EOPNOTSUPP;
514 - }
515 -
516 if (!is_valid_ether_addr(data.eth.h_source) ||
517 !is_valid_ether_addr(data.eth.h_dest))
518 return -EINVAL;
519 @@ -315,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth
520 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
521 struct flow_match_ports ports;
522
523 + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
524 + return -EOPNOTSUPP;
525 +
526 flow_rule_match_ports(rule, &ports);
527 data.src_port = ports.key->src;
528 data.dst_port = ports.key->dst;
529 - } else {
530 + } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
531 return -EOPNOTSUPP;
532 }
533
534 @@ -348,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth
535 if (act->id != FLOW_ACTION_MANGLE)
536 continue;
537
538 + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
539 + return -EOPNOTSUPP;
540 +
541 switch (act->mangle.htype) {
542 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
543 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
544 @@ -373,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth
545 return err;
546 }
547
548 + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
549 + foe.bridge.vlan = data.vlan_in;
550 +
551 if (data.vlan.num == 1) {
552 if (data.vlan.proto != htons(ETH_P_8021Q))
553 return -EOPNOTSUPP;