From dee4dd10c79aaca192b73520d8fb64628468ae0f Mon Sep 17 00:00:00 2001
From: Elad Yifee <eladwf@gmail.com>
Date: Fri, 7 Jun 2024 11:21:50 +0300
Subject: [PATCH] net: ethernet: mtk_eth_soc: ppe: add support for multiple
 PPEs

Add the missing pieces to allow multiple PPE units, one for each GMAC.
mtk_gdm_config has been modified to work on the targeted MAC ID;
the inner loop was moved outside of the function to allow unrelated
operations like setting the MAC's PPE index.
Introduce a sanity check in flow_offload_replace to account for
non-MTK ingress devices.
An additional field, 'ppe_idx', was added to struct mtk_mac in order
to keep track of the assigned PPE unit.

Signed-off-by: Elad Yifee <eladwf@gmail.com>
Link: https://lore.kernel.org/r/20240607082155.20021-1-eladwf@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
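
A minimal stand-alone sketch of the two mappings this patch introduces
(illustrative only; pick_ppe_idx() and ppe_mmio_offset() are made-up
helpers, the driver open-codes both): the GMAC-to-PPE assignment done
in mtk_open() and the per-unit register offset used in mtk_probe().

#include <stdio.h>

/* Mirrors the if/else chain added to mtk_open(): GMAC2 uses PPE2 when
 * three PPEs exist, GMAC1 uses PPE1 when at least two exist, and
 * everything else falls back to PPE0.
 */
static int pick_ppe_idx(int mac_id, int ppe_num)
{
	if (ppe_num >= 3 && mac_id == 2)
		return 2;
	if (ppe_num >= 2 && mac_id == 1)
		return 1;
	return 0;
}

/* Mirrors the address math in mtk_probe(): PPE0 sits at ppe_base,
 * PPE1 at ppe_base + 0x400, and PPE2 at ppe_base + 0xc00 (not 0x800).
 */
static unsigned int ppe_mmio_offset(int idx)
{
	return idx == 2 ? 0xc00 : idx * 0x400;
}

int main(void)
{
	/* ppe_num per SoC in this patch: MT7621/MT7622/MT7623 have 1,
	 * MT7981/MT7986 have 2, MT7988 has 3.
	 */
	for (int ppe_num = 1; ppe_num <= 3; ppe_num++)
		for (int mac_id = 0; mac_id <= 2; mac_id++)
			printf("ppe_num=%d: GMAC%d -> PPE%d (+0x%x)\n",
			       ppe_num, mac_id,
			       pick_ppe_idx(mac_id, ppe_num),
			       ppe_mmio_offset(pick_ppe_idx(mac_id, ppe_num)));
	return 0;
}
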
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 112 +++++++++++-------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |   8 +-
 .../net/ethernet/mediatek/mtk_ppe_offload.c |  17 ++-
 3 files changed, 92 insertions(+), 45 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_
 .fq_blen = 0x1b2c,
 },
 .gdm1_cnt = 0x2400,
- .gdma_to_ppe = 0x4444,
+ .gdma_to_ppe = {
+ [0] = 0x4444,
+ },
 .ppe_base = 0x0c00,
 .wdma_base = {
 [0] = 0x2800,
@@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_r
 .tx_sch_rate = 0x4798,
 },
 .gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ },
 .ppe_base = 0x2000,
 .wdma_base = {
 [0] = 0x4800,
@@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_r
 .tx_sch_rate = 0x4798,
 },
 .gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ [2] = 0xcccc,
+ },
 .ppe_base = 0x2000,
 .wdma_base = {
 [0] = 0x4800,
@@ -2015,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struc
 struct mtk_rx_dma_v2 *rxd, trxd;
 int done = 0, bytes = 0;
 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+ int ppe_idx = 0;

 while (done < budget) {
 unsigned int pktlen, *rxdcsum;
@@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struc
 goto release_desc;

 netdev = eth->netdev[mac];
+ ppe_idx = eth->mac[mac]->ppe_idx;

 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 goto release_desc;
@@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struc
 }

 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+ mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);

 skb_record_rx_queue(skb, 0);
 napi_gro_receive(napi, skb);
@@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth
 return 0;
 }

-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
 {
- int i;
+ u32 val;

 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 return;

- for (i = 0; i < MTK_MAX_DEVS; i++) {
- u32 val;
-
- if (!eth->netdev[i])
- continue;
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));

- val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ /* default setup the forward port to send frame to PDMA */
+ val &= ~0xffff;

- /* default setup the forward port to send frame to PDMA */
- val &= ~0xffff;
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+ val |= config;

- val |= config;
+ if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
+ val |= MTK_GDMA_SPECIAL_TAG;

- if (netdev_uses_dsa(eth->netdev[i]))
- val |= MTK_GDMA_SPECIAL_TAG;
-
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
 }


@@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *d
 {
 struct mtk_mac *mac = netdev_priv(dev);
 struct mtk_eth *eth = mac->hw;
- int i, err;
+ struct mtk_mac *target_mac;
+ int i, err, ppe_num;
+
+ ppe_num = eth->soc->ppe_num;

 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
 if (err) {
@@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *d
 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
 mtk_ppe_start(eth->ppe[i]);

- gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
- : MTK_GDMA_TO_PDMA;
- mtk_gdm_config(eth, gdm_config);
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ break;
+
+ target_mac = netdev_priv(eth->netdev[i]);
+ if (!soc->offload_version) {
+ target_mac->ppe_idx = 0;
+ gdm_config = MTK_GDMA_TO_PDMA;
+ } else if (ppe_num >= 3 && target_mac->id == 2) {
+ target_mac->ppe_idx = 2;
+ gdm_config = soc->reg_map->gdma_to_ppe[2];
+ } else if (ppe_num >= 2 && target_mac->id == 1) {
+ target_mac->ppe_idx = 1;
+ gdm_config = soc->reg_map->gdma_to_ppe[1];
+ } else {
+ target_mac->ppe_idx = 0;
+ gdm_config = soc->reg_map->gdma_to_ppe[0];
+ }
+ mtk_gdm_config(eth, target_mac->id, gdm_config);
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);

 napi_enable(&eth->tx_napi);
 napi_enable(&eth->rx_napi);
 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
 refcount_set(&eth->dma_refcnt, 1);
- }
- else
+ } else {
 refcount_inc(&eth->dma_refcnt);
+ }

 phylink_start(mac->phylink);
 netif_tx_start_all_queues(dev);
@@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *d
 if (!refcount_dec_and_test(&eth->dma_refcnt))
 return 0;

- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);

 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
@@ -4957,23 +4982,24 @@ static int mtk_probe(struct platform_dev
 }

 if (eth->soc->offload_version) {
- u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+ u8 ppe_num = eth->soc->ppe_num;

- num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
- for (i = 0; i < num_ppe; i++) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+ ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
+ for (i = 0; i < ppe_num; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base;

+ ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);

 if (!eth->ppe[i]) {
 err = -ENOMEM;
 goto err_deinit_ppe;
 }
- }
+ err = mtk_eth_offload_init(eth, i);

- err = mtk_eth_offload_init(eth);
- if (err)
- goto err_deinit_ppe;
+ if (err)
+ goto err_deinit_ppe;
+ }
 }

 for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5076,6 +5102,7 @@ static const struct mtk_soc_data mt7621_
 .required_pctl = false,
 .version = 1,
 .offload_version = 1,
+ .ppe_num = 1,
 .hash_offset = 2,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 .tx = {
@@ -5104,6 +5131,7 @@ static const struct mtk_soc_data mt7622_
 .required_pctl = false,
 .version = 1,
 .offload_version = 2,
+ .ppe_num = 1,
 .hash_offset = 2,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@@ -5132,6 +5160,7 @@ static const struct mtk_soc_data mt7623_
 .required_pctl = true,
 .version = 1,
 .offload_version = 1,
+ .ppe_num = 1,
 .hash_offset = 2,
 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 .disable_pll_modes = true,
@@ -5187,6 +5216,7 @@ static const struct mtk_soc_data mt7981_
 .required_pctl = false,
 .version = 2,
 .offload_version = 2,
+ .ppe_num = 2,
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5216,6 +5246,7 @@ static const struct mtk_soc_data mt7986_
 .required_pctl = false,
 .version = 2,
 .offload_version = 2,
+ .ppe_num = 2,
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5245,6 +5276,7 @@ static const struct mtk_soc_data mt7988_
 .required_pctl = false,
 .version = 3,
 .offload_version = 2,
+ .ppe_num = 3,
 .hash_offset = 4,
 .has_accounting = true,
 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1132,7 +1132,7 @@ struct mtk_reg_map {
 u32 tx_sch_rate; /* tx scheduler rate control registers */
 } qdma;
 u32 gdm1_cnt;
- u32 gdma_to_ppe;
+ u32 gdma_to_ppe[3];
 u32 ppe_base;
 u32 wdma_base[3];
 u32 pse_iq_sta;
@@ -1170,6 +1170,7 @@ struct mtk_soc_data {
 u8 offload_version;
 u8 hash_offset;
 u8 version;
+ u8 ppe_num;
 u16 foe_entry_size;
 netdev_features_t hw_features;
 bool has_accounting;
@@ -1294,7 +1295,7 @@ struct mtk_eth {

 struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];

- struct mtk_ppe *ppe[2];
+ struct mtk_ppe *ppe[3];
 struct rhashtable flow_table;

 struct bpf_prog __rcu *prog;
@@ -1319,6 +1320,7 @@ struct mtk_eth {
 struct mtk_mac {
 int id;
 phy_interface_t interface;
+ u8 ppe_idx;
 int speed;
 struct device_node *of_node;
 struct phylink *phylink;
@@ -1440,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk
 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);

-int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data);
 int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth
 int ppe_index)
 {
 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct net_device *idev = NULL, *odev = NULL;
 struct flow_action_entry *act;
 struct mtk_flow_data data = {};
 struct mtk_foe_entry foe;
- struct net_device *odev = NULL;
 struct mtk_flow_entry *entry;
 int offload_type = 0;
 int wed_index = -1;
@@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth
 struct flow_match_meta match;

 flow_rule_match_meta(rule, &match);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (idev) {
+ struct mtk_mac *mac = netdev_priv(idev);
+
+ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
+ return -EINVAL;
+
+ ppe_index = mac->ppe_idx;
+ }
+ }
 } else {
 return -EOPNOTSUPP;
 }
@@ -630,7 +641,9 @@ int mtk_eth_setup_tc(struct net_device *
 }
 }

-int mtk_eth_offload_init(struct mtk_eth *eth)
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
 {
+ if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
+ return 0;
 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
 }