1 From dee4dd10c79aaca192b73520d8fb64628468ae0f Mon Sep 17 00:00:00 2001
2 From: Elad Yifee <eladwf@gmail.com>
3 Date: Fri, 7 Jun 2024 11:21:50 +0300
4 Subject: [PATCH] net: ethernet: mtk_eth_soc: ppe: add support for multiple
7 Add the missing pieces to allow multiple PPEs units, one for each GMAC.
8 mtk_gdm_config has been modified to work on targeted mac ID,
9 the inner loop moved outside of the function to allow unrelated
10 operations like setting the MAC's PPE index.
11 Introduce a sanity check in flow_offload_replace to account for
12 non-MTK ingress devices.
13 Additional field 'ppe_idx' was added to struct mtk_mac in order
14 to keep track of the assigned PPE unit.
16 Signed-off-by: Elad Yifee <eladwf@gmail.com>
17 Link: https://lore.kernel.org/r/20240607082155.20021-1-eladwf@gmail.com
18 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
20 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 112 +++++++++++-------
21 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 +-
22 .../net/ethernet/mediatek/mtk_ppe_offload.c | 17 ++-
23 3 files changed, 92 insertions(+), 45 deletions(-)
25 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
26 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
27 @@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_
31 - .gdma_to_ppe = 0x4444,
38 @@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_r
39 .tx_sch_rate = 0x4798,
42 - .gdma_to_ppe = 0x3333,
50 @@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_r
51 .tx_sch_rate = 0x4798,
54 - .gdma_to_ppe = 0x3333,
63 @@ -2015,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struc
64 struct mtk_rx_dma_v2 *rxd, trxd;
65 int done = 0, bytes = 0;
66 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
69 while (done < budget) {
70 unsigned int pktlen, *rxdcsum;
71 @@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struc
74 netdev = eth->netdev[mac];
75 + ppe_idx = eth->mac[mac]->ppe_idx;
77 if (unlikely(test_bit(MTK_RESETTING, ð->state)))
79 @@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struc
82 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
83 - mtk_ppe_check_skb(eth->ppe[0], skb, hash);
84 + mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
86 skb_record_rx_queue(skb, 0);
87 napi_gro_receive(napi, skb);
88 @@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth
92 -static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
93 +static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
98 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
101 - for (i = 0; i < MTK_MAX_DEVS; i++) {
104 - if (!eth->netdev[i])
106 + val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
108 - val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
109 + /* default setup the forward port to send frame to PDMA */
112 - /* default setup the forward port to send frame to PDMA */
114 + /* Enable RX checksum */
115 + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
117 - /* Enable RX checksum */
118 - val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
122 + if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
123 + val |= MTK_GDMA_SPECIAL_TAG;
125 - if (netdev_uses_dsa(eth->netdev[i]))
126 - val |= MTK_GDMA_SPECIAL_TAG;
128 - mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
130 - /* Reset and enable PSE */
131 - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
132 - mtk_w32(eth, 0, MTK_RST_GL);
133 + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
137 @@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *d
139 struct mtk_mac *mac = netdev_priv(dev);
140 struct mtk_eth *eth = mac->hw;
142 + struct mtk_mac *target_mac;
143 + int i, err, ppe_num;
145 + ppe_num = eth->soc->ppe_num;
147 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
149 @@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *d
150 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
151 mtk_ppe_start(eth->ppe[i]);
153 - gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
154 - : MTK_GDMA_TO_PDMA;
155 - mtk_gdm_config(eth, gdm_config);
156 + for (i = 0; i < MTK_MAX_DEVS; i++) {
157 + if (!eth->netdev[i])
160 + target_mac = netdev_priv(eth->netdev[i]);
161 + if (!soc->offload_version) {
162 + target_mac->ppe_idx = 0;
163 + gdm_config = MTK_GDMA_TO_PDMA;
164 + } else if (ppe_num >= 3 && target_mac->id == 2) {
165 + target_mac->ppe_idx = 2;
166 + gdm_config = soc->reg_map->gdma_to_ppe[2];
167 + } else if (ppe_num >= 2 && target_mac->id == 1) {
168 + target_mac->ppe_idx = 1;
169 + gdm_config = soc->reg_map->gdma_to_ppe[1];
171 + target_mac->ppe_idx = 0;
172 + gdm_config = soc->reg_map->gdma_to_ppe[0];
174 + mtk_gdm_config(eth, target_mac->id, gdm_config);
176 + /* Reset and enable PSE */
177 + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
178 + mtk_w32(eth, 0, MTK_RST_GL);
180 napi_enable(ð->tx_napi);
181 napi_enable(ð->rx_napi);
182 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
183 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
184 refcount_set(ð->dma_refcnt, 1);
188 refcount_inc(ð->dma_refcnt);
191 phylink_start(mac->phylink);
192 netif_tx_start_all_queues(dev);
193 @@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *d
194 if (!refcount_dec_and_test(ð->dma_refcnt))
197 - mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
198 + for (i = 0; i < MTK_MAX_DEVS; i++)
199 + mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
201 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
202 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
203 @@ -4957,23 +4982,24 @@ static int mtk_probe(struct platform_dev
206 if (eth->soc->offload_version) {
207 - u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
208 + u8 ppe_num = eth->soc->ppe_num;
210 - num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
211 - for (i = 0; i < num_ppe; i++) {
212 - u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
213 + ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
214 + for (i = 0; i < ppe_num; i++) {
215 + u32 ppe_addr = eth->soc->reg_map->ppe_base;
217 + ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
218 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
225 + err = mtk_eth_offload_init(eth, i);
227 - err = mtk_eth_offload_init(eth);
229 - goto err_deinit_ppe;
231 + goto err_deinit_ppe;
235 for (i = 0; i < MTK_MAX_DEVS; i++) {
236 @@ -5076,6 +5102,7 @@ static const struct mtk_soc_data mt7621_
237 .required_pctl = false,
239 .offload_version = 1,
242 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
244 @@ -5104,6 +5131,7 @@ static const struct mtk_soc_data mt7622_
245 .required_pctl = false,
247 .offload_version = 2,
250 .has_accounting = true,
251 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
252 @@ -5132,6 +5160,7 @@ static const struct mtk_soc_data mt7623_
253 .required_pctl = true,
255 .offload_version = 1,
258 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
259 .disable_pll_modes = true,
260 @@ -5187,6 +5216,7 @@ static const struct mtk_soc_data mt7981_
261 .required_pctl = false,
263 .offload_version = 2,
266 .has_accounting = true,
267 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
268 @@ -5216,6 +5246,7 @@ static const struct mtk_soc_data mt7986_
269 .required_pctl = false,
271 .offload_version = 2,
274 .has_accounting = true,
275 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
276 @@ -5245,6 +5276,7 @@ static const struct mtk_soc_data mt7988_
277 .required_pctl = false,
279 .offload_version = 2,
282 .has_accounting = true,
283 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
284 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
285 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
286 @@ -1132,7 +1132,7 @@ struct mtk_reg_map {
287 u32 tx_sch_rate; /* tx scheduler rate control registers */
291 + u32 gdma_to_ppe[3];
295 @@ -1170,6 +1170,7 @@ struct mtk_soc_data {
301 netdev_features_t hw_features;
303 @@ -1294,7 +1295,7 @@ struct mtk_eth {
305 struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
307 - struct mtk_ppe *ppe[2];
308 + struct mtk_ppe *ppe[3];
309 struct rhashtable flow_table;
311 struct bpf_prog __rcu *prog;
312 @@ -1319,6 +1320,7 @@ struct mtk_eth {
315 phy_interface_t interface;
318 struct device_node *of_node;
319 struct phylink *phylink;
320 @@ -1440,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk
321 int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
322 int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
324 -int mtk_eth_offload_init(struct mtk_eth *eth);
325 +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
326 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
328 int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
329 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
330 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
331 @@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth
334 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
335 + struct net_device *idev = NULL, *odev = NULL;
336 struct flow_action_entry *act;
337 struct mtk_flow_data data = {};
338 struct mtk_foe_entry foe;
339 - struct net_device *odev = NULL;
340 struct mtk_flow_entry *entry;
341 int offload_type = 0;
343 @@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth
344 struct flow_match_meta match;
346 flow_rule_match_meta(rule, &match);
347 + if (mtk_is_netsys_v2_or_greater(eth)) {
348 + idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
350 + struct mtk_mac *mac = netdev_priv(idev);
352 + if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
355 + ppe_index = mac->ppe_idx;
361 @@ -630,7 +641,9 @@ int mtk_eth_setup_tc(struct net_device *
365 -int mtk_eth_offload_init(struct mtk_eth *eth)
366 +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
368 + if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
370 return rhashtable_init(ð->flow_table, &mtk_flow_ht_params);