From 5d0fad48d2dec175ecb999974b94203c577973ef Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 8 May 2024 11:43:34 +0100
Subject: [PATCH] net: ethernet: mediatek: split tx and rx fields in
 mtk_soc_data struct

Split tx and rx fields in mtk_soc_data struct. This is a preliminary
patch to roll back to ADMAv1 for MT7986 and MT7981 SoC in order to fix a
hw hang if the device receives a corrupted packet when using ADMAv2.0.

Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Link: https://lore.kernel.org/r/70a799b1f060ec2f57883e88ccb420ac0fb0abb5.1715164770.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
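For reference, a minimal standalone sketch of the new layout (field names
follow the hunks below; it is not driver code, and the numeric values are
placeholders rather than the real per-SoC constants):

#include <stdint.h>
#include <stdio.h>

/* The shared "txrx" block becomes two independent blocks so tx and rx can
 * carry their own descriptor size and DMA parameters.
 */
struct soc_data {
	struct {
		uint32_t desc_size;      /* Tx DMA descriptor size */
		uint32_t dma_max_len;    /* max Tx DMA buffer length */
		uint32_t dma_len_offset; /* Tx DMA length field offset */
	} tx;
	struct {
		uint32_t desc_size;      /* Rx DMA descriptor size */
		uint32_t irq_done_mask;  /* Rx irq done register mask */
		uint32_t dma_l4_valid;   /* Rx DMA valid register mask */
		uint32_t dma_max_len;    /* max Rx DMA buffer length */
		uint32_t dma_len_offset; /* Rx DMA length field offset */
	} rx;
};

int main(void)
{
	/* placeholder values standing in for the real per-SoC constants */
	const struct soc_data soc = {
		.tx = { .desc_size = 32, .dma_max_len = 0xffff, .dma_len_offset = 8 },
		.rx = { .desc_size = 32, .irq_done_mask = 1u << 0, .dma_l4_valid = 1u << 1,
			.dma_max_len = 0xffff, .dma_len_offset = 8 },
	};
	unsigned int idx = 3;

	/* ring entries are indexed with the per-direction size, as in
	 * "rxd = ring->dma + idx * eth->soc->rx.desc_size" in the driver
	 */
	printf("rx descriptor %u starts at byte offset %u\n",
	       idx, (unsigned int)(idx * soc.rx.desc_size));
	return 0;
}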
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29 +--
 2 files changed, 139 insertions(+), 100 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1138,7 +1138,7 @@ static int mtk_init_fq_dma(struct mtk_et
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
@@ -1154,16 +1154,16 @@ static int mtk_init_fq_dma(struct mtk_et
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;

- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);

for (i = 0; i < cnt; i++) {
struct mtk_tx_dma_v2 *txd;

- txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd = eth->scratch_ring + i * soc->tx.desc_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ (i + 1) * soc->tx.desc_size;

txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
@@ -1412,7 +1412,7 @@ static int mtk_tx_map(struct sk_buff *sk
if (itxd == ring->last_free)
return -ENOMEM;

- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));

txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1453,7 +1453,7 @@ static int mtk_tx_map(struct sk_buff *sk

memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1466,7 +1466,7 @@ static int mtk_tx_map(struct sk_buff *sk
mtk_tx_set_dma_desc(dev, txd, &txd_info);

tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1509,7 +1509,7 @@ static int mtk_tx_map(struct sk_buff *sk
} else {
int next_idx;

- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1518,7 +1518,7 @@ static int mtk_tx_map(struct sk_buff *sk

err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);

/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1543,7 +1543,7 @@ static int mtk_cal_txd_req(struct mtk_et
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1650,7 +1650,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri

ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1818,7 +1818,7 @@ static int mtk_xdp_submit_frame(struct m
}
htxd = txd;

- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;

@@ -1837,7 +1837,7 @@ static int mtk_xdp_submit_frame(struct m
goto unmap;

tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1875,7 +1875,7 @@ static int mtk_xdp_submit_frame(struct m
} else {
int idx;

- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1886,7 +1886,7 @@ static int mtk_xdp_submit_frame(struct m

unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);

htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2017,7 +2017,7 @@ static int mtk_poll_rx(struct napi_struc
goto rx_done;

idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];

if (!mtk_rx_get_desc(eth, &trxd, rxd))
@@ -2152,7 +2152,7 @@ static int mtk_poll_rx(struct napi_struc
rxdcsum = &trxd.rxd4;
}

- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2276,7 +2276,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
break;

tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;

@@ -2327,7 +2327,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
}
mtk_tx_unmap(eth, tx_buf, &bq, true);

- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);

@@ -2417,7 +2417,7 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;

- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2433,10 +2433,10 @@ static int mtk_napi_rx(struct napi_struc
return budget;

} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);

if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);

return rx_done_total;
}
@@ -2445,7 +2445,7 @@ static int mtk_tx_alloc(struct mtk_eth *
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2568,14 +2568,14 @@ static void mtk_tx_clean(struct mtk_eth
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}

if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2630,15 +2630,15 @@ static int mtk_rx_alloc(struct mtk_eth *
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;

ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}

if (!ring->dma)
@@ -2649,7 +2649,7 @@ static int mtk_rx_alloc(struct mtk_eth *
dma_addr_t dma_addr;
void *data;

- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2740,7 +2740,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!ring->data[i])
continue;

- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;

@@ -2757,7 +2757,7 @@ static void mtk_rx_clean(struct mtk_eth

if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3120,7 +3120,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3170,7 +3170,7 @@ static irqreturn_t mtk_handle_irq_rx(int

eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}

@@ -3196,9 +3196,9 @@ static irqreturn_t mtk_handle_irq(int ir
const struct mtk_reg_map *reg_map = eth->soc->reg_map;

if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3216,10 +3216,10 @@ static void mtk_poll_controller(struct n
struct mtk_eth *eth = mac->hw;

mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif

@@ -3383,7 +3383,7 @@ static int mtk_open(struct net_device *d
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3467,7 +3467,7 @@ static int mtk_stop(struct net_device *d
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);

@@ -3943,9 +3943,9 @@ static int mtk_hw_init(struct mtk_eth *e

/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5037,11 +5037,15 @@ static const struct mtk_soc_data mt2701_
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5057,11 +5061,15 @@ static const struct mtk_soc_data mt7621_
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5079,11 +5087,15 @@ static const struct mtk_soc_data mt7622_
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5100,11 +5112,15 @@ static const struct mtk_soc_data mt7623_
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5119,11 +5135,15 @@ static const struct mtk_soc_data mt7629_
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5141,11 +5161,15 @@ static const struct mtk_soc_data mt7981_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5163,11 +5187,15 @@ static const struct mtk_soc_data mt7986_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5185,11 +5213,15 @@ static const struct mtk_soc_data mt7988_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5202,11 +5234,15 @@ static const struct mtk_soc_data rt5350_
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -327,8 +327,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +348,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } rx;
};

#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)