1 From: Lorenzo Bianconi <lorenzo@kernel.org>
2 Date: Fri, 20 May 2022 20:11:36 +0200
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support
4
5 Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA
6 descriptors.
7 This is a preliminary patch to add mt7986 ethernet support.
8
9 Tested-by: Sam Shih <sam.shih@mediatek.com>
10 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
11 Signed-off-by: David S. Miller <davem@davemloft.net>
12 ---
13
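The hunks below repeatedly apply a single pattern: descriptor rings are
walked by the per-SoC descriptor size (soc->txrx.txd_size / rxd_size), and
the extra dwords are only touched when the MTK_NETSYS_V2 capability is set,
so one code path serves both the 16B (v1) and 32B (v2) layouts. A minimal
sketch of that pattern follows; the struct layout and the capability check
mirror the diff, while the init_ring() scaffolding is illustrative only:

	/* 32B layout: a superset of the 16B mtk_tx_dma descriptor */
	struct mtk_tx_dma_v2 {
		unsigned int txd1, txd2, txd3, txd4;
		unsigned int txd5, txd6, txd7, txd8;
	};

	static void init_ring(struct mtk_eth *eth, void *ring, int cnt)
	{
		const struct mtk_soc_data *soc = eth->soc;
		int i;

		for (i = 0; i < cnt; i++) {
			/* step by the per-SoC size, not sizeof(), so the
			 * same loop runs on 16B and 32B descriptors
			 */
			struct mtk_tx_dma_v2 *txd = ring + i * soc->txrx.txd_size;

			txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
			txd->txd4 = 0;
			/* the upper four dwords exist only on NETSYS_V2 SoCs */
			if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
				txd->txd5 = 0;
				txd->txd6 = 0;
				txd->txd7 = 0;
				txd->txd8 = 0;
			}
		}
	}
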
14 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
15 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
16 @@ -862,8 +862,8 @@ static inline int mtk_max_buf_size(int f
17 return buf_size;
18 }
19
20 -static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
21 - struct mtk_rx_dma *dma_rxd)
22 +static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
23 + struct mtk_rx_dma_v2 *dma_rxd)
24 {
25 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
26 if (!(rxd->rxd2 & RX_DMA_DONE))
27 @@ -872,6 +872,10 @@ static inline bool mtk_rx_get_desc(struc
28 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
29 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
30 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
31 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
32 + rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
33 + rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
34 + }
35
36 return true;
37 }
38 @@ -916,7 +920,7 @@ static int mtk_init_fq_dma(struct mtk_et
39 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
40
41 for (i = 0; i < cnt; i++) {
42 - struct mtk_tx_dma *txd;
43 + struct mtk_tx_dma_v2 *txd;
44
45 txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
46 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
47 @@ -926,6 +930,12 @@ static int mtk_init_fq_dma(struct mtk_et
48
49 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
50 txd->txd4 = 0;
51 + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
52 + txd->txd5 = 0;
53 + txd->txd6 = 0;
54 + txd->txd7 = 0;
55 + txd->txd8 = 0;
56 + }
57 }
58
59 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
60 @@ -1029,10 +1039,12 @@ static void setup_tx_buf(struct mtk_eth
61 }
62 }
63
64 -static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
65 - struct mtk_tx_dma_desc_info *info)
66 +static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
67 + struct mtk_tx_dma_desc_info *info)
68 {
69 struct mtk_mac *mac = netdev_priv(dev);
70 + struct mtk_eth *eth = mac->hw;
71 + struct mtk_tx_dma *desc = txd;
72 u32 data;
73
74 WRITE_ONCE(desc->txd1, info->addr);
75 @@ -1056,6 +1068,59 @@ static void mtk_tx_set_dma_desc(struct n
76 WRITE_ONCE(desc->txd4, data);
77 }
78
79 +static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
80 + struct mtk_tx_dma_desc_info *info)
81 +{
82 + struct mtk_mac *mac = netdev_priv(dev);
83 + struct mtk_tx_dma_v2 *desc = txd;
84 + struct mtk_eth *eth = mac->hw;
85 + u32 data;
86 +
87 + WRITE_ONCE(desc->txd1, info->addr);
88 +
89 + data = TX_DMA_PLEN0(info->size);
90 + if (info->last)
91 + data |= TX_DMA_LS0;
92 + WRITE_ONCE(desc->txd3, data);
93 +
94 + if (!info->qid && mac->id)
95 + info->qid = MTK_QDMA_GMAC2_QID;
96 +
97 + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
98 + data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
99 + WRITE_ONCE(desc->txd4, data);
100 +
101 + data = 0;
102 + if (info->first) {
103 + if (info->gso)
104 + data |= TX_DMA_TSO_V2;
105 + /* tx checksum offload */
106 + if (info->csum)
107 + data |= TX_DMA_CHKSUM_V2;
108 + }
109 + WRITE_ONCE(desc->txd5, data);
110 +
111 + data = 0;
112 + if (info->first && info->vlan)
113 + data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
114 + WRITE_ONCE(desc->txd6, data);
115 +
116 + WRITE_ONCE(desc->txd7, 0);
117 + WRITE_ONCE(desc->txd8, 0);
118 +}
119 +
120 +static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
121 + struct mtk_tx_dma_desc_info *info)
122 +{
123 + struct mtk_mac *mac = netdev_priv(dev);
124 + struct mtk_eth *eth = mac->hw;
125 +
126 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
127 + mtk_tx_set_dma_desc_v2(dev, txd, info);
128 + else
129 + mtk_tx_set_dma_desc_v1(dev, txd, info);
130 +}
131 +
132 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
133 int tx_num, struct mtk_tx_ring *ring, bool gso)
134 {
135 @@ -1064,6 +1129,7 @@ static int mtk_tx_map(struct sk_buff *sk
136 .gso = gso,
137 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
138 .vlan = skb_vlan_tag_present(skb),
139 + .qid = skb->mark & MTK_QDMA_TX_MASK,
140 .vlan_tci = skb_vlan_tag_get(skb),
141 .first = true,
142 .last = !skb_is_nonlinear(skb),
143 @@ -1123,7 +1189,9 @@ static int mtk_tx_map(struct sk_buff *sk
144 }
145
146 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
147 - txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
148 + txd_info.size = min_t(unsigned int, frag_size,
149 + soc->txrx.dma_max_len);
150 + txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
151 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
152 !(frag_size - txd_info.size);
153 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
154 @@ -1204,17 +1272,16 @@ err_dma:
155 return -ENOMEM;
156 }
157
158 -static inline int mtk_cal_txd_req(struct sk_buff *skb)
159 +static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
160 {
161 - int i, nfrags;
162 + int i, nfrags = 1;
163 skb_frag_t *frag;
164
165 - nfrags = 1;
166 if (skb_is_gso(skb)) {
167 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
168 frag = &skb_shinfo(skb)->frags[i];
169 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
170 - MTK_TX_DMA_BUF_LEN);
171 + eth->soc->txrx.dma_max_len);
172 }
173 } else {
174 nfrags += skb_shinfo(skb)->nr_frags;
175 @@ -1266,7 +1333,7 @@ static netdev_tx_t mtk_start_xmit(struct
176 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
177 goto drop;
178
179 - tx_num = mtk_cal_txd_req(skb);
180 + tx_num = mtk_cal_txd_req(eth, skb);
181 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
182 netif_stop_queue(dev);
183 netif_err(eth, tx_queued, dev,
184 @@ -1358,7 +1425,7 @@ static int mtk_poll_rx(struct napi_struc
185 int idx;
186 struct sk_buff *skb;
187 u8 *data, *new_data;
188 - struct mtk_rx_dma *rxd, trxd;
189 + struct mtk_rx_dma_v2 *rxd, trxd;
190 int done = 0, bytes = 0;
191
192 while (done < budget) {
193 @@ -1366,7 +1433,7 @@ static int mtk_poll_rx(struct napi_struc
194 unsigned int pktlen;
195 dma_addr_t dma_addr;
196 u32 hash, reason;
197 - int mac;
198 + int mac = 0;
199
200 ring = mtk_get_rx_ring(eth);
201 if (unlikely(!ring))
202 @@ -1376,16 +1443,15 @@ static int mtk_poll_rx(struct napi_struc
203 rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
204 data = ring->data[idx];
205
206 - if (!mtk_rx_get_desc(&trxd, rxd))
207 + if (!mtk_rx_get_desc(eth, &trxd, rxd))
208 break;
209
210 /* find out which mac the packet come from. values start at 1 */
211 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
212 - (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
213 - mac = 0;
214 - else
215 - mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
216 - RX_DMA_FPORT_MASK) - 1;
217 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
218 + mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
219 + else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
220 + !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
221 + mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
222
223 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
224 !eth->netdev[mac]))
225 @@ -1431,7 +1497,7 @@ static int mtk_poll_rx(struct napi_struc
226 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
227 skb->dev = netdev;
228 skb_put(skb, pktlen);
229 - if (trxd.rxd4 & eth->rx_dma_l4_valid)
230 + if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
231 skb->ip_summed = CHECKSUM_UNNECESSARY;
232 else
233 skb_checksum_none_assert(skb);
234 @@ -1449,10 +1515,25 @@ static int mtk_poll_rx(struct napi_struc
235 mtk_ppe_check_skb(eth->ppe, skb,
236 trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
237
238 - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
239 - (trxd.rxd2 & RX_DMA_VTAG))
240 - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
241 - RX_DMA_VID(trxd.rxd3));
242 + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
243 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
244 + if (trxd.rxd3 & RX_DMA_VTAG_V2)
245 + __vlan_hwaccel_put_tag(skb,
246 + htons(RX_DMA_VPID(trxd.rxd4)),
247 + RX_DMA_VID(trxd.rxd4));
248 + } else if (trxd.rxd2 & RX_DMA_VTAG) {
249 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
250 + RX_DMA_VID(trxd.rxd3));
251 + }
252 +
253 + /* If the device is attached to a dsa switch, the special
254 + * tag inserted in VLAN field by hw switch can be offloaded
255 + * by RX HW VLAN offload. Clear vlan info.
256 + */
257 + if (netdev_uses_dsa(netdev))
258 + __vlan_hwaccel_clear_tag(skb);
259 + }
260 +
261 skb_record_rx_queue(skb, 0);
262 napi_gro_receive(napi, skb);
263
264 @@ -1464,7 +1545,7 @@ release_desc:
265 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
266 rxd->rxd2 = RX_DMA_LSO;
267 else
268 - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
269 + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
270
271 ring->calc_idx = idx;
272
273 @@ -1666,7 +1747,8 @@ static int mtk_napi_rx(struct napi_struc
274 do {
275 int rx_done;
276
277 - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
278 + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
279 + reg_map->pdma.irq_status);
280 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
281 rx_done_total += rx_done;
282
283 @@ -1680,10 +1762,11 @@ static int mtk_napi_rx(struct napi_struc
284 if (rx_done_total == budget)
285 return budget;
286
287 - } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
288 + } while (mtk_r32(eth, reg_map->pdma.irq_status) &
289 + eth->soc->txrx.rx_irq_done_mask);
290
291 if (napi_complete_done(napi, rx_done_total))
292 - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
293 + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
294
295 return rx_done_total;
296 }
297 @@ -1693,7 +1776,7 @@ static int mtk_tx_alloc(struct mtk_eth *
298 const struct mtk_soc_data *soc = eth->soc;
299 struct mtk_tx_ring *ring = &eth->tx_ring;
300 int i, sz = soc->txrx.txd_size;
301 - struct mtk_tx_dma *txd;
302 + struct mtk_tx_dma_v2 *txd;
303
304 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
305 GFP_KERNEL);
306 @@ -1713,13 +1796,19 @@ static int mtk_tx_alloc(struct mtk_eth *
307 txd->txd2 = next_ptr;
308 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
309 txd->txd4 = 0;
310 + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
311 + txd->txd5 = 0;
312 + txd->txd6 = 0;
313 + txd->txd7 = 0;
314 + txd->txd8 = 0;
315 + }
316 }
317
318 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
319 * only as the framework. The real HW descriptors are the PDMA
320 * descriptors in ring->dma_pdma.
321 */
322 - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
323 + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
324 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
325 &ring->phys_pdma, GFP_KERNEL);
326 if (!ring->dma_pdma)
327 @@ -1799,13 +1888,11 @@ static int mtk_rx_alloc(struct mtk_eth *
328 struct mtk_rx_ring *ring;
329 int rx_data_len, rx_dma_size;
330 int i;
331 - u32 offset = 0;
332
333 if (rx_flag == MTK_RX_FLAGS_QDMA) {
334 if (ring_no)
335 return -EINVAL;
336 ring = &eth->rx_ring_qdma;
337 - offset = 0x1000;
338 } else {
339 ring = &eth->rx_ring[ring_no];
340 }
341 @@ -1841,7 +1928,7 @@ static int mtk_rx_alloc(struct mtk_eth *
342 return -ENOMEM;
343
344 for (i = 0; i < rx_dma_size; i++) {
345 - struct mtk_rx_dma *rxd;
346 + struct mtk_rx_dma_v2 *rxd;
347
348 dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
349 ring->data[i] + NET_SKB_PAD + eth->ip_align,
350 @@ -1856,26 +1943,47 @@ static int mtk_rx_alloc(struct mtk_eth *
351 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
352 rxd->rxd2 = RX_DMA_LSO;
353 else
354 - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
355 + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
356
357 rxd->rxd3 = 0;
358 rxd->rxd4 = 0;
359 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
360 + rxd->rxd5 = 0;
361 + rxd->rxd6 = 0;
362 + rxd->rxd7 = 0;
363 + rxd->rxd8 = 0;
364 + }
365 }
366 ring->dma_size = rx_dma_size;
367 ring->calc_idx_update = false;
368 ring->calc_idx = rx_dma_size - 1;
369 - ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
370 + if (rx_flag == MTK_RX_FLAGS_QDMA)
371 + ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
372 + ring_no * MTK_QRX_OFFSET;
373 + else
374 + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
375 + ring_no * MTK_QRX_OFFSET;
376 /* make sure that all changes to the dma ring are flushed before we
377 * continue
378 */
379 wmb();
380
381 - mtk_w32(eth, ring->phys,
382 - reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
383 - mtk_w32(eth, rx_dma_size,
384 - reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
385 - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
386 - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
387 + if (rx_flag == MTK_RX_FLAGS_QDMA) {
388 + mtk_w32(eth, ring->phys,
389 + reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
390 + mtk_w32(eth, rx_dma_size,
391 + reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
392 + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
393 + reg_map->qdma.rst_idx);
394 + } else {
395 + mtk_w32(eth, ring->phys,
396 + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
397 + mtk_w32(eth, rx_dma_size,
398 + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
399 + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
400 + reg_map->pdma.rst_idx);
401 + }
402 + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
403
404 return 0;
405 }
406 @@ -2297,7 +2405,7 @@ static irqreturn_t mtk_handle_irq_rx(int
407 eth->rx_events++;
408 if (likely(napi_schedule_prep(&eth->rx_napi))) {
409 __napi_schedule(&eth->rx_napi);
410 - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
411 + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
412 }
413
414 return IRQ_HANDLED;
415 @@ -2321,8 +2429,10 @@ static irqreturn_t mtk_handle_irq(int ir
416 struct mtk_eth *eth = _eth;
417 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
418
419 - if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
420 - if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
421 + if (mtk_r32(eth, reg_map->pdma.irq_mask) &
422 + eth->soc->txrx.rx_irq_done_mask) {
423 + if (mtk_r32(eth, reg_map->pdma.irq_status) &
424 + eth->soc->txrx.rx_irq_done_mask)
425 mtk_handle_irq_rx(irq, _eth);
426 }
427 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
428 @@ -2340,16 +2450,16 @@ static void mtk_poll_controller(struct n
429 struct mtk_eth *eth = mac->hw;
430
431 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
432 - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
433 + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
434 mtk_handle_irq_rx(eth->irq[2], dev);
435 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
436 - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
437 + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
438 }
439 #endif
440
441 static int mtk_start_dma(struct mtk_eth *eth)
442 {
443 - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
444 + u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
445 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
446 int err;
447
448 @@ -2360,12 +2470,19 @@ static int mtk_start_dma(struct mtk_eth
449 }
450
451 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
452 - mtk_w32(eth,
453 - MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
454 - MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
455 - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
456 - MTK_RX_BT_32DWORDS,
457 - reg_map->qdma.glo_cfg);
458 + val = mtk_r32(eth, reg_map->qdma.glo_cfg);
459 + val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
460 + MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
461 + MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
462 +
463 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
464 + val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
465 + MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
466 + MTK_CHK_DDONE_EN;
467 + else
468 + val |= MTK_RX_BT_32DWORDS;
469 + mtk_w32(eth, val, reg_map->qdma.glo_cfg);
470 +
471 mtk_w32(eth,
472 MTK_RX_DMA_EN | rx_2b_offset |
473 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
474 @@ -2439,7 +2556,7 @@ static int mtk_open(struct net_device *d
475 napi_enable(&eth->tx_napi);
476 napi_enable(&eth->rx_napi);
477 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
478 - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
479 + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
480 refcount_set(&eth->dma_refcnt, 1);
481 }
482 else
483 @@ -2491,7 +2608,7 @@ static int mtk_stop(struct net_device *d
484 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
485
486 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
487 - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
488 + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
489 napi_disable(&eth->tx_napi);
490 napi_disable(&eth->rx_napi);
491
492 @@ -2651,9 +2768,25 @@ static int mtk_hw_init(struct mtk_eth *e
493 return 0;
494 }
495
496 - /* Non-MT7628 handling... */
497 - ethsys_reset(eth, RSTCTRL_FE);
498 - ethsys_reset(eth, RSTCTRL_PPE);
499 + val = RSTCTRL_FE | RSTCTRL_PPE;
500 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
501 + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
502 +
503 + val |= RSTCTRL_ETH;
504 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
505 + val |= RSTCTRL_PPE1;
506 + }
507 +
508 + ethsys_reset(eth, val);
509 +
510 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
511 + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
512 + 0x3ffffff);
513 +
514 + /* Set FE to PDMAv2 if necessary */
515 + val = mtk_r32(eth, MTK_FE_GLO_MISC);
516 + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
517 + }
518
519 if (eth->pctl) {
520 /* Set GE2 driving and slew rate */
521 @@ -2692,11 +2825,47 @@ static int mtk_hw_init(struct mtk_eth *e
522
523 /* FE int grouping */
524 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
525 - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
526 + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
527 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
528 - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
529 + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
530 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
531
532 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
533 + /* PSE should not drop port8 and port9 packets */
534 + mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
535 +
536 + /* PSE Free Queue Flow Control */
537 + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
538 +
539 + /* PSE config input queue threshold */
540 + mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
541 + mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
542 + mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
543 + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
544 + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
545 + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
546 + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
547 + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
548 +
549 + /* PSE config output queue threshold */
550 + mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
551 + mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
552 + mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
553 + mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
554 + mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
555 + mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
556 + mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
557 + mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
558 +
559 + /* GDM and CDM Threshold */
560 + mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
561 + mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
562 + mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
563 + mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
564 + mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
565 + mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
566 + }
567 +
568 return 0;
569
570 err_disable_pm:
571 @@ -3233,12 +3402,8 @@ static int mtk_probe(struct platform_dev
572 if (IS_ERR(eth->base))
573 return PTR_ERR(eth->base);
574
575 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
576 - eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
577 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
578 eth->ip_align = NET_IP_ALIGN;
579 - } else {
580 - eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
581 - }
582
583 spin_lock_init(&eth->page_lock);
584 spin_lock_init(&eth->tx_irq_lock);
585 @@ -3474,6 +3639,10 @@ static const struct mtk_soc_data mt2701_
586 .txrx = {
587 .txd_size = sizeof(struct mtk_tx_dma),
588 .rxd_size = sizeof(struct mtk_rx_dma),
589 + .rx_irq_done_mask = MTK_RX_DONE_INT,
590 + .rx_dma_l4_valid = RX_DMA_L4_VALID,
591 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
592 + .dma_len_offset = 16,
593 },
594 };
595
596 @@ -3487,6 +3656,10 @@ static const struct mtk_soc_data mt7621_
597 .txrx = {
598 .txd_size = sizeof(struct mtk_tx_dma),
599 .rxd_size = sizeof(struct mtk_rx_dma),
600 + .rx_irq_done_mask = MTK_RX_DONE_INT,
601 + .rx_dma_l4_valid = RX_DMA_L4_VALID,
602 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
603 + .dma_len_offset = 16,
604 },
605 };
606
607 @@ -3501,6 +3674,10 @@ static const struct mtk_soc_data mt7622_
608 .txrx = {
609 .txd_size = sizeof(struct mtk_tx_dma),
610 .rxd_size = sizeof(struct mtk_rx_dma),
611 + .rx_irq_done_mask = MTK_RX_DONE_INT,
612 + .rx_dma_l4_valid = RX_DMA_L4_VALID,
613 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
614 + .dma_len_offset = 16,
615 },
616 };
617
618 @@ -3514,6 +3691,10 @@ static const struct mtk_soc_data mt7623_
619 .txrx = {
620 .txd_size = sizeof(struct mtk_tx_dma),
621 .rxd_size = sizeof(struct mtk_rx_dma),
622 + .rx_irq_done_mask = MTK_RX_DONE_INT,
623 + .rx_dma_l4_valid = RX_DMA_L4_VALID,
624 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
625 + .dma_len_offset = 16,
626 },
627 };
628
629 @@ -3527,6 +3708,10 @@ static const struct mtk_soc_data mt7629_
630 .txrx = {
631 .txd_size = sizeof(struct mtk_tx_dma),
632 .rxd_size = sizeof(struct mtk_rx_dma),
633 + .rx_irq_done_mask = MTK_RX_DONE_INT,
634 + .rx_dma_l4_valid = RX_DMA_L4_VALID,
635 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
636 + .dma_len_offset = 16,
637 },
638 };
639
640 @@ -3539,6 +3724,10 @@ static const struct mtk_soc_data rt5350_
641 .txrx = {
642 .txd_size = sizeof(struct mtk_tx_dma),
643 .rxd_size = sizeof(struct mtk_rx_dma),
644 + .rx_irq_done_mask = MTK_RX_DONE_INT,
645 + .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
646 + .dma_max_len = MTK_TX_DMA_BUF_LEN,
647 + .dma_len_offset = 16,
648 },
649 };
650
651 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
652 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
653 @@ -23,6 +23,7 @@
654 #define MTK_MAX_RX_LENGTH 1536
655 #define MTK_MAX_RX_LENGTH_2K 2048
656 #define MTK_TX_DMA_BUF_LEN 0x3fff
657 +#define MTK_TX_DMA_BUF_LEN_V2 0xffff
658 #define MTK_DMA_SIZE 512
659 #define MTK_NAPI_WEIGHT 64
660 #define MTK_MAC_COUNT 2
661 @@ -83,6 +84,10 @@
662 #define MTK_CDMQ_IG_CTRL 0x1400
663 #define MTK_CDMQ_STAG_EN BIT(0)
664
665 +/* CDMP Ingress Control Register */
666 +#define MTK_CDMP_IG_CTRL 0x400
667 +#define MTK_CDMP_STAG_EN BIT(0)
668 +
669 /* CDMP Exgress Control Register */
670 #define MTK_CDMP_EG_CTRL 0x404
671
672 @@ -102,13 +107,38 @@
673 /* Unicast Filter MAC Address Register - High */
674 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
675
676 +/* FE global misc reg */
677 +#define MTK_FE_GLO_MISC 0x124
678 +
679 +/* PSE Free Queue Flow Control */
680 +#define PSE_FQFC_CFG1 0x100
681 +#define PSE_FQFC_CFG2 0x104
682 +#define PSE_DROP_CFG 0x108
683 +
684 +/* PSE Input Queue Reservation Register */
685 +#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
686 +
687 +/* PSE Output Queue Threshold Register */
688 +#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
689 +
690 +/* GDM and CDM Threshold */
691 +#define MTK_GDM2_THRES 0x1530
692 +#define MTK_CDMW0_THRES 0x164c
693 +#define MTK_CDMW1_THRES 0x1650
694 +#define MTK_CDME0_THRES 0x1654
695 +#define MTK_CDME1_THRES 0x1658
696 +#define MTK_CDMM_THRES 0x165c
697 +
698 /* PDMA HW LRO Control Registers */
699 #define MTK_PDMA_LRO_CTRL_DW0 0x980
700 #define MTK_LRO_EN BIT(0)
701 #define MTK_L3_CKS_UPD_EN BIT(7)
702 +#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
703 #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
704 #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
705 +#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
706 #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
707 +#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
708
709 #define MTK_PDMA_LRO_CTRL_DW1 0x984
710 #define MTK_PDMA_LRO_CTRL_DW2 0x988
711 @@ -180,6 +210,13 @@
712 #define MTK_TX_DMA_EN BIT(0)
713 #define MTK_DMA_BUSY_TIMEOUT_US 1000000
714
715 +/* QDMA V2 Global Configuration Register */
716 +#define MTK_CHK_DDONE_EN BIT(28)
717 +#define MTK_DMAD_WR_WDONE BIT(26)
718 +#define MTK_WCOMP_EN BIT(24)
719 +#define MTK_RESV_BUF (0x40 << 16)
720 +#define MTK_MUTLI_CNT (0x4 << 12)
721 +
722 /* QDMA Flow Control Register */
723 #define FC_THRES_DROP_MODE BIT(20)
724 #define FC_THRES_DROP_EN (7 << 16)
725 @@ -199,11 +236,32 @@
726 #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
727 #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
728
729 +#define MTK_RX_DONE_INT_V2 BIT(14)
730 +
731 /* QDMA Interrupt grouping registers */
732 #define MTK_RLS_DONE_INT BIT(0)
733
734 #define MTK_STAT_OFFSET 0x40
735
736 +/* QDMA TX NUM */
737 +#define MTK_QDMA_TX_NUM 16
738 +#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
739 +#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
740 +#define MTK_QDMA_GMAC2_QID 8
741 +
742 +#define MTK_TX_DMA_BUF_SHIFT 8
743 +
744 +/* QDMA V2 descriptor txd6 */
745 +#define TX_DMA_INS_VLAN_V2 BIT(16)
746 +/* QDMA V2 descriptor txd5 */
747 +#define TX_DMA_CHKSUM_V2 (0x7 << 28)
748 +#define TX_DMA_TSO_V2 BIT(31)
749 +
750 +/* QDMA V2 descriptor txd4 */
751 +#define TX_DMA_FPORT_SHIFT_V2 8
752 +#define TX_DMA_FPORT_MASK_V2 0xf
753 +#define TX_DMA_SWC_V2 BIT(30)
754 +
755 #define MTK_WDMA0_BASE 0x2800
756 #define MTK_WDMA1_BASE 0x2c00
757
758 @@ -217,10 +275,9 @@
759 /* QDMA descriptor txd3 */
760 #define TX_DMA_OWNER_CPU BIT(31)
761 #define TX_DMA_LS0 BIT(30)
762 -#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
763 -#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
764 +#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
765 +#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
766 #define TX_DMA_SWC BIT(14)
767 -#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
768
769 /* PDMA on MT7628 */
770 #define TX_DMA_DONE BIT(31)
771 @@ -230,12 +287,14 @@
772 /* QDMA descriptor rxd2 */
773 #define RX_DMA_DONE BIT(31)
774 #define RX_DMA_LSO BIT(30)
775 -#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
776 -#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
777 +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
778 +#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
779 #define RX_DMA_VTAG BIT(15)
780
781 /* QDMA descriptor rxd3 */
782 -#define RX_DMA_VID(_x) ((_x) & 0xfff)
783 +#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
784 +#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
785 +#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
786
787 /* QDMA descriptor rxd4 */
788 #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
789 @@ -246,10 +305,15 @@
790 /* QDMA descriptor rxd4 */
791 #define RX_DMA_L4_VALID BIT(24)
792 #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
793 -#define RX_DMA_FPORT_SHIFT 19
794 -#define RX_DMA_FPORT_MASK 0x7
795 #define RX_DMA_SPECIAL_TAG BIT(22)
796
797 +#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
798 +#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
799 +
800 +/* PDMA V2 descriptor rxd3 */
801 +#define RX_DMA_VTAG_V2 BIT(0)
802 +#define RX_DMA_L4_VALID_V2 BIT(2)
803 +
804 /* PHY Indirect Access Control registers */
805 #define MTK_PHY_IAC 0x10004
806 #define PHY_IAC_ACCESS BIT(31)
807 @@ -370,6 +434,16 @@
808 #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
809
810 /* ethernet reset control register */
811 +#define ETHSYS_RSTCTRL 0x34
812 +#define RSTCTRL_FE BIT(6)
813 +#define RSTCTRL_PPE BIT(31)
814 +#define RSTCTRL_PPE1 BIT(30)
815 +#define RSTCTRL_ETH BIT(23)
816 +
817 +/* ethernet reset check idle register */
818 +#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
819 +
820 +/* ethernet reset control register */
821 #define ETHSYS_RSTCTRL 0x34
822 #define RSTCTRL_FE BIT(6)
823 #define RSTCTRL_PPE BIT(31)
824 @@ -453,6 +527,17 @@ struct mtk_rx_dma {
825 unsigned int rxd4;
826 } __packed __aligned(4);
827
828 +struct mtk_rx_dma_v2 {
829 + unsigned int rxd1;
830 + unsigned int rxd2;
831 + unsigned int rxd3;
832 + unsigned int rxd4;
833 + unsigned int rxd5;
834 + unsigned int rxd6;
835 + unsigned int rxd7;
836 + unsigned int rxd8;
837 +} __packed __aligned(4);
838 +
839 struct mtk_tx_dma {
840 unsigned int txd1;
841 unsigned int txd2;
842 @@ -460,6 +545,17 @@ struct mtk_tx_dma {
843 unsigned int txd4;
844 } __packed __aligned(4);
845
846 +struct mtk_tx_dma_v2 {
847 + unsigned int txd1;
848 + unsigned int txd2;
849 + unsigned int txd3;
850 + unsigned int txd4;
851 + unsigned int txd5;
852 + unsigned int txd6;
853 + unsigned int txd7;
854 + unsigned int txd8;
855 +} __packed __aligned(4);
856 +
857 struct mtk_eth;
858 struct mtk_mac;
859
860 @@ -646,7 +742,9 @@ enum mkt_eth_capabilities {
861 MTK_SHARED_INT_BIT,
862 MTK_TRGMII_MT7621_CLK_BIT,
863 MTK_QDMA_BIT,
864 + MTK_NETSYS_V2_BIT,
865 MTK_SOC_MT7628_BIT,
866 + MTK_RSTCTRL_PPE1_BIT,
867
868 /* MUX BITS*/
869 MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
870 @@ -678,7 +776,9 @@ enum mkt_eth_capabilities {
871 #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
872 #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
873 #define MTK_QDMA BIT(MTK_QDMA_BIT)
874 +#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
875 #define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
876 +#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
877
878 #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
879 BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
880 @@ -755,6 +855,7 @@ struct mtk_tx_dma_desc_info {
881 dma_addr_t addr;
882 u32 size;
883 u16 vlan_tci;
884 + u16 qid;
885 u8 gso:1;
886 u8 csum:1;
887 u8 vlan:1;
888 @@ -812,6 +913,10 @@ struct mtk_reg_map {
889 * the extra setup for those pins used by GMAC.
890 * @txd_size Tx DMA descriptor size.
891 * @rxd_size Rx DMA descriptor size.
892 + * @rx_irq_done_mask Rx irq done register mask.
893 + * @rx_dma_l4_valid Rx DMA valid register mask.
894 + * @dma_max_len Max DMA tx/rx buffer length.
895 + * @dma_len_offset Tx/Rx DMA length field offset.
896 */
897 struct mtk_soc_data {
898 const struct mtk_reg_map *reg_map;
899 @@ -824,6 +929,10 @@ struct mtk_soc_data {
900 struct {
901 u32 txd_size;
902 u32 rxd_size;
903 + u32 rx_irq_done_mask;
904 + u32 rx_dma_l4_valid;
905 + u32 dma_max_len;
906 + u32 dma_len_offset;
907 } txrx;
908 };
909
910 @@ -942,7 +1051,6 @@ struct mtk_eth {
911 u32 tx_bytes;
912 struct dim tx_dim;
913
914 - u32 rx_dma_l4_valid;
915 int ip_align;
916
917 struct mtk_ppe *ppe;