1 From: Lorenzo Bianconi <lorenzo@kernel.org>
2 Date: Fri, 20 May 2022 20:11:35 +0200
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce device register map
5 Introduce reg_map structure to add the capability to support different
6 register definitions. Move register definitions into the mtk_reg_map structure.
7 This is a preliminary patch to introduce mt7986 ethernet support.
9 Tested-by: Sam Shih <sam.shih@mediatek.com>
10 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
11 Signed-off-by: David S. Miller <davem@davemloft.net>
14 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
15 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
16 @@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message lev
17 #define MTK_ETHTOOL_STAT(x) { #x, \
18 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
20 +static const struct mtk_reg_map mtk_reg_map = {
21 + .tx_irq_mask = 0x1a1c,
22 + .tx_irq_status = 0x1a18,
25 + .rx_cnt_cfg = 0x0904,
29 + .delay_irq = 0x0a0c,
30 + .irq_status = 0x0a20,
37 + .rx_cnt_cfg = 0x1904,
41 + .delay_irq = 0x1a0c,
57 +static const struct mtk_reg_map mt7628_reg_map = {
58 + .tx_irq_mask = 0x0a28,
59 + .tx_irq_status = 0x0a20,
62 + .rx_cnt_cfg = 0x0904,
66 + .delay_irq = 0x0a0c,
67 + .irq_status = 0x0a20,
73 /* strings used by ethtool */
74 static const struct mtk_ethtool_stats {
75 char str[ETH_GSTRING_LEN];
76 @@ -629,8 +682,8 @@ static inline void mtk_tx_irq_disable(st
79 spin_lock_irqsave(&eth->tx_irq_lock, flags);
80 - val = mtk_r32(eth, eth->tx_int_mask_reg);
81 - mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
82 + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
83 + mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
84 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
87 @@ -640,8 +693,8 @@ static inline void mtk_tx_irq_enable(str
90 spin_lock_irqsave(&eth->tx_irq_lock, flags);
91 - val = mtk_r32(eth, eth->tx_int_mask_reg);
92 - mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
93 + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
94 + mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
95 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
98 @@ -651,8 +704,8 @@ static inline void mtk_rx_irq_disable(st
101 spin_lock_irqsave(&eth->rx_irq_lock, flags);
102 - val = mtk_r32(eth, MTK_PDMA_INT_MASK);
103 - mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
104 + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
105 + mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
106 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
109 @@ -662,8 +715,8 @@ static inline void mtk_rx_irq_enable(str
112 spin_lock_irqsave(&eth->rx_irq_lock, flags);
113 - val = mtk_r32(eth, MTK_PDMA_INT_MASK);
114 - mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
115 + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
116 + mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
117 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
120 @@ -714,39 +767,39 @@ void mtk_stats_update_mac(struct mtk_mac
121 hw_stats->rx_checksum_errors +=
122 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
124 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
125 unsigned int offs = hw_stats->reg_offset;
128 - hw_stats->rx_bytes += mtk_r32(mac->hw,
129 - MTK_GDM1_RX_GBCNT_L + offs);
130 - stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
131 + hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
132 + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
134 hw_stats->rx_bytes += (stats << 32);
135 hw_stats->rx_packets +=
136 - mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
137 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
138 hw_stats->rx_overflow +=
139 - mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
140 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
141 hw_stats->rx_fcs_errors +=
142 - mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
143 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
144 hw_stats->rx_short_errors +=
145 - mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
146 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
147 hw_stats->rx_long_errors +=
148 - mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
149 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
150 hw_stats->rx_checksum_errors +=
151 - mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
152 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
153 hw_stats->rx_flow_control_packets +=
154 - mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
155 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
157 - mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
158 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
159 hw_stats->tx_collisions +=
160 - mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
161 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
162 hw_stats->tx_bytes +=
163 - mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
164 - stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
165 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
166 + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
168 hw_stats->tx_bytes += (stats << 32);
169 hw_stats->tx_packets +=
170 - mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
171 + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
174 u64_stats_update_end(&hw_stats->syncp);
175 @@ -886,10 +939,10 @@ static int mtk_init_fq_dma(struct mtk_et
179 - mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
180 - mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
181 - mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
182 - mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
183 + mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
184 + mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
185 + mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
186 + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
190 @@ -1133,7 +1186,7 @@ static int mtk_tx_map(struct sk_buff *sk
191 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
192 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
194 - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
195 + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
199 @@ -1450,6 +1503,7 @@ rx_done:
200 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
201 unsigned int *done, unsigned int *bytes)
203 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
204 struct mtk_tx_ring *ring = &eth->tx_ring;
205 struct mtk_tx_dma *desc;
207 @@ -1457,7 +1511,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
210 cpu = ring->last_free_ptr;
211 - dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
212 + dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
214 desc = mtk_qdma_phys_to_virt(ring, cpu);
216 @@ -1492,7 +1546,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
219 ring->last_free_ptr = cpu;
220 - mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
221 + mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
225 @@ -1585,24 +1639,25 @@ static void mtk_handle_status_irq(struct
226 static int mtk_napi_tx(struct napi_struct *napi, int budget)
228 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
229 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
232 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
233 mtk_handle_status_irq(eth);
234 - mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
235 + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
236 tx_done = mtk_poll_tx(eth, budget);
238 if (unlikely(netif_msg_intr(eth))) {
240 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
241 - mtk_r32(eth, eth->tx_int_status_reg),
242 - mtk_r32(eth, eth->tx_int_mask_reg));
243 + mtk_r32(eth, reg_map->tx_irq_status),
244 + mtk_r32(eth, reg_map->tx_irq_mask));
247 if (tx_done == budget)
250 - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
251 + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
254 if (napi_complete_done(napi, tx_done))
255 @@ -1614,6 +1669,7 @@ static int mtk_napi_tx(struct napi_struc
256 static int mtk_napi_rx(struct napi_struct *napi, int budget)
258 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
259 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
260 int rx_done_total = 0;
262 mtk_handle_status_irq(eth);
263 @@ -1621,21 +1677,21 @@ static int mtk_napi_rx(struct napi_struc
267 - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
268 + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
269 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
270 rx_done_total += rx_done;
272 if (unlikely(netif_msg_intr(eth))) {
274 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
275 - mtk_r32(eth, MTK_PDMA_INT_STATUS),
276 - mtk_r32(eth, MTK_PDMA_INT_MASK));
277 + mtk_r32(eth, reg_map->pdma.irq_status),
278 + mtk_r32(eth, reg_map->pdma.irq_mask));
281 if (rx_done_total == budget)
284 - } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
285 + } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
287 if (napi_complete_done(napi, rx_done_total))
288 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
289 @@ -1698,20 +1754,20 @@ static int mtk_tx_alloc(struct mtk_eth *
293 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
294 - mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
295 - mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
296 + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
297 + mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
298 + mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
300 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
302 - mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
303 + soc->reg_map->qdma.crx_ptr);
304 + mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
305 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
307 + soc->reg_map->qdma.qtx_cfg);
309 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
310 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
311 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
312 - mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
313 + mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
317 @@ -1750,6 +1806,7 @@ static void mtk_tx_clean(struct mtk_eth
319 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
321 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
322 struct mtk_rx_ring *ring;
323 int rx_data_len, rx_dma_size;
325 @@ -1818,16 +1875,18 @@ static int mtk_rx_alloc(struct mtk_eth *
326 ring->dma_size = rx_dma_size;
327 ring->calc_idx_update = false;
328 ring->calc_idx = rx_dma_size - 1;
329 - ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
330 + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
331 /* make sure that all changes to the dma ring are flushed before we
336 - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
337 - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
338 + mtk_w32(eth, ring->phys,
339 + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
340 + mtk_w32(eth, rx_dma_size,
341 + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
342 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
343 - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
344 + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
348 @@ -2139,9 +2198,9 @@ static int mtk_dma_busy_wait(struct mtk_
351 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
352 - reg = MTK_QDMA_GLO_CFG;
353 + reg = eth->soc->reg_map->qdma.glo_cfg;
355 - reg = MTK_PDMA_GLO_CFG;
356 + reg = eth->soc->reg_map->pdma.glo_cfg;
358 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
359 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
360 @@ -2199,8 +2258,8 @@ static int mtk_dma_init(struct mtk_eth *
363 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
364 - FC_THRES_MIN, MTK_QDMA_FC_THRES);
365 - mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
366 + FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
367 + mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
371 @@ -2274,13 +2333,14 @@ static irqreturn_t mtk_handle_irq_tx(int
372 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
374 struct mtk_eth *eth = _eth;
375 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
377 - if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
378 - if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
379 + if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
380 + if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
381 mtk_handle_irq_rx(irq, _eth);
383 - if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
384 - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
385 + if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
386 + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
387 mtk_handle_irq_tx(irq, _eth);
390 @@ -2304,6 +2364,7 @@ static void mtk_poll_controller(struct n
391 static int mtk_start_dma(struct mtk_eth *eth)
393 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
394 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
397 err = mtk_dma_init(eth);
398 @@ -2318,16 +2379,15 @@ static int mtk_start_dma(struct mtk_eth
399 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
400 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
404 + reg_map->qdma.glo_cfg);
406 MTK_RX_DMA_EN | rx_2b_offset |
407 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
409 + reg_map->pdma.glo_cfg);
411 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
412 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
414 + reg_map->pdma.glo_cfg);
418 @@ -2453,8 +2513,8 @@ static int mtk_stop(struct net_device *d
419 cancel_work_sync(ð->tx_dim.work);
421 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
422 - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
423 - mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
424 + mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
425 + mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
429 @@ -2508,6 +2568,7 @@ static void mtk_dim_rx(struct work_struc
431 struct dim *dim = container_of(work, struct dim, work);
432 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
433 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
434 struct dim_cq_moder cur_profile;
437 @@ -2515,7 +2576,7 @@ static void mtk_dim_rx(struct work_struc
439 spin_lock_bh(&eth->dim_lock);
441 - val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
442 + val = mtk_r32(eth, reg_map->pdma.delay_irq);
443 val &= MTK_PDMA_DELAY_TX_MASK;
444 val |= MTK_PDMA_DELAY_RX_EN;
446 @@ -2525,9 +2586,9 @@ static void mtk_dim_rx(struct work_struc
447 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
448 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
450 - mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
451 + mtk_w32(eth, val, reg_map->pdma.delay_irq);
452 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
453 - mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
454 + mtk_w32(eth, val, reg_map->qdma.delay_irq);
456 spin_unlock_bh(&eth->dim_lock);
458 @@ -2538,6 +2599,7 @@ static void mtk_dim_tx(struct work_struc
460 struct dim *dim = container_of(work, struct dim, work);
461 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
462 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
463 struct dim_cq_moder cur_profile;
466 @@ -2545,7 +2607,7 @@ static void mtk_dim_tx(struct work_struc
468 spin_lock_bh(&eth->dim_lock);
470 - val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
471 + val = mtk_r32(eth, reg_map->pdma.delay_irq);
472 val &= MTK_PDMA_DELAY_RX_MASK;
473 val |= MTK_PDMA_DELAY_TX_EN;
475 @@ -2555,9 +2617,9 @@ static void mtk_dim_tx(struct work_struc
476 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
477 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
479 - mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
480 + mtk_w32(eth, val, reg_map->pdma.delay_irq);
481 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
482 - mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
483 + mtk_w32(eth, val, reg_map->qdma.delay_irq);
485 spin_unlock_bh(&eth->dim_lock);
487 @@ -2568,6 +2630,7 @@ static int mtk_hw_init(struct mtk_eth *e
489 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
490 ETHSYS_DMA_AG_MAP_PPE;
491 + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
494 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
495 @@ -2642,10 +2705,10 @@ static int mtk_hw_init(struct mtk_eth *e
496 mtk_rx_irq_disable(eth, ~0);
498 /* FE int grouping */
499 - mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
500 - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
501 - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
502 - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
503 + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
504 + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
505 + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
506 + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
507 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
510 @@ -3177,14 +3240,6 @@ static int mtk_probe(struct platform_dev
511 if (IS_ERR(eth->base))
512 return PTR_ERR(eth->base);
514 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
515 - eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
516 - eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
518 - eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
519 - eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
522 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
523 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
524 eth->ip_align = NET_IP_ALIGN;
525 @@ -3418,6 +3473,7 @@ static int mtk_remove(struct platform_de
528 static const struct mtk_soc_data mt2701_data = {
529 + .reg_map = &mtk_reg_map,
530 .caps = MT7623_CAPS | MTK_HWLRO,
531 .hw_features = MTK_HW_FEATURES,
532 .required_clks = MT7623_CLKS_BITMAP,
533 @@ -3429,6 +3485,7 @@ static const struct mtk_soc_data mt2701_
536 static const struct mtk_soc_data mt7621_data = {
537 + .reg_map = &mtk_reg_map,
539 .hw_features = MTK_HW_FEATURES,
540 .required_clks = MT7621_CLKS_BITMAP,
541 @@ -3441,6 +3498,7 @@ static const struct mtk_soc_data mt7621_
544 static const struct mtk_soc_data mt7622_data = {
545 + .reg_map = &mtk_reg_map,
547 .caps = MT7622_CAPS | MTK_HWLRO,
548 .hw_features = MTK_HW_FEATURES,
549 @@ -3454,6 +3512,7 @@ static const struct mtk_soc_data mt7622_
552 static const struct mtk_soc_data mt7623_data = {
553 + .reg_map = &mtk_reg_map,
554 .caps = MT7623_CAPS | MTK_HWLRO,
555 .hw_features = MTK_HW_FEATURES,
556 .required_clks = MT7623_CLKS_BITMAP,
557 @@ -3466,6 +3525,7 @@ static const struct mtk_soc_data mt7623_
560 static const struct mtk_soc_data mt7629_data = {
561 + .reg_map = &mtk_reg_map,
563 .caps = MT7629_CAPS | MTK_HWLRO,
564 .hw_features = MTK_HW_FEATURES,
565 @@ -3478,6 +3538,7 @@ static const struct mtk_soc_data mt7629_
568 static const struct mtk_soc_data rt5350_data = {
569 + .reg_map = &mt7628_reg_map,
571 .hw_features = MTK_HW_FEATURES_MT7628,
572 .required_clks = MT7628_CLKS_BITMAP,
573 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
574 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
576 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
577 #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
579 +#define MTK_QRX_OFFSET 0x10
581 #define MTK_MAX_RX_RING_NUM 4
582 #define MTK_HW_LRO_DMA_SIZE 8
585 /* Unicast Filter MAC Address Register - High */
586 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
588 -/* PDMA RX Base Pointer Register */
589 -#define MTK_PRX_BASE_PTR0 0x900
590 -#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
592 -/* PDMA RX Maximum Count Register */
593 -#define MTK_PRX_MAX_CNT0 0x904
594 -#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
596 -/* PDMA RX CPU Pointer Register */
597 -#define MTK_PRX_CRX_IDX0 0x908
598 -#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
600 /* PDMA HW LRO Control Registers */
601 #define MTK_PDMA_LRO_CTRL_DW0 0x980
602 #define MTK_LRO_EN BIT(0)
603 @@ -126,18 +116,19 @@
604 #define MTK_ADMA_MODE BIT(15)
605 #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
607 -/* PDMA Global Configuration Register */
608 -#define MTK_PDMA_GLO_CFG 0xa04
609 +#define MTK_RX_DMA_LRO_EN BIT(8)
610 #define MTK_MULTI_EN BIT(10)
611 #define MTK_PDMA_SIZE_8DWORDS (1 << 4)
613 +/* PDMA Global Configuration Register */
614 +#define MTK_PDMA_LRO_SDL 0x3000
615 +#define MTK_RX_CFG_SDL_OFFSET 16
617 /* PDMA Reset Index Register */
618 -#define MTK_PDMA_RST_IDX 0xa08
619 #define MTK_PST_DRX_IDX0 BIT(16)
620 #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
622 /* PDMA Delay Interrupt Register */
623 -#define MTK_PDMA_DELAY_INT 0xa0c
624 #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
625 #define MTK_PDMA_DELAY_RX_EN BIT(15)
626 #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
628 #define MTK_PDMA_DELAY_PINT_MASK 0x7f
629 #define MTK_PDMA_DELAY_PTIME_MASK 0xff
631 -/* PDMA Interrupt Status Register */
632 -#define MTK_PDMA_INT_STATUS 0xa20
634 -/* PDMA Interrupt Mask Register */
635 -#define MTK_PDMA_INT_MASK 0xa28
637 /* PDMA HW LRO Alter Flow Delta Register */
638 #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
640 -/* PDMA Interrupt grouping registers */
641 -#define MTK_PDMA_INT_GRP1 0xa50
642 -#define MTK_PDMA_INT_GRP2 0xa54
644 /* PDMA HW LRO IP Setting Registers */
645 #define MTK_LRO_RX_RING0_DIP_DW0 0xb04
646 #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
648 #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
650 /* QDMA TX Queue Configuration Registers */
651 -#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
652 #define QDMA_RES_THRES 4
654 -/* QDMA TX Queue Scheduler Registers */
655 -#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
657 -/* QDMA RX Base Pointer Register */
658 -#define MTK_QRX_BASE_PTR0 0x1900
660 -/* QDMA RX Maximum Count Register */
661 -#define MTK_QRX_MAX_CNT0 0x1904
663 -/* QDMA RX CPU Pointer Register */
664 -#define MTK_QRX_CRX_IDX0 0x1908
666 -/* QDMA RX DMA Pointer Register */
667 -#define MTK_QRX_DRX_IDX0 0x190C
669 /* QDMA Global Configuration Register */
670 -#define MTK_QDMA_GLO_CFG 0x1A04
671 #define MTK_RX_2B_OFFSET BIT(31)
672 #define MTK_RX_BT_32DWORDS (3 << 11)
673 #define MTK_NDP_CO_PRO BIT(10)
674 @@ -216,20 +180,12 @@
675 #define MTK_TX_DMA_EN BIT(0)
676 #define MTK_DMA_BUSY_TIMEOUT_US 1000000
678 -/* QDMA Reset Index Register */
679 -#define MTK_QDMA_RST_IDX 0x1A08
681 -/* QDMA Delay Interrupt Register */
682 -#define MTK_QDMA_DELAY_INT 0x1A0C
684 /* QDMA Flow Control Register */
685 -#define MTK_QDMA_FC_THRES 0x1A10
686 #define FC_THRES_DROP_MODE BIT(20)
687 #define FC_THRES_DROP_EN (7 << 16)
688 #define FC_THRES_MIN 0x4444
690 /* QDMA Interrupt Status Register */
691 -#define MTK_QDMA_INT_STATUS 0x1A18
692 #define MTK_RX_DONE_DLY BIT(30)
693 #define MTK_TX_DONE_DLY BIT(28)
694 #define MTK_RX_DONE_INT3 BIT(19)
696 #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
698 /* QDMA Interrupt grouping registers */
699 -#define MTK_QDMA_INT_GRP1 0x1a20
700 -#define MTK_QDMA_INT_GRP2 0x1a24
701 #define MTK_RLS_DONE_INT BIT(0)
703 -/* QDMA Interrupt Status Register */
704 -#define MTK_QDMA_INT_MASK 0x1A1C
706 -/* QDMA Interrupt Mask Register */
707 -#define MTK_QDMA_HRED2 0x1A44
709 -/* QDMA TX Forward CPU Pointer Register */
710 -#define MTK_QTX_CTX_PTR 0x1B00
712 -/* QDMA TX Forward DMA Pointer Register */
713 -#define MTK_QTX_DTX_PTR 0x1B04
715 -/* QDMA TX Release CPU Pointer Register */
716 -#define MTK_QTX_CRX_PTR 0x1B10
718 -/* QDMA TX Release DMA Pointer Register */
719 -#define MTK_QTX_DRX_PTR 0x1B14
721 -/* QDMA FQ Head Pointer Register */
722 -#define MTK_QDMA_FQ_HEAD 0x1B20
724 -/* QDMA FQ Head Pointer Register */
725 -#define MTK_QDMA_FQ_TAIL 0x1B24
727 -/* QDMA FQ Free Page Counter Register */
728 -#define MTK_QDMA_FQ_CNT 0x1B28
730 -/* QDMA FQ Free Page Buffer Length Register */
731 -#define MTK_QDMA_FQ_BLEN 0x1B2C
733 -/* GMA1 counter / statics register */
734 -#define MTK_GDM1_RX_GBCNT_L 0x2400
735 -#define MTK_GDM1_RX_GBCNT_H 0x2404
736 -#define MTK_GDM1_RX_GPCNT 0x2408
737 -#define MTK_GDM1_RX_OERCNT 0x2410
738 -#define MTK_GDM1_RX_FERCNT 0x2414
739 -#define MTK_GDM1_RX_SERCNT 0x2418
740 -#define MTK_GDM1_RX_LENCNT 0x241c
741 -#define MTK_GDM1_RX_CERCNT 0x2420
742 -#define MTK_GDM1_RX_FCCNT 0x2424
743 -#define MTK_GDM1_TX_SKIPCNT 0x2428
744 -#define MTK_GDM1_TX_COLCNT 0x242c
745 -#define MTK_GDM1_TX_GBCNT_L 0x2430
746 -#define MTK_GDM1_TX_GBCNT_H 0x2434
747 -#define MTK_GDM1_TX_GPCNT 0x2438
748 #define MTK_STAT_OFFSET 0x40
750 #define MTK_WDMA0_BASE 0x2800
751 @@ -854,8 +763,46 @@ struct mtk_tx_dma_desc_info {
755 +struct mtk_reg_map {
759 + u32 rx_ptr; /* rx base pointer */
760 + u32 rx_cnt_cfg; /* rx max count configuration */
761 + u32 pcrx_ptr; /* rx cpu pointer */
762 + u32 glo_cfg; /* global configuration */
763 + u32 rst_idx; /* reset index */
764 + u32 delay_irq; /* delay interrupt */
765 + u32 irq_status; /* interrupt status */
766 + u32 irq_mask; /* interrupt mask */
770 + u32 qtx_cfg; /* tx queue configuration */
771 + u32 rx_ptr; /* rx base pointer */
772 + u32 rx_cnt_cfg; /* rx max count configuration */
773 + u32 qcrx_ptr; /* rx cpu pointer */
774 + u32 glo_cfg; /* global configuration */
775 + u32 rst_idx; /* reset index */
776 + u32 delay_irq; /* delay interrupt */
777 + u32 fc_th; /* flow control */
779 + u32 hred; /* QDMA HRED2 (drop threshold) register */
780 + u32 ctx_ptr; /* tx acquire cpu pointer */
781 + u32 dtx_ptr; /* tx acquire dma pointer */
782 + u32 crx_ptr; /* tx release cpu pointer */
783 + u32 drx_ptr; /* tx release dma pointer */
784 + u32 fq_head; /* fq head pointer */
785 + u32 fq_tail; /* fq tail pointer */
786 + u32 fq_count; /* fq free page count */
787 + u32 fq_blen; /* fq free page buffer length */
792 /* struct mtk_eth_data - This is the structure holding all differences
793 * among various plaforms
794 + * @reg_map Soc register map.
795 * @ana_rgc3: The offset for register ANA_RGC3 related to
797 * @caps Flags shown the extra capability for the SoC
798 @@ -868,6 +815,7 @@ struct mtk_tx_dma_desc_info {
799 * @rxd_size Rx DMA descriptor size.
801 struct mtk_soc_data {
802 + const struct mtk_reg_map *reg_map;
806 @@ -995,8 +943,6 @@ struct mtk_eth {
810 - u32 tx_int_mask_reg;
811 - u32 tx_int_status_reg;