--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 2 Nov 2023 16:47:07 +0100
+Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
+ in mtk_soc_data struct
+
+Split the tx and rx fields in the mtk_soc_data struct. This is a
+preliminary patch to roll back to QDMA on the MT7986 SoC in order to
+fix a hw hang that occurs when the device receives a corrupted packet.
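+
+For example (illustrative only, mirroring the hunks below), call sites
+move from the shared txrx struct to the per-direction tx and rx ones:
+
+	/* before: one struct shared by both directions */
+	sz = eth->soc->txrx.txd_size;
+	mask = eth->soc->txrx.rx_irq_done_mask;
+
+	/* after: each direction carries its own config */
+	sz = eth->soc->tx.desc_size;
+	mask = eth->soc->rx.irq_done_mask;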
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1239,7 +1239,7 @@ static int mtk_init_fq_dma(struct mtk_et
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+- cnt * soc->txrx.txd_size,
++ cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_ring))
+@@ -1255,16 +1255,16 @@ static int mtk_init_fq_dma(struct mtk_et
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+- txd = eth->scratch_ring + i * soc->txrx.txd_size;
++ txd = eth->scratch_ring + i * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+- (i + 1) * soc->txrx.txd_size;
++ (i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+@@ -1511,7 +1511,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1552,7 +1552,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+- soc->txrx.dma_max_len);
++ soc->tx.dma_max_len);
+ txd_info.qid = queue;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+@@ -1565,7 +1565,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1608,7 +1608,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ } else {
+ int next_idx;
+
+- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+@@ -1617,7 +1617,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ err_dma:
+ do {
+- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf, false);
+@@ -1642,7 +1642,7 @@ static int mtk_cal_txd_req(struct mtk_et
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+- eth->soc->txrx.dma_max_len);
++ eth->soc->tx.dma_max_len);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1783,7 +1783,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+
+ ring = &eth->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+@@ -1951,7 +1951,7 @@ static int mtk_xdp_submit_frame(struct m
+ }
+ htxd = txd;
+
+- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+@@ -2008,7 +2008,7 @@ static int mtk_xdp_submit_frame(struct m
+ } else {
+ int idx;
+
+- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
+ unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+ mtk_tx_unmap(eth, tx_buf, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2148,7 +2148,7 @@ static int mtk_poll_rx(struct napi_struc
+ goto rx_done;
+
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ data = ring->data[idx];
+
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2283,7 +2283,7 @@ static int mtk_poll_rx(struct napi_struc
+ rxdcsum = &trxd.rxd4;
+ }
+
+- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+@@ -2405,7 +2405,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ break;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+- eth->soc->txrx.txd_size);
++ eth->soc->tx.desc_size);
+ if (!tx_buf->data)
+ break;
+
+@@ -2453,7 +2453,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ }
+ mtk_tx_unmap(eth, tx_buf, true);
+
+- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++ desc = ring->dma + cpu * eth->soc->tx.desc_size;
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+@@ -2542,7 +2542,7 @@ static int mtk_napi_rx(struct napi_struc
+ do {
+ int rx_done;
+
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++ mtk_w32(eth, eth->soc->rx.irq_done_mask,
+ reg_map->pdma.irq_status);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
+@@ -2558,10 +2558,10 @@ static int mtk_napi_rx(struct napi_struc
+ return budget;
+
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask);
++ eth->soc->rx.irq_done_mask);
+
+ if (napi_complete_done(napi, rx_done_total))
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+
+ return rx_done_total;
+ }
+@@ -2570,7 +2570,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- int i, sz = soc->txrx.txd_size;
++ int i, sz = soc->tx.desc_size;
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+@@ -2693,14 +2693,14 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma_pdma, ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+@@ -2755,15 +2755,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+- rx_dma_size * eth->soc->txrx.rxd_size,
+- &ring->phys, GFP_KERNEL);
++ rx_dma_size * eth->soc->rx.desc_size,
++ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+ ring->dma = tx_ring->dma + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ ring->phys = tx_ring->phys + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ }
+
+ if (!ring->dma)
+@@ -2774,7 +2774,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ dma_addr_t dma_addr;
+ void *data;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+@@ -2863,7 +2863,7 @@ static void mtk_rx_clean(struct mtk_eth
+ if (!ring->data[i])
+ continue;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (!rxd->rxd1)
+ continue;
+
+@@ -2880,7 +2880,7 @@ static void mtk_rx_clean(struct mtk_eth
+
+ if (!in_sram && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * eth->soc->txrx.rxd_size,
++ ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+@@ -3243,7 +3243,7 @@ static void mtk_dma_free(struct mtk_eth
+ netdev_reset_queue(eth->netdev[i]);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+@@ -3293,7 +3293,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+
+ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
+ }
+
+@@ -3319,9 +3319,9 @@ static irqreturn_t mtk_handle_irq(int ir
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+- eth->soc->txrx.rx_irq_done_mask) {
++ eth->soc->rx.irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask)
++ eth->soc->rx.irq_done_mask)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3339,10 +3339,10 @@ static void mtk_poll_controller(struct n
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+
+@@ -3507,7 +3507,7 @@ static int mtk_open(struct net_device *d
+ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+ refcount_set(&eth->dma_refcnt, 1);
+ }
+ else
+@@ -3590,7 +3590,7 @@ static int mtk_stop(struct net_device *d
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
+@@ -4066,9 +4066,9 @@ static int mtk_hw_init(struct mtk_eth *e
+
+ /* FE int grouping */
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5165,11 +5165,15 @@ static const struct mtk_soc_data mt2701_
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5185,11 +5189,15 @@ static const struct mtk_soc_data mt7621_
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5207,11 +5215,15 @@ static const struct mtk_soc_data mt7622_
+ .hash_offset = 2,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5228,11 +5240,15 @@ static const struct mtk_soc_data mt7623_
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5247,11 +5263,15 @@ static const struct mtk_soc_data mt7629_
+ .required_pctl = false,
+ .has_accounting = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5269,11 +5289,15 @@ static const struct mtk_soc_data mt7981_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5291,11 +5315,15 @@ static const struct mtk_soc_data mt7986_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5313,11 +5341,15 @@ static const struct mtk_soc_data mt7988_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5330,11 +5362,15 @@ static const struct mtk_soc_data rt5350_
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -326,8 +326,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU BIT(31)
+ #define TX_DMA_LS0 BIT(30)
+-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC BIT(14)
+ #define TX_DMA_PQID GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+@@ -347,8 +347,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE BIT(31)
+ #define RX_DMA_LSO BIT(30)
+-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG BIT(15)
+ #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+- * @txd_size Tx DMA descriptor size.
+- * @rxd_size Rx DMA descriptor size.
+- * @rx_irq_done_mask Rx irq done register mask.
+- * @rx_dma_l4_valid Rx DMA valid register mask.
++ * @desc_size Tx/Rx DMA descriptor size.
++ * @irq_done_mask Rx irq done register mask.
++ * @dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
+ */
+@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+- u32 txd_size;
+- u32 rxd_size;
+- u32 rx_irq_done_mask;
+- u32 rx_dma_l4_valid;
++ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+- } txrx;
++ } tx;
++ struct {
++ u32 desc_size;
++ u32 irq_done_mask;
++ u32 dma_l4_valid;
++ u32 dma_max_len;
++ u32 dma_len_offset;
++ } rx;
+ };
+
+ #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
--- /dev/null
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Tue, 10 Oct 2023 21:06:43 +0200
+Subject: [PATCH net-next 2/2] net: ethernet: mediatek: use QDMA instead of
+ ADMAv2 on MT7981 and MT7986
+
+ADMA is plagued by RX hangs which can't easily be detected and which
+happen upon reception of a corrupted packet.
+Use QDMA just like on netsys v1, which is also still present and usable
+and doesn't suffer from that problem.
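+
+Concretely (a sketch assembled from the hunks below), the PDMA register
+block moves from the 0x6xxx ADMAv2 range back to the 0x4xxx range, and
+the MT7981/MT7986 rx side reverts to the v1 descriptor layout while the
+checksum-valid flag keeps its v2 bit position:
+
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},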
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+- .rx_ptr = 0x6100,
+- .rx_cnt_cfg = 0x6104,
+- .pcrx_ptr = 0x6108,
+- .glo_cfg = 0x6204,
+- .rst_idx = 0x6208,
+- .delay_irq = 0x620c,
+- .irq_status = 0x6220,
+- .irq_mask = 0x6228,
+- .adma_rx_dbg0 = 0x6238,
+- .int_grp = 0x6250,
++ .rx_ptr = 0x4100,
++ .rx_cnt_cfg = 0x4104,
++ .pcrx_ptr = 0x4108,
++ .glo_cfg = 0x4204,
++ .rst_idx = 0x4208,
++ .delay_irq = 0x420c,
++ .irq_status = 0x4220,
++ .irq_mask = 0x4228,
++ .adma_rx_dbg0 = 0x4238,
++ .int_grp = 0x4250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+@@ -1207,7 +1207,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
+@@ -2155,7 +2155,7 @@ static int mtk_poll_rx(struct napi_struc
+ break;
+
+ /* find out which mac the packet come from. values start at 1 */
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+ switch (val) {
+@@ -2267,7 +2267,7 @@ static int mtk_poll_rx(struct napi_struc
+ skb->dev = netdev;
+ bytes += skb->len;
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2809,7 +2809,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+@@ -4012,7 +4012,7 @@ static int mtk_hw_init(struct mtk_eth *e
+ else
+ mtk_hw_reset(eth);
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5295,11 +5295,11 @@ static const struct mtk_soc_data mt7981_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+
+@@ -5321,11 +5321,11 @@ static const struct mtk_soc_data mt7986_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Thu, 2 Nov 2023 16:47:07 +0100
+Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
+ in mtk_soc_data struct
+
+Split the tx and rx fields in the mtk_soc_data struct. This is a
+preliminary patch to roll back to QDMA on the MT7986 SoC in order to
+fix a hw hang that occurs when the device receives a corrupted packet.
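+
+For example (illustrative only, mirroring the hunks below), call sites
+move from the shared txrx struct to the per-direction tx and rx ones:
+
+	/* before: one struct shared by both directions */
+	sz = eth->soc->txrx.txd_size;
+	mask = eth->soc->txrx.rx_irq_done_mask;
+
+	/* after: each direction carries its own config */
+	sz = eth->soc->tx.desc_size;
+	mask = eth->soc->rx.irq_done_mask;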
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1238,7 +1238,7 @@ static int mtk_init_fq_dma(struct mtk_et
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+- cnt * soc->txrx.txd_size,
++ cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_ring))
+@@ -1254,16 +1254,16 @@ static int mtk_init_fq_dma(struct mtk_et
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+- txd = eth->scratch_ring + i * soc->txrx.txd_size;
++ txd = eth->scratch_ring + i * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+- (i + 1) * soc->txrx.txd_size;
++ (i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+@@ -1512,7 +1512,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1553,7 +1553,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+- soc->txrx.dma_max_len);
++ soc->tx.dma_max_len);
+ txd_info.qid = queue;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+@@ -1566,7 +1566,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1609,7 +1609,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ } else {
+ int next_idx;
+
+- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+@@ -1618,7 +1618,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ err_dma:
+ do {
+- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+@@ -1643,7 +1643,7 @@ static int mtk_cal_txd_req(struct mtk_et
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+- eth->soc->txrx.dma_max_len);
++ eth->soc->tx.dma_max_len);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1784,7 +1784,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+
+ ring = &eth->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+@@ -1952,7 +1952,7 @@ static int mtk_xdp_submit_frame(struct m
+ }
+ htxd = txd;
+
+- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+@@ -2009,7 +2009,7 @@ static int mtk_xdp_submit_frame(struct m
+ } else {
+ int idx;
+
+- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
+
+ unmap:
+ while (htxd != txd) {
+- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2151,7 +2151,7 @@ static int mtk_poll_rx(struct napi_struc
+ goto rx_done;
+
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ data = ring->data[idx];
+
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2286,7 +2286,7 @@ static int mtk_poll_rx(struct napi_struc
+ rxdcsum = &trxd.rxd4;
+ }
+
+- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+@@ -2410,7 +2410,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ break;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+- eth->soc->txrx.txd_size);
++ eth->soc->tx.desc_size);
+ if (!tx_buf->data)
+ break;
+
+@@ -2461,7 +2461,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ }
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+
+- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++ desc = ring->dma + cpu * eth->soc->tx.desc_size;
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+@@ -2551,7 +2551,7 @@ static int mtk_napi_rx(struct napi_struc
+ do {
+ int rx_done;
+
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++ mtk_w32(eth, eth->soc->rx.irq_done_mask,
+ reg_map->pdma.irq_status);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
+@@ -2567,10 +2567,10 @@ static int mtk_napi_rx(struct napi_struc
+ return budget;
+
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask);
++ eth->soc->rx.irq_done_mask);
+
+ if (napi_complete_done(napi, rx_done_total))
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+
+ return rx_done_total;
+ }
+@@ -2579,7 +2579,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- int i, sz = soc->txrx.txd_size;
++ int i, sz = soc->tx.desc_size;
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+@@ -2702,14 +2702,14 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma_pdma, ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+@@ -2764,15 +2764,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+- rx_dma_size * eth->soc->txrx.rxd_size,
+- &ring->phys, GFP_KERNEL);
++ rx_dma_size * eth->soc->rx.desc_size,
++ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+ ring->dma = tx_ring->dma + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ ring->phys = tx_ring->phys + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ }
+
+ if (!ring->dma)
+@@ -2783,7 +2783,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ dma_addr_t dma_addr;
+ void *data;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+@@ -2874,7 +2874,7 @@ static void mtk_rx_clean(struct mtk_eth
+ if (!ring->data[i])
+ continue;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (!rxd->rxd1)
+ continue;
+
+@@ -2891,7 +2891,7 @@ static void mtk_rx_clean(struct mtk_eth
+
+ if (!in_sram && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * eth->soc->txrx.rxd_size,
++ ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+@@ -3254,7 +3254,7 @@ static void mtk_dma_free(struct mtk_eth
+ netdev_reset_queue(eth->netdev[i]);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+@@ -3304,7 +3304,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+
+ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
+ }
+
+@@ -3330,9 +3330,9 @@ static irqreturn_t mtk_handle_irq(int ir
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+- eth->soc->txrx.rx_irq_done_mask) {
++ eth->soc->rx.irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask)
++ eth->soc->rx.irq_done_mask)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3350,10 +3350,10 @@ static void mtk_poll_controller(struct n
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+
+@@ -3516,7 +3516,7 @@ static int mtk_open(struct net_device *d
+ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+ refcount_set(&eth->dma_refcnt, 1);
+ }
+ else
+@@ -3599,7 +3599,7 @@ static int mtk_stop(struct net_device *d
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
+@@ -4075,9 +4075,9 @@ static int mtk_hw_init(struct mtk_eth *e
+
+ /* FE int grouping */
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5172,11 +5172,15 @@ static const struct mtk_soc_data mt2701_
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5192,11 +5196,15 @@ static const struct mtk_soc_data mt7621_
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5214,11 +5222,15 @@ static const struct mtk_soc_data mt7622_
+ .hash_offset = 2,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5235,11 +5247,15 @@ static const struct mtk_soc_data mt7623_
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5254,11 +5270,15 @@ static const struct mtk_soc_data mt7629_
+ .required_pctl = false,
+ .has_accounting = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5276,11 +5296,15 @@ static const struct mtk_soc_data mt7981_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5298,11 +5322,15 @@ static const struct mtk_soc_data mt7986_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5320,11 +5348,15 @@ static const struct mtk_soc_data mt7988_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5337,11 +5369,15 @@ static const struct mtk_soc_data rt5350_
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -326,8 +326,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU BIT(31)
+ #define TX_DMA_LS0 BIT(30)
+-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC BIT(14)
+ #define TX_DMA_PQID GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+@@ -347,8 +347,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE BIT(31)
+ #define RX_DMA_LSO BIT(30)
+-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG BIT(15)
+ #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+- * @txd_size Tx DMA descriptor size.
+- * @rxd_size Rx DMA descriptor size.
+- * @rx_irq_done_mask Rx irq done register mask.
+- * @rx_dma_l4_valid Rx DMA valid register mask.
++ * @desc_size Tx/Rx DMA descriptor size.
++ * @irq_done_mask Rx irq done register mask.
++ * @dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
+ */
+@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+- u32 txd_size;
+- u32 rxd_size;
+- u32 rx_irq_done_mask;
+- u32 rx_dma_l4_valid;
++ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+- } txrx;
++ } tx;
++ struct {
++ u32 desc_size;
++ u32 irq_done_mask;
++ u32 dma_l4_valid;
++ u32 dma_max_len;
++ u32 dma_len_offset;
++ } rx;
+ };
+
+ #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
--- /dev/null
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Tue, 10 Oct 2023 21:06:43 +0200
+Subject: [PATCH net-next 2/2] net: ethernet: mediatek: use QDMA instead of
+ ADMAv2 on MT7981 and MT7986
+
+ADMA is plagued by RX hangs which can't easily be detected and which
+happen upon reception of a corrupted packet.
+Use QDMA just like on netsys v1, which is also still present and usable
+and doesn't suffer from that problem.
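+
+Concretely (a sketch assembled from the hunks below), the PDMA register
+block moves from the 0x6xxx ADMAv2 range back to the 0x4xxx range, and
+the MT7981/MT7986 rx side reverts to the v1 descriptor layout while the
+checksum-valid flag keeps its v2 bit position:
+
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},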
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+- .rx_ptr = 0x6100,
+- .rx_cnt_cfg = 0x6104,
+- .pcrx_ptr = 0x6108,
+- .glo_cfg = 0x6204,
+- .rst_idx = 0x6208,
+- .delay_irq = 0x620c,
+- .irq_status = 0x6220,
+- .irq_mask = 0x6228,
+- .adma_rx_dbg0 = 0x6238,
+- .int_grp = 0x6250,
++ .rx_ptr = 0x4100,
++ .rx_cnt_cfg = 0x4104,
++ .pcrx_ptr = 0x4108,
++ .glo_cfg = 0x4204,
++ .rst_idx = 0x4208,
++ .delay_irq = 0x420c,
++ .irq_status = 0x4220,
++ .irq_mask = 0x4228,
++ .adma_rx_dbg0 = 0x4238,
++ .int_grp = 0x4250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+@@ -1206,7 +1206,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
+@@ -2158,7 +2158,7 @@ static int mtk_poll_rx(struct napi_struc
+ break;
+
+ /* find out which mac the packet come from. values start at 1 */
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+ switch (val) {
+@@ -2270,7 +2270,7 @@ static int mtk_poll_rx(struct napi_struc
+ skb->dev = netdev;
+ bytes += skb->len;
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2820,7 +2820,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+@@ -4021,7 +4021,7 @@ static int mtk_hw_init(struct mtk_eth *e
+ else
+ mtk_hw_reset(eth);
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5302,11 +5302,11 @@ static const struct mtk_soc_data mt7981_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+
+@@ -5328,11 +5328,11 @@ static const struct mtk_soc_data mt7986_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+