--- /dev/null
+From c57e558194430d10d5e5f4acd8a8655b68dade13 Mon Sep 17 00:00:00 2001
+From: Frank Wunderlich <frank-w@public-files.de>
+Date: Mon, 3 Jun 2024 21:25:05 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: handle dma buffer size soc
+ specific
+
+The mainline MTK ethernet driver has long suffered from rare but
+annoying TX queue timeouts. We think these are caused by the fixed
+DMA ring sizes that are hardcoded for all SoCs.
+
+We suspect the problem arises when the number of free TX DMADs runs
+low, i.e. the TX ring is almost full.
+
+The transmit timeout is caused by the TX queue not waking up. The
+TX queue stops when the free counter drops below ring->thres and
+wakes up again once the free counter rises above ring->thres. If
+the CPU is too late to wake up the TX queue, a transmit timeout
+results. Therefore, increase the number of TX and RX DMADs to
+mitigate this situation.
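+
+The behaviour can be pictured with a small, self-contained model of
+this stop/wake hysteresis (the names below are illustrative
+stand-ins, not the exact driver identifiers; the real driver uses
+atomic ring counters and the netif_tx_* queue helpers):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct tx_ring_model {
+          int free_count;  /* free TX DMADs as seen by the CPU */
+          int thres;       /* stop/wake threshold              */
+          bool stopped;    /* TX queue state                   */
+  };
+
+  /* transmit path: descriptors are consumed, queue stops near full */
+  static void model_xmit(struct tx_ring_model *r, int ndesc)
+  {
+          r->free_count -= ndesc;
+          if (r->free_count < r->thres)
+                  r->stopped = true;
+  }
+
+  /* completion path: descriptors are freed, queue wakes above thres */
+  static void model_complete(struct tx_ring_model *r, int ndesc)
+  {
+          r->free_count += ndesc;
+          if (r->stopped && r->free_count > r->thres)
+                  r->stopped = false;
+  }
+
+  int main(void)
+  {
+          /* a 512-entry ring leaves little headroom before stopping,
+           * a 2048-entry ring gives the completion path more time   */
+          struct tx_ring_model small = { .free_count = 512,  .thres = 32 };
+          struct tx_ring_model big   = { .free_count = 2048, .thres = 32 };
+
+          model_xmit(&small, 490);
+          model_xmit(&big, 490);
+          printf("512-ring stopped=%d  2048-ring stopped=%d\n",
+                 small.stopped, big.stopped);
+          return 0;
+  }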
+
+Use the DMA ring size implementation from the SDK in a per-SoC
+manner. Unlike the SDK, we have no RSS feature yet, so all RX/TX
+ring sizes are raised from 512 to 2048 descriptors, except the FQ
+DMA ring on MT7988 (4096 descriptors), to avoid the TX timeout
+issue.
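+
+For reference, the new MTK_DMA_SIZE() helper simply token-pastes onto
+the kernel's SZ_* constants from <linux/sizes.h>, so the per-SoC
+defaults below work out to:
+
+  #define MTK_DMA_SIZE(x)  (SZ_##x)
+
+  MTK_DMA_SIZE(2K)  /* SZ_2K == 2048 descriptors, default for all rings */
+  MTK_DMA_SIZE(4K)  /* SZ_4K == 4096 descriptors, MT7988 FQ DMA only    */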
+
+Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet")
+Suggested-by: Daniel Golle <daniel@makrotopia.org>
+Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +-
+ 2 files changed, 77 insertions(+), 36 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1071,9 +1071,9 @@ static int mtk_init_fq_dma(struct mtk_et
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ dma_addr_t phy_ring_tail;
+- int cnt = MTK_QDMA_RING_SIZE;
++ int cnt = soc->tx.fq_dma_size;
+ dma_addr_t dma_addr;
+- int i;
++ int i, j, len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+ eth->scratch_ring = eth->sram_base;
+@@ -1082,40 +1082,46 @@ static int mtk_init_fq_dma(struct mtk_et
+ cnt * soc->tx.desc_size,
+ ð->phy_scratch_ring,
+ GFP_KERNEL);
++
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+- if (unlikely(!eth->scratch_head))
+- return -ENOMEM;
++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+
+- dma_addr = dma_map_single(eth->dma_dev,
+- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+- DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+- return -ENOMEM;
++ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
++ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
++ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
++ if (unlikely(!eth->scratch_head[j]))
++ return -ENOMEM;
+
+- for (i = 0; i < cnt; i++) {
+- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+- struct mtk_tx_dma_v2 *txd;
+-
+- txd = eth->scratch_ring + i * soc->tx.desc_size;
+- txd->txd1 = addr;
+- if (i < cnt - 1)
+- txd->txd2 = eth->phy_scratch_ring +
+- (i + 1) * soc->tx.desc_size;
+-
+- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
+- txd->txd4 = 0;
+- if (mtk_is_netsys_v2_or_greater(eth)) {
+- txd->txd5 = 0;
+- txd->txd6 = 0;
+- txd->txd7 = 0;
+- txd->txd8 = 0;
++ dma_addr = dma_map_single(eth->dma_dev,
++ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
++ DMA_FROM_DEVICE);
++
++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
++ return -ENOMEM;
++
++ for (i = 0; i < cnt; i++) {
++ struct mtk_tx_dma_v2 *txd;
++
++ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
++ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
++ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
++ txd->txd2 = eth->phy_scratch_ring +
++ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
++
++ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
++ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
++ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
++
++ txd->txd4 = 0;
++ if (mtk_is_netsys_v2_or_greater(eth)) {
++ txd->txd5 = 0;
++ txd->txd6 = 0;
++ txd->txd7 = 0;
++ txd->txd8 = 0;
++ }
+ }
+ }
+
+@@ -2386,7 +2392,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+- ring_size = MTK_DMA_SIZE;
++ ring_size = soc->tx.dma_size;
+
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
+ GFP_KERNEL);
+@@ -2394,8 +2400,8 @@ static int mtk_tx_alloc(struct mtk_eth *
+ goto no_tx_mem;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+- ring->dma = eth->sram_base + ring_size * sz;
+- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
++ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
++ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
+ } else {
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys, GFP_KERNEL);
+@@ -2517,6 +2523,7 @@ static void mtk_tx_clean(struct mtk_eth
+ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
++ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_rx_ring *ring;
+ int rx_data_len, rx_dma_size, tx_ring_size;
+ int i;
+@@ -2524,7 +2531,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ tx_ring_size = MTK_QDMA_RING_SIZE;
+ else
+- tx_ring_size = MTK_DMA_SIZE;
++ tx_ring_size = soc->tx.dma_size;
+
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ if (ring_no)
+@@ -2539,7 +2546,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+- rx_dma_size = MTK_DMA_SIZE;
++ rx_dma_size = soc->rx.dma_size;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
+@@ -3066,7 +3073,10 @@ static void mtk_dma_free(struct mtk_eth
+ mtk_rx_clean(eth, ð->rx_ring[i], false);
+ }
+
+- kfree(eth->scratch_head);
++ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
++ kfree(eth->scratch_head[i]);
++ eth->scratch_head[i] = NULL;
++ }
+ }
+
+ static bool mtk_hw_reset_check(struct mtk_eth *eth)
+@@ -4952,11 +4962,14 @@ static const struct mtk_soc_data mt2701_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -4976,11 +4989,14 @@ static const struct mtk_soc_data mt7621_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5002,11 +5018,14 @@ static const struct mtk_soc_data mt7622_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5027,11 +5046,14 @@ static const struct mtk_soc_data mt7623_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5050,11 +5072,14 @@ static const struct mtk_soc_data mt7629_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5076,6 +5101,8 @@ static const struct mtk_soc_data mt7981_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5083,6 +5110,7 @@ static const struct mtk_soc_data mt7981_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5102,6 +5130,8 @@ static const struct mtk_soc_data mt7986_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5109,6 +5139,7 @@ static const struct mtk_soc_data mt7986_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5128,6 +5159,8 @@ static const struct mtk_soc_data mt7988_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+@@ -5135,6 +5168,7 @@ static const struct mtk_soc_data mt7988_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5149,6 +5183,7 @@ static const struct mtk_soc_data rt5350_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5156,6 +5191,7 @@ static const struct mtk_soc_data rt5350_
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -32,7 +32,9 @@
+ #define MTK_TX_DMA_BUF_LEN 0x3fff
+ #define MTK_TX_DMA_BUF_LEN_V2 0xffff
+ #define MTK_QDMA_RING_SIZE 2048
+-#define MTK_DMA_SIZE 512
++#define MTK_DMA_SIZE(x) (SZ_##x)
++#define MTK_FQ_DMA_HEAD 32
++#define MTK_FQ_DMA_LENGTH 2048
+ #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)
+ #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+ #define MTK_DMA_DUMMY_DESC 0xffffffff
+@@ -1173,6 +1175,8 @@ struct mtk_soc_data {
+ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
++ u32 dma_size;
++ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+@@ -1180,6 +1184,7 @@ struct mtk_soc_data {
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
++ u32 dma_size;
+ } rx;
+ };
+
+@@ -1261,7 +1266,7 @@ struct mtk_eth {
+ struct napi_struct rx_napi;
+ void *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+- void *scratch_head;
++ void *scratch_head[MTK_FQ_DMA_HEAD];
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
-@@ -4564,8 +4679,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4574,8 +4689,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4758,6 +4886,13 @@ static int mtk_probe(struct platform_dev
+@@ -4768,6 +4896,13 @@ static int mtk_probe(struct platform_dev
if (err)
return err;
if (eth->soc->required_pctl) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -499,6 +499,21 @@
+@@ -501,6 +501,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
-@@ -524,6 +539,7 @@
+@@ -526,6 +541,7 @@
#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
/* ethernet subsystem clock register */
-@@ -556,12 +572,74 @@
+@@ -558,12 +574,74 @@
#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
-@@ -586,6 +664,8 @@
+@@ -588,6 +666,8 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
#define MTK_FE_CDM1_FSM 0x220
#define MTK_FE_CDM2_FSM 0x224
#define MTK_FE_CDM3_FSM 0x238
-@@ -594,6 +674,11 @@
+@@ -596,6 +676,11 @@
#define MTK_FE_CDM6_FSM 0x328
#define MTK_FE_GDM1_FSM 0x228
#define MTK_FE_GDM2_FSM 0x22C
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
-@@ -940,6 +1025,8 @@ enum mkt_eth_capabilities {
+@@ -942,6 +1027,8 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
-@@ -960,8 +1047,11 @@ enum mkt_eth_capabilities {
+@@ -962,8 +1049,11 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
/* PATH BITS */
MTK_ETH_PATH_GMAC1_RGMII_BIT,
-@@ -969,14 +1059,21 @@ enum mkt_eth_capabilities {
+@@ -971,14 +1061,21 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
-@@ -999,10 +1096,16 @@ enum mkt_eth_capabilities {
+@@ -1001,10 +1098,16 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
/* Supported path present on SoCs */
#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
-@@ -1010,8 +1113,13 @@ enum mkt_eth_capabilities {
+@@ -1012,8 +1115,13 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
-@@ -1019,7 +1127,12 @@ enum mkt_eth_capabilities {
+@@ -1021,7 +1129,12 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
-@@ -1038,10 +1151,20 @@ enum mkt_eth_capabilities {
+@@ -1040,10 +1153,20 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
-@@ -1073,8 +1196,12 @@ enum mkt_eth_capabilities {
+@@ -1075,8 +1198,12 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
-@@ -1187,6 +1314,24 @@ struct mtk_soc_data {
+@@ -1192,6 +1319,24 @@ struct mtk_soc_data {
/* currently no SoC has more than 3 macs */
#define MTK_MAX_DEVS 3
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
-@@ -1207,6 +1352,12 @@ struct mtk_soc_data {
+@@ -1212,6 +1357,12 @@ struct mtk_soc_data {
* @infra: The register map pointing at the range used to setup
* SGMII and GePHY path
* @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
-@@ -1250,6 +1401,10 @@ struct mtk_eth {
+@@ -1255,6 +1406,10 @@ struct mtk_eth {
struct regmap *ethsys;
struct regmap *infra;
struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
-@@ -1437,6 +1592,19 @@ static inline u32 mtk_get_ib2_multicast_
+@@ -1442,6 +1597,19 @@ static inline u32 mtk_get_ib2_multicast_
return MTK_FOE_IB2_MULTICAST;
}
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
-@@ -1445,8 +1613,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne
+@@ -1450,8 +1618,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
-@@ -1456,5 +1626,63 @@ int mtk_flow_offload_cmd(struct mtk_eth
+@@ -1461,5 +1631,63 @@ int mtk_flow_offload_cmd(struct mtk_eth
void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);