From: Daniel Golle
Date: Sun, 16 Jun 2024 12:20:37 +0000 (+0100)
Subject: generic: 6.6: backport upstream commits for mtk_eth_soc
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=98ddfbc56a;p=openwrt%2Fstaging%2Fneocturne.git

generic: 6.6: backport upstream commits for mtk_eth_soc

Backport two commits for mtk_eth_soc:
 * net: ethernet: mtk_eth_soc: handle dma buffer size soc specific
   (torvalds/linux@c57e558194430d10d5e5f4acd8a8655b68dade13)
 * net: ethernet: mtk_eth_soc: ppe: add support for multiple PPEs
   (torvalds/linux@dee4dd10c79aaca192b73520d8fb64628468ae0f)

Refresh pending patches which require that.

Signed-off-by: Daniel Golle
---

diff --git a/target/linux/generic/backport-6.6/752-25-v6.10-net-ethernet-mtk_eth_soc-handle-dma-buffer-size-soc-.patch b/target/linux/generic/backport-6.6/752-25-v6.10-net-ethernet-mtk_eth_soc-handle-dma-buffer-size-soc-.patch
new file mode 100644
index 0000000000..3d72a5f1da
--- /dev/null
+++ b/target/linux/generic/backport-6.6/752-25-v6.10-net-ethernet-mtk_eth_soc-handle-dma-buffer-size-soc-.patch
@@ -0,0 +1,364 @@
+From c57e558194430d10d5e5f4acd8a8655b68dade13 Mon Sep 17 00:00:00 2001
+From: Frank Wunderlich
+Date: Mon, 3 Jun 2024 21:25:05 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: handle dma buffer size soc
+ specific
+
+The mainline MTK ethernet driver has long suffered from rare but
+annoying tx queue timeouts. We think that this is caused by fixed
+dma sizes hardcoded for all SoCs.
+
+We suspect this problem arises from a low number of free TX DMADs,
+i.e. the TX ring being almost full.
+
+The transmit timeout is caused by the Tx queue not waking up. The
+Tx queue stops when the free counter is less than ring->thres, and
+it will wake up once the free counter is greater than ring->thres.
+If the CPU is too late to wake up the Tx queues, it may cause a
+transmit timeout.
+Therefore, we increased the TX and RX DMADs to improve this error
+situation.
+
+Use the dma-size implementation from the SDK in a per-SoC manner.
+Unlike the SDK we have no RSS feature yet, so all RX/TX ring sizes
+should be raised from 512 to 2048 entries, except the fqdma on
+mt7988, to avoid the tx timeout issue.
+
+Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet")
+Suggested-by: Daniel Golle
+Signed-off-by: Frank Wunderlich
+Reviewed-by: Jacob Keller
+Signed-off-by: David S.
Miller +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++------- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +- + 2 files changed, 77 insertions(+), 36 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_et + { + const struct mtk_soc_data *soc = eth->soc; + dma_addr_t phy_ring_tail; +- int cnt = MTK_QDMA_RING_SIZE; ++ int cnt = soc->tx.fq_dma_size; + dma_addr_t dma_addr; +- int i; ++ int i, j, len; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) + eth->scratch_ring = eth->sram_base; +@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_et + cnt * soc->tx.desc_size, + ð->phy_scratch_ring, + GFP_KERNEL); ++ + if (unlikely(!eth->scratch_ring)) + return -ENOMEM; + +- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); +- if (unlikely(!eth->scratch_head)) +- return -ENOMEM; ++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); + +- dma_addr = dma_map_single(eth->dma_dev, +- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, +- DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) +- return -ENOMEM; ++ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) { ++ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH); ++ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); + +- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); ++ if (unlikely(!eth->scratch_head[j])) ++ return -ENOMEM; + +- for (i = 0; i < cnt; i++) { +- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE; +- struct mtk_tx_dma_v2 *txd; +- +- txd = eth->scratch_ring + i * soc->tx.desc_size; +- txd->txd1 = addr; +- if (i < cnt - 1) +- txd->txd2 = eth->phy_scratch_ring + +- (i + 1) * soc->tx.desc_size; +- +- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); +- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) +- txd->txd3 |= TX_DMA_PREP_ADDR64(addr); +- txd->txd4 = 0; +- if (mtk_is_netsys_v2_or_greater(eth)) { +- txd->txd5 = 0; +- txd->txd6 = 0; +- txd->txd7 = 0; +- txd->txd8 = 0; ++ dma_addr = dma_map_single(eth->dma_dev, ++ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, ++ DMA_FROM_DEVICE); ++ ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) ++ return -ENOMEM; ++ ++ for (i = 0; i < cnt; i++) { ++ struct mtk_tx_dma_v2 *txd; ++ ++ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; ++ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; ++ if (j * MTK_FQ_DMA_LENGTH + i < cnt) ++ txd->txd2 = eth->phy_scratch_ring + ++ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size; ++ ++ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); ++ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) ++ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE); ++ ++ txd->txd4 = 0; ++ if (mtk_is_netsys_v2_or_greater(eth)) { ++ txd->txd5 = 0; ++ txd->txd6 = 0; ++ txd->txd7 = 0; ++ txd->txd8 = 0; ++ } + } + } + +@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth * + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + ring_size = MTK_QDMA_RING_SIZE; + else +- ring_size = MTK_DMA_SIZE; ++ ring_size = soc->tx.dma_size; + + ring->buf = kcalloc(ring_size, sizeof(*ring->buf), + GFP_KERNEL); +@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth * + goto no_tx_mem; + + if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { +- ring->dma = eth->sram_base + ring_size * sz; +- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; ++ ring->dma = eth->sram_base + 
soc->tx.fq_dma_size * sz; ++ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; + } else { + ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, + &ring->phys, GFP_KERNEL); +@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth + static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) + { + const struct mtk_reg_map *reg_map = eth->soc->reg_map; ++ const struct mtk_soc_data *soc = eth->soc; + struct mtk_rx_ring *ring; + int rx_data_len, rx_dma_size, tx_ring_size; + int i; +@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth * + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + tx_ring_size = MTK_QDMA_RING_SIZE; + else +- tx_ring_size = MTK_DMA_SIZE; ++ tx_ring_size = soc->tx.dma_size; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) +@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth * + rx_dma_size = MTK_HW_LRO_DMA_SIZE; + } else { + rx_data_len = ETH_DATA_LEN; +- rx_dma_size = MTK_DMA_SIZE; ++ rx_dma_size = soc->rx.dma_size; + } + + ring->frag_size = mtk_max_frag_size(rx_data_len); +@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth + mtk_rx_clean(eth, ð->rx_ring[i], false); + } + +- kfree(eth->scratch_head); ++ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) { ++ kfree(eth->scratch_head[i]); ++ eth->scratch_head[i] = NULL; ++ } + } + + static bool mtk_hw_reset_check(struct mtk_eth *eth) +@@ -5045,11 +5055,14 @@ static const struct mtk_soc_data mt2701_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, ++ .dma_size = MTK_DMA_SIZE(2K), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +@@ -5069,11 +5082,14 @@ static const struct mtk_soc_data mt7621_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, ++ .dma_size = MTK_DMA_SIZE(2K), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +@@ -5095,11 +5111,14 @@ static const struct mtk_soc_data mt7622_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, ++ .dma_size = MTK_DMA_SIZE(2K), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +@@ -5120,11 +5139,14 @@ static const struct mtk_soc_data mt7623_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, ++ .dma_size = MTK_DMA_SIZE(2K), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +@@ -5143,11 +5165,14 @@ static const struct mtk_soc_data mt7629_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = 
sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, ++ .dma_size = MTK_DMA_SIZE(2K), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +@@ -5169,6 +5194,8 @@ static const struct mtk_soc_data mt7981_ + .desc_size = sizeof(struct mtk_tx_dma_v2), + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), +@@ -5176,6 +5203,7 @@ static const struct mtk_soc_data mt7981_ + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), + }, + }; + +@@ -5195,6 +5223,8 @@ static const struct mtk_soc_data mt7986_ + .desc_size = sizeof(struct mtk_tx_dma_v2), + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), +@@ -5202,6 +5232,7 @@ static const struct mtk_soc_data mt7986_ + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), + }, + }; + +@@ -5221,6 +5252,8 @@ static const struct mtk_soc_data mt7988_ + .desc_size = sizeof(struct mtk_tx_dma_v2), + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, ++ .dma_size = MTK_DMA_SIZE(2K), ++ .fq_dma_size = MTK_DMA_SIZE(4K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma_v2), +@@ -5228,6 +5261,7 @@ static const struct mtk_soc_data mt7988_ + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, ++ .dma_size = MTK_DMA_SIZE(2K), + }, + }; + +@@ -5242,6 +5276,7 @@ static const struct mtk_soc_data rt5350_ + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), +@@ -5249,6 +5284,7 @@ static const struct mtk_soc_data rt5350_ + .dma_l4_valid = RX_DMA_L4_VALID_PDMA, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, ++ .dma_size = MTK_DMA_SIZE(2K), + }, + }; + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -32,7 +32,9 @@ + #define MTK_TX_DMA_BUF_LEN 0x3fff + #define MTK_TX_DMA_BUF_LEN_V2 0xffff + #define MTK_QDMA_RING_SIZE 2048 +-#define MTK_DMA_SIZE 512 ++#define MTK_DMA_SIZE(x) (SZ_##x) ++#define MTK_FQ_DMA_HEAD 32 ++#define MTK_FQ_DMA_LENGTH 2048 + #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) + #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) + #define MTK_DMA_DUMMY_DESC 0xffffffff +@@ -1176,6 +1178,8 @@ struct mtk_soc_data { + u32 desc_size; + u32 dma_max_len; + u32 dma_len_offset; ++ u32 dma_size; ++ u32 fq_dma_size; + } tx; + struct { + u32 desc_size; +@@ -1183,6 +1187,7 @@ struct mtk_soc_data { + u32 dma_l4_valid; + u32 dma_max_len; + u32 dma_len_offset; ++ u32 dma_size; + } rx; + }; + +@@ -1264,7 +1269,7 @@ struct mtk_eth { + struct napi_struct rx_napi; + void *scratch_ring; + dma_addr_t phy_scratch_ring; +- void *scratch_head; ++ void *scratch_head[MTK_FQ_DMA_HEAD]; + struct clk *clks[MTK_CLK_MAX]; + + struct mii_bus *mii_bus; diff --git a/target/linux/generic/backport-6.6/752-26-v6.11-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch b/target/linux/generic/backport-6.6/752-26-v6.11-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch new file mode 100644 index 0000000000..07e7e86340 --- /dev/null 
+++ b/target/linux/generic/backport-6.6/752-26-v6.11-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch @@ -0,0 +1,371 @@ +From dee4dd10c79aaca192b73520d8fb64628468ae0f Mon Sep 17 00:00:00 2001 +From: Elad Yifee +Date: Fri, 7 Jun 2024 11:21:50 +0300 +Subject: [PATCH] net: ethernet: mtk_eth_soc: ppe: add support for multiple + PPEs + +Add the missing pieces to allow multiple PPEs units, one for each GMAC. +mtk_gdm_config has been modified to work on targted mac ID, +the inner loop moved outside of the function to allow unrelated +operations like setting the MAC's PPE index. +Introduce a sanity check in flow_offload_replace to account for +non-MTK ingress devices. +Additional field 'ppe_idx' was added to struct mtk_mac in order +to keep track on the assigned PPE unit. + +Signed-off-by: Elad Yifee +Link: https://lore.kernel.org/r/20240607082155.20021-1-eladwf@gmail.com +Signed-off-by: Jakub Kicinski +--- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 112 +++++++++++------- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 +- + .../net/ethernet/mediatek/mtk_ppe_offload.c | 17 ++- + 3 files changed, 92 insertions(+), 45 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_ + .fq_blen = 0x1b2c, + }, + .gdm1_cnt = 0x2400, +- .gdma_to_ppe = 0x4444, ++ .gdma_to_ppe = { ++ [0] = 0x4444, ++ }, + .ppe_base = 0x0c00, + .wdma_base = { + [0] = 0x2800, +@@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_r + .tx_sch_rate = 0x4798, + }, + .gdm1_cnt = 0x1c00, +- .gdma_to_ppe = 0x3333, ++ .gdma_to_ppe = { ++ [0] = 0x3333, ++ [1] = 0x4444, ++ }, + .ppe_base = 0x2000, + .wdma_base = { + [0] = 0x4800, +@@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_r + .tx_sch_rate = 0x4798, + }, + .gdm1_cnt = 0x1c00, +- .gdma_to_ppe = 0x3333, ++ .gdma_to_ppe = { ++ [0] = 0x3333, ++ [1] = 0x4444, ++ [2] = 0xcccc, ++ }, + .ppe_base = 0x2000, + .wdma_base = { + [0] = 0x4800, +@@ -2015,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struc + struct mtk_rx_dma_v2 *rxd, trxd; + int done = 0, bytes = 0; + dma_addr_t dma_addr = DMA_MAPPING_ERROR; ++ int ppe_idx = 0; + + while (done < budget) { + unsigned int pktlen, *rxdcsum; +@@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struc + goto release_desc; + + netdev = eth->netdev[mac]; ++ ppe_idx = eth->mac[mac]->ppe_idx; + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto release_desc; +@@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struc + } + + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) +- mtk_ppe_check_skb(eth->ppe[0], skb, hash); ++ mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); + + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); +@@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth + return 0; + } + +-static void mtk_gdm_config(struct mtk_eth *eth, u32 config) ++static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) + { +- int i; ++ u32 val; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + +- for (i = 0; i < MTK_MAX_DEVS; i++) { +- u32 val; +- +- if (!eth->netdev[i]) +- continue; ++ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); + +- val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); ++ /* default setup the forward port to send frame to PDMA */ ++ val &= ~0xffff; + +- /* default setup the forward port to send frame to PDMA */ +- val &= ~0xffff; ++ /* Enable RX checksum */ ++ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + +- /* Enable RX 
checksum */ +- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; ++ val |= config; + +- val |= config; ++ if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) ++ val |= MTK_GDMA_SPECIAL_TAG; + +- if (netdev_uses_dsa(eth->netdev[i])) +- val |= MTK_GDMA_SPECIAL_TAG; +- +- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); +- } +- /* Reset and enable PSE */ +- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); +- mtk_w32(eth, 0, MTK_RST_GL); ++ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); + } + + +@@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *d + { + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; +- int i, err; ++ struct mtk_mac *target_mac; ++ int i, err, ppe_num; ++ ++ ppe_num = eth->soc->ppe_num; + + err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); + if (err) { +@@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *d + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_start(eth->ppe[i]); + +- gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe +- : MTK_GDMA_TO_PDMA; +- mtk_gdm_config(eth, gdm_config); ++ for (i = 0; i < MTK_MAX_DEVS; i++) { ++ if (!eth->netdev[i]) ++ break; ++ ++ target_mac = netdev_priv(eth->netdev[i]); ++ if (!soc->offload_version) { ++ target_mac->ppe_idx = 0; ++ gdm_config = MTK_GDMA_TO_PDMA; ++ } else if (ppe_num >= 3 && target_mac->id == 2) { ++ target_mac->ppe_idx = 2; ++ gdm_config = soc->reg_map->gdma_to_ppe[2]; ++ } else if (ppe_num >= 2 && target_mac->id == 1) { ++ target_mac->ppe_idx = 1; ++ gdm_config = soc->reg_map->gdma_to_ppe[1]; ++ } else { ++ target_mac->ppe_idx = 0; ++ gdm_config = soc->reg_map->gdma_to_ppe[0]; ++ } ++ mtk_gdm_config(eth, target_mac->id, gdm_config); ++ } ++ /* Reset and enable PSE */ ++ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); ++ mtk_w32(eth, 0, MTK_RST_GL); + + napi_enable(ð->tx_napi); + napi_enable(ð->rx_napi); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); + refcount_set(ð->dma_refcnt, 1); +- } +- else ++ } else { + refcount_inc(ð->dma_refcnt); ++ } + + phylink_start(mac->phylink); + netif_tx_start_all_queues(dev); +@@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *d + if (!refcount_dec_and_test(ð->dma_refcnt)) + return 0; + +- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); ++ for (i = 0; i < MTK_MAX_DEVS; i++) ++ mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); + + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); +@@ -4957,23 +4982,24 @@ static int mtk_probe(struct platform_dev + } + + if (eth->soc->offload_version) { +- u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; ++ u8 ppe_num = eth->soc->ppe_num; + +- num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); +- for (i = 0; i < num_ppe; i++) { +- u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; ++ ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); ++ for (i = 0; i < ppe_num; i++) { ++ u32 ppe_addr = eth->soc->reg_map->ppe_base; + ++ ppe_addr += (i == 2 ? 
0xc00 : i * 0x400); + eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); + + if (!eth->ppe[i]) { + err = -ENOMEM; + goto err_deinit_ppe; + } +- } ++ err = mtk_eth_offload_init(eth, i); + +- err = mtk_eth_offload_init(eth); +- if (err) +- goto err_deinit_ppe; ++ if (err) ++ goto err_deinit_ppe; ++ } + } + + for (i = 0; i < MTK_MAX_DEVS; i++) { +@@ -5076,6 +5102,7 @@ static const struct mtk_soc_data mt7621_ + .required_pctl = false, + .version = 1, + .offload_version = 1, ++ .ppe_num = 1, + .hash_offset = 2, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, + .tx = { +@@ -5104,6 +5131,7 @@ static const struct mtk_soc_data mt7622_ + .required_pctl = false, + .version = 1, + .offload_version = 2, ++ .ppe_num = 1, + .hash_offset = 2, + .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, +@@ -5132,6 +5160,7 @@ static const struct mtk_soc_data mt7623_ + .required_pctl = true, + .version = 1, + .offload_version = 1, ++ .ppe_num = 1, + .hash_offset = 2, + .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, + .disable_pll_modes = true, +@@ -5187,6 +5216,7 @@ static const struct mtk_soc_data mt7981_ + .required_pctl = false, + .version = 2, + .offload_version = 2, ++ .ppe_num = 2, + .hash_offset = 4, + .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, +@@ -5216,6 +5246,7 @@ static const struct mtk_soc_data mt7986_ + .required_pctl = false, + .version = 2, + .offload_version = 2, ++ .ppe_num = 2, + .hash_offset = 4, + .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, +@@ -5245,6 +5276,7 @@ static const struct mtk_soc_data mt7988_ + .required_pctl = false, + .version = 3, + .offload_version = 2, ++ .ppe_num = 3, + .hash_offset = 4, + .has_accounting = true, + .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -1132,7 +1132,7 @@ struct mtk_reg_map { + u32 tx_sch_rate; /* tx scheduler rate control registers */ + } qdma; + u32 gdm1_cnt; +- u32 gdma_to_ppe; ++ u32 gdma_to_ppe[3]; + u32 ppe_base; + u32 wdma_base[3]; + u32 pse_iq_sta; +@@ -1170,6 +1170,7 @@ struct mtk_soc_data { + u8 offload_version; + u8 hash_offset; + u8 version; ++ u8 ppe_num; + u16 foe_entry_size; + netdev_features_t hw_features; + bool has_accounting; +@@ -1294,7 +1295,7 @@ struct mtk_eth { + + struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; + +- struct mtk_ppe *ppe[2]; ++ struct mtk_ppe *ppe[3]; + struct rhashtable flow_table; + + struct bpf_prog __rcu *prog; +@@ -1319,6 +1320,7 @@ struct mtk_eth { + struct mtk_mac { + int id; + phy_interface_t interface; ++ u8 ppe_idx; + int speed; + struct device_node *of_node; + struct phylink *phylink; +@@ -1440,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk + int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); + int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); + +-int mtk_eth_offload_init(struct mtk_eth *eth); ++int mtk_eth_offload_init(struct mtk_eth *eth, u8 id); + int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); + int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls, +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth + int ppe_index) + { + struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ struct net_device *idev = NULL, *odev = NULL; + struct flow_action_entry *act; + struct mtk_flow_data data = {}; + struct mtk_foe_entry foe; +- struct 
net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; + int wed_index = -1; +@@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); ++ if (mtk_is_netsys_v2_or_greater(eth)) { ++ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex); ++ if (idev) { ++ struct mtk_mac *mac = netdev_priv(idev); ++ ++ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num)) ++ return -EINVAL; ++ ++ ppe_index = mac->ppe_idx; ++ } ++ } + } else { + return -EOPNOTSUPP; + } +@@ -630,7 +641,9 @@ int mtk_eth_setup_tc(struct net_device * + } + } + +-int mtk_eth_offload_init(struct mtk_eth *eth) ++int mtk_eth_offload_init(struct mtk_eth *eth, u8 id) + { ++ if (!eth->ppe[id] || !eth->ppe[id]->foe_table) ++ return 0; + return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); + } diff --git a/target/linux/generic/pending-6.6/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-6.6/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch index ca5fe771d1..4fbf6288c8 100644 --- a/target/linux/generic/pending-6.6/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch +++ b/target/linux/generic/pending-6.6/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4984,6 +4984,8 @@ static int mtk_probe(struct platform_dev +@@ -5020,6 +5020,8 @@ static int mtk_probe(struct platform_dev * for NAPI to work */ init_dummy_netdev(ð->dummy_dev); diff --git a/target/linux/generic/pending-6.6/732-00-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch b/target/linux/generic/pending-6.6/732-00-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch index 36abf45798..c3297a1087 100644 --- a/target/linux/generic/pending-6.6/732-00-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch +++ b/target/linux/generic/pending-6.6/732-00-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch @@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -1329,6 +1329,22 @@ struct mtk_mac { +@@ -1336,6 +1336,22 @@ struct mtk_mac { /* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ extern const struct of_device_id of_mtk_match[]; @@ -34,7 +34,7 @@ Signed-off-by: Felix Fietkau static inline bool mtk_is_netsys_v1(struct mtk_eth *eth) { return eth->soc->version == 1; -@@ -1343,6 +1359,7 @@ static inline bool mtk_is_netsys_v3_or_g +@@ -1350,6 +1366,7 @@ static inline bool mtk_is_netsys_v3_or_g { return eth->soc->version > 2; } diff --git a/target/linux/generic/pending-6.6/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch b/target/linux/generic/pending-6.6/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch index 438f83953a..53187934d0 100644 --- a/target/linux/generic/pending-6.6/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch +++ b/target/linux/generic/pending-6.6/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch @@ -24,7 +24,7 @@ Signed-off-by: Felix Fietkau #include #include "mtk_eth_soc.h" -@@ -1581,12 +1582,28 @@ static void mtk_wake_queue(struct mtk_et +@@ -1596,12 +1597,28 @@ static void mtk_wake_queue(struct mtk_et } } @@ -53,7 +53,7 @@ Signed-off-by: Felix Fietkau bool gso = false; int tx_num; -@@ -1608,6 +1625,18 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1623,6 +1640,18 @@ static netdev_tx_t mtk_start_xmit(struct return NETDEV_TX_BUSY; } @@ -72,7 +72,7 @@ Signed-off-by: Felix Fietkau /* TSO: fill MSS info in tcp checksum field */ if (skb_is_gso(skb)) { if (skb_cow_head(skb, 0)) { -@@ -1623,8 +1652,14 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1638,8 +1667,14 @@ static netdev_tx_t mtk_start_xmit(struct } } @@ -91,7 +91,7 @@ Signed-off-by: Felix Fietkau netif_tx_stop_all_queues(dev); --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -268,7 +268,7 @@ +@@ -270,7 +270,7 @@ #define MTK_CHK_DDONE_EN BIT(28) #define MTK_DMAD_WR_WDONE BIT(26) #define MTK_WCOMP_EN BIT(24) diff --git a/target/linux/generic/pending-6.6/732-02-net-ethernet-mtk_eth_soc-set-NETIF_F_ALL_TSO.patch b/target/linux/generic/pending-6.6/732-02-net-ethernet-mtk_eth_soc-set-NETIF_F_ALL_TSO.patch index 11a81dd0bf..bd7a1b96f2 100644 --- a/target/linux/generic/pending-6.6/732-02-net-ethernet-mtk_eth_soc-set-NETIF_F_ALL_TSO.patch +++ b/target/linux/generic/pending-6.6/732-02-net-ethernet-mtk_eth_soc-set-NETIF_F_ALL_TSO.patch @@ -9,7 +9,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -47,8 +47,7 @@ +@@ -49,8 +49,7 @@ #define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_TX | \ diff --git a/target/linux/generic/pending-6.6/733-01-net-ethernet-mtk_eth_soc-use-napi_build_skb.patch b/target/linux/generic/pending-6.6/733-01-net-ethernet-mtk_eth_soc-use-napi_build_skb.patch index 1fe291fce9..82ba768fd5 100644 --- a/target/linux/generic/pending-6.6/733-01-net-ethernet-mtk_eth_soc-use-napi_build_skb.patch +++ b/target/linux/generic/pending-6.6/733-01-net-ethernet-mtk_eth_soc-use-napi_build_skb.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2123,7 +2123,7 @@ static int mtk_poll_rx(struct napi_struc +@@ -2140,7 +2140,7 @@ static int mtk_poll_rx(struct napi_struc if (ret != XDP_PASS) goto skip_rx; @@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau if (unlikely(!skb)) { page_pool_put_full_page(ring->page_pool, page, true); -@@ -2161,7 +2161,7 @@ static int mtk_poll_rx(struct napi_struc +@@ -2178,7 +2178,7 @@ static int 
mtk_poll_rx(struct napi_struc dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64), ring->buf_size, DMA_FROM_DEVICE); diff --git a/target/linux/generic/pending-6.6/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch b/target/linux/generic/pending-6.6/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch index ba7699ecad..89dc87e1a2 100644 --- a/target/linux/generic/pending-6.6/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch +++ b/target/linux/generic/pending-6.6/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch @@ -214,7 +214,7 @@ Signed-off-by: Daniel Golle #include #include #include -@@ -261,12 +263,8 @@ static const char * const mtk_clks_sourc +@@ -270,12 +272,8 @@ static const char * const mtk_clks_sourc "ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0", @@ -227,7 +227,7 @@ Signed-off-by: Daniel Golle "top_eth_gmii_sel", "top_eth_refck_50m_sel", "top_eth_sys_200m_sel", -@@ -509,6 +507,30 @@ static void mtk_setup_bridge_switch(stru +@@ -518,6 +516,30 @@ static void mtk_setup_bridge_switch(stru MTK_GSW_CFG); } @@ -258,7 +258,7 @@ Signed-off-by: Daniel Golle static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { -@@ -517,6 +539,21 @@ static struct phylink_pcs *mtk_mac_selec +@@ -526,6 +548,21 @@ static struct phylink_pcs *mtk_mac_selec struct mtk_eth *eth = mac->hw; unsigned int sid; @@ -280,7 +280,7 @@ Signed-off-by: Daniel Golle if (interface == PHY_INTERFACE_MODE_SGMII || phy_interface_mode_is_8023z(interface)) { sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? -@@ -568,7 +605,22 @@ static void mtk_mac_config(struct phylin +@@ -577,7 +614,22 @@ static void mtk_mac_config(struct phylin goto init_err; } break; @@ -303,7 +303,7 @@ Signed-off-by: Daniel Golle break; default: goto err_phy; -@@ -615,8 +667,6 @@ static void mtk_mac_config(struct phylin +@@ -624,8 +676,6 @@ static void mtk_mac_config(struct phylin val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); val |= SYSCFG0_GE_MODE(ge_mode, mac->id); regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); @@ -312,7 +312,7 @@ Signed-off-by: Daniel Golle } /* SGMII */ -@@ -633,21 +683,40 @@ static void mtk_mac_config(struct phylin +@@ -642,21 +692,40 @@ static void mtk_mac_config(struct phylin /* Save the syscfg0 value for mac_finish */ mac->syscfg0 = val; @@ -360,7 +360,7 @@ Signed-off-by: Daniel Golle return; err_phy: -@@ -660,6 +729,18 @@ init_err: +@@ -669,6 +738,18 @@ init_err: mac->id, phy_modes(state->interface), err); } @@ -379,7 +379,7 @@ Signed-off-by: Daniel Golle static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { -@@ -668,6 +749,10 @@ static int mtk_mac_finish(struct phylink +@@ -677,6 +758,10 @@ static int mtk_mac_finish(struct phylink struct mtk_eth *eth = mac->hw; u32 mcr_cur, mcr_new; @@ -390,7 +390,7 @@ Signed-off-by: Daniel Golle /* Enable SGMII */ if (interface == PHY_INTERFACE_MODE_SGMII || phy_interface_mode_is_8023z(interface)) -@@ -692,10 +777,14 @@ static void mtk_mac_link_down(struct phy +@@ -701,10 +786,14 @@ static void mtk_mac_link_down(struct phy { struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); @@ -408,7 +408,7 @@ Signed-off-by: Daniel Golle } static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, -@@ -767,13 +856,11 @@ static void mtk_set_queue_speed(struct m +@@ -776,13 +865,11 @@ static void mtk_set_queue_speed(struct m mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); } @@ -426,7 +426,7 @@ Signed-off-by: 
Daniel Golle u32 mcr; mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); -@@ -807,9 +894,63 @@ static void mtk_mac_link_up(struct phyli +@@ -816,9 +903,63 @@ static void mtk_mac_link_up(struct phyli mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } @@ -490,9 +490,9 @@ Signed-off-by: Daniel Golle .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, .mac_link_up = mtk_mac_link_up, -@@ -3393,6 +3534,9 @@ static int mtk_open(struct net_device *d - struct mtk_eth *eth = mac->hw; - int i, err; +@@ -3407,6 +3548,9 @@ static int mtk_open(struct net_device *d + + ppe_num = eth->soc->ppe_num; + if (mac->pextp) + phy_power_on(mac->pextp); @@ -500,7 +500,7 @@ Signed-off-by: Daniel Golle err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, -@@ -3522,6 +3666,9 @@ static int mtk_stop(struct net_device *d +@@ -3557,6 +3701,9 @@ static int mtk_stop(struct net_device *d for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) mtk_ppe_stop(eth->ppe[i]); @@ -510,7 +510,7 @@ Signed-off-by: Daniel Golle return 0; } -@@ -4519,6 +4666,7 @@ static const struct net_device_ops mtk_n +@@ -4554,6 +4701,7 @@ static const struct net_device_ops mtk_n static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { const __be32 *_id = of_get_property(np, "reg", NULL); @@ -518,7 +518,7 @@ Signed-off-by: Daniel Golle phy_interface_t phy_mode; struct phylink *phylink; struct mtk_mac *mac; -@@ -4555,16 +4703,41 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4590,16 +4738,41 @@ static int mtk_add_mac(struct mtk_eth *e mac->id = id; mac->hw = eth; mac->of_node = np; @@ -568,7 +568,7 @@ Signed-off-by: Daniel Golle } memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); -@@ -4647,8 +4820,21 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4682,8 +4855,21 @@ static int mtk_add_mac(struct mtk_eth *e phy_interface_zero(mac->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_INTERNAL, mac->phylink_config.supported_interfaces); @@ -590,7 +590,7 @@ Signed-off-by: Daniel Golle phylink = phylink_create(&mac->phylink_config, of_fwnode_handle(mac->of_node), phy_mode, &mtk_phylink_ops); -@@ -4699,6 +4885,26 @@ free_netdev: +@@ -4734,6 +4920,26 @@ free_netdev: return err; } @@ -617,7 +617,7 @@ Signed-off-by: Daniel Golle void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) { struct net_device *dev, *tmp; -@@ -4845,7 +5051,8 @@ static int mtk_probe(struct platform_dev +@@ -4880,7 +5086,8 @@ static int mtk_probe(struct platform_dev regmap_write(cci, 0, 3); } @@ -627,7 +627,7 @@ Signed-off-by: Daniel Golle err = mtk_sgmii_init(eth); if (err) -@@ -4956,6 +5163,24 @@ static int mtk_probe(struct platform_dev +@@ -4991,6 +5198,24 @@ static int mtk_probe(struct platform_dev } } @@ -652,7 +652,7 @@ Signed-off-by: Daniel Golle if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { err = devm_request_irq(eth->dev, eth->irq[0], mtk_handle_irq, 0, -@@ -5058,6 +5283,11 @@ static int mtk_remove(struct platform_de +@@ -5094,6 +5319,11 @@ static int mtk_remove(struct platform_de mtk_stop(eth->netdev[i]); mac = netdev_priv(eth->netdev[i]); phylink_disconnect_phy(mac->phylink); @@ -674,7 +674,7 @@ Signed-off-by: Daniel Golle #include #include #include -@@ -502,6 +503,21 @@ +@@ -504,6 +505,21 @@ #define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED) #define INTF_MODE_RGMII_10_100 0 @@ -696,7 +696,7 @@ Signed-off-by: Daniel Golle /* GPIO port control registers for GMAC 2*/ #define GPIO_OD33_CTRL8 0x4c0 #define GPIO_BIAS_CTRL 0xed0 -@@ -527,6 
+543,7 @@ +@@ -529,6 +545,7 @@ #define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK) #define SYSCFG0_SGMII_GMAC1_V2 BIT(9) #define SYSCFG0_SGMII_GMAC2_V2 BIT(8) @@ -704,7 +704,7 @@ Signed-off-by: Daniel Golle /* ethernet subsystem clock register */ -@@ -565,6 +582,11 @@ +@@ -567,6 +584,11 @@ #define GEPHY_MAC_SEL BIT(1) /* Top misc registers */ @@ -716,7 +716,7 @@ Signed-off-by: Daniel Golle #define USB_PHY_SWITCH_REG 0x218 #define QPHY_SEL_MASK GENMASK(1, 0) #define SGMII_QPHY_SEL 0x2 -@@ -589,6 +611,8 @@ +@@ -591,6 +613,8 @@ #define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c) #define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110) @@ -725,7 +725,7 @@ Signed-off-by: Daniel Golle #define MTK_FE_CDM1_FSM 0x220 #define MTK_FE_CDM2_FSM 0x224 #define MTK_FE_CDM3_FSM 0x238 -@@ -597,6 +621,11 @@ +@@ -599,6 +623,11 @@ #define MTK_FE_CDM6_FSM 0x328 #define MTK_FE_GDM1_FSM 0x228 #define MTK_FE_GDM2_FSM 0x22C @@ -737,7 +737,7 @@ Signed-off-by: Daniel Golle #define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100)) -@@ -721,12 +750,8 @@ enum mtk_clks_map { +@@ -723,12 +752,8 @@ enum mtk_clks_map { MTK_CLK_ETHWARP_WOCPU2, MTK_CLK_ETHWARP_WOCPU1, MTK_CLK_ETHWARP_WOCPU0, @@ -750,7 +750,7 @@ Signed-off-by: Daniel Golle MTK_CLK_TOP_ETH_GMII_SEL, MTK_CLK_TOP_ETH_REFCK_50M_SEL, MTK_CLK_TOP_ETH_SYS_200M_SEL, -@@ -797,19 +822,9 @@ enum mtk_clks_map { +@@ -799,19 +824,9 @@ enum mtk_clks_map { BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \ BIT_ULL(MTK_CLK_CRYPTO) | \ @@ -770,7 +770,7 @@ Signed-off-by: Daniel Golle BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \ -@@ -943,6 +958,8 @@ enum mkt_eth_capabilities { +@@ -945,6 +960,8 @@ enum mkt_eth_capabilities { MTK_RGMII_BIT = 0, MTK_TRGMII_BIT, MTK_SGMII_BIT, @@ -779,7 +779,7 @@ Signed-off-by: Daniel Golle MTK_ESW_BIT, MTK_GEPHY_BIT, MTK_MUX_BIT, -@@ -963,8 +980,11 @@ enum mkt_eth_capabilities { +@@ -965,8 +982,11 @@ enum mkt_eth_capabilities { MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT, MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT, @@ -791,7 +791,7 @@ Signed-off-by: Daniel Golle /* PATH BITS */ MTK_ETH_PATH_GMAC1_RGMII_BIT, -@@ -972,14 +992,21 @@ enum mkt_eth_capabilities { +@@ -974,14 +994,21 @@ enum mkt_eth_capabilities { MTK_ETH_PATH_GMAC1_SGMII_BIT, MTK_ETH_PATH_GMAC2_RGMII_BIT, MTK_ETH_PATH_GMAC2_SGMII_BIT, @@ -813,7 +813,7 @@ Signed-off-by: Daniel Golle #define MTK_ESW BIT_ULL(MTK_ESW_BIT) #define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT) #define MTK_MUX BIT_ULL(MTK_MUX_BIT) -@@ -1002,10 +1029,16 @@ enum mkt_eth_capabilities { +@@ -1004,10 +1031,16 @@ enum mkt_eth_capabilities { BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) #define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) @@ -830,7 +830,7 @@ Signed-off-by: Daniel Golle /* Supported path present on SoCs */ #define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT) -@@ -1013,8 +1046,13 @@ enum mkt_eth_capabilities { +@@ -1015,8 +1048,13 @@ enum mkt_eth_capabilities { #define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT) #define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT) #define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT) @@ -844,7 +844,7 @@ Signed-off-by: Daniel Golle #define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII) #define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII) -@@ -1022,7 +1060,12 @@ enum mkt_eth_capabilities { +@@ -1024,7 +1062,12 @@ enum 
mkt_eth_capabilities { #define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII) #define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII) #define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY) @@ -857,7 +857,7 @@ Signed-off-by: Daniel Golle /* MUXes present on SoCs */ /* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */ -@@ -1041,10 +1084,20 @@ enum mkt_eth_capabilities { +@@ -1043,10 +1086,20 @@ enum mkt_eth_capabilities { (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \ MTK_SHARED_SGMII) @@ -878,7 +878,7 @@ Signed-off-by: Daniel Golle #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) #define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \ -@@ -1076,8 +1129,12 @@ enum mkt_eth_capabilities { +@@ -1078,8 +1131,12 @@ enum mkt_eth_capabilities { MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ MTK_RSTCTRL_PPE1 | MTK_SRAM) @@ -893,7 +893,7 @@ Signed-off-by: Daniel Golle struct mtk_tx_dma_desc_info { dma_addr_t addr; -@@ -1317,6 +1374,9 @@ struct mtk_mac { +@@ -1324,6 +1381,9 @@ struct mtk_mac { struct device_node *of_node; struct phylink *phylink; struct phylink_config phylink_config; @@ -903,7 +903,7 @@ Signed-off-by: Daniel Golle struct mtk_eth *hw; struct mtk_hw_stats *hw_stats; __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; -@@ -1440,6 +1500,19 @@ static inline u32 mtk_get_ib2_multicast_ +@@ -1447,6 +1507,19 @@ static inline u32 mtk_get_ib2_multicast_ return MTK_FOE_IB2_MULTICAST; } @@ -923,7 +923,7 @@ Signed-off-by: Daniel Golle /* read the hardware status register */ void mtk_stats_update_mac(struct mtk_mac *mac); -@@ -1448,8 +1521,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne +@@ -1455,8 +1528,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg); int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); @@ -932,5 +932,5 @@ Signed-off-by: Daniel Golle int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_usxgmii_path_setup(struct mtk_eth *eth, int mac_id); - int mtk_eth_offload_init(struct mtk_eth *eth); + int mtk_eth_offload_init(struct mtk_eth *eth, u8 id); int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
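
For reference, the stop/wake behaviour described in the first backported commit boils down to a threshold check on the TX ring's free-descriptor counter. Below is a minimal sketch of that logic, not the actual mtk_eth_soc code: the structure and helper names (example_tx_ring, free_count, thres, example_tx_maybe_*) are assumptions made for this illustration; only the netif_tx_stop_all_queues()/netif_tx_wake_all_queues() calls are real kernel API. With a fixed 512-entry ring the free counter crosses the threshold far more often, which is why the backport raises the per-SoC sizes to 2048 entries (4096 for the mt7988 FQ DMA).

#include <linux/atomic.h>
#include <linux/netdevice.h>

/* Illustrative sketch only -- names are hypothetical, not taken from the
 * driver. It models the commit message: the queue stops when the free
 * counter drops below ring->thres and wakes once it rises above it.
 */
struct example_tx_ring {
	atomic_t free_count;	/* free TX descriptors (DMADs) left in the ring */
	int thres;		/* stop/wake threshold */
};

/* Transmit path: stop the queues when the ring is almost full. */
static void example_tx_maybe_stop(struct net_device *dev,
				  struct example_tx_ring *ring)
{
	if (atomic_read(&ring->free_count) < ring->thres)
		netif_tx_stop_all_queues(dev);
}

/* TX completion path: wake the queues once enough descriptors are free
 * again. If this wake-up comes too late -- easier to hit with a small
 * fixed-size ring -- the stack reports a transmit timeout.
 */
static void example_tx_maybe_wake(struct net_device *dev,
				  struct example_tx_ring *ring)
{
	if (atomic_read(&ring->free_count) > ring->thres)
		netif_tx_wake_all_queues(dev);
}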