+++ /dev/null
-From c57e558194430d10d5e5f4acd8a8655b68dade13 Mon Sep 17 00:00:00 2001
-From: Frank Wunderlich <frank-w@public-files.de>
-Date: Mon, 3 Jun 2024 21:25:05 +0200
-Subject: [PATCH] net: ethernet: mtk_eth_soc: handle dma buffer size soc
- specific
-
-The mainline MTK ethernet driver suffers long time from rarly but
-annoying tx queue timeouts. We think that this is caused by fixed
-dma sizes hardcoded for all SoCs.
-
-We suspect this problem arises from a low level of free TX DMADs,
-the TX Ring alomost full.
-
-The transmit timeout is caused by the Tx queue not waking up. The
-Tx queue stops when the free counter is less than ring->thres, and
-it will wake up once the free counter is greater than ring->thres.
-If the CPU is too late to wake up the Tx queues, it may cause a
-transmit timeout.
-Therefore, we increased the TX and RX DMADs to improve this error
-situation.
-
-Use the dma-size implementation from SDK in a per SoC manner. In
-difference to SDK we have no RSS feature yet, so all RX/TX sizes
-should be raised from 512 to 2048 byte except fqdma on mt7988 to
-avoid the tx timeout issue.
-
-Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet")
-Suggested-by: Daniel Golle <daniel@makrotopia.org>
-Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
-Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++-------
- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +-
- 2 files changed, 77 insertions(+), 36 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1071,9 +1071,9 @@ static int mtk_init_fq_dma(struct mtk_et
- {
- const struct mtk_soc_data *soc = eth->soc;
- dma_addr_t phy_ring_tail;
-- int cnt = MTK_QDMA_RING_SIZE;
-+ int cnt = soc->tx.fq_dma_size;
- dma_addr_t dma_addr;
-- int i;
-+ int i, j, len;
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
- eth->scratch_ring = eth->sram_base;
-@@ -1082,40 +1082,46 @@ static int mtk_init_fq_dma(struct mtk_et
- cnt * soc->tx.desc_size,
- ð->phy_scratch_ring,
- GFP_KERNEL);
-+
- if (unlikely(!eth->scratch_ring))
- return -ENOMEM;
-
-- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-- if (unlikely(!eth->scratch_head))
-- return -ENOMEM;
-+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
-
-- dma_addr = dma_map_single(eth->dma_dev,
-- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
-- DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-- return -ENOMEM;
-+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
-+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
-+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-
-- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
-+ if (unlikely(!eth->scratch_head[j]))
-+ return -ENOMEM;
-
-- for (i = 0; i < cnt; i++) {
-- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-- struct mtk_tx_dma_v2 *txd;
--
-- txd = eth->scratch_ring + i * soc->tx.desc_size;
-- txd->txd1 = addr;
-- if (i < cnt - 1)
-- txd->txd2 = eth->phy_scratch_ring +
-- (i + 1) * soc->tx.desc_size;
--
-- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
-- txd->txd4 = 0;
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-- txd->txd5 = 0;
-- txd->txd6 = 0;
-- txd->txd7 = 0;
-- txd->txd8 = 0;
-+ dma_addr = dma_map_single(eth->dma_dev,
-+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
-+ DMA_FROM_DEVICE);
-+
-+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-+ return -ENOMEM;
-+
-+ for (i = 0; i < cnt; i++) {
-+ struct mtk_tx_dma_v2 *txd;
-+
-+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
-+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-+ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
-+ txd->txd2 = eth->phy_scratch_ring +
-+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
-+
-+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
-+
-+ txd->txd4 = 0;
-+ if (mtk_is_netsys_v2_or_greater(eth)) {
-+ txd->txd5 = 0;
-+ txd->txd6 = 0;
-+ txd->txd7 = 0;
-+ txd->txd8 = 0;
-+ }
- }
- }
-
-@@ -2386,7 +2392,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
- ring_size = MTK_QDMA_RING_SIZE;
- else
-- ring_size = MTK_DMA_SIZE;
-+ ring_size = soc->tx.dma_size;
-
- ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
- GFP_KERNEL);
-@@ -2394,8 +2400,8 @@ static int mtk_tx_alloc(struct mtk_eth *
- goto no_tx_mem;
-
- if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-- ring->dma = eth->sram_base + ring_size * sz;
-- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
-+ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
-+ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
- } else {
- ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
- &ring->phys, GFP_KERNEL);
-@@ -2517,6 +2523,7 @@ static void mtk_tx_clean(struct mtk_eth
- static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
- {
- const struct mtk_reg_map *reg_map = eth->soc->reg_map;
-+ const struct mtk_soc_data *soc = eth->soc;
- struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size, tx_ring_size;
- int i;
-@@ -2524,7 +2531,7 @@ static int mtk_rx_alloc(struct mtk_eth *
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- tx_ring_size = MTK_QDMA_RING_SIZE;
- else
-- tx_ring_size = MTK_DMA_SIZE;
-+ tx_ring_size = soc->tx.dma_size;
-
- if (rx_flag == MTK_RX_FLAGS_QDMA) {
- if (ring_no)
-@@ -2539,7 +2546,7 @@ static int mtk_rx_alloc(struct mtk_eth *
- rx_dma_size = MTK_HW_LRO_DMA_SIZE;
- } else {
- rx_data_len = ETH_DATA_LEN;
-- rx_dma_size = MTK_DMA_SIZE;
-+ rx_dma_size = soc->rx.dma_size;
- }
-
- ring->frag_size = mtk_max_frag_size(rx_data_len);
-@@ -3066,7 +3073,10 @@ static void mtk_dma_free(struct mtk_eth
- mtk_rx_clean(eth, ð->rx_ring[i], false);
- }
-
-- kfree(eth->scratch_head);
-+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
-+ kfree(eth->scratch_head[i]);
-+ eth->scratch_head[i] = NULL;
-+ }
- }
-
- static bool mtk_hw_reset_check(struct mtk_eth *eth)
-@@ -4952,11 +4962,14 @@ static const struct mtk_soc_data mt2701_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
- .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID,
-+ .dma_size = MTK_DMA_SIZE(2K),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -4976,11 +4989,14 @@ static const struct mtk_soc_data mt7621_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
- .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID,
-+ .dma_size = MTK_DMA_SIZE(2K),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5002,11 +5018,14 @@ static const struct mtk_soc_data mt7622_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
- .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID,
-+ .dma_size = MTK_DMA_SIZE(2K),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5027,11 +5046,14 @@ static const struct mtk_soc_data mt7623_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
- .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID,
-+ .dma_size = MTK_DMA_SIZE(2K),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5050,11 +5072,14 @@ static const struct mtk_soc_data mt7629_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
- .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID,
-+ .dma_size = MTK_DMA_SIZE(2K),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5076,6 +5101,8 @@ static const struct mtk_soc_data mt7981_
- .desc_size = sizeof(struct mtk_tx_dma_v2),
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
-@@ -5083,6 +5110,7 @@ static const struct mtk_soc_data mt7981_
- .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
- },
- };
-
-@@ -5102,6 +5130,8 @@ static const struct mtk_soc_data mt7986_
- .desc_size = sizeof(struct mtk_tx_dma_v2),
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
-@@ -5109,6 +5139,7 @@ static const struct mtk_soc_data mt7986_
- .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
- },
- };
-
-@@ -5128,6 +5159,8 @@ static const struct mtk_soc_data mt7988_
- .desc_size = sizeof(struct mtk_tx_dma_v2),
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
-+ .dma_size = MTK_DMA_SIZE(2K),
-+ .fq_dma_size = MTK_DMA_SIZE(4K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma_v2),
-@@ -5135,6 +5168,7 @@ static const struct mtk_soc_data mt7988_
- .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
-+ .dma_size = MTK_DMA_SIZE(2K),
- },
- };
-
-@@ -5149,6 +5183,7 @@ static const struct mtk_soc_data rt5350_
- .desc_size = sizeof(struct mtk_tx_dma),
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
- },
- .rx = {
- .desc_size = sizeof(struct mtk_rx_dma),
-@@ -5156,6 +5191,7 @@ static const struct mtk_soc_data rt5350_
- .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
-+ .dma_size = MTK_DMA_SIZE(2K),
- },
- };
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -32,7 +32,9 @@
- #define MTK_TX_DMA_BUF_LEN 0x3fff
- #define MTK_TX_DMA_BUF_LEN_V2 0xffff
- #define MTK_QDMA_RING_SIZE 2048
--#define MTK_DMA_SIZE 512
-+#define MTK_DMA_SIZE(x) (SZ_##x)
-+#define MTK_FQ_DMA_HEAD 32
-+#define MTK_FQ_DMA_LENGTH 2048
- #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)
- #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
- #define MTK_DMA_DUMMY_DESC 0xffffffff
-@@ -1173,6 +1175,8 @@ struct mtk_soc_data {
- u32 desc_size;
- u32 dma_max_len;
- u32 dma_len_offset;
-+ u32 dma_size;
-+ u32 fq_dma_size;
- } tx;
- struct {
- u32 desc_size;
-@@ -1180,6 +1184,7 @@ struct mtk_soc_data {
- u32 dma_l4_valid;
- u32 dma_max_len;
- u32 dma_len_offset;
-+ u32 dma_size;
- } rx;
- };
-
-@@ -1261,7 +1266,7 @@ struct mtk_eth {
- struct napi_struct rx_napi;
- void *scratch_ring;
- dma_addr_t phy_scratch_ring;
-- void *scratch_head;
-+ void *scratch_head[MTK_FQ_DMA_HEAD];
- struct clk *clks[MTK_CLK_MAX];
-
- struct mii_bus *mii_bus;
+++ /dev/null
-From ca18300e00d584d5693127eb60c108b84883b8ac Mon Sep 17 00:00:00 2001
-From: Shengyu Qu <wiagn233@outlook.com>
-Date: Fri, 5 Jul 2024 01:26:26 +0800
-Subject: [PATCH] net: ethernet: mtk_ppe: Change PPE entries number to 16K
-
-MT7981,7986 and 7988 all supports 32768 PPE entries, and MT7621/MT7620
-supports 16384 PPE entries, but only set to 8192 entries in driver. So
-incrase max entries to 16384 instead.
-
-Signed-off-by: Elad Yifee <eladwf@gmail.com>
-Signed-off-by: Shengyu Qu <wiagn233@outlook.com>
-Reviewed-by: Simon Horman <horms@kernel.org>
-Link: https://patch.msgid.link/TY3P286MB261103F937DE4EEB0F88437D98DE2@TY3P286MB2611.JPNP286.PROD.OUTLOOK.COM
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe.h
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
-@@ -8,7 +8,7 @@
- #include <linux/bitfield.h>
- #include <linux/rhashtable.h>
-
--#define MTK_PPE_ENTRIES_SHIFT 3
-+#define MTK_PPE_ENTRIES_SHIFT 4
- #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
- #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
- #define MTK_PPE_WAIT_TIMEOUT_US 1000000
+++ /dev/null
-From 064fbc4e9b5a6dbda7fe7b67dc7e9e95d31f8d75 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <daniel@makrotopia.org>
-Date: Thu, 4 Jul 2024 11:14:55 +0100
-Subject: [PATCH] net: ethernet: mtk_eth_soc: implement .{get,set}_pauseparam
- ethtool ops
-
-Implement operations to get and set flow-control link parameters.
-Both is done by simply calling phylink_ethtool_{get,set}_pauseparam().
-Fix whitespace in mtk_ethtool_ops while at it.
-
-Signed-off-by: Daniel Golle <daniel@makrotopia.org>
-Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
-Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
-Tested-by: Rui Salvaterra <rsalvaterra@gmail.com>
-Link: https://patch.msgid.link/e3ece47323444631d6cb479f32af0dfd6d145be0.1720088047.git.daniel@makrotopia.org
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 18 +++++++++++++++++-
- 1 file changed, 17 insertions(+), 1 deletion(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4361,6 +4361,20 @@ static int mtk_set_rxnfc(struct net_devi
- return ret;
- }
-
-+static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
-+{
-+ struct mtk_mac *mac = netdev_priv(dev);
-+
-+ phylink_ethtool_get_pauseparam(mac->phylink, pause);
-+}
-+
-+static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
-+{
-+ struct mtk_mac *mac = netdev_priv(dev);
-+
-+ return phylink_ethtool_set_pauseparam(mac->phylink, pause);
-+}
-+
- static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
- {
-@@ -4389,8 +4403,10 @@ static const struct ethtool_ops mtk_etht
- .get_strings = mtk_get_strings,
- .get_sset_count = mtk_get_sset_count,
- .get_ethtool_stats = mtk_get_ethtool_stats,
-+ .get_pauseparam = mtk_get_pauseparam,
-+ .set_pauseparam = mtk_set_pauseparam,
- .get_rxnfc = mtk_get_rxnfc,
-- .set_rxnfc = mtk_set_rxnfc,
-+ .set_rxnfc = mtk_set_rxnfc,
- };
-
- static const struct net_device_ops mtk_netdev_ops = {
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 18 Jan 2024 12:51:32 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: fix WED + wifi reset
+
+The WLAN + WED reset sequence relies on being able to receive interrupts from
+the card, in order to synchronize individual steps with the firmware.
+When WED is stopped, leave interrupts running and rely on the driver turning
+off unwanted ones.
+WED DMA also needs to be disabled before resetting.
+
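+With this change the teardown in mtk_wed_stop() roughly becomes
+(simplified sketch; see the hunks below for the actual code):
+
+	mtk_wed_dma_disable(dev);	/* quiesce DMA before reset */
+	mtk_wed_set_ext_int(dev, false);
+	/* clear the WPDMA/WDMA trigger registers, but leave
+	 * MTK_WED_WPDMA_INT_MASK untouched so the WLAN driver can
+	 * still receive the interrupts it needs during reset
+	 */
+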
+Fixes: f78cd9c783e0 ("net: ethernet: mtk_wed: update mtk_wed_stop")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -1073,13 +1073,13 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ static void
+ mtk_wed_stop(struct mtk_wed_device *dev)
+ {
++ mtk_wed_dma_disable(dev);
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+@@ -1092,7 +1092,6 @@ static void
+ mtk_wed_deinit(struct mtk_wed_device *dev)
+ {
+ mtk_wed_stop(dev);
+- mtk_wed_dma_disable(dev);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+@@ -2604,9 +2603,6 @@ mtk_wed_irq_get(struct mtk_wed_device *d
+ static void
+ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+ {
+- if (!dev->running)
+- return;
+-
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+ }
--- /dev/null
+From c57e558194430d10d5e5f4acd8a8655b68dade13 Mon Sep 17 00:00:00 2001
+From: Frank Wunderlich <frank-w@public-files.de>
+Date: Mon, 3 Jun 2024 21:25:05 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: handle dma buffer size soc
+ specific
+
+The mainline MTK ethernet driver has long suffered from rare but
+annoying TX queue timeouts. We think these are caused by the fixed
+DMA ring sizes hardcoded for all SoCs.
+
+We suspect the problem arises from a low number of free TX DMADs,
+i.e. the TX ring being almost full.
+
+The transmit timeout is caused by the TX queue not waking up. The
+TX queue stops when the free counter is less than ring->thres, and
+it wakes up again once the free counter is greater than ring->thres.
+If the CPU is too late to wake up the TX queues, a transmit timeout
+can result. Therefore, increase the number of TX and RX DMADs to
+mitigate this.
+
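+To illustrate the above, the stop/wake logic behaves roughly as
+follows (simplified sketch, not the literal driver code; names as
+used in the description above):
+
+	/* xmit path: stop the queue when few free DMADs are left */
+	if (atomic_read(&ring->free_count) < ring->thres)
+		netif_tx_stop_all_queues(dev);
+
+	/* completion path: wake it up once enough DMADs were freed */
+	if (netif_queue_stopped(dev) &&
+	    atomic_read(&ring->free_count) > ring->thres)
+		netif_tx_wake_all_queues(dev);
+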
+Use the DMA-size implementation from the SDK in a per-SoC manner.
+Unlike the SDK we have no RSS support yet, so all RX/TX ring sizes
+are raised from 512 to 2048 entries (FQDMA on MT7988 goes to 4096),
+to avoid the TX timeout issue.
+
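+The new per-SoC sizes are expressed via a small helper macro that
+expands to the kernel's SZ_* constants, so the values work out as:
+
+	MTK_DMA_SIZE(2K) -> SZ_2K -> 2048 descriptors
+	MTK_DMA_SIZE(4K) -> SZ_4K -> 4096 descriptors (FQDMA on MT7988)
+
+The FQ scratch buffer is now allocated in chunks of MTK_FQ_DMA_LENGTH
+entries, tracked in scratch_head[MTK_FQ_DMA_HEAD], instead of one
+large contiguous buffer.
+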
+Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet")
+Suggested-by: Daniel Golle <daniel@makrotopia.org>
+Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++-------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +-
+ 2 files changed, 77 insertions(+), 36 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1071,9 +1071,9 @@ static int mtk_init_fq_dma(struct mtk_et
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ dma_addr_t phy_ring_tail;
+- int cnt = MTK_QDMA_RING_SIZE;
++ int cnt = soc->tx.fq_dma_size;
+ dma_addr_t dma_addr;
+- int i;
++ int i, j, len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+ eth->scratch_ring = eth->sram_base;
+@@ -1082,40 +1082,46 @@ static int mtk_init_fq_dma(struct mtk_et
+ cnt * soc->tx.desc_size,
+ ð->phy_scratch_ring,
+ GFP_KERNEL);
++
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+- if (unlikely(!eth->scratch_head))
+- return -ENOMEM;
++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+
+- dma_addr = dma_map_single(eth->dma_dev,
+- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+- DMA_FROM_DEVICE);
+- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+- return -ENOMEM;
++ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
++ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
++ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
++ if (unlikely(!eth->scratch_head[j]))
++ return -ENOMEM;
+
+- for (i = 0; i < cnt; i++) {
+- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+- struct mtk_tx_dma_v2 *txd;
+-
+- txd = eth->scratch_ring + i * soc->tx.desc_size;
+- txd->txd1 = addr;
+- if (i < cnt - 1)
+- txd->txd2 = eth->phy_scratch_ring +
+- (i + 1) * soc->tx.desc_size;
+-
+- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
+- txd->txd4 = 0;
+- if (mtk_is_netsys_v2_or_greater(eth)) {
+- txd->txd5 = 0;
+- txd->txd6 = 0;
+- txd->txd7 = 0;
+- txd->txd8 = 0;
++ dma_addr = dma_map_single(eth->dma_dev,
++ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
++ DMA_FROM_DEVICE);
++
++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
++ return -ENOMEM;
++
++ for (i = 0; i < cnt; i++) {
++ struct mtk_tx_dma_v2 *txd;
++
++ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
++ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
++ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
++ txd->txd2 = eth->phy_scratch_ring +
++ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
++
++ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
++ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
++ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
++
++ txd->txd4 = 0;
++ if (mtk_is_netsys_v2_or_greater(eth)) {
++ txd->txd5 = 0;
++ txd->txd6 = 0;
++ txd->txd7 = 0;
++ txd->txd8 = 0;
++ }
+ }
+ }
+
+@@ -2386,7 +2392,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+- ring_size = MTK_DMA_SIZE;
++ ring_size = soc->tx.dma_size;
+
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
+ GFP_KERNEL);
+@@ -2394,8 +2400,8 @@ static int mtk_tx_alloc(struct mtk_eth *
+ goto no_tx_mem;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+- ring->dma = eth->sram_base + ring_size * sz;
+- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
++ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
++ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
+ } else {
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys, GFP_KERNEL);
+@@ -2517,6 +2523,7 @@ static void mtk_tx_clean(struct mtk_eth
+ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+ {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
++ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_rx_ring *ring;
+ int rx_data_len, rx_dma_size, tx_ring_size;
+ int i;
+@@ -2524,7 +2531,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ tx_ring_size = MTK_QDMA_RING_SIZE;
+ else
+- tx_ring_size = MTK_DMA_SIZE;
++ tx_ring_size = soc->tx.dma_size;
+
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ if (ring_no)
+@@ -2539,7 +2546,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+- rx_dma_size = MTK_DMA_SIZE;
++ rx_dma_size = soc->rx.dma_size;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
+@@ -3066,7 +3073,10 @@ static void mtk_dma_free(struct mtk_eth
+ mtk_rx_clean(eth, ð->rx_ring[i], false);
+ }
+
+- kfree(eth->scratch_head);
++ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
++ kfree(eth->scratch_head[i]);
++ eth->scratch_head[i] = NULL;
++ }
+ }
+
+ static bool mtk_hw_reset_check(struct mtk_eth *eth)
+@@ -4952,11 +4962,14 @@ static const struct mtk_soc_data mt2701_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -4976,11 +4989,14 @@ static const struct mtk_soc_data mt7621_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5002,11 +5018,14 @@ static const struct mtk_soc_data mt7622_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5027,11 +5046,14 @@ static const struct mtk_soc_data mt7623_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5050,11 +5072,14 @@ static const struct mtk_soc_data mt7629_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
++ .dma_size = MTK_DMA_SIZE(2K),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5076,6 +5101,8 @@ static const struct mtk_soc_data mt7981_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5083,6 +5110,7 @@ static const struct mtk_soc_data mt7981_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5102,6 +5130,8 @@ static const struct mtk_soc_data mt7986_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5109,6 +5139,7 @@ static const struct mtk_soc_data mt7986_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5128,6 +5159,8 @@ static const struct mtk_soc_data mt7988_
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
++ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+@@ -5135,6 +5168,7 @@ static const struct mtk_soc_data mt7988_
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+@@ -5149,6 +5183,7 @@ static const struct mtk_soc_data rt5350_
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+@@ -5156,6 +5191,7 @@ static const struct mtk_soc_data rt5350_
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
++ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ };
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -32,7 +32,9 @@
+ #define MTK_TX_DMA_BUF_LEN 0x3fff
+ #define MTK_TX_DMA_BUF_LEN_V2 0xffff
+ #define MTK_QDMA_RING_SIZE 2048
+-#define MTK_DMA_SIZE 512
++#define MTK_DMA_SIZE(x) (SZ_##x)
++#define MTK_FQ_DMA_HEAD 32
++#define MTK_FQ_DMA_LENGTH 2048
+ #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)
+ #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+ #define MTK_DMA_DUMMY_DESC 0xffffffff
+@@ -1173,6 +1175,8 @@ struct mtk_soc_data {
+ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
++ u32 dma_size;
++ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+@@ -1180,6 +1184,7 @@ struct mtk_soc_data {
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
++ u32 dma_size;
+ } rx;
+ };
+
+@@ -1261,7 +1266,7 @@ struct mtk_eth {
+ struct napi_struct rx_napi;
+ void *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+- void *scratch_head;
++ void *scratch_head[MTK_FQ_DMA_HEAD];
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
--- /dev/null
+From ca18300e00d584d5693127eb60c108b84883b8ac Mon Sep 17 00:00:00 2001
+From: Shengyu Qu <wiagn233@outlook.com>
+Date: Fri, 5 Jul 2024 01:26:26 +0800
+Subject: [PATCH] net: ethernet: mtk_ppe: Change PPE entries number to 16K
+
+MT7981, MT7986 and MT7988 all support 32768 PPE entries, and
+MT7621/MT7620 support 16384 PPE entries, but the driver only sets up
+8192 entries. So increase the maximum number of entries to 16384
+instead.
+
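+With MTK_PPE_ENTRIES defined as (1024 << MTK_PPE_ENTRIES_SHIFT), the
+table sizes work out as:
+
+	1024 << 3 =  8192 entries (before)
+	1024 << 4 = 16384 entries (after)
+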
+Signed-off-by: Elad Yifee <eladwf@gmail.com>
+Signed-off-by: Shengyu Qu <wiagn233@outlook.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/TY3P286MB261103F937DE4EEB0F88437D98DE2@TY3P286MB2611.JPNP286.PROD.OUTLOOK.COM
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -8,7 +8,7 @@
+ #include <linux/bitfield.h>
+ #include <linux/rhashtable.h>
+
+-#define MTK_PPE_ENTRIES_SHIFT 3
++#define MTK_PPE_ENTRIES_SHIFT 4
+ #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
+ #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
+ #define MTK_PPE_WAIT_TIMEOUT_US 1000000
--- /dev/null
+From 064fbc4e9b5a6dbda7fe7b67dc7e9e95d31f8d75 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Thu, 4 Jul 2024 11:14:55 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: implement .{get,set}_pauseparam
+ ethtool ops
+
+Implement operations to get and set flow-control link parameters.
+Both are done by simply calling phylink_ethtool_{get,set}_pauseparam().
+Fix whitespace in mtk_ethtool_ops while at it.
+
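+Once applied, the pause parameters can be inspected and changed with
+the standard ethtool commands, e.g. (interface name is only an
+example):
+
+	ethtool -a eth0
+	ethtool -A eth0 rx on tx on
+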
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Tested-by: Rui Salvaterra <rsalvaterra@gmail.com>
+Link: https://patch.msgid.link/e3ece47323444631d6cb479f32af0dfd6d145be0.1720088047.git.daniel@makrotopia.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -4361,6 +4361,20 @@ static int mtk_set_rxnfc(struct net_devi
+ return ret;
+ }
+
++static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++
++ phylink_ethtool_get_pauseparam(mac->phylink, pause);
++}
++
++static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
++{
++ struct mtk_mac *mac = netdev_priv(dev);
++
++ return phylink_ethtool_set_pauseparam(mac->phylink, pause);
++}
++
+ static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+ {
+@@ -4389,8 +4403,10 @@ static const struct ethtool_ops mtk_etht
+ .get_strings = mtk_get_strings,
+ .get_sset_count = mtk_get_sset_count,
+ .get_ethtool_stats = mtk_get_ethtool_stats,
++ .get_pauseparam = mtk_get_pauseparam,
++ .set_pauseparam = mtk_set_pauseparam,
+ .get_rxnfc = mtk_get_rxnfc,
+- .set_rxnfc = mtk_set_rxnfc,
++ .set_rxnfc = mtk_set_rxnfc,
+ };
+
+ static const struct net_device_ops mtk_netdev_ops = {
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1322,6 +1322,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1321,6 +1321,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
struct device_node *np;
int index;
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
-@@ -1338,6 +1356,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1337,6 +1355,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
return -ENODEV;
dev->rro.miod_phys = rmem->base;