include $(TOPDIR)/rules.mk
PKG_NAME:=mt76
-PKG_RELEASE=1
+PKG_RELEASE=2
PKG_LICENSE:=GPLv2
PKG_LICENSE_FILES:=
--- /dev/null
+From 9c7b98c03173a1a201d74203a81b344a0cd637ac Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:07 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: rename mtk_rxbm_desc to
+ mtk_wed_bm_desc
+
+Rename the mtk_rxbm_desc structure to mtk_wed_bm_desc, since it will also
+be used on the tx side by the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/mt7915/mmio.c
++++ b/mt7915/mmio.c
+@@ -591,7 +591,7 @@ static void mt7915_mmio_wed_release_rx_b
+
+ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+ {
+- struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
++ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_txwi_cache *t = NULL;
+ struct mt7915_dev *dev;
+ struct mt76_queue *q;
if (!hw_list[!hw->index]->wed_dev &&
hw->eth->dma_dev != hw->eth->dev)
-@@ -356,40 +380,54 @@ mtk_wed_detach(struct mtk_wed_device *de
+@@ -356,40 +380,47 @@ mtk_wed_detach(struct mtk_wed_device *de
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
-+ u32 val;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
-+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
-+ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
}
static void
-@@ -800,12 +838,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -800,12 +831,14 @@ mtk_wed_attach(struct mtk_wed_device *de
__releases(RCU)
{
struct mtk_wed_hw *hw;
!try_module_get(THIS_MODULE))
ret = -ENODEV;
-@@ -823,8 +863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -823,8 +856,10 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;
}
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1084,11 +1084,11 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1077,11 +1077,11 @@ void mtk_wed_add_hw(struct device_node *
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
-@@ -1131,8 +1131,14 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1124,8 +1124,14 @@ void mtk_wed_add_hw(struct device_node *
hw_list[index] = hw;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1079,7 +1079,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1072,7 +1072,7 @@ void mtk_wed_add_hw(struct device_node *
pdev = of_find_device_by_node(np);
if (!pdev)
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
-@@ -1139,6 +1139,8 @@ unlock:
+@@ -1132,6 +1132,8 @@ unlock:
mutex_unlock(&hw_lock);
err_put_device:
put_device(&pdev->dev);
}
void mtk_wed_exit(void)
-@@ -1159,6 +1161,7 @@ void mtk_wed_exit(void)
+@@ -1152,6 +1154,7 @@ void mtk_wed_exit(void)
hw_list[i] = NULL;
debugfs_remove(hw->debugfs_dir);
put_device(hw->dev);
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
}
static void
-@@ -695,10 +695,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -688,10 +688,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
return -ENOMEM;
-@@ -812,9 +812,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -805,9 +805,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -923,7 +923,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -916,7 +916,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
sizeof(*ring->desc)))
return -ENOMEM;
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -441,10 +667,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+@@ -434,10 +660,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
} else {
mtk_wed_bus_init(dev);
}
}
-@@ -494,6 +722,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -487,6 +715,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
}
}
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
-@@ -505,11 +859,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -498,11 +852,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
-@@ -536,9 +890,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -529,9 +883,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
-@@ -546,18 +900,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -539,18 +893,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
{
void *head = (void *)ring->desc;
int i;
-@@ -567,7 +931,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -560,7 +924,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
desc->buf1 = 0;
desc->info = 0;
}
-@@ -623,7 +990,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -616,7 +983,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
if (!dev->tx_ring[i].desc)
continue;
}
if (mtk_wed_poll_busy(dev))
-@@ -641,6 +1009,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -634,6 +1002,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
-@@ -675,12 +1046,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -668,12 +1039,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
-@@ -689,7 +1059,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -682,7 +1052,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
ring->desc_size = desc_size;
ring->size = size;
return 0;
}
-@@ -698,9 +1068,14 @@ static int
+@@ -691,9 +1061,14 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -717,6 +1092,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -710,6 +1085,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return 0;
}
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
-@@ -739,6 +1168,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -732,6 +1161,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
/* initail tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
-@@ -757,6 +1188,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -750,6 +1181,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
-@@ -794,9 +1235,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -787,9 +1228,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
-@@ -804,6 +1251,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -797,6 +1244,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
}
}
-@@ -829,7 +1285,19 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -822,7 +1278,19 @@ mtk_wed_start(struct mtk_wed_device *dev
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
}
mtk_wed_dma_enable(dev);
-@@ -863,7 +1331,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -856,7 +1324,7 @@ mtk_wed_attach(struct mtk_wed_device *de
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
-@@ -876,15 +1344,24 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -869,15 +1337,24 @@ mtk_wed_attach(struct mtk_wed_device *de
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
}
mtk_wed_hw_init_early(dev);
-@@ -893,8 +1370,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -886,8 +1363,10 @@ mtk_wed_attach(struct mtk_wed_device *de
BIT(hw->index), 0);
else
ret = mtk_wed_wo_init(hw);
mutex_unlock(&hw_lock);
return ret;
-@@ -917,10 +1396,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -910,10 +1389,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
* WDMA RX.
*/
return -ENOMEM;
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
-@@ -967,6 +1447,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -960,6 +1440,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
return 0;
}
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
-@@ -1063,7 +1574,9 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1056,7 +1567,9 @@ void mtk_wed_add_hw(struct device_node *
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
-@@ -1072,6 +1585,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1065,6 +1578,7 @@ void mtk_wed_add_hw(struct device_node *
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
}
static void
-@@ -1297,9 +1297,10 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1290,9 +1290,10 @@ mtk_wed_start(struct mtk_wed_device *dev
if (mtk_wed_rro_cfg(dev))
return;
mtk_wed_dma_enable(dev);
dev->running = true;
}
-@@ -1365,11 +1366,13 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1358,11 +1359,13 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
}
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
-@@ -1006,11 +1009,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -999,11 +1002,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
-@@ -677,7 +691,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -670,7 +684,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
{
u32 mask, set;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -944,42 +944,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
}
static u32
}
static void
-@@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -997,19 +1085,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
true);
}
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
-@@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1026,6 +1118,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
-@@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1033,8 +1128,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
-@@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1045,6 +1151,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
}
static int
-@@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1267,6 +1384,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16);
-@@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1355,10 +1475,6 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;
if (mtk_wed_get_rx_capa(dev)) {
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1188,7 +1188,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -1181,7 +1181,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1197,8 +1198,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1190,8 +1191,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->rx_wdma[idx];
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1396,7 +1397,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1389,7 +1390,7 @@ mtk_wed_start(struct mtk_wed_device *dev
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -1505,7 +1506,8 @@ unlock:
+@@ -1498,7 +1499,8 @@ unlock:
}
static int
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];
-@@ -1524,11 +1526,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -1517,11 +1519,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1259,7 +1259,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1252,7 +1252,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
}
static int
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1268,8 +1269,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1261,8 +1262,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->tx_wdma[idx];
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1279,6 +1280,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1272,6 +1273,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
-@@ -1618,18 +1622,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -1611,18 +1615,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
}
static int
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -786,7 +786,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
+@@ -779,7 +779,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
mutex_unlock(&hw_lock);
}
-@@ -1545,8 +1550,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1538,8 +1543,10 @@ mtk_wed_attach(struct mtk_wed_device *de
ret = mtk_wed_wo_init(hw);
}
out:
pse_port = 8;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1091,7 +1091,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+@@ -1084,7 +1084,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
} else {
struct mtk_eth *eth = dev->hw->eth;
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
-@@ -1813,7 +1813,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1806,7 +1806,7 @@ void mtk_wed_add_hw(struct device_node *
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 20 Mar 2023 11:44:30 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows
+ from wlan devices
+
+WED version 2 (on MT7986 and later) can offload flows originating from wireless
+devices. In order to make that work, ndo_setup_tc needs to be implemented on
+the netdevs. This adds the required code to offload flows coming in from WED,
+while keeping track of the incoming wed index used for selecting the correct
+PPE device.
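+
+A wireless driver that owns a mtk_wed_device can then forward its tc
+callbacks to the WED core. A minimal, hedged sketch (wlan_ndo_setup_tc and
+struct wlan_priv are illustrative assumptions, not part of this patch):
+
+	static int wlan_ndo_setup_tc(struct net_device *netdev,
+				     enum tc_setup_type type, void *type_data)
+	{
+		/* hypothetical driver private data embedding the wed device */
+		struct wlan_priv *priv = netdev_priv(netdev);
+
+		if (!mtk_wed_device_active(&priv->wed))
+			return -EOPNOTSUPP;
+
+		/* hand TC_SETUP_BLOCK / TC_SETUP_FT over to the WED core */
+		return mtk_wed_device_setup_tc(&priv->wed, netdev, type,
+					       type_data);
+	}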
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 +
+ .../net/ethernet/mediatek/mtk_ppe_offload.c | 37 ++++---
+ drivers/net/ethernet/mediatek/mtk_wed.c | 101 ++++++++++++++++++
+ include/linux/soc/mediatek/mtk_wed.h | 6 ++
+ 4 files changed, 133 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1432,6 +1432,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
+ int mtk_eth_offload_init(struct mtk_eth *eth);
+ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
++int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
++ int ppe_index);
++void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
+ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -235,7 +235,8 @@ out:
+ }
+
+ static int
+-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
++mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
++ int ppe_index)
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action_entry *act;
+@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth
+ entry->cookie = f->cookie;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
++ entry->ppe_index = ppe_index;
+
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
+ if (err < 0)
+@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *e
+
+ static DEFINE_MUTEX(mtk_flow_offload_mutex);
+
+-static int
+-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
++int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
++ int ppe_index)
+ {
+- struct flow_cls_offload *cls = type_data;
+- struct net_device *dev = cb_priv;
+- struct mtk_mac *mac = netdev_priv(dev);
+- struct mtk_eth *eth = mac->hw;
+ int err;
+
+- if (!tc_can_offload(dev))
+- return -EOPNOTSUPP;
+-
+- if (type != TC_SETUP_CLSFLOWER)
+- return -EOPNOTSUPP;
+-
+ mutex_lock(&mtk_flow_offload_mutex);
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+- err = mtk_flow_offload_replace(eth, cls);
++ err = mtk_flow_offload_replace(eth, cls, ppe_index);
+ break;
+ case FLOW_CLS_DESTROY:
+ err = mtk_flow_offload_destroy(eth, cls);
+@@ -556,6 +548,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
+ }
+
+ static int
++mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
++{
++ struct flow_cls_offload *cls = type_data;
++ struct net_device *dev = cb_priv;
++ struct mtk_mac *mac = netdev_priv(dev);
++ struct mtk_eth *eth = mac->hw;
++
++ if (!tc_can_offload(dev))
++ return -EOPNOTSUPP;
++
++ if (type != TC_SETUP_CLSFLOWER)
++ return -EOPNOTSUPP;
++
++ return mtk_flow_offload_cmd(eth, cls, 0);
++}
++
++static int
+ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+ {
+ struct mtk_mac *mac = netdev_priv(dev);
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -13,6 +13,8 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/debugfs.h>
+ #include <linux/soc/mediatek/mtk_wed.h>
++#include <net/flow_offload.h>
++#include <net/pkt_cls.h>
+ #include "mtk_eth_soc.h"
+ #include "mtk_wed_regs.h"
+ #include "mtk_wed.h"
+@@ -41,6 +43,11 @@
+ static struct mtk_wed_hw *hw_list[2];
+ static DEFINE_MUTEX(hw_lock);
+
++struct mtk_wed_flow_block_priv {
++ struct mtk_wed_hw *hw;
++ struct net_device *dev;
++};
++
+ static void
+ wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+ {
+@@ -1753,6 +1760,99 @@ out:
+ mutex_unlock(&hw_lock);
+ }
+
++static int
++mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
++{
++ struct mtk_wed_flow_block_priv *priv = cb_priv;
++ struct flow_cls_offload *cls = type_data;
++ struct mtk_wed_hw *hw = priv->hw;
++
++ if (!tc_can_offload(priv->dev))
++ return -EOPNOTSUPP;
++
++ if (type != TC_SETUP_CLSFLOWER)
++ return -EOPNOTSUPP;
++
++ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
++}
++
++static int
++mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
++ struct flow_block_offload *f)
++{
++ struct mtk_wed_flow_block_priv *priv;
++ static LIST_HEAD(block_cb_list);
++ struct flow_block_cb *block_cb;
++ struct mtk_eth *eth = hw->eth;
++ flow_setup_cb_t *cb;
++
++ if (!eth->soc->offload_version)
++ return -EOPNOTSUPP;
++
++ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
++ return -EOPNOTSUPP;
++
++ cb = mtk_wed_setup_tc_block_cb;
++ f->driver_block_list = &block_cb_list;
++
++ switch (f->command) {
++ case FLOW_BLOCK_BIND:
++ block_cb = flow_block_cb_lookup(f->block, cb, dev);
++ if (block_cb) {
++ flow_block_cb_incref(block_cb);
++ return 0;
++ }
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ priv->hw = hw;
++ priv->dev = dev;
++ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
++ if (IS_ERR(block_cb)) {
++ kfree(priv);
++ return PTR_ERR(block_cb);
++ }
++
++ flow_block_cb_incref(block_cb);
++ flow_block_cb_add(block_cb, f);
++ list_add_tail(&block_cb->driver_list, &block_cb_list);
++ return 0;
++ case FLOW_BLOCK_UNBIND:
++ block_cb = flow_block_cb_lookup(f->block, cb, dev);
++ if (!block_cb)
++ return -ENOENT;
++
++ if (!flow_block_cb_decref(block_cb)) {
++ flow_block_cb_remove(block_cb, f);
++ list_del(&block_cb->driver_list);
++ kfree(block_cb->cb_priv);
++ }
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int
++mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
++ enum tc_setup_type type, void *type_data)
++{
++ struct mtk_wed_hw *hw = wed->hw;
++
++ if (hw->version < 2)
++ return -EOPNOTSUPP;
++
++ switch (type) {
++ case TC_SETUP_BLOCK:
++ case TC_SETUP_FT:
++ return mtk_wed_setup_tc_block(hw, dev, type_data);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+@@ -1772,6 +1872,7 @@ void mtk_wed_add_hw(struct device_node *
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
++ .setup_tc = mtk_wed_setup_tc,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -6,6 +6,7 @@
+ #include <linux/regmap.h>
+ #include <linux/pci.h>
+ #include <linux/skbuff.h>
++#include <linux/netdevice.h>
+
+ #define MTK_WED_TX_QUEUES 2
+ #define MTK_WED_RX_QUEUES 2
+@@ -180,6 +181,8 @@ struct mtk_wed_ops {
+
+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
++ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
++ enum tc_setup_type type, void *type_data);
+ };
+
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
+ #define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
+ #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
++#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
++ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ {
+@@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
+ #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+ #define mtk_wed_device_stop(_dev) do {} while (0)
+ #define mtk_wed_device_dma_reset(_dev) do {} while (0)
++#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+ #endif
+
+ #endif
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Mon, 20 Mar 2023 15:37:55 +0100
+Subject: [PATCH] net: ethernet: mediatek: mtk_ppe: prefer newly added l2
+ flows over existing ones
+
+When a device is roaming between interfaces and a new flow entry is created,
+we should assume that its output device is more up to date than whatever
+entry existed already.
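+
+This relies on two rhashtable primitives: rhashtable_lookup_get_insert_fast(),
+which inserts the new node and returns NULL, or returns the entry already
+hashed under the same key (or an ERR_PTR() on failure), and
+rhashtable_replace_fast(), which atomically swaps the old node for the new
+one. The resulting control flow, outlined with ht/params standing in for
+ppe->l2_flows and mtk_flow_l2_ht_params:
+
+	prev = rhashtable_lookup_get_insert_fast(ht, &new->l2_node, params);
+	if (likely(!prev))
+		return 0;		/* no duplicate, new entry inserted */
+	if (IS_ERR(prev))
+		return PTR_ERR(prev);	/* insertion failed, e.g. -ENOMEM */
+	/* duplicate found: prefer the newly added entry over the old one */
+	return rhashtable_replace_fast(ht, &prev->l2_node, &new->l2_node,
+				       params);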
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -662,10 +662,20 @@ void mtk_foe_entry_clear(struct mtk_ppe
+ static int
+ mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
++ struct mtk_flow_entry *prev;
++
+ entry->type = MTK_FLOW_TYPE_L2;
+
+- return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+- mtk_flow_l2_ht_params);
++ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
++ mtk_flow_l2_ht_params);
++ if (likely(!prev))
++ return 0;
++
++ if (IS_ERR(prev))
++ return PTR_ERR(prev);
++
++ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
++ &entry->l2_node, mtk_flow_l2_ht_params);
+ }
+
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 23 Mar 2023 10:24:11 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
+ offloaded flows
+
+Unify tracking of L2 and L3 flows. Use the generic list field in struct
+mtk_flow_entry for tracking L2 subflows. Preparation for improving
+flow accounting support.
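+
+The resulting entry layout, abridged from the mtk_ppe.h hunk below:
+
+	struct mtk_flow_entry {
+		union {
+			struct {	/* regular flows + L2 subflows */
+				struct hlist_node list;
+				struct hlist_node l2_list;
+			};
+			struct {	/* L2 flows */
+				struct rhash_head l2_node;
+				struct hlist_head l2_flows;
+			};
+		};
+		/* ... */
+		struct mtk_foe_entry data;	/* now common to all entry types */
+	};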
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 15 +--
+ 2 files changed, 86 insertions(+), 91 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -482,42 +482,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
+ return 0;
+ }
+
++static int
++mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
++{
++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
++
++ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
++ return offsetof(struct mtk_foe_entry, ipv6._rsv);
++ else
++ return offsetof(struct mtk_foe_entry, ipv4.ib2);
++}
++
+ static bool
+ mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+- struct mtk_foe_entry *data)
++ struct mtk_foe_entry *data, int len)
+ {
+- int type, len;
+-
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+- type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+- if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+- len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+- else
+- len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+-
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+ }
+
+ static void
+-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++ bool set_state)
+ {
+- struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+- head = &entry->l2_flows;
+- hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+- __mtk_foe_entry_clear(ppe, entry);
++ hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
++ __mtk_foe_entry_clear(ppe, entry, set_state);
+ return;
+ }
+
+- hlist_del_init(&entry->list);
+- if (entry->hash != 0xffff) {
++ if (entry->hash != 0xffff && set_state) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+@@ -537,7 +538,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+- hlist_del_init(&entry->l2_data.list);
++ hlist_del_init(&entry->l2_list);
++ hlist_del_init(&entry->list);
+ kfree(entry);
+ }
+
+@@ -553,66 +555,55 @@ static int __mtk_foe_entry_idle_time(str
+ return now - timestamp;
+ }
+
++static bool
++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++{
++ struct mtk_foe_entry foe = {};
++ struct mtk_foe_entry *hwe;
++ u16 hash = entry->hash;
++ int len;
++
++ if (hash == 0xffff)
++ return false;
++
++ hwe = mtk_foe_get_entry(ppe, hash);
++ len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
++ memcpy(&foe, hwe, len);
++
++ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
++ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
++ return false;
++
++ entry->data.ib1 = foe.ib1;
++
++ return true;
++}
++
+ static void
+ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ struct mtk_flow_entry *cur;
+- struct mtk_foe_entry *hwe;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
++ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
+ int cur_idle;
+- u32 ib1;
+-
+- hwe = mtk_foe_get_entry(ppe, cur->hash);
+- ib1 = READ_ONCE(hwe->ib1);
+
+- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+- cur->hash = 0xffff;
+- __mtk_foe_entry_clear(ppe, cur);
++ if (!mtk_flow_entry_update(ppe, cur)) {
++ __mtk_foe_entry_clear(ppe, entry, false);
+ continue;
+ }
+
+- cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
++ cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~ib1_ts_mask;
+- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+- }
+-}
+-
+-static void
+-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+-{
+- struct mtk_foe_entry foe = {};
+- struct mtk_foe_entry *hwe;
+-
+- spin_lock_bh(&ppe_lock);
+-
+- if (entry->type == MTK_FLOW_TYPE_L2) {
+- mtk_flow_entry_update_l2(ppe, entry);
+- goto out;
++ entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
+ }
+-
+- if (entry->hash == 0xffff)
+- goto out;
+-
+- hwe = mtk_foe_get_entry(ppe, entry->hash);
+- memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+- if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+- entry->hash = 0xffff;
+- goto out;
+- }
+-
+- entry->data.ib1 = foe.ib1;
+-
+-out:
+- spin_unlock_bh(&ppe_lock);
+ }
+
+ static void
+@@ -655,7 +646,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ spin_lock_bh(&ppe_lock);
+- __mtk_foe_entry_clear(ppe, entry);
++ __mtk_foe_entry_clear(ppe, entry, true);
++ hlist_del_init(&entry->list);
+ spin_unlock_bh(&ppe_lock);
+ }
+
+@@ -702,8 +694,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ {
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+- struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
++ struct mtk_foe_entry *hwe;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ int type;
+
+@@ -711,30 +703,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ if (!flow_info)
+ return;
+
+- flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
+- hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
++ hlist_add_head(&flow_info->l2_list, &entry->l2_flows);
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+- memcpy(&foe, hwe, soc->foe_entry_size);
+- foe.ib1 &= ib1_mask;
+- foe.ib1 |= entry->data.ib1 & ~ib1_mask;
++ memcpy(&flow_info->data, hwe, soc->foe_entry_size);
++ flow_info->data.ib1 &= ib1_mask;
++ flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+- l2 = mtk_foe_entry_l2(ppe->eth, &foe);
++ l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+- type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
++ type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+- memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
++ memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
++ sizeof(flow_info->data.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+- *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
++ *mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
+
+- __mtk_foe_entry_commit(ppe, &foe, hash);
++ __mtk_foe_entry_commit(ppe, &flow_info->data, hash);
+ }
+
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+@@ -744,9 +736,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
++ struct mtk_foe_entry foe = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
++ int entry_len;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+@@ -754,20 +748,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+- hlist_for_each_entry_safe(entry, n, head, list) {
+- if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+- if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+- MTK_FOE_STATE_BIND))
+- continue;
+-
+- entry->hash = 0xffff;
+- __mtk_foe_entry_clear(ppe, entry);
+- continue;
+- }
++ entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
++ memcpy(&foe, hwe, entry_len);
+
+- if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
++ hlist_for_each_entry_safe(entry, n, head, list) {
++ if (found ||
++ !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
+ if (entry->hash != 0xffff)
+- entry->hash = 0xffff;
++ __mtk_foe_entry_clear(ppe, entry, false);
+ continue;
+ }
+
+@@ -816,9 +804,17 @@ out:
+
+ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+- mtk_flow_entry_update(ppe, entry);
++ int idle;
++
++ spin_lock_bh(&ppe_lock);
++ if (entry->type == MTK_FLOW_TYPE_L2)
++ mtk_flow_entry_update_l2(ppe, entry);
++ else
++ mtk_flow_entry_update(ppe, entry);
++ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++ spin_unlock_bh(&ppe_lock);
+
+- return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++ return idle;
+ }
+
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -286,7 +286,12 @@ enum {
+
+ struct mtk_flow_entry {
+ union {
+- struct hlist_node list;
++ /* regular flows + L2 subflows */
++ struct {
++ struct hlist_node list;
++ struct hlist_node l2_list;
++ };
++ /* L2 flows */
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+@@ -296,13 +301,7 @@ struct mtk_flow_entry {
+ s8 wed_index;
+ u8 ppe_index;
+ u16 hash;
+- union {
+- struct mtk_foe_entry data;
+- struct {
+- struct mtk_flow_entry *base_flow;
+- struct hlist_node list;
+- } l2_data;
+- };
++ struct mtk_foe_entry data;
+ struct rhash_head node;
+ unsigned long cookie;
+ };
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 23 Mar 2023 11:05:22 +0100
+Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2
+ flows
+
+For L2 flows, the packet/byte counters should report the sum of the
+counters of their subflows, both current and expired.
+In order to make this work, change the way that accounting data is tracked.
+Reset counters when a flow enters bind. Once it expires (or enters unbind),
+store the last counter value in struct mtk_flow_entry.
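+
+The resulting accounting model, as a hedged outline (hw_pkt_delta and
+sum_over_live_subflows are shorthand for illustration, not real helpers):
+
+	/* on bind: __mtk_foe_entry_commit() zeroes the hash's MIB counters */
+	/* on each poll of a live subflow: accumulate the hardware counters */
+	acct->packets += hw_pkt_delta;		/* mtk_ppe_mib_entry_read() */
+	/* on unbind/expiry: fold the accumulated value into the software total */
+	entry->prev_packets += acct->packets;
+	/* reported stats = expired subflows + still-live subflows */
+	total = entry->prev_packets + sum_over_live_subflows(acct->packets);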
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -79,9 +79,9 @@ static int mtk_ppe_mib_wait_busy(struct
+ int ret;
+ u32 val;
+
+- ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+- !(val & MTK_PPE_MIB_SER_CR_ST),
+- 20, MTK_PPE_WAIT_TIMEOUT_US);
++ ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
++ !(val & MTK_PPE_MIB_SER_CR_ST),
++ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "MIB table busy");
+@@ -89,17 +89,31 @@ static int mtk_ppe_mib_wait_busy(struct
+ return ret;
+ }
+
+-static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
++static inline struct mtk_foe_accounting *
++mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
++{
++ if (!ppe->acct_table)
++ return NULL;
++
++ return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
++}
++
++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
+ {
+ u32 val, cnt_r0, cnt_r1, cnt_r2;
++ struct mtk_foe_accounting *acct;
+ int ret;
+
+ val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+ ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+
++ acct = mtk_ppe_acct_data(ppe, index);
++ if (!acct)
++ return NULL;
++
+ ret = mtk_ppe_mib_wait_busy(ppe);
+ if (ret)
+- return ret;
++ return acct;
+
+ cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+ cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+@@ -108,19 +122,19 @@ static int mtk_mib_entry_read(struct mtk
+ if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
+ /* 64 bit for each counter */
+ u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
+- *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
+- *packets = ((u64)cnt_r3 << 32) | cnt_r2;
++ acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0;
++ acct->packets += ((u64)cnt_r3 << 32) | cnt_r2;
+ } else {
+ /* 48 bit byte counter, 40 bit packet counter */
+ u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+ u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+- *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
++ acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
++ acct->packets += (pkt_cnt_high << 16) | pkt_cnt_low;
+ }
+
+- return 0;
++ return acct;
+ }
+
+ static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
+@@ -525,13 +539,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ mtk_ppe_cache_clear(ppe);
+- if (ppe->accounting) {
+- struct mtk_foe_accounting *acct;
+-
+- acct = ppe->acct_table + entry->hash * sizeof(*acct);
+- acct->packets = 0;
+- acct->bytes = 0;
+- }
+ }
+ entry->hash = 0xffff;
+
+@@ -556,11 +563,14 @@ static int __mtk_foe_entry_idle_time(str
+ }
+
+ static bool
+-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++ u64 *packets, u64 *bytes)
+ {
++ struct mtk_foe_accounting *acct;
+ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+ u16 hash = entry->hash;
++ bool ret = false;
+ int len;
+
+ if (hash == 0xffff)
+@@ -571,18 +581,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+ memcpy(&foe, hwe, len);
+
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+- FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
+- return false;
++ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
++ acct = mtk_ppe_acct_data(ppe, hash);
++ if (acct) {
++ entry->prev_packets += acct->packets;
++ entry->prev_bytes += acct->bytes;
++ }
++
++ goto out;
++ }
+
+ entry->data.ib1 = foe.ib1;
++ acct = mtk_ppe_mib_entry_read(ppe, hash);
++ ret = true;
++
++out:
++ if (acct) {
++ *packets += acct->packets;
++ *bytes += acct->bytes;
++ }
+
+- return true;
++ return ret;
+ }
+
+ static void
+ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
++ u64 *packets = &entry->packets;
++ u64 *bytes = &entry->bytes;
+ struct mtk_flow_entry *cur;
+ struct hlist_node *tmp;
+ int idle;
+@@ -591,7 +618,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
+ int cur_idle;
+
+- if (!mtk_flow_entry_update(ppe, cur)) {
++ if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
++ entry->prev_packets += cur->prev_packets;
++ entry->prev_bytes += cur->prev_bytes;
+ __mtk_foe_entry_clear(ppe, entry, false);
+ continue;
+ }
+@@ -606,10 +635,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ }
+ }
+
++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++ int *idle)
++{
++ entry->packets = entry->prev_packets;
++ entry->bytes = entry->prev_bytes;
++
++ spin_lock_bh(&ppe_lock);
++
++ if (entry->type == MTK_FLOW_TYPE_L2)
++ mtk_flow_entry_update_l2(ppe, entry);
++ else
++ mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes);
++
++ *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++
++ spin_unlock_bh(&ppe_lock);
++}
++
+ static void
+ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+ {
++ struct mtk_foe_accounting *acct;
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
+ struct mtk_foe_entry *hwe;
+@@ -640,6 +688,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+
+ dma_wmb();
+
++ acct = mtk_ppe_mib_entry_read(ppe, hash);
++ if (acct) {
++ acct->packets = 0;
++ acct->bytes = 0;
++ }
++
+ mtk_ppe_cache_clear(ppe);
+ }
+
+@@ -802,21 +856,6 @@ out:
+ spin_unlock_bh(&ppe_lock);
+ }
+
+-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+-{
+- int idle;
+-
+- spin_lock_bh(&ppe_lock);
+- if (entry->type == MTK_FLOW_TYPE_L2)
+- mtk_flow_entry_update_l2(ppe, entry);
+- else
+- mtk_flow_entry_update(ppe, entry);
+- idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+- spin_unlock_bh(&ppe_lock);
+-
+- return idle;
+-}
+-
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+ {
+ if (!ppe)
+@@ -844,32 +883,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
+ return mtk_ppe_wait_busy(ppe);
+ }
+
+-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+- struct mtk_foe_accounting *diff)
+-{
+- struct mtk_foe_accounting *acct;
+- int size = sizeof(struct mtk_foe_accounting);
+- u64 bytes, packets;
+-
+- if (!ppe->accounting)
+- return NULL;
+-
+- if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+- return NULL;
+-
+- acct = ppe->acct_table + index * size;
+-
+- acct->bytes += bytes;
+- acct->packets += packets;
+-
+- if (diff) {
+- diff->bytes = bytes;
+- diff->packets = packets;
+- }
+-
+- return acct;
+-}
+-
+ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
+ {
+ bool accounting = eth->soc->has_accounting;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -304,6 +304,8 @@ struct mtk_flow_entry {
+ struct mtk_foe_entry data;
+ struct rhash_head node;
+ unsigned long cookie;
++ u64 prev_packets, prev_bytes;
++ u64 packets, bytes;
+ };
+
+ struct mtk_mib_entry {
+@@ -347,6 +349,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
+ void mtk_ppe_start(struct mtk_ppe *ppe);
+ int mtk_ppe_stop(struct mtk_ppe *ppe);
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
+
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
+@@ -395,9 +398,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
+ unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+- struct mtk_foe_accounting *diff);
++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++ int *idle);
+
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
+ if (bind && state != MTK_FOE_STATE_BIND)
+ continue;
+
+- acct = mtk_foe_entry_get_mib(ppe, i, NULL);
++ acct = mtk_ppe_mib_entry_read(ppe, i);
+
+ type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
+ seq_printf(m, "%05x %s %7s", i,
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -499,24 +499,21 @@ static int
+ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+ {
+ struct mtk_flow_entry *entry;
+- struct mtk_foe_accounting diff;
+- u32 idle;
++ u64 packets, bytes;
++ int idle;
+
+ entry = rhashtable_lookup(ð->flow_table, &f->cookie,
+ mtk_flow_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+- idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
++ packets = entry->packets;
++ bytes = entry->bytes;
++ mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
++ f->stats.pkts += entry->packets - packets;
++ f->stats.bytes += entry->bytes - bytes;
+ f->stats.lastused = jiffies - idle * HZ;
+
+- if (entry->hash != 0xFFFF &&
+- mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+- &diff)) {
+- f->stats.pkts += diff.packets;
+- f->stats.bytes += diff.bytes;
+- }
+-
+ return 0;
+ }
+
+++ /dev/null
-From 3b00a07c2443745d62babfe08dbb2ad8e649526e Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Fri, 19 Nov 2021 03:03:49 +0100
-Subject: [PATCH] net: dsa: qca8k: fix internal delay applied to the wrong PAD
- config
-
-With SGMII phy the internal delay is always applied to the PAD0 config.
-This is caused by the falling edge configuration that hardcode the reg
-to PAD0 (as the falling edge bits are present only in PAD0 reg)
-Move the delay configuration before the reg overwrite to correctly apply
-the delay.
-
-Fixes: cef08115846e ("net: dsa: qca8k: set internal delay also for sgmii")
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1433,6 +1433,12 @@ qca8k_phylink_mac_config(struct dsa_swit
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
-+ /* From original code is reported port instability as SGMII also
-+ * require delay set. Apply advised values here or take them from DT.
-+ */
-+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
-+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-+
- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
- * falling edge is set writing in the PORT0 PAD reg
- */
-@@ -1455,12 +1461,6 @@ qca8k_phylink_mac_config(struct dsa_swit
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
-- /* From original code is reported port instability as SGMII also
-- * require delay set. Apply advised values here or take them from DT.
-- */
-- if (state->interface == PHY_INTERFACE_MODE_SGMII)
-- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
--
- break;
- default:
- dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:31:41 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add some more info in wed_txinfo_show
+ handler
+
+Add some new info, useful during debugging, to the Wireless Ethernet
+Dispatcher wed_txinfo_show debugfs handler.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/3390292655d568180b73d2a25576f61aa63310e5.1693157377.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -127,8 +127,17 @@ wed_txinfo_show(struct seq_file *s, void
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+- DUMP_STR("TX FREE"),
++ DUMP_STR("WED TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
++ DUMP_WED_RING(WED_RING_RX(0)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
++ DUMP_WED(WED_RX_MIB(1)),
++ DUMP_WED_RING(WED_RING_RX(1)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
++
++ DUMP_STR("WED WPDMA TX FREE"),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+ #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
+
+ #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+ #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:33:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: minor change in wed_{tx,rx}info_show
+
+No functional changes, just cosmetic ones.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/71e046c72a978745f0435af265dda610aa9bfbcf.1693157578.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct
+ }
+ }
+
+-
+ static int
+ wed_txinfo_show(struct seq_file *s, void *data)
+ {
+@@ -142,10 +141,8 @@ wed_txinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
+@@ -217,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:22:56 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on mtk_pse_port definitions
+ in mtk_flow_set_output_device
+
+Similar to the ethernet ports, rely on the mtk_pse_port definitions for
+the PSE WDMA ports as well.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b86bdb717e963e3246c1dec5f736c810703cf056.1694506814.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -196,10 +196,10 @@ mtk_flow_set_output_device(struct mtk_et
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+- pse_port = 8;
++ pse_port = PSE_WDMA0_PORT;
+ break;
+ case 1:
+- pse_port = 9;
++ pse_port = PSE_WDMA1_PORT;
+ break;
+ default:
+ return -EINVAL;
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:28:00 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: check update_wo_rx_stats in
+ mtk_wed_update_rx_stats()
+
+Check if update_wo_rx_stats function pointer is properly set in
+mtk_wed_update_rx_stats routine before accessing it.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b0d233386e059bccb59f18f69afb79a7806e5ded.1694507226.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -68,6 +68,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_d
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
++ if (!wed->wlan.update_wo_rx_stats)
++ return;
++
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 13 Sep 2023 20:42:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not assume offload callbacks are
+ always set
+
+Check whether the wlan.offload_enable and wlan.offload_disable callbacks
+are set in mtk_wed_flow_add()/mtk_wed_flow_remove(), since mt7996 will not
+rely on them.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -1712,19 +1712,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_devi
+ int mtk_wed_flow_add(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+- int ret;
++ int ret = 0;
+
+- if (!hw || !hw->wed_dev)
+- return -ENODEV;
++ mutex_lock(&hw_lock);
+
+- if (hw->num_flows) {
+- hw->num_flows++;
+- return 0;
++ if (!hw || !hw->wed_dev) {
++ ret = -ENODEV;
++ goto out;
+ }
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev) {
+- ret = -ENODEV;
++ if (!hw->wed_dev->wlan.offload_enable)
++ goto out;
++
++ if (hw->num_flows) {
++ hw->num_flows++;
+ goto out;
+ }
+
+@@ -1743,14 +1744,15 @@ void mtk_wed_flow_remove(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+
+- if (!hw)
+- return;
++ mutex_lock(&hw_lock);
+
+- if (--hw->num_flows)
+- return;
++ if (!hw || !hw->wed_dev)
++ goto out;
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev)
++ if (!hw->wed_dev->wlan.offload_disable)
++ goto out;
++
++ if (--hw->num_flows)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:05 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce versioning utility routines
+
+Similar to mtk_eth_soc, introduce the following wed versioning
+utility routines:
+- mtk_wed_is_v1
+- mtk_wed_is_v2
+
+This is a preliminary patch to introduce WED support for the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -277,7 +277,7 @@ mtk_wed_assign(struct mtk_wed_device *de
+ if (!hw->wed_dev)
+ goto out;
+
+- if (hw->version == 1)
++ if (mtk_wed_is_v1(hw))
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+@@ -358,7 +358,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+@@ -497,7 +497,7 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -576,7 +576,7 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -624,7 +624,7 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+@@ -730,7 +730,7 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+@@ -761,7 +761,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+@@ -934,7 +934,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+@@ -967,7 +967,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+@@ -1217,7 +1217,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ }
+
+ dev->init_done = false;
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ if (!busy) {
+@@ -1343,7 +1343,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+@@ -1416,7 +1416,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+@@ -1465,7 +1465,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ mtk_wed_set_ext_int(dev, true);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+@@ -1550,7 +1550,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+ } else {
+@@ -1618,7 +1618,7 @@ static int
+ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+ {
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+- int i, index = dev->hw->version == 1;
++ int i, index = mtk_wed_is_v1(dev->hw);
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+@@ -1676,7 +1676,7 @@ mtk_wed_irq_get(struct mtk_wed_device *d
+ {
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -1843,7 +1843,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *
+ {
+ struct mtk_wed_hw *hw = wed->hw;
+
+- if (hw->version < 2)
++ if (mtk_wed_is_v1(hw))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+@@ -1917,9 +1917,9 @@ void mtk_wed_add_hw(struct device_node *
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+- hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
++ hw->version = eth->soc->version;
+
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -40,6 +40,16 @@ struct mtk_wdma_info {
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
++{
++ return hw->version == 1;
++}
++
++static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
++{
++ return hw->version == 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -263,7 +263,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (hw->version != 1)
++ if (!mtk_wed_is_v1(hw))
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return 0;
+
+ if (WARN_ON(!wo))
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:06 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not configure rx offload if not
+ supported
+
+Check whether rx offload is supported, via the mtk_wed_get_rx_capa routine,
+before configuring it. This is a preliminary patch to introduce Wireless
+Ethernet Dispatcher (WED) support for MT7988 SoC.
+
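+The helper itself is not part of this patch; as a rough sketch, assuming
+the include/linux/soc/mediatek/mtk_wed.h definition at this point in the
+series (and omitting the CONFIG_NET_MEDIATEK_SOC_WED guard), it reduces to:
+
+  static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+  {
+          /* rx offload is available on WED v2 and newer */
+          return dev->version != 1;
+  }
+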
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -732,16 +732,21 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ {
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+- } else {
+- mtk_wed_bus_init(dev);
+-
+- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ return;
+ }
++
++ mtk_wed_bus_init(dev);
++
++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
++
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
++
++ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
++ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -973,15 +978,17 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ } else {
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
++ if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
++ }
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+@@ -1353,8 +1360,6 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+- wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+- GENMASK(1, 0));
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1373,15 +1378,20 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+- dev->wlan.rx_tbit[0]) |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+- dev->wlan.rx_tbit[1]));
++ if (mtk_wed_get_rx_capa(dev)) {
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
++ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
++ dev->wlan.rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
++ dev->wlan.rx_tbit[1]));
++
++ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
++ GENMASK(1, 0));
++ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+@@ -1400,6 +1410,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+@@ -1419,33 +1431,33 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+- } else {
+- int i;
++ return;
++ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_CTRL,
++ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
++ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
+
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+-
+- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+- MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
++ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+- wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+- MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
++ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
++ 0x2));
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
+- }
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
++ mtk_wed_check_wfdma_rx_fill(dev, i);
+ }
+
+ static void
+@@ -1472,7 +1484,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+- } else {
++ } else if (mtk_wed_get_rx_capa(dev)) {
+ /* driver set mid ready and only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+@@ -1484,7 +1496,6 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+-
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+@@ -1550,13 +1561,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (mtk_wed_is_v1(hw)) {
++ if (mtk_wed_is_v1(hw))
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+- } else {
++ else
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
++
++ if (mtk_wed_get_rx_capa(dev))
+ ret = mtk_wed_wo_init(hw);
+- }
+ out:
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return 0;
+
+ if (WARN_ON(!wo))
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:07 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: rename mtk_rxbm_desc in
+ mtk_wed_bm_desc
+
+Rename mtk_rxbm_desc structure in mtk_wed_bm_desc since it will be used
+even on tx side by MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -421,7 +421,7 @@ free_pagelist:
+ static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+@@ -441,7 +441,7 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
++ struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -45,7 +45,7 @@ enum mtk_wed_wo_cmd {
+ MTK_WED_WO_CMD_WED_END
+ };
+
+-struct mtk_rxbm_desc {
++struct mtk_wed_bm_desc {
+ __le32 buf0;
+ __le32 token;
+ } __packed __aligned(4);
+@@ -105,7 +105,7 @@ struct mtk_wed_device {
+ struct {
+ int size;
+ struct page_frag_cache rx_page;
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } rx_buf_ring;
+
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:08 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce mtk_wed_buf structure
+
+Introduce the mtk_wed_buf structure to store both the virtual and physical
+addresses of the pages allocated in the mtk_wed_tx_buffer_alloc() routine.
+This is a preliminary patch to add WED support for MT7988 SoC, since that
+SoC relies on a different dma descriptor layout that does not store page
+dma addresses.
+
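+Before this change the free path read the page dma address back from the
+descriptor (buf_addr = le32_to_cpu(desc[i].buf0)); keeping it in the page
+list makes unmapping independent of the descriptor format:
+
+  struct mtk_wed_buf {
+          void *p;              /* page virtual address */
+          dma_addr_t phy_addr;  /* page dma address for dma_unmap_page() */
+  };
+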
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -299,9 +299,9 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+@@ -342,7 +342,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ return -ENOMEM;
+ }
+
+- page_list[page_idx++] = page;
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+@@ -386,8 +387,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- void **page_list = dev->tx_buf_ring.pages;
+ int page_idx;
+ int i;
+
+@@ -399,13 +400,12 @@ mtk_wed_free_tx_buffer(struct mtk_wed_de
+
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
+- void *page = page_list[page_idx++];
+- dma_addr_t buf_addr;
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -76,6 +76,11 @@ struct mtk_wed_wo_rx_stats {
+ __le32 rx_drop_cnt;
+ };
+
++struct mtk_wed_buf {
++ void *p;
++ dma_addr_t phy_addr;
++};
++
+ struct mtk_wed_device {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+@@ -97,7 +102,7 @@ struct mtk_wed_device {
+
+ struct {
+ int size;
+- void **pages;
++ struct mtk_wed_buf *pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } tx_buf_ring;
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:09 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: move mem_region array out of
+ mtk_wed_mcu_load_firmware
+
+Remove the mtk_wed_wo_memory_region boot structure from mtk_wed_wo.
+This is a preliminary patch to introduce WED support for MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -16,14 +16,30 @@
+ #include "mtk_wed_wo.h"
+ #include "mtk_wed.h"
+
++static struct mtk_wed_wo_memory_region mem_region[] = {
++ [MTK_WED_WO_REGION_EMI] = {
++ .name = "wo-emi",
++ },
++ [MTK_WED_WO_REGION_ILM] = {
++ .name = "wo-ilm",
++ },
++ [MTK_WED_WO_REGION_DATA] = {
++ .name = "wo-data",
++ .shared = true,
++ },
++ [MTK_WED_WO_REGION_BOOT] = {
++ .name = "wo-boot",
++ },
++};
++
+ static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+ {
+- return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+ {
+- writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static struct sk_buff *
+@@ -294,18 +310,6 @@ next:
+ static int
+ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+ {
+- static struct mtk_wed_wo_memory_region mem_region[] = {
+- [MTK_WED_WO_REGION_EMI] = {
+- .name = "wo-emi",
+- },
+- [MTK_WED_WO_REGION_ILM] = {
+- .name = "wo-ilm",
+- },
+- [MTK_WED_WO_REGION_DATA] = {
+- .name = "wo-data",
+- .shared = true,
+- },
+- };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+@@ -319,11 +323,6 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ return ret;
+ }
+
+- wo->boot.name = "wo-boot";
+- ret = mtk_wed_get_memory_region(wo, &wo->boot);
+- if (ret)
+- return ret;
+-
+ /* set dummy cr */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -228,7 +228,6 @@ struct mtk_wed_wo_queue {
+
+ struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+- struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:10 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: make memory region optional
+
+Make mtk_wed_wo_memory_region entries optional.
+This is a preliminary patch to introduce Wireless Ethernet Dispatcher
+support for MT7988 SoC since MT7988 WED fw image will have a different
+layout.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -234,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ }
+
+ static int
+-mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
+ {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+- int index;
+
+- index = of_property_match_string(wo->hw->node, "memory-region-names",
+- region->name);
+- if (index < 0)
+- return index;
+-
+- np = of_parse_phandle(wo->hw->node, "memory-region", index);
++ np = of_parse_phandle(hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+@@ -258,7 +252,7 @@ mtk_wed_get_memory_region(struct mtk_wed
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+- region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
++ region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+ }
+@@ -271,6 +265,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
++ if (!region->phy_addr || !region->size)
++ return 0;
++
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+@@ -318,7 +315,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
++ int index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
++ if (index < 0)
++ continue;
++
++ ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:11 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: fix EXT_INT_STATUS_RX_FBUF
+ definitions for MT7986 SoC
+
+Fix MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH and
+MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH definitions for MT7986 (MT7986 is
+the only SoC to use them).
+
+Fixes: de84a090d99a ("net: ethernet: mtk_eth_wed: add wed support for mt7986 chipset")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:12 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_soc_data structure
+
+Introduce the mtk_wed_soc_data utility structure to contain per-SoC
+definitions.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -48,6 +48,26 @@ struct mtk_wed_flow_block_priv {
+ struct net_device *dev;
+ };
+
++static const struct mtk_wed_soc_data mt7622_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x088,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(3, 0),
++ .reset_idx_rx_mask = GENMASK(17, 16),
++ },
++ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7986_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
+ static void
+ wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+ {
+@@ -746,7 +766,7 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ return;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -940,22 +960,10 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+- wed_w32(dev, MTK_WED_TX_BM_TKID,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+- wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -970,6 +978,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
++ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
++ FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
++ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++ dev->wlan.token_start + dev->wlan.nbuf - 1));
++
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -1104,13 +1117,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+- struct mtk_eth *eth = dev->hw->eth;
+-
+- if (mtk_is_netsys_v2_or_greater(eth))
+- wed_set(dev, MTK_WED_RESET_IDX,
+- MTK_WED_RESET_IDX_RX_V2);
+- else
+- wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
++ wed_set(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_rx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1163,7 +1171,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+- wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
++ wed_w32(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_tx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1255,7 +1264,6 @@ static int
+ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+@@ -1263,7 +1271,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1284,7 +1292,6 @@ static int
+ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+@@ -1292,7 +1299,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1931,7 +1938,12 @@ void mtk_wed_add_hw(struct device_node *
+ hw->irq = irq;
+ hw->version = eth->soc->version;
+
+- if (mtk_wed_is_v1(hw)) {
++ switch (hw->version) {
++ case 2:
++ hw->soc = &mt7986_data;
++ break;
++ default:
++ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+@@ -1945,6 +1957,8 @@ void mtk_wed_add_hw(struct device_node *
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
++ hw->soc = &mt7622_data;
++ break;
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -12,7 +12,18 @@
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
++struct mtk_wed_soc_data {
++ struct {
++ u32 tx_bm_tkid;
++ u32 wpdma_rx_ring0;
++ u32 reset_idx_tx_mask;
++ u32 reset_idx_rx_mask;
++ } regmap;
++ u32 wdma_desc_size;
++};
++
+ struct mtk_wed_hw {
++ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -100,8 +100,6 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_TX_BM_BASE 0x084
+
+-#define MTK_WED_TX_BM_TKID 0x088
+-#define MTK_WED_TX_BM_TKID_V2 0x0c8
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+@@ -160,9 +158,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+ #define MTK_WED_RESET_IDX 0x20c
+-#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+-#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+-#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+ #define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
+
+ #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+@@ -286,7 +281,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+-#define MTK_WED_WPDMA_RX_RING 0x770
+
+ #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
--- /dev/null
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:13 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce WED support for MT7988
+
+Similar to MT7986 and MT7622, enable the Wireless Ethernet Dispatcher for
+MT7988 in order to offload traffic forwarded from LAN/WLAN to WLAN/LAN.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -195,6 +195,7 @@ static const struct mtk_reg_map mt7988_r
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
++ [2] = 0x5000,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1129,7 +1129,7 @@ struct mtk_reg_map {
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe0;
+ u32 ppe_base;
+- u32 wdma_base[2];
++ u32 wdma_base[3];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+ };
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -201,6 +201,9 @@ mtk_flow_set_output_device(struct mtk_et
+ case 1:
+ pse_port = PSE_WDMA1_PORT;
+ break;
++ case 2:
++ pse_port = PSE_WDMA2_PORT;
++ break;
+ default:
+ return -EINVAL;
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -16,17 +16,19 @@
+ #include <net/flow_offload.h>
+ #include <net/pkt_cls.h>
+ #include "mtk_eth_soc.h"
+-#include "mtk_wed_regs.h"
+ #include "mtk_wed.h"
+ #include "mtk_ppe.h"
+ #include "mtk_wed_wo.h"
+
+ #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+-#define MTK_WED_PKT_SIZE 1900
++#define MTK_WED_PKT_SIZE 1920
+ #define MTK_WED_BUF_SIZE 2048
++#define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
++#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
++#define MTK_WED_RX_PG_BM_CNT 8192
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -40,7 +42,10 @@
+ #define MTK_WED_RRO_QUE_CNT 8192
+ #define MTK_WED_MIOD_ENTRY_CNT 128
+
+-static struct mtk_wed_hw *hw_list[2];
++#define MTK_WED_TX_BM_DMA_SIZE 65536
++#define MTK_WED_TX_BM_PKT_CNT 32768
++
++static struct mtk_wed_hw *hw_list[3];
+ static DEFINE_MUTEX(hw_lock);
+
+ struct mtk_wed_flow_block_priv {
+@@ -55,6 +60,7 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(3, 0),
+ .reset_idx_rx_mask = GENMASK(17, 16),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -65,6 +71,18 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7988_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x7d0,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -319,33 +337,38 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
++ int i, page_idx = 0, n_pages, ring_size;
++ int token = dev->wlan.token_start;
+ struct mtk_wed_buf *page_list;
+- struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- int token = dev->wlan.token_start;
+- int ring_size;
+- int n_pages;
+- int i, page_idx;
++ void *desc_ptr;
+
+- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+- n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->tx_buf_ring.size = ring_size;
++ } else {
++ dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
++ ring_size = MTK_WED_TX_BM_PKT_CNT;
++ }
++ n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+
+- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+- &desc_phys, GFP_KERNEL);
+- if (!desc)
++ desc_ptr = dma_alloc_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * desc_size,
++ &desc_phys, GFP_KERNEL);
++ if (!desc_ptr)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.desc = desc;
++ dev->tx_buf_ring.desc = desc_ptr;
+ dev->tx_buf_ring.desc_phys = desc_phys;
+
+- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
++ for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+@@ -371,28 +394,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+- u32 txd_size;
+- u32 ctrl;
+-
+- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
++ struct mtk_wdma_desc *desc = desc_ptr;
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+- desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ u32 txd_size, ctrl;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+- else
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG0;
+- desc->ctrl = cpu_to_le32(ctrl);
+- desc->info = 0;
+- desc++;
++ txd_size = dev->wlan.init_buf(buf, buf_phys,
++ token++);
++ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
++ if (mtk_wed_is_v1(dev->hw))
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
++ MTK_WED_BUF_SIZE - txd_size);
++ else
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
++ MTK_WED_BUF_SIZE - txd_size);
++ desc->ctrl = cpu_to_le32(ctrl);
++ desc->info = 0;
++ } else {
++ desc->ctrl = cpu_to_le32(token << 16);
++ }
+
++ desc_ptr += desc_size;
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+@@ -408,31 +434,31 @@ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+- struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- int page_idx;
+- int i;
++ struct mtk_wed_hw *hw = dev->hw;
++ int i, page_idx = 0;
+
+ if (!page_list)
+ return;
+
+- if (!desc)
++ if (!dev->tx_buf_ring.desc)
+ goto free_pagelist;
+
+- for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+- i += MTK_WED_BUF_PER_PAGE) {
+- dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
++ dma_addr_t page_phy = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+- dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+- desc, dev->tx_buf_ring.desc_phys);
++ dma_free_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
++ dev->tx_buf_ring.desc,
++ dev->tx_buf_ring.desc_phys);
+
+ free_pagelist:
+ kfree(page_list);
+@@ -517,13 +543,23 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (mtk_wed_is_v1(dev->hw))
++ switch (dev->hw->version) {
++ case 1:
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+- else
++ break;
++ case 2:
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ break;
++ case 3:
++ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
++ break;
++ default:
++ break;
++ }
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+@@ -535,6 +571,9 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ static void
+ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+ {
++ if (!mtk_wed_is_v2(dev->hw))
++ return;
++
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+@@ -609,6 +648,14 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw) &&
++ mtk_wed_get_rx_capa(dev)) {
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
++ MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
++ MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
+ }
+
+ mtk_wed_set_512_support(dev, false);
+@@ -651,6 +698,14 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
++ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
++ }
+ }
+
+ static void
+@@ -700,21 +755,37 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mutex_unlock(&hw_lock);
+ }
+
+-#define PCIE_BASE_ADDR0 0x11280000
+ static void
+ mtk_wed_bus_init(struct mtk_wed_device *dev)
+ {
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+- struct regmap *regs;
+
+- regs = syscon_regmap_lookup_by_phandle(np,
+- "mediatek,wed-pcie");
+- if (IS_ERR(regs))
+- break;
++ if (mtk_wed_is_v2(dev->hw)) {
++ struct regmap *regs;
++
++ regs = syscon_regmap_lookup_by_phandle(np,
++ "mediatek,wed-pcie");
++ if (IS_ERR(regs))
++ break;
+
+- regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ }
++
++ if (dev->wlan.msi) {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0xc08);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0xc04);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
++ } else {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0x180);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0x184);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++ }
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+@@ -722,19 +793,9 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+-
+- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+-
+- /* pcie interrupt status trigger register */
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+-
+- /* pola setting */
+- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
++ dev->hw->index));
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+@@ -772,18 +833,19 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ static void
+ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+ {
+- u32 mask, set;
++ u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
++ u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
+
+ mtk_wed_deinit(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
++ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
++ set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
++ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ }
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -931,11 +993,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_
+ }
+
+ /* configure RX_ROUTE_QM */
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ if (mtk_wed_is_v2(dev->hw)) {
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
++ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
++ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ }
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ }
+@@ -948,22 +1017,30 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+- wed_w32(dev, MTK_WED_TX_BM_CTRL,
+- MTK_WED_TX_BM_CTRL_PAUSE |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+- dev->tx_buf_ring.size / 128) |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+- MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+-
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+- } else {
++ } else if (mtk_wed_is_v2(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
++ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++ MTK_WED_TX_TKID_DYN_THR_HI);
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -973,9 +1050,6 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->tx_buf_ring.size / 128));
+- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+- MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
+@@ -985,26 +1059,62 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* switch to new bm architecture */
++ wed_clr(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_LEGACY_EN);
++
++ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
++ dev->wlan.nbuf / 128) |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
++ dev->wlan.nbuf / 128));
++ /* return SKBID + SDP back to bm */
++ wed_set(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
++
++ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
++ MTK_WED_TX_BM_PKT_CNT |
++ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
++ }
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+- } else {
+- wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- if (mtk_wed_get_rx_capa(dev)) {
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
+- }
++ } else if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ /* reset prefetch index of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ /* reset prefetch FIFO of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
++ if (!mtk_wed_is_v1(dev->hw))
++ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ }
+
+ static void
+@@ -1302,6 +1412,24 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ struct mtk_wdma_desc *desc = wdma->desc;
++ int i;
++
++ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
++ desc++;
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
++ desc++;
++ }
++ }
++
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+@@ -1367,6 +1495,9 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
++
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1419,33 +1550,60 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ {
+ int i;
+
+- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
++ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_TX_DMA_EN |
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
++ wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ } else {
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
++ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
++ }
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+- wdma_set(dev, MTK_WDMA_GLO_CFG,
+- MTK_WDMA_GLO_CFG_TX_DMA_EN |
+- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+-
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ return;
+ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
++
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
++
++ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+@@ -1457,11 +1615,22 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ }
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
+@@ -1501,6 +1670,12 @@ mtk_wed_start(struct mtk_wed_device *dev
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
++ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
++ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
++ }
++
+ if (mtk_wed_rro_cfg(dev))
+ return;
+ }
+@@ -1552,6 +1727,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+ dev->version = hw->version;
++ dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+@@ -1619,6 +1795,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
++ /* reset prefetch index */
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ /* reset prefetch FIFO */
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
++ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
++ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
++ }
++
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+@@ -1693,15 +1886,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_dev
+ static u32
+ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ {
+- u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++ u32 val, ext_mask;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ else
+- ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+- MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+@@ -1942,6 +2133,9 @@ void mtk_wed_add_hw(struct device_node *
+ case 2:
+ hw->soc = &mt7986_data;
+ break;
++ case 3:
++ hw->soc = &mt7988_data;
++ break;
+ default:
+ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -9,6 +9,8 @@
+ #include <linux/regmap.h>
+ #include <linux/netdevice.h>
+
++#include "mtk_wed_regs.h"
++
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
+@@ -19,6 +21,7 @@ struct mtk_wed_soc_data {
+ u32 reset_idx_tx_mask;
+ u32 reset_idx_rx_mask;
+ } regmap;
++ u32 tx_ring_desc_size;
+ u32 wdma_desc_size;
+ };
+
+@@ -35,6 +38,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+@@ -61,6 +65,16 @@ static inline bool mtk_wed_is_v2(struct
+ return hw->version == 2;
+ }
+
++static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
++{
++ return hw->version == 3;
++}
++
++static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
++{
++ return hw->version > 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+@@ -143,6 +157,21 @@ wpdma_txfree_w32(struct mtk_wed_device *
+ writel(val, dev->txfree_ring.wpdma + reg);
+ }
+
++static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return MTK_WED_PCIE_BASE;
++
++ switch (dev->hw->index) {
++ case 1:
++ return MTK_WED_PCIE_BASE1;
++ case 2:
++ return MTK_WED_PCIE_BASE2;
++ default:
++ return MTK_WED_PCIE_BASE0;
++ }
++}
++
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -331,10 +331,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ wo->hw->index + 1);
+
+ /* load firmware */
+- if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
+- fw_name = MT7981_FIRMWARE_WO;
+- else
+- fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
++ switch (wo->hw->version) {
++ case 2:
++ if (of_device_is_compatible(wo->hw->node,
++ "mediatek,mt7981-wed"))
++ fw_name = MT7981_FIRMWARE_WO;
++ else
++ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
++ : MT7986_FIRMWARE_WO0;
++ break;
++ case 3:
++ fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
++ : MT7988_FIRMWARE_WO0;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+@@ -355,15 +367,16 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ }
+
+ /* set the start address */
+- boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+- : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++ if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
++ boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
++ else
++ boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+- val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+- : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
++ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
++ MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ out:
+ release_firmware(fw);
+@@ -398,3 +411,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *
+ MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -13,6 +13,9 @@
+ #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+ #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
++#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
++#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
++
+ struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+@@ -37,6 +40,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+ #define MTK_WED_RESET_RX_RRO_QM BIT(20)
+ #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
++#define MTK_WED_RESET_TX_AMSDU BIT(22)
+ #define MTK_WED_RESET_WED BIT(31)
+
+ #define MTK_WED_CTRL 0x00c
+@@ -44,6 +48,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
++#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
++#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
++#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
+ #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+ #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+ #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+@@ -54,9 +61,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
++#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
++#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
++#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
++#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
+ #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+ #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+ #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
++#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
+
+ #define MTK_WED_EXT_INT_STATUS 0x020
+ #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+@@ -89,6 +101,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_MASK 0x028
+ #define MTK_WED_EXT_INT_MASK1 0x02c
+ #define MTK_WED_EXT_INT_MASK2 0x030
++#define MTK_WED_EXT_INT_MASK3 0x034
+
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+@@ -96,9 +109,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
++#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
++#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
+ #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+ #define MTK_WED_TX_BM_BASE 0x084
++#define MTK_WED_TX_BM_INIT_PTR 0x088
++#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
++#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
+
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+@@ -122,6 +140,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
++#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
++
+ #define MTK_WED_TX_TKID_DYN_THR 0x0e0
+ #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+ #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+@@ -199,12 +220,15 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
+
+ #define MTK_WED_WPDMA_RESET_IDX 0x50c
+ #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+@@ -250,9 +274,10 @@ struct mtk_wdma_desc {
+ #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+ #define MTK_WED_PCIE_INT_CTRL 0x57c
+-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+ #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
++#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
++#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
++#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
+
+ #define MTK_WED_WPDMA_CFG_BASE 0x580
+ #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+@@ -286,6 +311,20 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
++#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
++#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
++#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
++
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_RING_TX 0x800
+
+ #define MTK_WED_WDMA_TX_MIB 0x810
+@@ -293,6 +332,18 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+ #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
++#define MTK_WED_WDMA_RX_PREF_CFG 0x950
++#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
++#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
++#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++
++#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_GLO_CFG 0xa04
+ #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+ #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
+@@ -325,6 +376,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_INT_CTRL 0xa2c
++#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
+ #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_CFG_BASE 0xaa0
+@@ -388,6 +440,18 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
++#define MTK_WDMA_PREF_TX_CFG 0x2d0
++#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_PREF_RX_CFG 0x2dc
++#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
++
++#define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -401,6 +465,30 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
++#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
++#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
++
++#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
++#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
++#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
++
++#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
++#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
++#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
++
++#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
++#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
++#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
++
+ #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+ #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+ #define MTK_WED_RTQM_Q2N_MIB 0xb80
+@@ -409,6 +497,24 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q2B_MIB 0xb8c
+ #define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
++#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
++#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
++
++#define MTK_WED_RTQM_FDROP_MIB 0xb84
++#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
++#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
++#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
++#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
++#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
++#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
++
++#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
++#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
++#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
++#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
++#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
++#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
++
+ #define MTK_WED_RROQM_GLO_CFG 0xc04
+ #define MTK_WED_RROQM_RST_IDX 0xc08
+ #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+@@ -458,7 +564,116 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RX_BM_INTF 0xd9c
+ #define MTK_WED_RX_BM_ERR_STS 0xda8
+
++#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
++#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
++#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
++#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
++#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
++#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
++#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
++#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
++#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
++
++#define MTK_WED_RRO_CFG0 0xe10
++#define MTK_WED_RRO_CFG1 0xe14
++#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
++#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
++#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
++
++#define MTK_WED_ADDR_ELEM_CFG0 0xe18
++#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
++#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
++
++#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
++#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
++#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
++#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
++#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
++#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
++
++#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
++#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
++
++#define MTK_WED_PN_CHECK_CFG 0xe30
++#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
++#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
++#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
++#define MTK_WED_PN_CHECK_RD BIT(30)
++#define MTK_WED_PN_CHECK_WR BIT(31)
++
++#define MTK_WED_PN_CHECK_WDATA_M 0xe38
++#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
++
++#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
++
++#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
++#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
++#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
++
++#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
++
++#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
++
++#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
++#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
++#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
++#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
++
++#define MTK_WED_RRO_PG_BM_BASE 0xeb4
++#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
++#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
++#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
++
++#define MTK_WED_RX_IND_CMD_CNT0 0xf20
++#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
++
++#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
++#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
++
++#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
++#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
++#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
++#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
++
++#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
++
++#define MTK_WED_RX_PN_CHK_CNT 0xf70
++#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
++
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_PCIE_BASE 0x11280000
++#define MTK_WED_PCIE_BASE0 0x11300000
++#define MTK_WED_PCIE_BASE1 0x11310000
++#define MTK_WED_PCIE_BASE2 0x11290000
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
+ #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
+ #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+ #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
++#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
++#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
+
+ #define MTK_WO_MCU_CFG_LS_BASE 0
+ #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -139,6 +139,8 @@ struct mtk_wed_device {
+ u32 wpdma_rx;
+
+ bool wcid_512;
++ bool hw_rro;
++ bool msi;
+
+ u16 token_start;
+ unsigned int nbuf;
+@@ -212,10 +214,12 @@ mtk_wed_device_attach(struct mtk_wed_dev
+ return ret;
+ }
+
+-static inline bool
+-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
++static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ if (dev->version == 3)
++ return dev->wlan.hw_rro;
++
+ return dev->version != 1;
+ #else
+ return false;
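
The hunks above gate behavior on the WED hardware revision: mtk_wed_is_v3_or_greater() guards new code paths, mtk_wed_get_pcie_base() picks the per-instance PCIE window on v3 and falls back to the single legacy window otherwise, and mtk_wed_get_rx_capa() only reports rx support on v3 when the wlan driver sets hw_rro. A minimal standalone sketch of that dispatch, reusing the base-address constants from the patch (pcie_base() and main() are illustrative names, not driver code):

#include <stdio.h>

#define MTK_WED_PCIE_BASE	0x11280000
#define MTK_WED_PCIE_BASE0	0x11300000
#define MTK_WED_PCIE_BASE1	0x11310000
#define MTK_WED_PCIE_BASE2	0x11290000

/* mirrors mtk_wed_get_pcie_base(): pre-v3 hardware has a single PCIE
 * window, while on v3 the WED instance index selects one of three */
static unsigned int pcie_base(int version, int index)
{
	if (version < 3)
		return MTK_WED_PCIE_BASE;

	switch (index) {
	case 1:
		return MTK_WED_PCIE_BASE1;
	case 2:
		return MTK_WED_PCIE_BASE2;
	default:
		return MTK_WED_PCIE_BASE0;
	}
}

int main(void)
{
	printf("v2 wed0: %#x\n", pcie_base(2, 0));	/* 0x11280000 */
	printf("v3 wed1: %#x\n", pcie_base(3, 1));	/* 0x11310000 */
	return 0;
}
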
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:14 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: refactor mtk_wed_check_wfdma_rx_fill
+ routine
+
+Refactor mtk_wed_check_wfdma_rx_fill() so that it can be reused when
+adding HW receive offload support for the MT7988 SoC.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -585,22 +585,15 @@ mtk_wed_set_512_support(struct mtk_wed_d
+ }
+ }
+
+-#define MTK_WFMDA_RX_DMA_EN BIT(2)
+-static void
+-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
++static int
++mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
++ struct mtk_wed_ring *ring)
+ {
+- u32 val;
+ int i;
+
+- if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+- return; /* queue is not configured by mt76 */
+-
+ for (i = 0; i < 3; i++) {
+- u32 cur_idx;
++ u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
+
+- cur_idx = wed_r32(dev,
+- MTK_WED_WPDMA_RING_RX_DATA(idx) +
+- MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+@@ -609,12 +602,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_w
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+- return;
++ return -ETIMEDOUT;
+ }
+
+- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+- MTK_WFMDA_RX_DMA_EN;
+- wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
++ return 0;
+ }
+
+ static void
+@@ -1545,6 +1536,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+ }
+
++#define MTK_WFMDA_RX_DMA_EN BIT(2)
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
+@@ -1632,8 +1624,26 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_ring[i];
++ u32 val;
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue; /* queue is not configured by mt76 */
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
++ dev_err(dev->hw->dev,
++ "rx_ring(%d) dma enable failed\n", i);
++ continue;
++ }
++
++ val = wifi_r32(dev,
++ dev->wlan.wpdma_rx_glo -
++ dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
++ wifi_w32(dev,
++ dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
++ val);
++ }
+ }
+
+ static void
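
The refactor above changes mtk_wed_check_wfdma_rx_fill() from a void helper with a hard-coded ring lookup into one that takes the ring directly and reports -ETIMEDOUT, so callers decide which register writes follow. A standalone sketch of the resulting retry loop, with a stub standing in for the readl() of the ring's CPU index (read_cpu_idx() and the exact delay are illustrative, not driver code):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RX_RING_SIZE	1536

/* stand-in for readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX) */
static unsigned int read_cpu_idx(void)
{
	static unsigned int idx;

	idx += 600;	/* pretend the hardware keeps filling the ring */
	return idx > RX_RING_SIZE - 1 ? RX_RING_SIZE - 1 : idx;
}

/* mirrors the refactored mtk_wed_check_wfdma_rx_fill(): poll up to
 * three times for a fully filled ring, return an error code instead
 * of touching the wifi glo cfg register directly */
static int check_rx_fill(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (read_cpu_idx() == RX_RING_SIZE - 1)
			return 0;
		usleep(100 * 1000);	/* the driver also sleeps between polls */
	}

	return -ETIMEDOUT;
}

int main(void)
{
	printf("rx fill: %d\n", check_rx_fill());
	return 0;
}
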
--- /dev/null
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:15 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce partial AMSDU offload
+ support for MT7988
+
+Introduce partial AMSDU offload support for the MT7988 SoC in order to
+merge, in hw, packets belonging to the same AMSDU before passing them to
+the WLAN NIC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -438,7 +438,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e
+ }
+
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid)
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en)
+ {
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+@@ -450,6 +451,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
++ l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
+ break;
+ case 2:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -88,13 +88,13 @@ enum {
+ #define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
+ #define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
+
+-#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
+-#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
+-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
+-#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
+-#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
+-#define MTK_FOE_WINFO_PAO_HF BIT(23)
+-#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
++#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
++#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
++#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
++#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
++#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
++#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
++#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
+
+ enum {
+ MTK_FOE_STATE_INVALID,
+@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
+
+ /* netsys_v3 */
+ u32 w3info;
+- u32 wpao;
++ u32 amsdu;
+ };
+
+ /* software-only entry type */
+@@ -393,7 +393,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et
+ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid);
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en);
+ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
++ info->amsdu = path->mtk_wdma.amsdu;
+
+ return 0;
+ }
+@@ -192,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
+
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+- info.bss, info.wcid);
++ info.bss, info.wcid, info.amsdu);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -29,6 +29,8 @@
+ #define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
++#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
++#define MTK_WED_AMSDU_NPAGES 32
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -172,6 +174,23 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ return ret;
+ }
+
++static u32
++mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ return !!(wed_r32(dev, reg) & mask);
++}
++
++static int
++mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ int sleep = 15000;
++ int timeout = 100 * sleep;
++ u32 val;
++
++ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
++ timeout, false, dev, reg, mask);
++}
++
+ static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+@@ -335,6 +354,118 @@ out:
+ }
+
+ static int
++mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_hw *hw = dev->hw;
++ struct mtk_wed_amsdu *wed_amsdu;
++ int i;
++
++ if (!mtk_wed_is_v3_or_greater(hw))
++ return 0;
++
++ wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
++ sizeof(*wed_amsdu), GFP_KERNEL);
++ if (!wed_amsdu)
++ return -ENOMEM;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ void *ptr;
++
++ /* each segment is 64K */
++ ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
++ __GFP_ZERO | __GFP_COMP |
++ GFP_DMA32,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ if (!ptr)
++ goto error;
++
++ wed_amsdu[i].txd = ptr;
++ wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
++ MTK_WED_AMSDU_BUF_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
++ goto error;
++ }
++ dev->hw->wed_amsdu = wed_amsdu;
++
++ return 0;
++
++error:
++ for (i--; i >= 0; i--)
++ dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ return -ENOMEM;
++}
++
++static void
++mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i;
++
++ if (!wed_amsdu)
++ return;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ free_pages((unsigned long)wed_amsdu[i].txd,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ }
++}
++
++static int
++mtk_wed_amsdu_init(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i, ret;
++
++ if (!wed_amsdu)
++ return 0;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
++ wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
++ wed_amsdu[i].txd_phy);
++
++ /* init all sta parameters */
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
++ MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
++ dev->wlan.amsdu_max_len >> 8) |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
++ dev->wlan.amsdu_max_subframes));
++
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
++
++ ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
++ MTK_WED_AMSDU_STA_INFO_DO_INIT);
++ if (ret) {
++ dev_err(dev->hw->dev, "amsdu initialization failed\n");
++ return ret;
++ }
++
++ /* init partial amsdu offload txd src */
++ wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
++ FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
++
++ /* init qmem */
++ wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
++ ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
++ if (ret) {
++ pr_info("%s: amsdu qmem initialization failed\n", __func__);
++ return ret;
++ }
++
++ /* eagle E1 PCIE1 tx ring 22 flow control issue */
++ if (dev->wlan.id == 0x7991)
++ wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
++
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++
++ return 0;
++}
++
++static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
+@@ -708,6 +839,7 @@ __mtk_wed_detach(struct mtk_wed_device *
+
+ mtk_wdma_rx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
++ mtk_wed_amsdu_free_buffer(dev);
+ mtk_wed_free_tx_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+@@ -1128,23 +1260,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+ }
+ }
+
+-static u32
+-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- return !!(wed_r32(dev, reg) & mask);
+-}
+-
+-static int
+-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- int sleep = 15000;
+- int timeout = 100 * sleep;
+- u32 val;
+-
+- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+- timeout, false, dev, reg, mask);
+-}
+-
+ static int
+ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -1691,6 +1806,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
++ mtk_wed_amsdu_init(dev);
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+@@ -1747,6 +1863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+ if (ret)
+ goto out;
+
++ ret = mtk_wed_amsdu_buffer_alloc(dev);
++ if (ret)
++ goto out;
++
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -25,6 +25,11 @@ struct mtk_wed_soc_data {
+ u32 wdma_desc_size;
+ };
+
++struct mtk_wed_amsdu {
++ void *txd;
++ dma_addr_t txd_phy;
++};
++
+ struct mtk_wed_hw {
+ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+@@ -38,6 +43,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ struct mtk_wed_amsdu *wed_amsdu;
+ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+@@ -52,6 +58,7 @@ struct mtk_wdma_info {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -672,6 +672,82 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_AMSDU_FIFO 0x1800
++#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
++
++#define MTK_WED_AMSDU_STA_INFO 0x01810
++#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
++#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
++
++#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814
++#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
++#define MTK_WED_AMSDU_STA_RMVL BIT(1)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
++
++#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
++
++#define MTK_WED_AMSDU_PSE 0x1910
++#define MTK_WED_AMSDU_PSE_RESET BIT(16)
++
++#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
++#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
++
++#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
++
++#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
++#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
++
++#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
++
++#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
++
+ #define MTK_WED_PCIE_BASE 0x11280000
+ #define MTK_WED_PCIE_BASE0 0x11300000
+ #define MTK_WED_PCIE_BASE1 0x11310000
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -906,6 +906,7 @@ struct net_device_path {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ } mtk_wdma;
+ };
+ };
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -129,6 +129,7 @@ struct mtk_wed_device {
+ enum mtk_wed_bus_tye bus_type;
+ void __iomem *base;
+ u32 phy_base;
++ u32 id;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+@@ -147,10 +148,12 @@ struct mtk_wed_device {
+ unsigned int rx_nbuf;
+ unsigned int rx_npkt;
+ unsigned int rx_size;
++ unsigned int amsdu_max_len;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 txfree_tbit;
++ u8 amsdu_max_subframes;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+@@ -224,6 +227,15 @@ static inline bool mtk_wed_get_rx_capa(s
+ #else
+ return false;
+ #endif
++}
++
++static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
++{
++#ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ return dev->version == 3;
++#else
++ return false;
++#endif
+ }
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
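
mtk_wed_amsdu_init() above packs the per-station AMSDU limits into MTK_WED_AMSDU_STA_INFO_INIT with FIELD_PREP(), storing the maximum length in 256-byte units (hence the >> 8). A small userspace sketch of that register encoding, with local stand-ins for the kernel's GENMASK()/FIELD_PREP() and arbitrary example values:

#include <stdio.h>

/* userspace stand-ins for the kernel macros (gcc/clang builtins) */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define STA_WTBL_HDRT_MODE	(1u << 0)
#define STA_RMVL		(1u << 1)
#define STA_MAX_AMSDU_LEN	GENMASK(7, 2)
#define STA_MAX_AMSDU_NUM	GENMASK(11, 8)

int main(void)
{
	unsigned int amsdu_max_len = 1536, amsdu_max_subframes = 4;
	/* mirrors the MTK_WED_AMSDU_STA_INFO_INIT programming: the
	 * length field is expressed in 256-byte units */
	unsigned int val = STA_RMVL | STA_WTBL_HDRT_MODE |
			   FIELD_PREP(STA_MAX_AMSDU_LEN, amsdu_max_len >> 8) |
			   FIELD_PREP(STA_MAX_AMSDU_NUM, amsdu_max_subframes);

	printf("STA_INFO_INIT = %#x\n", val);	/* 0x41b */
	return 0;
}
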
--- /dev/null
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:16 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce hw_rro support for MT7988
+
+The MT7988 SoC supports 802.11 receive reordering offload in hw, while
+the MT7986 SoC implements it through the firmware running on the MCU.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -26,7 +26,7 @@
+ #define MTK_WED_BUF_SIZE 2048
+ #define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+-#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
++#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
+ #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
+@@ -596,6 +596,68 @@ free_pagelist:
+ }
+
+ static int
++mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
++{
++ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
++ struct mtk_wed_buf *page_list;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return 0;
++
++ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
++ if (!page_list)
++ return -ENOMEM;
++
++ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->hw_rro.pages = page_list;
++ desc = dma_alloc_coherent(dev->hw->dev,
++ dev->wlan.rx_nbuf * sizeof(*desc),
++ &desc_phys, GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++
++ dev->hw_rro.desc = desc;
++ dev->hw_rro.desc_phys = desc_phys;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t page_phys, buf_phys;
++ struct page *page;
++ int s;
++
++ page = __dev_alloc_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(dev->hw->dev, page_phys)) {
++ __free_page(page);
++ return -ENOMEM;
++ }
++
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
++ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++
++ buf_phys = page_phys;
++ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
++ desc->buf0 = cpu_to_le32(buf_phys);
++ buf_phys += MTK_WED_PAGE_BUF_SIZE;
++ desc++;
++ }
++
++ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ }
++
++ return 0;
++}
++
++static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_bm_desc *desc;
+@@ -612,7 +674,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+- return 0;
++ return mtk_wed_hwrro_buffer_alloc(dev);
++}
++
++static void
++mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
++ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ if (!page_list)
++ return;
++
++ if (!desc)
++ goto free_pagelist;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
++
++ if (!page)
++ break;
++
++ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ __free_page(page);
++ }
++
++ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
++ desc, dev->hw_rro.desc_phys);
++
++free_pagelist:
++ kfree(page_list);
+ }
+
+ static void
+@@ -626,6 +723,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_de
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
++
++ mtk_wed_hwrro_free_buffer(dev);
++}
++
++static void
++mtk_wed_hwrro_init(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
++ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
++ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
++ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
++ MTK_WED_RX_PG_BM_CNT));
++
++ /* enable rx_page_bm to fetch dmad */
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+ }
+
+ static void
+@@ -639,6 +758,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
++
++ mtk_wed_hwrro_init(dev);
+ }
+
+ static void
+@@ -934,6 +1055,8 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ return;
+@@ -951,6 +1074,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
++ dev->wlan.wpdma_rx_pg + i * 0x10);
+ }
+
+ static void
+@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ }
+
+ static void
++mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
++{
++ int i;
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
++ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
++
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[1]));
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[1]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[2]));
++
++ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
++ * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
++ */
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_rro_ring(%d) initialization failed\n", i);
++ }
++
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_page_ring(%d) initialization failed\n", i);
++ }
++}
++
++static void
++mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
++ void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static void
++mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static int
++mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
++ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
++ int i, count = 0;
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
++ readl(regs) & 0xfffffff0);
++
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++
++ /* ack sn cr */
++ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
++ dev->wlan.ind_cmd.ack_sn_addr);
++ wed_w32(dev, MTK_WED_RRO_CFG1,
++ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
++ dev->wlan.ind_cmd.win_size) |
++ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
++ dev->wlan.ind_cmd.particular_sid));
++
++ /* particular session addr element */
++ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
++ dev->wlan.ind_cmd.particular_se_phys);
++
++ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
++ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
++ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
++ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
++ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
++
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "write ba session base failed\n");
++ }
++
++ /* pn check init */
++ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
++ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
++ MTK_WED_PN_CHECK_IS_FIRST);
++
++ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
++ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
++
++ count = 0;
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "session(%d) initialization failed\n", i);
++ }
++
++ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++
++ return 0;
++}
++
++static void
+ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+ int i;
+@@ -2215,6 +2506,10 @@ void mtk_wed_add_hw(struct device_node *
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
++ .start_hw_rro = mtk_wed_start_hw_rro,
++ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
++ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
++ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -10,6 +10,7 @@
+
+ #define MTK_WED_TX_QUEUES 2
+ #define MTK_WED_RX_QUEUES 2
++#define MTK_WED_RX_PAGE_QUEUES 3
+
+ #define WED_WO_STA_REC 0x6
+
+@@ -99,6 +100,9 @@ struct mtk_wed_device {
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
++ struct mtk_wed_ring ind_cmd_ring;
+
+ struct {
+ int size;
+@@ -120,6 +124,13 @@ struct mtk_wed_device {
+ dma_addr_t fdbk_phys;
+ } rro;
+
++ struct {
++ int size;
++ struct mtk_wed_buf *pages;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ } hw_rro;
++
+ /* filled by driver: */
+ struct {
+ union {
+@@ -138,6 +149,8 @@ struct mtk_wed_device {
+ u32 wpdma_txfree;
+ u32 wpdma_rx_glo;
+ u32 wpdma_rx;
++ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
++ u32 wpdma_rx_pg;
+
+ bool wcid_512;
+ bool hw_rro;
+@@ -152,9 +165,20 @@ struct mtk_wed_device {
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
++ struct {
++ u8 se_group_nums;
++ u16 win_size;
++ u16 particular_sid;
++ u32 ack_sn_addr;
++ dma_addr_t particular_se_phys;
++ dma_addr_t addr_elem_phys[1024];
++ } ind_cmd;
++
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+@@ -193,6 +217,14 @@ struct mtk_wed_ops {
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
++ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
++ bool reset);
++ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
++ void __iomem *regs);
+ };
+
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -264,6 +296,15 @@ static inline bool mtk_wed_is_amsdu_supp
+ #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
++ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
++ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
++
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ {
+@@ -283,6 +324,10 @@ static inline bool mtk_wed_device_active
+ #define mtk_wed_device_stop(_dev) do {} while (0)
+ #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
+ #endif
+
+ #endif
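
The new hw_rro hooks follow the existing mtk_wed_ops pattern: the SoC driver fills an ops table in mtk_wed_add_hw(), and the wlan driver reaches it only through the mtk_wed_device_* wrapper macros, which collapse to stubs when CONFIG_NET_MEDIATEK_SOC_WED is off. A minimal sketch of that indirection with hypothetical names (wed_ops/wed_dev are not the real structures):

#include <stdbool.h>
#include <stdio.h>

struct wed_dev;

struct wed_ops {
	void (*start_hw_rro)(struct wed_dev *dev, unsigned int mask,
			     bool reset);
};

struct wed_dev {
	const struct wed_ops *ops;
};

static void start_hw_rro(struct wed_dev *dev, unsigned int mask, bool reset)
{
	printf("hw rro start: mask=%#x reset=%d\n", mask, reset);
}

/* filled once at registration time, as mtk_wed_add_hw() does */
static const struct wed_ops ops = {
	.start_hw_rro = start_hw_rro,
};

/* mirrors mtk_wed_device_start_hw_rro(): callers never see the ops
 * table directly, only the wrapper macro */
#define wed_device_start_hw_rro(_dev, _mask, _reset) \
	(_dev)->ops->start_hw_rro(_dev, _mask, _reset)

int main(void)
{
	struct wed_dev dev = { .ops = &ops };

	wed_device_start_hw_rro(&dev, 0xffff, false);
	return 0;
}
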
--- /dev/null
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:17 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: move wed_v2 specific regs
+ out of regs array
+
+Move WED 2.0-specific debugfs entries out of the regs array. This is a
+preliminary patch to introduce WED 3.0 debugfs info.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -151,7 +151,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+ static int
+ wed_rxinfo_show(struct seq_file *s, void *data)
+ {
+- static const struct reg_dump regs[] = {
++ static const struct reg_dump regs_common[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+@@ -169,7 +169,7 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+- DUMP_STR("WED RRO"),
++ DUMP_STR("WED WO RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+@@ -180,17 +180,6 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+- DUMP_STR("WED Route QM"),
+- DUMP_WED(WED_RTQM_R2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2H_MIB(1)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2N_MIB),
+- DUMP_WED(WED_RTQM_Q2B_MIB),
+- DUMP_WED(WED_RTQM_PFDBK_MIB),
+-
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+@@ -211,11 +200,25 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
++ static const struct reg_dump regs_wed_v2[] = {
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_R2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2H_MIB(1)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2N_MIB),
++ DUMP_WED(WED_RTQM_Q2B_MIB),
++ DUMP_WED(WED_RTQM_PFDBK_MIB),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (dev)
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev) {
++ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
++ dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ }
+
+ return 0;
+ }
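
Splitting regs into regs_common and regs_wed_v2 keeps the dump table-driven: each entry names a register (and, after the next patch, optionally a field mask), and one loop prints them all. A standalone sketch of that pattern with a stubbed register read (reg_read() and the sample entries are illustrative):

#include <stdio.h>

struct reg_dump {
	const char *name;
	unsigned short offset;
	unsigned int mask;	/* 0 = dump the whole register */
};

/* stand-in for wed_r32() */
static unsigned int reg_read(unsigned short offset)
{
	return 0xdead0000 | offset;
}

static void dump_regs(const struct reg_dump *regs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		unsigned int val = reg_read(regs[i].offset);

		if (regs[i].mask)	/* extract the field, FIELD_GET-style */
			val = (val & regs[i].mask) >>
			      __builtin_ctz(regs[i].mask);
		printf("%-24s %08x\n", regs[i].name, val);
	}
}

int main(void)
{
	static const struct reg_dump regs[] = {
		{ "WED_RTQM_Q2N_MIB", 0xb80, 0 },
		{ "WED_IND_CMD_MAX_CNT", 0xe0c, 0xfff },
	};

	dump_regs(regs, 2);
	return 0;
}
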
--- /dev/null
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:18 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: add WED 3.0 debugfs entries
+
+Introduce WED 3.0 debugfs entries useful for debugging.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -11,6 +11,7 @@ struct reg_dump {
+ u16 offset;
+ u8 type;
+ u8 base;
++ u32 mask;
+ };
+
+ enum {
+@@ -25,6 +26,8 @@ enum {
+
+ #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+ #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
++#define DUMP_REG_MASK(_reg, _mask) \
++ { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
+ #define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+@@ -32,6 +35,7 @@ enum {
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+ #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
++#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
+ #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+ #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+@@ -212,12 +216,58 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+ };
++ static const struct reg_dump regs_wed_v3[] = {
++ DUMP_STR("WED RX RRO DATA"),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
++
++ DUMP_STR("WED RX MSDU PAGE"),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
++
++ DUMP_STR("WED RX IND CMD"),
++ DUMP_WED(WED_IND_CMD_RX_CTRL1),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
++ WED_IND_CMD_PREFETCH_FREE_CNT),
++ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
++
++ DUMP_STR("WED ADDR ELEM"),
++ DUMP_WED(WED_ADDR_ELEM_CFG0),
++ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
++ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
++
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
++ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
++
++ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
++ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev) {
+ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
+- dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ if (mtk_wed_is_v2(hw))
++ dump_wed_regs(s, dev,
++ regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ else
++ dump_wed_regs(s, dev,
++ regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
+ }
+
+ return 0;
+@@ -225,6 +275,314 @@ wed_rxinfo_show(struct seq_file *s, void
+ DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+ static int
++wed_amsdu_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED AMDSU INFO"),
++ DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
++
++ DUMP_STR("WED AMDSU ENG0 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG1 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG2 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG3 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG4 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG5 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG6 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG7 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG8 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED QMEM INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
++
++ DUMP_STR("WED QMEM HEAD INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
++
++ DUMP_STR("WED QMEM TAIL INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
++
++ DUMP_STR("WED HIFTXD MSDU INFO"),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
++
++static int
++wed_rtqm_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS1(Legacy)"),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
++ DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
++
++static int
++wed_rro_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("RRO/IND CMD CNT"),
++ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
++ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
++ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
++
++ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
++ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
++ WED_ADDR_ELEM_SIG_FAIL_CNT),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
++ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
++ WED_PN_CHK_FAIL_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rro);
++
++static int
+ mtk_wed_reg_set(void *data, u64 val)
+ {
+ struct mtk_wed_hw *hw = data;
+@@ -266,7 +624,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (!mtk_wed_is_v1(hw))
++ if (!mtk_wed_is_v1(hw)) {
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
++ if (mtk_wed_is_v3_or_greater(hw)) {
++ debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
++ &wed_amsdu_fops);
++ debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
++ &wed_rtqm_fops);
++ debugfs_create_file_unsafe("rro", 0400, dir, hw,
++ &wed_rro_fops);
++ }
++ }
+ }
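The wed_amsdu/wed_rtqm/wed_rro handlers above are entirely table-driven: each reg_dump entry pairs a register offset (and optionally a field mask) with its stringified name, and a shared walker prints one line per entry. struct reg_dump and dump_wed_regs() are defined earlier in mtk_wed_debugfs.c and are not part of the quoted hunks, so the following is a minimal sketch under assumed field names rather than the exact in-tree layout:

enum { DUMP_TYPE_STRING, DUMP_TYPE_WED };

/* Sketch only: field names are assumptions, see mtk_wed_debugfs.c
 * for the real definitions. wed_r32() is the driver's WED register
 * read helper and is taken as given.
 */
struct reg_dump {
	const char *name;	/* stringified register/mask macro */
	u16 offset;		/* register offset */
	u8 type;		/* DUMP_TYPE_* */
	u32 mask;		/* non-zero for DUMP_WED_MASK entries */
};

#define DUMP_STR(_str)	{ .name = _str, .type = DUMP_TYPE_STRING }
#define DUMP_WED(_reg)	{ .name = #_reg, .offset = MTK_##_reg,	\
			  .type = DUMP_TYPE_WED }
#define DUMP_WED_MASK(_reg, _m)					\
			{ .name = #_m, .offset = MTK_##_reg,	\
			  .type = DUMP_TYPE_WED, .mask = MTK_##_m }

static void
dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
	      const struct reg_dump *regs, int n_regs)
{
	int i;

	for (i = 0; i < n_regs; i++) {
		u32 val;

		if (regs[i].type == DUMP_TYPE_STRING) {
			seq_printf(s, "%s:\n", regs[i].name);
			continue;
		}

		val = wed_r32(dev, regs[i].offset);
		if (regs[i].mask)	/* extract just the named field */
			val = (val & regs[i].mask) >> __ffs(regs[i].mask);
		seq_printf(s, "%-32s %08x\n", regs[i].name, val);
	}
}

Adding a new counter to any of these debugfs files is then a one-line table entry, which is why the wed_amsdu table can enumerate nine identical engines without touching the print logic.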
--- /dev/null
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:19 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add wed 3.0 reset support
+
+Introduce support for resetting Wireless Ethernet Dispatcher 3.0
+available on MT7988 SoC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
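The new reset helpers follow one recipe per engine: clear the enable bit, poll the matching BUSY bit, then pulse the *_CLEAR bits (write, then clear) to flush FIFO and ring state. The polling step uses read_poll_timeout() from <linux/iopoll.h>. A minimal sketch of that step follows; mtk_wed_device, wdma_r32() and wdma_clr() are taken as given from the surrounding driver, and the 0/10000 arguments mirror the patch (no delay between reads, 10 ms timeout):

#include <linux/iopoll.h>

/* Sketch: stop one WDMA engine and wait for it to drain, the
 * pattern repeated per prefetch/writeback unit in the hunks below.
 */
static int
mtk_wdma_disable_and_drain(struct mtk_wed_device *dev, u32 reg,
			   u32 en_bit, u32 busy_bit)
{
	u32 status;

	wdma_clr(dev, reg, en_bit);	/* 1. stop the engine */

	/* 2. wait for BUSY to clear; returns -ETIMEDOUT on failure */
	return read_poll_timeout(wdma_r32, status, !(status & busy_bit),
				 0, 10000, false, dev, reg);
}

Note that the helpers below deliberately continue after a poll timeout and only dev_err(): a stuck engine is still flushed by the subsequent FIFO and ring-status CLEAR pulses.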
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -148,6 +148,90 @@ mtk_wdma_read_reset(struct mtk_wed_devic
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+ }
+
++static void
++mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++}
++
+ static int
+ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -160,6 +244,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
++ mtk_wdma_v3_rx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -192,6 +277,84 @@ mtk_wed_poll_busy(struct mtk_wed_device
+ }
+
+ static void
++mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++}
++
++static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+@@ -202,6 +365,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
++ mtk_wdma_v3_tx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -1405,13 +1569,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret)
+ return ret;
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
++ MTK_WED_RX_IND_CMD_BUSY);
++ mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
++ if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
++ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+@@ -1439,23 +1623,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
++ if (dev->wlan.hw_rro) {
++ /* disable rro msdu page drv */
++ wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ /* disable rro data drv */
++ wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++
++ /* rro msdu page drv reset */
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ /* rro data drv reset */
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ }
++
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+- if (ret)
++ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+- else
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- MTK_WED_RTQM_Q_RST);
++ } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
++ wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
++ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ }
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+- mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+- MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
++ MTK_WED_WPDMA_STATUS_TX_DRV);
++ else
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+@@ -1476,6 +1689,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
++ wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ }
++
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+@@ -1493,6 +1714,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
++ mtk_wed_hwrro_free_buffer(dev);
+
+ return 0;
+ }
+@@ -1526,15 +1748,41 @@ mtk_wed_reset_dma(struct mtk_wed_device
+
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
++ wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
++ val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
++ wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
++ } else {
++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ }
++
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
++ if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
++ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++
++ /* 2. Reset dma index */
++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
++ MTK_WED_WDMA_RESET_IDX_RX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+@@ -1550,8 +1798,13 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ for (i = 0; i < 100; i++) {
+- val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+- if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
++ if (mtk_wed_is_v1(dev->hw))
++ val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_BM_INTF));
++ else
++ val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_TKID_INTF));
++ if (val == 0x40)
+ break;
+ }
+
+@@ -1573,6 +1826,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+@@ -1589,7 +1844,14 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+- mtk_wed_rx_reset(dev);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* reset amsdu engine */
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
++ }
++
++ if (mtk_wed_get_rx_capa(dev))
++ mtk_wed_rx_reset(dev);
+ }
+
+ static int
+@@ -1841,6 +2103,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
+
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ }
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+@@ -1904,6 +2167,12 @@ mtk_wed_start_hw_rro(struct mtk_wed_devi
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
++ if (reset) {
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++ return;
++ }
++
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -28,6 +28,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET 0x008
+ #define MTK_WED_RESET_TX_BM BIT(0)
+ #define MTK_WED_RESET_RX_BM BIT(1)
++#define MTK_WED_RESET_RX_PG_BM BIT(2)
++#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
+ #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+ #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+ #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+@@ -106,6 +108,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+
++#define MTK_WED_WPDMA_STATUS 0x068
++#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
++
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+@@ -140,6 +145,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_INTF 0x0dc
++#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
++
+ #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
+
+@@ -190,6 +198,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+
+ #define MTK_WED_SCR0 0x3c0
++#define MTK_WED_RX1_CTRL2 0x418
+ #define MTK_WED_WPDMA_INT_TRIGGER 0x504
+ #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+ #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+@@ -303,6 +312,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+ #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
++#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+@@ -313,6 +323,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
+ #define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
+ #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
+
+@@ -334,11 +345,13 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RX_PREF_CFG 0x950
+ #define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
+ #define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
+ #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
+ #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
+ #define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
+
+ #define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
+ #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
+@@ -367,6 +380,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RESET_IDX 0xa08
+ #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
++#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
+ #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+ #define MTK_WED_WDMA_INT_CLR 0xa24
+@@ -437,21 +451,62 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+ #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
++#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
++
++#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
++
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
+ #define MTK_WDMA_PREF_TX_CFG 0x2d0
+ #define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
+
+ #define MTK_WDMA_PREF_RX_CFG 0x2dc
+ #define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
++
++#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
++#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
+ #define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
++
+ #define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
++
++#define MTK_WDMA_WRBK_SIDX_CFG 0x388
++#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -465,6 +520,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_RST 0xb04
++
+ #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
+ #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
+ #define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
+@@ -653,6 +710,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+
++#define MTK_WED_RRO_RX_HW_STS 0xf00
++#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
++
+ #define MTK_WED_RX_IND_CMD_CNT0 0xf20
+ #define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
+
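All of the MTK_WED_* bits defined above are driven through thin regmap-backed accessors in mtk_wed.c, and the set/clear pulse pairs in the reset paths (for example MTK_WED_RESET_RX_PG_BM) map onto them directly. A sketch of that accessor layer, assuming hw->regs is the WED regmap as elsewhere in the driver:

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, 0, mask);	/* set every bit in mask */
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, mask, 0);	/* clear every bit in mask */
}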
+++ /dev/null
-From 65258b9d8cde45689bdc86ca39b50f01f983733b Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Fri, 19 Nov 2021 03:03:50 +0100
-Subject: [PATCH] net: dsa: qca8k: fix MTU calculation
-
-qca8k has a global MTU, so its tracking the MTU per port to make sure
-that the largest MTU gets applied.
-Since it uses the frame size instead of MTU the driver MTU change function
-will then add the size of Ethernet header and checksum on top of MTU.
-
-The driver currently populates the per port MTU size as Ethernet frame
-length + checksum which equals 1518.
-
-The issue is that then MTU change function will go through all of the
-ports, find the largest MTU and apply the Ethernet header + checksum on
-top of it again, so for a desired MTU of 1500 you will end up with 1536.
-
-This is obviously incorrect, so to correct it populate the per port struct
-MTU with just the MTU and not include the Ethernet header + checksum size
-as those will be added by the MTU change function.
-
-Fixes: f58d2598cf70 ("net: dsa: qca8k: implement the port MTU callbacks")
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
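As a worked example of the double-add described above (illustrative only, using the constants from <linux/if_ether.h>): with a 14-byte Ethernet header and a 4-byte FCS, a 1500-byte MTU corresponds to a 1518-byte frame, so seeding port_mtu[] with the frame size makes the MTU callback add the 18 bytes of overhead a second time:

#include <linux/if_ether.h>	/* ETH_DATA_LEN=1500, ETH_FRAME_LEN=1514,
				 * ETH_HLEN=14, ETH_FCS_LEN=4
				 */

/* before the fix: port_mtu[] seeded with a frame size */
int seeded = ETH_FRAME_LEN + ETH_FCS_LEN;		/* 1514 + 4  = 1518 */
int bad    = seeded + ETH_HLEN + ETH_FCS_LEN;		/* 1518 + 18 = 1536 */

/* after the fix: port_mtu[] holds a plain MTU */
int good   = ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN;	/* 1500 + 18 = 1518 */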
- drivers/net/dsa/qca8k.c | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds)
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
-+ * Set per port MTU to 1500 as the MTU change function
-+ * will add the overhead and if its set to 1518 then it
-+ * will apply the overhead again and we will end up with
-+ * MTU of 1536 instead of 1518
- */
-- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
-+ priv->port_mtu[i] = ETH_DATA_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
+++ /dev/null
-From b9133f3ef5a2659730cf47a74bd0a9259f1cf8ff Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:40 +0100
-Subject: net: dsa: qca8k: remove redundant check in parse_port_config
-
-The very next check for port 0 and 6 already makes sure we don't go out
-of bounds with the ports_config delay table.
-Remove the redundant check.
-
-Reported-by: kernel test robot <lkp@intel.com>
-Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -983,7 +983,7 @@ qca8k_parse_port_config(struct qca8k_pri
- u32 delay;
-
- /* We have 2 CPU port. Check them */
-- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
-+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
- /* Skip every other port */
- if (port != 0 && port != 6)
- continue;
+++ /dev/null
-From 90ae68bfc2ffcb54a4ba4f64edbeb84a80cbb57c Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:41 +0100
-Subject: net: dsa: qca8k: convert to GENMASK/FIELD_PREP/FIELD_GET
-
-Convert and try to standardize bit fields using
-GENMASK/FIELD_PREP/FIELD_GET macros. Rework some logic to support the
-standard macro and tidy things up. No functional change intended.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
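The conversion is mechanical: every open-coded pair of shift and width becomes one GENMASK() definition, and FIELD_PREP()/FIELD_GET() derive the shift from the mask at compile time. A minimal before/after sketch (the EXAMPLE_* name is invented; the mask matches QCA8K_ATU_VID_MASK below):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_VID_MASK	GENMASK(19, 8)	/* vid lives in bits 19..8 */

static u16 example_get_vid(u32 reg)
{
	/* before: (reg >> 8) & 0xfff, shift and width tracked by hand */
	return FIELD_GET(EXAMPLE_VID_MASK, reg);
}

static u32 example_set_vid(u16 vid)
{
	/* before: (vid & 0xfff) << 8 */
	return FIELD_PREP(EXAMPLE_VID_MASK, vid);
}

FIELD_FIT(), used below for the RGMII delay bounds checks, is the same idea in reverse: it tests whether a value fits in the field, replacing the hand-written QCA8K_MAX_DELAY comparison.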
- drivers/net/dsa/qca8k.c | 98 +++++++++++++++----------------
- drivers/net/dsa/qca8k.h | 153 ++++++++++++++++++++++++++----------------------
- 2 files changed, 130 insertions(+), 121 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -9,6 +9,7 @@
- #include <linux/module.h>
- #include <linux/phy.h>
- #include <linux/netdevice.h>
-+#include <linux/bitfield.h>
- #include <net/dsa.h>
- #include <linux/of_net.h>
- #include <linux/of_mdio.h>
-@@ -319,18 +320,18 @@ qca8k_fdb_read(struct qca8k_priv *priv,
- }
-
- /* vid - 83:72 */
-- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
-+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
- /* aging - 67:64 */
-- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
-+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
- /* portmask - 54:48 */
-- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
-+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
- /* mac - 47:0 */
-- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
-- fdb->mac[1] = reg[1] & 0xff;
-- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
-- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
-- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
-- fdb->mac[5] = reg[0] & 0xff;
-+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
-+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
-+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
-+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
-+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
-+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
-
- return 0;
- }
-@@ -343,18 +344,18 @@ qca8k_fdb_write(struct qca8k_priv *priv,
- int i;
-
- /* vid - 83:72 */
-- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
-+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
- /* aging - 67:64 */
-- reg[2] |= aging & QCA8K_ATU_STATUS_M;
-+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
- /* portmask - 54:48 */
-- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
-+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
- /* mac - 47:0 */
-- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
-- reg[1] |= mac[1];
-- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
-- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
-- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
-- reg[0] |= mac[5];
-+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
-+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
-+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
-+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
-+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
-+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
-
- /* load the array into the ARL table */
- for (i = 0; i < 3; i++)
-@@ -372,7 +373,7 @@ qca8k_fdb_access(struct qca8k_priv *priv
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
-- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
-+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
- }
-
- /* Write the function register triggering the table access */
-@@ -454,7 +455,7 @@ qca8k_vlan_access(struct qca8k_priv *pri
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
-- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
-+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
-@@ -500,13 +501,11 @@ qca8k_vlan_add(struct qca8k_priv *priv,
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
-- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
-+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
- if (untagged)
-- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
-- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
- else
-- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
-- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
-@@ -534,15 +533,13 @@ qca8k_vlan_del(struct qca8k_priv *priv,
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
-- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
-- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
-- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
-+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
-- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
-- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
-+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
-
- if ((reg & mask) != mask) {
- del = false;
-@@ -1014,7 +1011,7 @@ qca8k_parse_port_config(struct qca8k_pri
- mode == PHY_INTERFACE_MODE_RGMII_TXID)
- delay = 1;
-
-- if (delay > QCA8K_MAX_DELAY) {
-+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-@@ -1030,7 +1027,7 @@ qca8k_parse_port_config(struct qca8k_pri
- mode == PHY_INTERFACE_MODE_RGMII_RXID)
- delay = 2;
-
-- if (delay > QCA8K_MAX_DELAY) {
-+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-@@ -1141,8 +1138,8 @@ qca8k_setup(struct dsa_switch *ds)
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
-- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
-- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
-+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
-+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
-@@ -1159,10 +1156,10 @@ qca8k_setup(struct dsa_switch *ds)
- * for igmp, unknown, multicast and broadcast packet
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
-- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
-- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
-- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
-- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
-+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
-+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
-+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
-+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
- if (ret)
- return ret;
-
-@@ -1180,8 +1177,6 @@ qca8k_setup(struct dsa_switch *ds)
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
-- int shift = 16 * (i % 2);
--
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
-@@ -1198,8 +1193,8 @@ qca8k_setup(struct dsa_switch *ds)
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
-- 0xfff << shift,
-- QCA8K_PORT_VID_DEF << shift);
-+ QCA8K_EGREES_VLAN_PORT_MASK(i),
-+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
-
-@@ -1246,7 +1241,7 @@ qca8k_setup(struct dsa_switch *ds)
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
-- QCA8K_PORT_HOL_CTRL1_ING_BUF |
-+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
-@@ -1269,8 +1264,8 @@ qca8k_setup(struct dsa_switch *ds)
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
-- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
-- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
-+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
-+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
- mask);
- }
-
-@@ -1935,11 +1930,11 @@ qca8k_port_vlan_filtering(struct dsa_swi
-
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-- QCA8K_PORT_LOOKUP_VLAN_MODE,
-+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
-- QCA8K_PORT_LOOKUP_VLAN_MODE,
-+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
-@@ -1963,10 +1958,9 @@ qca8k_port_vlan_add(struct dsa_switch *d
- }
-
- if (pvid) {
-- int shift = 16 * (port % 2);
--
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
-- 0xfff << shift, vlan->vid << shift);
-+ QCA8K_EGREES_VLAN_PORT_MASK(port),
-+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
- if (ret)
- return ret;
-
-@@ -2060,7 +2054,7 @@ static int qca8k_read_switch_id(struct q
- if (ret < 0)
- return -ENODEV;
-
-- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
-+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
-@@ -2069,7 +2063,7 @@ static int qca8k_read_switch_id(struct q
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
-- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
-+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
-
- return 0;
- }
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -30,9 +30,9 @@
- /* Global control registers */
- #define QCA8K_REG_MASK_CTRL 0x000
- #define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
--#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
-+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
- #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
--#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
-+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
- #define QCA8K_REG_PORT0_PAD_CTRL 0x004
- #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
- #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
-@@ -41,12 +41,11 @@
- #define QCA8K_REG_PORT6_PAD_CTRL 0x00c
- #define QCA8K_PORT_PAD_RGMII_EN BIT(26)
- #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
--#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
-+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
- #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
--#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
-+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
- #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
- #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
--#define QCA8K_MAX_DELAY 3
- #define QCA8K_PORT_PAD_SGMII_EN BIT(7)
- #define QCA8K_REG_PWS 0x010
- #define QCA8K_PWS_POWER_ON_SEL BIT(31)
-@@ -68,10 +67,12 @@
- #define QCA8K_MDIO_MASTER_READ BIT(27)
- #define QCA8K_MDIO_MASTER_WRITE 0
- #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
--#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
--#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
--#define QCA8K_MDIO_MASTER_DATA(x) (x)
-+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
-+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
-+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
-+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
- #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
-+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
- #define QCA8K_MDIO_MASTER_MAX_PORTS 5
- #define QCA8K_MDIO_MASTER_MAX_REG 32
- #define QCA8K_GOL_MAC_ADDR0 0x60
-@@ -93,9 +94,7 @@
- #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
- #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
- #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
--#define QCA8K_PORT_HDR_CTRL_RX_S 2
- #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
--#define QCA8K_PORT_HDR_CTRL_TX_S 0
- #define QCA8K_PORT_HDR_CTRL_ALL 2
- #define QCA8K_PORT_HDR_CTRL_MGMT 1
- #define QCA8K_PORT_HDR_CTRL_NONE 0
-@@ -105,10 +104,11 @@
- #define QCA8K_SGMII_EN_TX BIT(3)
- #define QCA8K_SGMII_EN_SD BIT(4)
- #define QCA8K_SGMII_CLK125M_DELAY BIT(7)
--#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
--#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
--#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
--#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
-+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
-+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
-+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
-+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
-+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
-
- /* MAC_PWR_SEL registers */
- #define QCA8K_REG_MAC_PWR_SEL 0x0e4
-@@ -121,100 +121,115 @@
-
- /* ACL registers */
- #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
--#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
--#define QCA8K_PORT_VLAN_SVID(x) x
-+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
-+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
-+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
-+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
- #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
- #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
- #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
-
- /* Lookup registers */
- #define QCA8K_REG_ATU_DATA0 0x600
--#define QCA8K_ATU_ADDR2_S 24
--#define QCA8K_ATU_ADDR3_S 16
--#define QCA8K_ATU_ADDR4_S 8
-+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
-+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
-+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
-+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
- #define QCA8K_REG_ATU_DATA1 0x604
--#define QCA8K_ATU_PORT_M 0x7f
--#define QCA8K_ATU_PORT_S 16
--#define QCA8K_ATU_ADDR0_S 8
-+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
-+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
-+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
- #define QCA8K_REG_ATU_DATA2 0x608
--#define QCA8K_ATU_VID_M 0xfff
--#define QCA8K_ATU_VID_S 8
--#define QCA8K_ATU_STATUS_M 0xf
-+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
-+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
- #define QCA8K_ATU_STATUS_STATIC 0xf
- #define QCA8K_REG_ATU_FUNC 0x60c
- #define QCA8K_ATU_FUNC_BUSY BIT(31)
- #define QCA8K_ATU_FUNC_PORT_EN BIT(14)
- #define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
- #define QCA8K_ATU_FUNC_FULL BIT(12)
--#define QCA8K_ATU_FUNC_PORT_M 0xf
--#define QCA8K_ATU_FUNC_PORT_S 8
-+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
- #define QCA8K_REG_VTU_FUNC0 0x610
- #define QCA8K_VTU_FUNC0_VALID BIT(20)
- #define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
--#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
--#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
--#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
--#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
--#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
--#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
-+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
-+ * It does contain VLAN_MODE for each port [5:4] for port0,
-+ * [7:6] for port1 ... [17:16] for port6. Use virtual port
-+ * define to handle this.
-+ */
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
-+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
-+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
-+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
-+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
-+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
-+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
- #define QCA8K_REG_VTU_FUNC1 0x614
- #define QCA8K_VTU_FUNC1_BUSY BIT(31)
--#define QCA8K_VTU_FUNC1_VID_S 16
-+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
- #define QCA8K_VTU_FUNC1_FULL BIT(4)
- #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
- #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
- #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
--#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
--#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
--#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
--#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
-+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
-+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
-+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
-+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
- #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
- #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
--#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
--#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
--#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
--#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
--#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
-+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
- #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
--#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
--#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
--#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
--#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
--#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
--#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
-+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
-+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
-+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
-+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
-+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
-+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
- #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
-
- #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
--#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
--#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
--#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
--#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
-+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
-+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
-+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
-+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
-
- #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
--#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
--#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
--#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
-+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
-
- #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
--#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
--#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
-+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
-+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
- #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
- #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
- #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
- #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
-
- /* Pkt edit registers */
-+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
-+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
-+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
- #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
-
- /* L3 registers */
+++ /dev/null
-From 994c28b6f971fa5db8ae977daea37eee87d93d51 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:42 +0100
-Subject: net: dsa: qca8k: remove extra mutex_init in qca8k_setup
-
-Mutex is already init in sw_probe. Remove the extra init in qca8k_setup.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1086,8 +1086,6 @@ qca8k_setup(struct dsa_switch *ds)
- if (ret)
- return ret;
-
-- mutex_init(&priv->reg_mutex);
--
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
- &qca8k_regmap_config);
+++ /dev/null
-From 36b8af12f424e7a7f60a935c60a0fd4aa0822378 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:43 +0100
-Subject: net: dsa: qca8k: move regmap init in probe and set it mandatory
-
-In preparation for regmap conversion, move regmap init in the probe
-function and make it mandatory as any read/write/rmw operation will be
-converted to regmap API.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 14 ++++++++------
- 1 file changed, 8 insertions(+), 6 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1086,12 +1086,6 @@ qca8k_setup(struct dsa_switch *ds)
- if (ret)
- return ret;
-
-- /* Start by setting up the register mapping */
-- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
-- &qca8k_regmap_config);
-- if (IS_ERR(priv->regmap))
-- dev_warn(priv->dev, "regmap initialization failed");
--
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-@@ -2096,6 +2090,14 @@ qca8k_sw_probe(struct mdio_device *mdiod
- gpiod_set_value_cansleep(priv->reset_gpio, 0);
- }
-
-+ /* Start by setting up the register mapping */
-+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
-+ &qca8k_regmap_config);
-+ if (IS_ERR(priv->regmap)) {
-+ dev_err(priv->dev, "regmap initialization failed");
-+ return PTR_ERR(priv->regmap);
-+ }
-+
- /* Check the detected switch id */
- ret = qca8k_read_switch_id(priv);
- if (ret)
+++ /dev/null
-From 8b5f3f29a81a71934d004e21a1292c1148b05926 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:44 +0100
-Subject: net: dsa: qca8k: initial conversion to regmap helper
-
-Convert every qca8k set/clear/poll helper to the regmap helpers and add
-the missing config to the regmap_config struct.
-The read/write/rmw operations are reworked to use the regmap helpers
-internally to keep the delta of this patch low. These additional
-functions will then be dropped when the code split is proposed.
-
-The IPQ40xx SoC has an internal switch based on the qca8k regmap but uses
-mmio instead of mdio for the read/write/rmw operations.
-In preparation for supporting this internal switch, convert the
-driver to the regmap API so the driver can later be split into common and
-specific code. The overhead introduced by the regmap API is marginal, as
-the internal mdio will bypass it by using its direct access, and regmap
-will be used only by configuration functions or fdb access.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 107 +++++++++++++++++++++---------------------------
- 1 file changed, 47 insertions(+), 60 deletions(-)
-
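For context, the shape of this conversion: regmap is handed bus-specific reg_read/reg_write callbacks, so the qca8k wrappers collapse into one-line regmap calls. A minimal sketch, assuming hypothetical my_bus_read/my_bus_write accessors that stand in for the paged MDIO helpers (names and the probe wrapper are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical bus accessors standing in for the paged MDIO helpers */
static int my_bus_read(void *ctx, unsigned int reg, unsigned int *val);
static int my_bus_write(void *ctx, unsigned int reg, unsigned int val);

static const struct regmap_config my_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac,
	.reg_read = my_bus_read,	/* regmap calls back into the bus */
	.reg_write = my_bus_write,
	.disable_locking = true,	/* locking stays in the bus helpers */
};

static int my_probe(struct device *dev, void *priv_ctx)
{
	/* bus argument is NULL: regmap uses the callbacks above */
	struct regmap *map = devm_regmap_init(dev, NULL, priv_ctx,
					      &my_regmap_config);

	return PTR_ERR_OR_ZERO(map);
}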
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -10,6 +10,7 @@
- #include <linux/phy.h>
- #include <linux/netdevice.h>
- #include <linux/bitfield.h>
-+#include <linux/regmap.h>
- #include <net/dsa.h>
- #include <linux/of_net.h>
- #include <linux/of_mdio.h>
-@@ -152,6 +153,25 @@ qca8k_set_page(struct mii_bus *bus, u16
- static int
- qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
- {
-+ return regmap_read(priv->regmap, reg, val);
-+}
-+
-+static int
-+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-+{
-+ return regmap_write(priv->regmap, reg, val);
-+}
-+
-+static int
-+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-+{
-+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
-+}
-+
-+static int
-+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-+{
-+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-@@ -172,8 +192,9 @@ exit:
- }
-
- static int
--qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
- {
-+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-@@ -194,8 +215,9 @@ exit:
- }
-
- static int
--qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
- {
-+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- u32 val;
-@@ -223,34 +245,6 @@ exit:
- return ret;
- }
-
--static int
--qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
--{
-- return qca8k_rmw(priv, reg, 0, val);
--}
--
--static int
--qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
--{
-- return qca8k_rmw(priv, reg, val, 0);
--}
--
--static int
--qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
--{
-- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
--
-- return qca8k_read(priv, reg, val);
--}
--
--static int
--qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
--{
-- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
--
-- return qca8k_write(priv, reg, val);
--}
--
- static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
-@@ -282,26 +276,19 @@ static struct regmap_config qca8k_regmap
- .max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
-+ .reg_update_bits = qca8k_regmap_update_bits,
- .rd_table = &qca8k_readable_table,
-+ .disable_locking = true, /* Locking is handled by qca8k read/write */
-+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
- };
-
- static int
- qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
- {
-- int ret, ret1;
- u32 val;
-
-- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
-- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
-- priv, reg, &val);
--
-- /* Check if qca8k_read has failed for a different reason
-- * before returning -ETIMEDOUT
-- */
-- if (ret < 0 && ret1 < 0)
-- return ret1;
--
-- return ret;
-+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
-+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
- }
-
- static int
-@@ -568,7 +555,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
- int ret;
-
- mutex_lock(&priv->reg_mutex);
-- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
-+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
-@@ -576,7 +563,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
- if (ret)
- goto exit;
-
-- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
-+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
-@@ -597,9 +584,9 @@ qca8k_port_set_status(struct qca8k_priv
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
-- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
-+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
- else
-- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
-+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
- }
-
- static u32
-@@ -861,8 +848,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv *
- * a dt-overlay and driver reload changed the configuration
- */
-
-- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
-- QCA8K_MDIO_MASTER_EN);
-+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
-+ QCA8K_MDIO_MASTER_EN);
- }
-
- /* Check if the devicetree declare the port:phy mapping */
-@@ -1099,16 +1086,16 @@ qca8k_setup(struct dsa_switch *ds)
- return ret;
-
- /* Make sure MAC06 is disabled */
-- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
-- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
-+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
-+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
- if (ret) {
- dev_err(priv->dev, "failed disabling MAC06 exchange");
- return ret;
- }
-
- /* Enable CPU Port */
-- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
-- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
-+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
-@@ -1176,8 +1163,8 @@ qca8k_setup(struct dsa_switch *ds)
- return ret;
-
- /* Enable ARP Auto-learning by default */
-- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
-- QCA8K_PORT_LOOKUP_LEARN);
-+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
-+ QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
-@@ -1745,9 +1732,9 @@ qca8k_port_bridge_join(struct dsa_switch
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
-- ret = qca8k_reg_set(priv,
-- QCA8K_PORT_LOOKUP_CTRL(i),
-- BIT(port));
-+ ret = regmap_set_bits(priv->regmap,
-+ QCA8K_PORT_LOOKUP_CTRL(i),
-+ BIT(port));
- if (ret)
- return ret;
- if (i != port)
-@@ -1777,9 +1764,9 @@ qca8k_port_bridge_leave(struct dsa_switc
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
-- qca8k_reg_clear(priv,
-- QCA8K_PORT_LOOKUP_CTRL(i),
-- BIT(port));
-+ regmap_clear_bits(priv->regmap,
-+ QCA8K_PORT_LOOKUP_CTRL(i),
-+ BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
+++ /dev/null
-From c126f118b330ccf0db0dda4a4bd6c729865a205f Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:45 +0100
-Subject: net: dsa: qca8k: add additional MIB counter and make it dynamic
-
-We are currently missing 2 additional MIB counters present in the QCA833x
-switch.
-QCA832x switches have 39 MIB counters and QCA833x switches have 41.
-Add the additional MIB counters and rework the MIB functions to report
-the correct number of supported counters from the match_data struct.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++---
- drivers/net/dsa/qca8k.h | 4 ++++
- 2 files changed, 24 insertions(+), 3 deletions(-)
-
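The pattern being introduced, in isolation: the per-variant counter count moves into the match data, fetched via of_device_get_match_data(), so the ethtool callbacks stop hardcoding ARRAY_SIZE(). A rough sketch with illustrative names (my_match_data, my_get_sset_count), not the patch verbatim:

#include <linux/ethtool.h>
#include <linux/of_device.h>
#include <net/dsa.h>

struct my_match_data {
	u8 id;
	u8 mib_count;	/* 39 on QCA832x, 41 on QCA833x */
};

static int my_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	const struct my_match_data *match_data;
	struct qca8k_priv *priv = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	/* Resolved from the OF compatible matched at probe time */
	match_data = of_device_get_match_data(priv->dev);

	return match_data->mib_count;
}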
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -70,6 +70,8 @@ static const struct qca8k_mib_desc ar832
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
-+ MIB_DESC(1, 0xa8, "RXUnicast"),
-+ MIB_DESC(1, 0xac, "TXUnicast"),
- };
-
- /* The 32bit switch registers are accessed indirectly. To achieve this we need
-@@ -1605,12 +1607,16 @@ qca8k_phylink_mac_link_up(struct dsa_swi
- static void
- qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
- {
-+ const struct qca8k_match_data *match_data;
-+ struct qca8k_priv *priv = ds->priv;
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
-- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
-+ match_data = of_device_get_match_data(priv->dev);
-+
-+ for (i = 0; i < match_data->mib_count; i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
- }
-@@ -1620,12 +1626,15 @@ qca8k_get_ethtool_stats(struct dsa_switc
- uint64_t *data)
- {
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-+ const struct qca8k_match_data *match_data;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
-- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
-+ match_data = of_device_get_match_data(priv->dev);
-+
-+ for (i = 0; i < match_data->mib_count; i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
-@@ -1648,10 +1657,15 @@ qca8k_get_ethtool_stats(struct dsa_switc
- static int
- qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
- {
-+ const struct qca8k_match_data *match_data;
-+ struct qca8k_priv *priv = ds->priv;
-+
- if (sset != ETH_SS_STATS)
- return 0;
-
-- return ARRAY_SIZE(ar8327_mib);
-+ match_data = of_device_get_match_data(priv->dev);
-+
-+ return match_data->mib_count;
- }
-
- static int
-@@ -2173,14 +2187,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
- static const struct qca8k_match_data qca8327 = {
- .id = QCA8K_ID_QCA8327,
- .reduced_package = true,
-+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
- };
-
- static const struct qca8k_match_data qca8328 = {
- .id = QCA8K_ID_QCA8327,
-+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
- };
-
- static const struct qca8k_match_data qca833x = {
- .id = QCA8K_ID_QCA8337,
-+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
- };
-
- static const struct of_device_id qca8k_of_match[] = {
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -21,6 +21,9 @@
- #define PHY_ID_QCA8337 0x004dd036
- #define QCA8K_ID_QCA8337 0x13
-
-+#define QCA8K_QCA832X_MIB_COUNT 39
-+#define QCA8K_QCA833X_MIB_COUNT 41
-+
- #define QCA8K_BUSY_WAIT_TIMEOUT 2000
-
- #define QCA8K_NUM_FDB_RECORDS 2048
-@@ -279,6 +282,7 @@ struct ar8xxx_port_status {
- struct qca8k_match_data {
- u8 id;
- bool reduced_package;
-+ u8 mib_count;
- };
-
- enum {
+++ /dev/null
-From 4592538bfb0d5d3c3c8a1d7071724d081412ac91 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:46 +0100
-Subject: net: dsa: qca8k: add support for port fast aging
-
-The switch supports fast aging by flushing any rule in the ARL
-table for a specific port.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 11 +++++++++++
- drivers/net/dsa/qca8k.h | 1 +
- 2 files changed, 12 insertions(+)
-
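Under the hood this reuses the FDB access path: the FLUSH_PORT command, the port-enable bit, and the port number are packed into the same ATU_FUNC register the other FDB commands use. A condensed sketch of the register value being built, assuming the ATU field names from the GENMASK conversion; the real helper also sets up and polls the BUSY bit:

#include <linux/bitfield.h>

/* Illustrative helper: the ATU_FUNC value for a per-port flush */
static u32 my_flush_port_cmd(int port)
{
	return QCA8K_ATU_FUNC_BUSY | QCA8K_FDB_FLUSH_PORT |
	       QCA8K_ATU_FUNC_PORT_EN |
	       FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}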
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1790,6 +1790,16 @@ qca8k_port_bridge_leave(struct dsa_switc
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
- }
-
-+static void
-+qca8k_port_fast_age(struct dsa_switch *ds, int port)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+
-+ mutex_lock(&priv->reg_mutex);
-+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
-+ mutex_unlock(&priv->reg_mutex);
-+}
-+
- static int
- qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-@@ -2017,6 +2027,7 @@ static const struct dsa_switch_ops qca8k
- .port_stp_state_set = qca8k_port_stp_state_set,
- .port_bridge_join = qca8k_port_bridge_join,
- .port_bridge_leave = qca8k_port_bridge_leave,
-+ .port_fast_age = qca8k_port_fast_age,
- .port_fdb_add = qca8k_port_fdb_add,
- .port_fdb_del = qca8k_port_fdb_del,
- .port_fdb_dump = qca8k_port_fdb_dump,
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -262,6 +262,7 @@ enum qca8k_fdb_cmd {
- QCA8K_FDB_FLUSH = 1,
- QCA8K_FDB_LOAD = 2,
- QCA8K_FDB_PURGE = 3,
-+ QCA8K_FDB_FLUSH_PORT = 5,
- QCA8K_FDB_NEXT = 6,
- QCA8K_FDB_SEARCH = 7,
- };
+++ /dev/null
-From 6a3bdc5209f45d2af83aa92433ab6e5cf2297aa4 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:47 +0100
-Subject: net: dsa: qca8k: add set_ageing_time support
-
-qca8k supports setting the ageing time in steps of 7s. Add support for it
-and set the maximum accepted value to 7645m.
-The documentation talks about support for 10000m, but that value doesn't
-make sense as it doesn't match the max value of the reg.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 25 +++++++++++++++++++++++++
- drivers/net/dsa/qca8k.h | 3 +++
- 2 files changed, 28 insertions(+)
-
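The two bounds in the patch follow directly from the 7 s step and the 16-bit AGE_TIME field; a quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long step_ms = 7 * 1000;	/* AGE_TIME counts 7 s units */
	unsigned long max_units = 0xffff;	/* GENMASK(15, 0) */
	unsigned long max_ms = max_units * step_ms;

	printf("ageing_time_min: %lu ms\n", step_ms);	/* 7000 */
	printf("ageing_time_max: %lu ms = %lu min\n",
	       max_ms, max_ms / 60000);	/* 458745000 ms, 7645 min */
	return 0;
}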
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1261,6 +1261,10 @@ qca8k_setup(struct dsa_switch *ds)
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
-+	/* Set the min and max ageing values supported */
-+ ds->ageing_time_min = 7000;
-+ ds->ageing_time_max = 458745000;
-+
- return 0;
- }
-
-@@ -1801,6 +1805,26 @@ qca8k_port_fast_age(struct dsa_switch *d
- }
-
- static int
-+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ unsigned int secs = msecs / 1000;
-+ u32 val;
-+
-+ /* AGE_TIME reg is set in 7s step */
-+ val = secs / 7;
-+
-+ /* Handle case with 0 as val to NOT disable
-+ * learning
-+ */
-+ if (!val)
-+ val = 1;
-+
-+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
-+ QCA8K_ATU_AGE_TIME(val));
-+}
-+
-+static int
- qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
- {
-@@ -2018,6 +2042,7 @@ static const struct dsa_switch_ops qca8k
- .get_strings = qca8k_get_strings,
- .get_ethtool_stats = qca8k_get_ethtool_stats,
- .get_sset_count = qca8k_get_sset_count,
-+ .set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
- .set_mac_eee = qca8k_set_mac_eee,
- .port_enable = qca8k_port_enable,
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -175,6 +175,9 @@
- #define QCA8K_VTU_FUNC1_BUSY BIT(31)
- #define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
- #define QCA8K_VTU_FUNC1_FULL BIT(4)
-+#define QCA8K_REG_ATU_CTRL 0x618
-+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
-+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
- #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
- #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
- #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+++ /dev/null
-From ba8f870dfa635113ce6e8095a5eb1835ecde2e9e Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Mon, 22 Nov 2021 16:23:48 +0100
-Subject: net: dsa: qca8k: add support for mdb_add/del
-
-Add support for the mdb add/del functions. The ARL table is used to insert
-the rule. The rule will be searched, deleted and reinserted with the
-port mask updated. The function checks whether the rule has to be updated
-or can be inserted directly with no deletion of the old rule.
-If every port is removed from the port mask, the rule is removed.
-The rule is set STATIC in the ARL table (i.e. it doesn't age) so it isn't
-flushed by the fast age function.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 99 insertions(+)
-
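The flow described above, condensed. The aging field doubles as a found/not-found flag: a search that comes back with aging 0 missed, while static entries carry 0xf so fast age leaves them alone. A sketch of the insert path using the patch's helpers, with locking and error handling trimmed:

/* Condensed sketch of the insert path; locking and error checks trimmed */
static int my_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
				    const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };

	qca8k_fdb_write(priv, vid, 0, mac, 0);	/* program the search key */
	qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	qca8k_fdb_read(priv, &fdb);

	if (fdb.aging)	/* hit: purge the old rule before rewriting it */
		qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);

	fdb.port_mask |= port_mask;	/* extend the membership */
	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	return qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);	/* reinsert */
}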
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -436,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
- }
-
- static int
-+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
-+ const u8 *mac, u16 vid)
-+{
-+ struct qca8k_fdb fdb = { 0 };
-+ int ret;
-+
-+ mutex_lock(&priv->reg_mutex);
-+
-+ qca8k_fdb_write(priv, vid, 0, mac, 0);
-+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
-+ if (ret < 0)
-+ goto exit;
-+
-+ ret = qca8k_fdb_read(priv, &fdb);
-+ if (ret < 0)
-+ goto exit;
-+
-+	/* Rule exists. Delete it first */
-+	if (fdb.aging) {
-+		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
-+		if (ret)
-+			goto exit;
-+	}
-+
-+ /* Add port to fdb portmask */
-+ fdb.port_mask |= port_mask;
-+
-+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
-+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-+
-+exit:
-+ mutex_unlock(&priv->reg_mutex);
-+ return ret;
-+}
-+
-+static int
-+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
-+ const u8 *mac, u16 vid)
-+{
-+ struct qca8k_fdb fdb = { 0 };
-+ int ret;
-+
-+ mutex_lock(&priv->reg_mutex);
-+
-+ qca8k_fdb_write(priv, vid, 0, mac, 0);
-+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
-+ if (ret < 0)
-+ goto exit;
-+
-+ /* Rule doesn't exist. Why delete? */
-+ if (!fdb.aging) {
-+ ret = -EINVAL;
-+ goto exit;
-+ }
-+
-+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
-+ if (ret)
-+ goto exit;
-+
-+	/* The only port in the rule is this port. Don't reinsert */
-+ if (fdb.port_mask == port_mask)
-+ goto exit;
-+
-+ /* Remove port from port mask */
-+ fdb.port_mask &= ~port_mask;
-+
-+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
-+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
-+
-+exit:
-+ mutex_unlock(&priv->reg_mutex);
-+ return ret;
-+}
-+
-+static int
- qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
- {
- u32 reg;
-@@ -1949,6 +2024,28 @@ qca8k_port_fdb_dump(struct dsa_switch *d
- }
-
- static int
-+qca8k_port_mdb_add(struct dsa_switch *ds, int port,
-+ const struct switchdev_obj_port_mdb *mdb)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ const u8 *addr = mdb->addr;
-+ u16 vid = mdb->vid;
-+
-+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
-+}
-+
-+static int
-+qca8k_port_mdb_del(struct dsa_switch *ds, int port,
-+ const struct switchdev_obj_port_mdb *mdb)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ const u8 *addr = mdb->addr;
-+ u16 vid = mdb->vid;
-+
-+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
-+}
-+
-+static int
- qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
- {
-@@ -2056,6 +2153,8 @@ static const struct dsa_switch_ops qca8k
- .port_fdb_add = qca8k_port_fdb_add,
- .port_fdb_del = qca8k_port_fdb_del,
- .port_fdb_dump = qca8k_port_fdb_dump,
-+ .port_mdb_add = qca8k_port_mdb_add,
-+ .port_mdb_del = qca8k_port_mdb_del,
- .port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_add = qca8k_port_vlan_add,
- .port_vlan_del = qca8k_port_vlan_del,
+++ /dev/null
-From 2c1bdbc7e7560d7de754cad277d968d56bb1899e Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Tue, 23 Nov 2021 03:59:10 +0100
-Subject: net: dsa: qca8k: add support for mirror mode
-
-The switch supports mirror mode. Only one port can be set as the mirror
-port, and every other port can be set to both ingress and egress mode.
-The mirror port is disabled and reverted to normal operation once every
-port is removed from sending packets to it.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++
- drivers/net/dsa/qca8k.h | 4 +++
- 2 files changed, 99 insertions(+)
-
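The key invariant: the MIRROR_PORT_NUM field in GLOBAL_FW_CTRL0 holds the single monitor port, with 0xF meaning none is configured. A small illustrative helper (hypothetical name) for the check the patch performs inline in mirror_add:

#include <linux/bitfield.h>
#include <linux/regmap.h>

/* Illustrative helper: is the monitor port free or already ours? */
static bool my_monitor_port_usable(struct qca8k_priv *priv, int want)
{
	u32 val;

	if (regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val))
		return false;

	val = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	return val == 0xF || val == want;
}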
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -2046,6 +2046,99 @@ qca8k_port_mdb_del(struct dsa_switch *ds
- }
-
- static int
-+qca8k_port_mirror_add(struct dsa_switch *ds, int port,
-+ struct dsa_mall_mirror_tc_entry *mirror,
-+ bool ingress)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ int monitor_port, ret;
-+ u32 reg, val;
-+
-+	/* Check for an existing entry */
-+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
-+ return -EEXIST;
-+
-+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
-+ if (ret)
-+ return ret;
-+
-+ /* QCA83xx can have only one port set to mirror mode.
-+ * Check that the correct port is requested and return error otherwise.
-+	 * When no mirror port is set, the value is set to 0xF
-+ */
-+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
-+ return -EEXIST;
-+
-+ /* Set the monitor port */
-+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
-+ mirror->to_local_port);
-+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-+ if (ret)
-+ return ret;
-+
-+ if (ingress) {
-+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
-+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
-+ } else {
-+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
-+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
-+ }
-+
-+ ret = regmap_update_bits(priv->regmap, reg, val, val);
-+ if (ret)
-+ return ret;
-+
-+ /* Track mirror port for tx and rx to decide when the
-+ * mirror port has to be disabled.
-+ */
-+ if (ingress)
-+ priv->mirror_rx |= BIT(port);
-+ else
-+ priv->mirror_tx |= BIT(port);
-+
-+ return 0;
-+}
-+
-+static void
-+qca8k_port_mirror_del(struct dsa_switch *ds, int port,
-+ struct dsa_mall_mirror_tc_entry *mirror)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ u32 reg, val;
-+ int ret;
-+
-+ if (mirror->ingress) {
-+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
-+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
-+ } else {
-+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
-+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
-+ }
-+
-+ ret = regmap_clear_bits(priv->regmap, reg, val);
-+ if (ret)
-+ goto err;
-+
-+ if (mirror->ingress)
-+ priv->mirror_rx &= ~BIT(port);
-+ else
-+ priv->mirror_tx &= ~BIT(port);
-+
-+ /* No port set to send packet to mirror port. Disable mirror port */
-+ if (!priv->mirror_rx && !priv->mirror_tx) {
-+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
-+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
-+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
-+ if (ret)
-+ goto err;
-+	}
-+
-+	return;
-+err:
-+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
-+}
-+
-+static int
- qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
- {
-@@ -2155,6 +2248,8 @@ static const struct dsa_switch_ops qca8k
- .port_fdb_dump = qca8k_port_fdb_dump,
- .port_mdb_add = qca8k_port_mdb_add,
- .port_mdb_del = qca8k_port_mdb_del,
-+ .port_mirror_add = qca8k_port_mirror_add,
-+ .port_mirror_del = qca8k_port_mirror_del,
- .port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_add = qca8k_port_vlan_add,
- .port_vlan_del = qca8k_port_vlan_del,
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -180,6 +180,7 @@
- #define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
- #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
- #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
-+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
- #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
- #define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
- #define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
-@@ -201,6 +202,7 @@
- #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
- #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
- #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
-+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
-
- #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
- #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
-@@ -305,6 +307,8 @@ struct qca8k_ports_config {
- struct qca8k_priv {
- u8 switch_id;
- u8 switch_revision;
-+ u8 mirror_rx;
-+ u8 mirror_tx;
- bool legacy_phy_port_mapping;
- struct qca8k_ports_config ports_config;
- struct regmap *regmap;
+++ /dev/null
-From def975307c01191b6f0170048c3724b0ed3348af Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Tue, 23 Nov 2021 03:59:11 +0100
-Subject: net: dsa: qca8k: add LAG support
-
-Add LAG support to this switch. In the documentation this is described as
-trunk mode. A max of 4 LAGs are supported and each can contain up to 4
-ports. The only tx mode currently supported is hash mode, with both L2
-and L2+3 variants.
-When no ports are present in the trunk, the trunk is disabled in the
-switch.
-When a port is disconnected, the traffic is redirected to the other
-available ports.
-The hash mode is global and every LAG is required to use the same hash
-mode. To change the hash mode when multiple LAGs are configured, each LAG
-must be removed and the desired hash mode set on the last one.
-An error is printed when an unsupported hash mode is requested.
-
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
----
- drivers/net/dsa/qca8k.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++
- drivers/net/dsa/qca8k.h | 33 +++++++++
- 2 files changed, 210 insertions(+)
-
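How the hash selection composes, checked standalone: the L2+3 case falls through into the L2 case, so it is the L2 MAC bits plus the IP bits. The values below are local copies of the TRUNK_HASH_EN defines from the patch:

#include <stdio.h>

#define HASH_SIP	(1u << 3)	/* QCA8K_TRUNK_HASH_SIP_EN */
#define HASH_DIP	(1u << 2)	/* QCA8K_TRUNK_HASH_DIP_EN */
#define HASH_SA		(1u << 1)	/* QCA8K_TRUNK_HASH_SA_EN */
#define HASH_DA		(1u << 0)	/* QCA8K_TRUNK_HASH_DA_EN */

/* Mirrors the switch/fallthrough in qca8k_lag_setup_hash() */
static unsigned int hash_bits(int l23)
{
	unsigned int hash = 0;

	if (l23)
		hash |= HASH_SIP | HASH_DIP;	/* L2+3 adds the IP bits */
	hash |= HASH_SA | HASH_DA;		/* both modes hash the MACs */
	return hash;
}

int main(void)
{
	printf("L2 hash:   0x%x\n", hash_bits(0));	/* 0x3 */
	printf("L2+3 hash: 0x%x\n", hash_bits(1));	/* 0xf */
	return 0;
}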
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -1340,6 +1340,9 @@ qca8k_setup(struct dsa_switch *ds)
- ds->ageing_time_min = 7000;
- ds->ageing_time_max = 458745000;
-
-+ /* Set max number of LAGs supported */
-+ ds->num_lag_ids = QCA8K_NUM_LAGS;
-+
- return 0;
- }
-
-@@ -2226,6 +2229,178 @@ qca8k_get_tag_protocol(struct dsa_switch
- return DSA_TAG_PROTO_QCA;
- }
-
-+static bool
-+qca8k_lag_can_offload(struct dsa_switch *ds,
-+ struct net_device *lag,
-+ struct netdev_lag_upper_info *info)
-+{
-+ struct dsa_port *dp;
-+ int id, members = 0;
-+
-+ id = dsa_lag_id(ds->dst, lag);
-+ if (id < 0 || id >= ds->num_lag_ids)
-+ return false;
-+
-+ dsa_lag_foreach_port(dp, ds->dst, lag)
-+ /* Includes the port joining the LAG */
-+ members++;
-+
-+ if (members > QCA8K_NUM_PORTS_FOR_LAG)
-+ return false;
-+
-+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
-+ return false;
-+
-+	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
-+	    info->hash_type != NETDEV_LAG_HASH_L23)
-+ return false;
-+
-+ return true;
-+}
-+
-+static int
-+qca8k_lag_setup_hash(struct dsa_switch *ds,
-+ struct net_device *lag,
-+ struct netdev_lag_upper_info *info)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ bool unique_lag = true;
-+ int i, id;
-+	u32 hash = 0;
-+
-+ id = dsa_lag_id(ds->dst, lag);
-+
-+ switch (info->hash_type) {
-+ case NETDEV_LAG_HASH_L23:
-+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
-+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
-+ fallthrough;
-+ case NETDEV_LAG_HASH_L2:
-+ hash |= QCA8K_TRUNK_HASH_SA_EN;
-+ hash |= QCA8K_TRUNK_HASH_DA_EN;
-+ break;
-+ default: /* We should NEVER reach this */
-+ return -EOPNOTSUPP;
-+ }
-+
-+ /* Check if we are the unique configured LAG */
-+ dsa_lags_foreach_id(i, ds->dst)
-+ if (i != id && dsa_lag_dev(ds->dst, i)) {
-+ unique_lag = false;
-+ break;
-+ }
-+
-+	/* The hash mode is global. Make sure the same hash mode
-+	 * is set for all 4 possible LAGs.
-+	 * If we are the only LAG we can set whatever hash
-+	 * mode we want.
-+	 * To change the hash mode, all LAGs must be removed
-+	 * and the mode changed with the last one.
-+	 */
-+ if (unique_lag) {
-+ priv->lag_hash_mode = hash;
-+ } else if (priv->lag_hash_mode != hash) {
-+		netdev_err(lag, "Error: Mismatched hash mode across different lags is not supported\n");
-+ return -EOPNOTSUPP;
-+ }
-+
-+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
-+ QCA8K_TRUNK_HASH_MASK, hash);
-+}
-+
-+static int
-+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
-+ struct net_device *lag, bool delete)
-+{
-+ struct qca8k_priv *priv = ds->priv;
-+ int ret, id, i;
-+ u32 val;
-+
-+ id = dsa_lag_id(ds->dst, lag);
-+
-+ /* Read current port member */
-+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
-+ if (ret)
-+ return ret;
-+
-+ /* Shift val to the correct trunk */
-+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
-+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
-+ if (delete)
-+ val &= ~BIT(port);
-+ else
-+ val |= BIT(port);
-+
-+ /* Update port member. With empty portmap disable trunk */
-+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
-+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
-+ QCA8K_REG_GOL_TRUNK_EN(id),
-+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
-+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
-+
-+	/* Search for an empty member when adding, or for the port when deleting */
-+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
-+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
-+ if (ret)
-+ return ret;
-+
-+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
-+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
-+
-+ if (delete) {
-+ /* If port flagged to be disabled assume this member is
-+ * empty
-+ */
-+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
-+ continue;
-+
-+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
-+ if (val != port)
-+ continue;
-+ } else {
-+ /* If port flagged to be enabled assume this member is
-+ * already set
-+ */
-+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
-+ continue;
-+ }
-+
-+ /* We have found the member to add/remove */
-+ break;
-+ }
-+
-+ /* Set port in the correct port mask or disable port if in delete mode */
-+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
-+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
-+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
-+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
-+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
-+}
-+
-+static int
-+qca8k_port_lag_join(struct dsa_switch *ds, int port,
-+ struct net_device *lag,
-+ struct netdev_lag_upper_info *info)
-+{
-+ int ret;
-+
-+ if (!qca8k_lag_can_offload(ds, lag, info))
-+ return -EOPNOTSUPP;
-+
-+ ret = qca8k_lag_setup_hash(ds, lag, info);
-+ if (ret)
-+ return ret;
-+
-+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
-+}
-+
-+static int
-+qca8k_port_lag_leave(struct dsa_switch *ds, int port,
-+ struct net_device *lag)
-+{
-+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
-+}
-+
- static const struct dsa_switch_ops qca8k_switch_ops = {
- .get_tag_protocol = qca8k_get_tag_protocol,
- .setup = qca8k_setup,
-@@ -2259,6 +2434,8 @@ static const struct dsa_switch_ops qca8k
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
- .get_phy_flags = qca8k_get_phy_flags,
-+ .port_lag_join = qca8k_port_lag_join,
-+ .port_lag_leave = qca8k_port_lag_leave,
- };
-
- static int qca8k_read_switch_id(struct qca8k_priv *priv)
---- a/drivers/net/dsa/qca8k.h
-+++ b/drivers/net/dsa/qca8k.h
-@@ -15,6 +15,8 @@
- #define QCA8K_NUM_PORTS 7
- #define QCA8K_NUM_CPU_PORTS 2
- #define QCA8K_MAX_MTU 9000
-+#define QCA8K_NUM_LAGS 4
-+#define QCA8K_NUM_PORTS_FOR_LAG 4
-
- #define PHY_ID_QCA8327 0x004dd034
- #define QCA8K_ID_QCA8327 0x12
-@@ -122,6 +124,14 @@
- #define QCA8K_REG_EEE_CTRL 0x100
- #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
-
-+/* TRUNK_HASH_EN registers */
-+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
-+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
-+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
-+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
-+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
-+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
-+
- /* ACL registers */
- #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
- #define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
-@@ -204,6 +214,28 @@
- #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
- #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
-
-+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
-+/* 4 max trunks.
-+ * The first 7 bits are the member bitmap,
-+ * the 8th bit enables the trunk port
-+ */
-+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
-+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
-+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
-+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
-+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
-+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
-+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
-+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
-+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
-+/* Complex shift: FIRST shift for port THEN shift for trunk */
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
-+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
-+
- #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
- #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
- #define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
-@@ -309,6 +341,7 @@ struct qca8k_priv {
- u8 switch_revision;
- u8 mirror_rx;
- u8 mirror_tx;
-+ u8 lag_hash_mode;
- bool legacy_phy_port_mapping;
- struct qca8k_ports_config ports_config;
- struct regmap *regmap;
--- /dev/null
+From 3b00a07c2443745d62babfe08dbb2ad8e649526e Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Fri, 19 Nov 2021 03:03:49 +0100
+Subject: [PATCH] net: dsa: qca8k: fix internal delay applied to the wrong PAD
+ config
+
+With an SGMII PHY the internal delay is always applied to the PAD0 config.
+This is caused by the falling edge configuration, which hardcodes the reg
+to PAD0 (as the falling edge bits are present only in the PAD0 reg).
+Move the delay configuration before the reg overwrite to correctly apply
+the delay.
+
+Fixes: cef08115846e ("net: dsa: qca8k: set internal delay also for sgmii")
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1433,6 +1433,12 @@ qca8k_phylink_mac_config(struct dsa_swit
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
++	/* The original code reports port instability, as SGMII also
++	 * requires the delay set. Apply advised values here or take them from DT.
++	 */
++ if (state->interface == PHY_INTERFACE_MODE_SGMII)
++ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
++
+ /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
+ * falling edge is set writing in the PORT0 PAD reg
+ */
+@@ -1455,12 +1461,6 @@ qca8k_phylink_mac_config(struct dsa_swit
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+- /* From original code is reported port instability as SGMII also
+- * require delay set. Apply advised values here or take them from DT.
+- */
+- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+-
+ break;
+ default:
+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
--- /dev/null
+From 65258b9d8cde45689bdc86ca39b50f01f983733b Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Fri, 19 Nov 2021 03:03:50 +0100
+Subject: [PATCH] net: dsa: qca8k: fix MTU calculation
+
+qca8k has a global MTU, so it tracks the MTU per port to make sure
+that the largest MTU gets applied.
+Since it uses the frame size instead of the MTU, the driver's MTU change
+function then adds the size of the Ethernet header and checksum on top
+of the MTU.
+
+The driver currently populates the per port MTU size as Ethernet frame
+length + checksum, which equals 1518.
+
+The issue is that the MTU change function will then go through all of the
+ports, find the largest MTU and apply the Ethernet header + checksum on
+top of it again, so for a desired MTU of 1500 you end up with 1536.
+
+This is obviously incorrect, so to correct it populate the per port MTU
+with just the MTU and do not include the Ethernet header + checksum size,
+as those will be added by the MTU change function.
+
+Fixes: f58d2598cf70 ("net: dsa: qca8k: implement the port MTU callbacks")
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
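The double-counting, checked standalone (ETH_DATA_LEN is the 1500-byte payload, ETH_FRAME_LEN the 1514-byte header plus payload; the defines below are local copies of the uapi values):

#include <stdio.h>

#define ETH_DATA_LEN	1500	/* payload only: the MTU */
#define ETH_FRAME_LEN	1514	/* header + payload */
#define ETH_FCS_LEN	4	/* frame checksum */

int main(void)
{
	/* The MTU change path always adds header + FCS on top */
	int overhead = (ETH_FRAME_LEN - ETH_DATA_LEN) + ETH_FCS_LEN;

	printf("stored 1518 -> %d\n", ETH_FRAME_LEN + ETH_FCS_LEN + overhead); /* 1536 */
	printf("stored 1500 -> %d\n", ETH_DATA_LEN + overhead);               /* 1518 */
	return 0;
}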
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds)
+ /* Set initial MTU for every port.
+	 * We only have a general MTU setting. So track
+	 * every port and set the max across all ports.
++	 * Set the per port MTU to 1500 as the MTU change function
++	 * will add the overhead and if it's set to 1518 then it
++	 * will apply the overhead again and we will end up with
++	 * an MTU of 1536 instead of 1518
+ */
+- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
++ priv->port_mtu[i] = ETH_DATA_LEN;
+ }
+
+ /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
--- /dev/null
+From b9133f3ef5a2659730cf47a74bd0a9259f1cf8ff Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:40 +0100
+Subject: net: dsa: qca8k: remove redundant check in parse_port_config
+
+The very next check for ports 0 and 6 already makes sure we don't go out
+of bounds with the ports_config delay table.
+Remove the redundant check.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
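Why the removed bound was dead code: the skip filter leaves only ports 0 and 6, so cpu_port_index can never exceed QCA8K_NUM_CPU_PORTS. A trivial standalone replay of the loop's control flow:

#include <stdio.h>

#define QCA8K_NUM_PORTS		7
#define QCA8K_NUM_CPU_PORTS	2

int main(void)
{
	int port, cpu_port_index = 0;

	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		if (port != 0 && port != 6)
			continue;	/* same skip as the parse loop */
		cpu_port_index++;
	}
	printf("%d iterations reach the body (limit %d)\n",
	       cpu_port_index, QCA8K_NUM_CPU_PORTS);	/* prints 2 */
	return 0;
}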
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -983,7 +983,7 @@ qca8k_parse_port_config(struct qca8k_pri
+ u32 delay;
+
+ /* We have 2 CPU port. Check them */
+- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
++ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Skip every other port */
+ if (port != 0 && port != 6)
+ continue;
--- /dev/null
+From 90ae68bfc2ffcb54a4ba4f64edbeb84a80cbb57c Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:41 +0100
+Subject: net: dsa: qca8k: convert to GENMASK/FIELD_PREP/FIELD_GET
+
+Convert and try to standardize bit fields using the
+GENMASK/FIELD_PREP/FIELD_GET macros. Rework some logic to suit the
+standard macros and tidy things up. No functional change intended.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 98 +++++++++++++++----------------
+ drivers/net/dsa/qca8k.h | 153 ++++++++++++++++++++++++++----------------------
+ 2 files changed, 130 insertions(+), 121 deletions(-)
+
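What the conversion buys, checked standalone: FIELD_PREP/FIELD_GET derive the shift from the mask, so the open-coded `(vid & 0xfff) << 8` and the masked form agree by construction. The macros below are userspace stand-ins for the kernel ones, good enough for a 32-bit demo:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel macros */
#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)	 (((r) & (m)) >> __builtin_ctz(m))

#define ATU_VID_MASK	GENMASK(19, 8)	/* QCA8K_ATU_VID_MASK */

int main(void)
{
	uint32_t vid = 100;
	uint32_t oldv = (vid & 0xfff) << 8;		/* open-coded form */
	uint32_t newv = FIELD_PREP(ATU_VID_MASK, vid);	/* converted form */

	printf("prep: 0x%x == 0x%x\n", oldv, newv);	/* 0x6400 == 0x6400 */
	printf("get:  %u\n", FIELD_GET(ATU_VID_MASK, newv));	/* 100 */
	return 0;
}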
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -9,6 +9,7 @@
+ #include <linux/module.h>
+ #include <linux/phy.h>
+ #include <linux/netdevice.h>
++#include <linux/bitfield.h>
+ #include <net/dsa.h>
+ #include <linux/of_net.h>
+ #include <linux/of_mdio.h>
+@@ -319,18 +320,18 @@ qca8k_fdb_read(struct qca8k_priv *priv,
+ }
+
+ /* vid - 83:72 */
+- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
++ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
++ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
++ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
+- fdb->mac[1] = reg[1] & 0xff;
+- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
+- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
+- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
+- fdb->mac[5] = reg[0] & 0xff;
++ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
++ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
++ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
++ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
++ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
++ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+ }
+@@ -343,18 +344,18 @@ qca8k_fdb_write(struct qca8k_priv *priv,
+ int i;
+
+ /* vid - 83:72 */
+- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
++ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+- reg[2] |= aging & QCA8K_ATU_STATUS_M;
++ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
++ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
+- reg[1] |= mac[1];
+- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
+- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
+- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
+- reg[0] |= mac[5];
++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
++ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
++ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ for (i = 0; i < 3; i++)
+@@ -372,7 +373,7 @@ qca8k_fdb_access(struct qca8k_priv *priv
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
++ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+@@ -454,7 +455,7 @@ qca8k_vlan_access(struct qca8k_priv *pri
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
++ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+@@ -500,13 +501,11 @@ qca8k_vlan_add(struct qca8k_priv *priv,
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
+- QCA8K_VTU_FUNC0_EG_MODE_S(port);
++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
+- QCA8K_VTU_FUNC0_EG_MODE_S(port);
++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+@@ -534,15 +533,13 @@ qca8k_vlan_del(struct qca8k_priv *priv,
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
+ if (ret < 0)
+ goto out;
+- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
+- QCA8K_VTU_FUNC0_EG_MODE_S(port);
++ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
++ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
+- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
++ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+@@ -1014,7 +1011,7 @@ qca8k_parse_port_config(struct qca8k_pri
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+- if (delay > QCA8K_MAX_DELAY) {
++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+@@ -1030,7 +1027,7 @@ qca8k_parse_port_config(struct qca8k_pri
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+- if (delay > QCA8K_MAX_DELAY) {
++ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+@@ -1141,8 +1138,8 @@ qca8k_setup(struct dsa_switch *ds)
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+@@ -1159,10 +1156,10 @@ qca8k_setup(struct dsa_switch *ds)
+ * for igmp, unknown, multicast and broadcast packet
+ */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+@@ -1180,8 +1177,6 @@ qca8k_setup(struct dsa_switch *ds)
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+- int shift = 16 * (i % 2);
+-
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+@@ -1198,8 +1193,8 @@ qca8k_setup(struct dsa_switch *ds)
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+- 0xfff << shift,
+- QCA8K_PORT_VID_DEF << shift);
++ QCA8K_EGREES_VLAN_PORT_MASK(i),
++ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+@@ -1246,7 +1241,7 @@ qca8k_setup(struct dsa_switch *ds)
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+- QCA8K_PORT_HOL_CTRL1_ING_BUF |
++ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+@@ -1269,8 +1264,8 @@ qca8k_setup(struct dsa_switch *ds)
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
+- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
++ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
++ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+@@ -1935,11 +1930,11 @@ qca8k_port_vlan_filtering(struct dsa_swi
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+- QCA8K_PORT_LOOKUP_VLAN_MODE,
++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+- QCA8K_PORT_LOOKUP_VLAN_MODE,
++ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+@@ -1963,10 +1958,9 @@ qca8k_port_vlan_add(struct dsa_switch *d
+ }
+
+ if (pvid) {
+- int shift = 16 * (port % 2);
+-
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+- 0xfff << shift, vlan->vid << shift);
++ QCA8K_EGREES_VLAN_PORT_MASK(port),
++ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+@@ -2060,7 +2054,7 @@ static int qca8k_read_switch_id(struct q
+ if (ret < 0)
+ return -ENODEV;
+
+- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
++ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != data->id) {
+ dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
+ return -ENODEV;
+@@ -2069,7 +2063,7 @@ static int qca8k_read_switch_id(struct q
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
++ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+ }
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -30,9 +30,9 @@
+ /* Global control registers */
+ #define QCA8K_REG_MASK_CTRL 0x000
+ #define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
++#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
+ #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
++#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
+ #define QCA8K_REG_PORT0_PAD_CTRL 0x004
+ #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
+ #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+@@ -41,12 +41,11 @@
+ #define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+ #define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+ #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
+-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
++#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
+ #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
+-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
++#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
+ #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
+ #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+-#define QCA8K_MAX_DELAY 3
+ #define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+ #define QCA8K_REG_PWS 0x010
+ #define QCA8K_PWS_POWER_ON_SEL BIT(31)
+@@ -68,10 +67,12 @@
+ #define QCA8K_MDIO_MASTER_READ BIT(27)
+ #define QCA8K_MDIO_MASTER_WRITE 0
+ #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
+-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
+-#define QCA8K_MDIO_MASTER_DATA(x) (x)
++#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
++#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
++#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
++#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
+ #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
++#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
+ #define QCA8K_MDIO_MASTER_MAX_PORTS 5
+ #define QCA8K_MDIO_MASTER_MAX_REG 32
+ #define QCA8K_GOL_MAC_ADDR0 0x60
+@@ -93,9 +94,7 @@
+ #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
+ #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+ #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+-#define QCA8K_PORT_HDR_CTRL_RX_S 2
+ #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+-#define QCA8K_PORT_HDR_CTRL_TX_S 0
+ #define QCA8K_PORT_HDR_CTRL_ALL 2
+ #define QCA8K_PORT_HDR_CTRL_MGMT 1
+ #define QCA8K_PORT_HDR_CTRL_NONE 0
+@@ -105,10 +104,11 @@
+ #define QCA8K_SGMII_EN_TX BIT(3)
+ #define QCA8K_SGMII_EN_SD BIT(4)
+ #define QCA8K_SGMII_CLK125M_DELAY BIT(7)
+-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
+-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
+-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
+-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
++#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
++#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
++#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
++#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
++#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
+
+ /* MAC_PWR_SEL registers */
+ #define QCA8K_REG_MAC_PWR_SEL 0x0e4
+@@ -121,100 +121,115 @@
+
+ /* ACL registers */
+ #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
+-#define QCA8K_PORT_VLAN_SVID(x) x
++#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
++#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
++#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
++#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
+ #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+ #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+ #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+ /* Lookup registers */
+ #define QCA8K_REG_ATU_DATA0 0x600
+-#define QCA8K_ATU_ADDR2_S 24
+-#define QCA8K_ATU_ADDR3_S 16
+-#define QCA8K_ATU_ADDR4_S 8
++#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
++#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
++#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
++#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
+ #define QCA8K_REG_ATU_DATA1 0x604
+-#define QCA8K_ATU_PORT_M 0x7f
+-#define QCA8K_ATU_PORT_S 16
+-#define QCA8K_ATU_ADDR0_S 8
++#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
++#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
++#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
+ #define QCA8K_REG_ATU_DATA2 0x608
+-#define QCA8K_ATU_VID_M 0xfff
+-#define QCA8K_ATU_VID_S 8
+-#define QCA8K_ATU_STATUS_M 0xf
++#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
++#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
+ #define QCA8K_ATU_STATUS_STATIC 0xf
+ #define QCA8K_REG_ATU_FUNC 0x60c
+ #define QCA8K_ATU_FUNC_BUSY BIT(31)
+ #define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+ #define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+ #define QCA8K_ATU_FUNC_FULL BIT(12)
+-#define QCA8K_ATU_FUNC_PORT_M 0xf
+-#define QCA8K_ATU_FUNC_PORT_S 8
++#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
+ #define QCA8K_REG_VTU_FUNC0 0x610
+ #define QCA8K_VTU_FUNC0_VALID BIT(20)
+ #define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
+-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
+-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
+-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
+-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
+-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
++/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
++ * It contains the VLAN_MODE for each port: [5:4] for port0,
++ * [7:6] for port1 ... [17:16] for port6. Use the virtual port
++ * defines to handle this.
++ */
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
++#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
++#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
++#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
++#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
++#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
++#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+ #define QCA8K_REG_VTU_FUNC1 0x614
+ #define QCA8K_VTU_FUNC1_BUSY BIT(31)
+-#define QCA8K_VTU_FUNC1_VID_S 16
++#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
+ #define QCA8K_VTU_FUNC1_FULL BIT(4)
+ #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+ #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+ #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
+-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
+-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
+-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
++#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
++#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
++#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
++#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
+ #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+ #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
+-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
+-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
+-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
+-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
++#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
+ #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
+-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
+-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
+-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
+-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
+-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
++#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
++#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
++#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
++#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
++#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
++#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+ #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+
+ #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
+-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
+-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
+-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
++#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
++#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
++#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
+
+ #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
+-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
++#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
++#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
++#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
+
+ #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
+-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
++#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
++#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
+ #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+ #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+ #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+ #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
+ /* Pkt edit registers */
++#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
++#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
++#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+ #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+ /* L3 registers */
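
The hunk above replaces hand-rolled mask/shift constant pairs with GENMASK() plus FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. As a minimal sketch of the pattern (illustrative only, not part of the patch), packing and unpacking the ATU port field changes like this:

    /* old style: relative mask, with the shift tracked separately */
    val |= (port & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
    port = (val >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;

    /* new style: one absolute GENMASK(22, 16) carries both facts */
    val |= FIELD_PREP(QCA8K_ATU_PORT_MASK, port);
    port = FIELD_GET(QCA8K_ATU_PORT_MASK, val);
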
--- /dev/null
+From 994c28b6f971fa5db8ae977daea37eee87d93d51 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:42 +0100
+Subject: net: dsa: qca8k: remove extra mutex_init in qca8k_setup
+
+The mutex is already initialized in sw_probe. Remove the extra init in qca8k_setup.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1086,8 +1086,6 @@ qca8k_setup(struct dsa_switch *ds)
+ if (ret)
+ return ret;
+
+- mutex_init(&priv->reg_mutex);
+-
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
+ &qca8k_regmap_config);
--- /dev/null
+From 36b8af12f424e7a7f60a935c60a0fd4aa0822378 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:43 +0100
+Subject: net: dsa: qca8k: move regmap init in probe and set it mandatory
+
+In preparation for the regmap conversion, move the regmap init to the probe
+function and make it mandatory, as every read/write/rmw operation will be
+converted to the regmap API.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1086,12 +1086,6 @@ qca8k_setup(struct dsa_switch *ds)
+ if (ret)
+ return ret;
+
+- /* Start by setting up the register mapping */
+- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
+- &qca8k_regmap_config);
+- if (IS_ERR(priv->regmap))
+- dev_warn(priv->dev, "regmap initialization failed");
+-
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+@@ -2096,6 +2090,14 @@ qca8k_sw_probe(struct mdio_device *mdiod
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ }
+
++ /* Start by setting up the register mapping */
++ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
++ &qca8k_regmap_config);
++ if (IS_ERR(priv->regmap)) {
++ dev_err(priv->dev, "regmap initialization failed");
++ return PTR_ERR(priv->regmap);
++ }
++
+ /* Check the detected switch id */
+ ret = qca8k_read_switch_id(priv);
+ if (ret)
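
For context, devm_regmap_init() with a NULL bus argument drives every access through the reg_read/reg_write callbacks in qca8k_regmap_config, and the third argument (priv here) is what those callbacks later receive as their void *ctx. Moving the call into probe lets the driver abort early on failure instead of merely warning; a hedged sketch of that shape, using the names from the patch:

    priv->regmap = devm_regmap_init(&mdiodev->dev, NULL /* no bus */, priv,
                                    &qca8k_regmap_config);
    if (IS_ERR(priv->regmap)) {
            dev_err(priv->dev, "regmap initialization failed");
            return PTR_ERR(priv->regmap);   /* fail probe, don't limp on */
    }
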
--- /dev/null
+From 8b5f3f29a81a71934d004e21a1292c1148b05926 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:44 +0100
+Subject: net: dsa: qca8k: initial conversion to regmap helper
+
+Convert every qca8k set/clear/poll helper to the regmap equivalent and add
+the missing config to the regmap_config struct.
+Read/write/rmw operations are reworked to use the regmap helpers
+internally to keep the delta of this patch low. These additional
+functions will then be dropped when the code split is proposed.
+
+The IPQ40xx SoC has an internal switch based on the qca8k regmap, but it
+uses MMIO instead of MDIO for read/write/rmw operations.
+In preparation for supporting this internal switch, convert the
+driver to the regmap API so the driver can later be split into common and
+specific code. The overhead introduced by the regmap API is marginal, as the
+internal MDIO path bypasses it through direct access and regmap is
+used only by configuration functions or fdb access.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 107 +++++++++++++++++++++---------------------------
+ 1 file changed, 47 insertions(+), 60 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -10,6 +10,7 @@
+ #include <linux/phy.h>
+ #include <linux/netdevice.h>
+ #include <linux/bitfield.h>
++#include <linux/regmap.h>
+ #include <net/dsa.h>
+ #include <linux/of_net.h>
+ #include <linux/of_mdio.h>
+@@ -152,6 +153,25 @@ qca8k_set_page(struct mii_bus *bus, u16
+ static int
+ qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+ {
++ return regmap_read(priv->regmap, reg, val);
++}
++
++static int
++qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
++{
++ return regmap_write(priv->regmap, reg, val);
++}
++
++static int
++qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
++{
++ return regmap_update_bits(priv->regmap, reg, mask, write_val);
++}
++
++static int
++qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
++{
++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+@@ -172,8 +192,9 @@ exit:
+ }
+
+ static int
+-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
++qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+ {
++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+@@ -194,8 +215,9 @@ exit:
+ }
+
+ static int
+-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
++qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+ {
++ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+@@ -223,34 +245,6 @@ exit:
+ return ret;
+ }
+
+-static int
+-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+-{
+- return qca8k_rmw(priv, reg, 0, val);
+-}
+-
+-static int
+-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
+-{
+- return qca8k_rmw(priv, reg, val, 0);
+-}
+-
+-static int
+-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+-{
+- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+-
+- return qca8k_read(priv, reg, val);
+-}
+-
+-static int
+-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+-{
+- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+-
+- return qca8k_write(priv, reg, val);
+-}
+-
+ static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+@@ -282,26 +276,19 @@ static struct regmap_config qca8k_regmap
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .reg_read = qca8k_regmap_read,
+ .reg_write = qca8k_regmap_write,
++ .reg_update_bits = qca8k_regmap_update_bits,
+ .rd_table = &qca8k_readable_table,
++ .disable_locking = true, /* Locking is handled by qca8k read/write */
++ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+ };
+
+ static int
+ qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+ {
+- int ret, ret1;
+ u32 val;
+
+- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
+- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+- priv, reg, &val);
+-
+- /* Check if qca8k_read has failed for a different reason
+- * before returning -ETIMEDOUT
+- */
+- if (ret < 0 && ret1 < 0)
+- return ret1;
+-
+- return ret;
++ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
++ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+ }
+
+ static int
+@@ -568,7 +555,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+@@ -576,7 +563,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
+ if (ret)
+ goto exit;
+
+- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+@@ -597,9 +584,9 @@ qca8k_port_set_status(struct qca8k_priv
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
++ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
++ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ }
+
+ static u32
+@@ -861,8 +848,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv *
+ * a dt-overlay and driver reload changed the configuration
+ */
+
+- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+- QCA8K_MDIO_MASTER_EN);
++ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
++ QCA8K_MDIO_MASTER_EN);
+ }
+
+ /* Check if the devicetree declare the port:phy mapping */
+@@ -1099,16 +1086,16 @@ qca8k_setup(struct dsa_switch *ds)
+ return ret;
+
+ /* Make sure MAC06 is disabled */
+- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
+- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
++ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
++ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+@@ -1176,8 +1163,8 @@ qca8k_setup(struct dsa_switch *ds)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+- QCA8K_PORT_LOOKUP_LEARN);
++ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
++ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+@@ -1745,9 +1732,9 @@ qca8k_port_bridge_join(struct dsa_switch
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+- ret = qca8k_reg_set(priv,
+- QCA8K_PORT_LOOKUP_CTRL(i),
+- BIT(port));
++ ret = regmap_set_bits(priv->regmap,
++ QCA8K_PORT_LOOKUP_CTRL(i),
++ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+@@ -1777,9 +1764,9 @@ qca8k_port_bridge_leave(struct dsa_switc
+ /* Remove this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+- qca8k_reg_clear(priv,
+- QCA8K_PORT_LOOKUP_CTRL(i),
+- BIT(port));
++ regmap_clear_bits(priv->regmap,
++ QCA8K_PORT_LOOKUP_CTRL(i),
++ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
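
The dropped qca8k_reg_set()/qca8k_reg_clear() helpers map one-to-one onto regmap calls; a hedged reminder of the regmap semantics the conversion relies on:

    /* same as regmap_update_bits(map, reg, bits, bits) */
    regmap_set_bits(priv->regmap, reg, bits);
    /* same as regmap_update_bits(map, reg, bits, 0) */
    regmap_clear_bits(priv->regmap, reg, bits);
    /* read-modify-write touching only the bits covered by mask */
    regmap_update_bits(priv->regmap, reg, mask, val);
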
--- /dev/null
+From c126f118b330ccf0db0dda4a4bd6c729865a205f Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:45 +0100
+Subject: net: dsa: qca8k: add additional MIB counter and make it dynamic
+
+We are currently missing 2 additional MIB counters present in the QCA833x
+switch.
+The QCA832x switch has 39 MIB counters and the QCA833x has 41.
+Add the additional MIB counters and rework the MIB function to report the
+correct number of supported counters from the match_data struct.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 23 ++++++++++++++++++++---
+ drivers/net/dsa/qca8k.h | 4 ++++
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -70,6 +70,8 @@ static const struct qca8k_mib_desc ar832
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
++ MIB_DESC(1, 0xa8, "RXUnicast"),
++ MIB_DESC(1, 0xac, "TXUnicast"),
+ };
+
+ /* The 32bit switch registers are accessed indirectly. To achieve this we need
+@@ -1605,12 +1607,16 @@ qca8k_phylink_mac_link_up(struct dsa_swi
+ static void
+ qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
+ {
++ const struct qca8k_match_data *match_data;
++ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
++ match_data = of_device_get_match_data(priv->dev);
++
++ for (i = 0; i < match_data->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+ }
+@@ -1620,12 +1626,15 @@ qca8k_get_ethtool_stats(struct dsa_switc
+ uint64_t *data)
+ {
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
++ const struct qca8k_match_data *match_data;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
++ match_data = of_device_get_match_data(priv->dev);
++
++ for (i = 0; i < match_data->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+@@ -1648,10 +1657,15 @@ qca8k_get_ethtool_stats(struct dsa_switc
+ static int
+ qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+ {
++ const struct qca8k_match_data *match_data;
++ struct qca8k_priv *priv = ds->priv;
++
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+- return ARRAY_SIZE(ar8327_mib);
++ match_data = of_device_get_match_data(priv->dev);
++
++ return match_data->mib_count;
+ }
+
+ static int
+@@ -2173,14 +2187,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
++ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ };
+
+ static const struct qca8k_match_data qca8328 = {
+ .id = QCA8K_ID_QCA8327,
++ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ };
+
+ static const struct qca8k_match_data qca833x = {
+ .id = QCA8K_ID_QCA8337,
++ .mib_count = QCA8K_QCA833X_MIB_COUNT,
+ };
+
+ static const struct of_device_id qca8k_of_match[] = {
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -21,6 +21,9 @@
+ #define PHY_ID_QCA8337 0x004dd036
+ #define QCA8K_ID_QCA8337 0x13
+
++#define QCA8K_QCA832X_MIB_COUNT 39
++#define QCA8K_QCA833X_MIB_COUNT 41
++
+ #define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
+ #define QCA8K_NUM_FDB_RECORDS 2048
+@@ -279,6 +282,7 @@ struct ar8xxx_port_status {
+ struct qca8k_match_data {
+ u8 id;
+ bool reduced_package;
++ u8 mib_count;
+ };
+
+ enum {
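
The per-variant counter count is resolved through the OF match table: of_device_get_match_data() returns the .data pointer of the of_device_id that matched the device node. A hedged sketch of the lookup path (table abridged, num_counters is a placeholder name):

    static const struct of_device_id qca8k_of_match[] = {
            { .compatible = "qca,qca8327", .data = &qca8327 },
            { .compatible = "qca,qca8337", .data = &qca833x },
            { /* sentinel */ }
    };

    const struct qca8k_match_data *match_data;
    int num_counters;

    match_data = of_device_get_match_data(priv->dev);
    num_counters = match_data->mib_count;   /* 39 on QCA832x, 41 on QCA833x */
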
--- /dev/null
+From 4592538bfb0d5d3c3c8a1d7071724d081412ac91 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:46 +0100
+Subject: net: dsa: qca8k: add support for port fast aging
+
+The switch supports fast aging by flushing any rule in the ARL
+table for a specific port.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 11 +++++++++++
+ drivers/net/dsa/qca8k.h | 1 +
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1790,6 +1790,16 @@ qca8k_port_bridge_leave(struct dsa_switc
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+ }
+
++static void
++qca8k_port_fast_age(struct dsa_switch *ds, int port)
++{
++ struct qca8k_priv *priv = ds->priv;
++
++ mutex_lock(&priv->reg_mutex);
++ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
++ mutex_unlock(&priv->reg_mutex);
++}
++
+ static int
+ qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+@@ -2017,6 +2027,7 @@ static const struct dsa_switch_ops qca8k
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
++ .port_fast_age = qca8k_port_fast_age,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -262,6 +262,7 @@ enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
++ QCA8K_FDB_FLUSH_PORT = 5,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+ };
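
The new opcode slots into the existing ATU command helper, which already takes a port argument for port-scoped commands (-1 meaning "no port"). A one-line illustration of the difference:

    qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);        /* flush the whole ARL */
    qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port); /* flush one port only */
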
--- /dev/null
+From 6a3bdc5209f45d2af83aa92433ab6e5cf2297aa4 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:47 +0100
+Subject: net: dsa: qca8k: add set_ageing_time support
+
+qca8k supports setting the ageing time in steps of 7s. Add support for it
+and set the maximum accepted value to 7645m.
+Documentation talks about support for 10000m, but that value doesn't
+make sense as it doesn't match the maximum value the register field can hold.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 25 +++++++++++++++++++++++++
+ drivers/net/dsa/qca8k.h | 3 +++
+ 2 files changed, 28 insertions(+)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1261,6 +1261,10 @@ qca8k_setup(struct dsa_switch *ds)
+ /* We don't have interrupts for link changes, so we need to poll */
+ ds->pcs_poll = true;
+
++	/* Set min and max ageing values supported */
++ ds->ageing_time_min = 7000;
++ ds->ageing_time_max = 458745000;
++
+ return 0;
+ }
+
+@@ -1801,6 +1805,26 @@ qca8k_port_fast_age(struct dsa_switch *d
+ }
+
+ static int
++qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
++{
++ struct qca8k_priv *priv = ds->priv;
++ unsigned int secs = msecs / 1000;
++ u32 val;
++
++ /* AGE_TIME reg is set in 7s step */
++ val = secs / 7;
++
++ /* Handle case with 0 as val to NOT disable
++ * learning
++ */
++ if (!val)
++ val = 1;
++
++ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
++ QCA8K_ATU_AGE_TIME(val));
++}
++
++static int
+ qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+ {
+@@ -2018,6 +2042,7 @@ static const struct dsa_switch_ops qca8k
+ .get_strings = qca8k_get_strings,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
++ .set_ageing_time = qca8k_set_ageing_time,
+ .get_mac_eee = qca8k_get_mac_eee,
+ .set_mac_eee = qca8k_set_mac_eee,
+ .port_enable = qca8k_port_enable,
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -175,6 +175,9 @@
+ #define QCA8K_VTU_FUNC1_BUSY BIT(31)
+ #define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
+ #define QCA8K_VTU_FUNC1_FULL BIT(4)
++#define QCA8K_REG_ATU_CTRL 0x618
++#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
++#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+ #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+ #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+ #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
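
The 7645m figure in the changelog follows directly from the register layout above; a worked check of the bounds (arithmetic only, assuming the 16-bit AGE_TIME field):

    /* AGE_TIME is a 16-bit count of 7 s steps:
     *   max = 65535 * 7 s = 458745 s = 458745000 ms (~7645.75 minutes)
     *   min =     1 * 7 s =      7 s =      7000 ms
     * matching ds->ageing_time_max = 458745000 and ds->ageing_time_min = 7000;
     * a raw value of 0 would disable learning, hence the clamp to 1 in the code.
     */
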
--- /dev/null
+From ba8f870dfa635113ce6e8095a5eb1835ecde2e9e Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Mon, 22 Nov 2021 16:23:48 +0100
+Subject: net: dsa: qca8k: add support for mdb_add/del
+
+Add support for the mdb add/del functions. The ARL table is used to insert
+the rule. The rule will be searched, deleted and reinserted with the
+port mask updated. The function checks whether the rule has to be updated
+or can be inserted directly without deleting an old rule.
+If every port is removed from the port mask, the rule is removed.
+The rule is set STATIC in the ARL table (i.e. it doesn't age) so it is not
+flushed by the fast age function.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Vladimir Oltean <olteanv@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 99 insertions(+)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -436,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
+ }
+
+ static int
++qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
++ const u8 *mac, u16 vid)
++{
++ struct qca8k_fdb fdb = { 0 };
++ int ret;
++
++ mutex_lock(&priv->reg_mutex);
++
++ qca8k_fdb_write(priv, vid, 0, mac, 0);
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
++ if (ret < 0)
++ goto exit;
++
++ ret = qca8k_fdb_read(priv, &fdb);
++ if (ret < 0)
++ goto exit;
++
++	/* Rule exists. Delete it first */
++ if (!fdb.aging) {
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
++ if (ret)
++ goto exit;
++ }
++
++ /* Add port to fdb portmask */
++ fdb.port_mask |= port_mask;
++
++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
++
++exit:
++ mutex_unlock(&priv->reg_mutex);
++ return ret;
++}
++
++static int
++qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
++ const u8 *mac, u16 vid)
++{
++ struct qca8k_fdb fdb = { 0 };
++ int ret;
++
++ mutex_lock(&priv->reg_mutex);
++
++ qca8k_fdb_write(priv, vid, 0, mac, 0);
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
++ if (ret < 0)
++ goto exit;
++
++ /* Rule doesn't exist. Why delete? */
++ if (!fdb.aging) {
++ ret = -EINVAL;
++ goto exit;
++ }
++
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
++ if (ret)
++ goto exit;
++
++	/* The only port in the rule is this port. Don't reinsert */
++ if (fdb.port_mask == port_mask)
++ goto exit;
++
++ /* Remove port from port mask */
++ fdb.port_mask &= ~port_mask;
++
++ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
++ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
++
++exit:
++ mutex_unlock(&priv->reg_mutex);
++ return ret;
++}
++
++static int
+ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
+ {
+ u32 reg;
+@@ -1949,6 +2024,28 @@ qca8k_port_fdb_dump(struct dsa_switch *d
+ }
+
+ static int
++qca8k_port_mdb_add(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_mdb *mdb)
++{
++ struct qca8k_priv *priv = ds->priv;
++ const u8 *addr = mdb->addr;
++ u16 vid = mdb->vid;
++
++ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
++}
++
++static int
++qca8k_port_mdb_del(struct dsa_switch *ds, int port,
++ const struct switchdev_obj_port_mdb *mdb)
++{
++ struct qca8k_priv *priv = ds->priv;
++ const u8 *addr = mdb->addr;
++ u16 vid = mdb->vid;
++
++ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
++}
++
++static int
+ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+ {
+@@ -2056,6 +2153,8 @@ static const struct dsa_switch_ops qca8k
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
++ .port_mdb_add = qca8k_port_mdb_add,
++ .port_mdb_del = qca8k_port_mdb_del,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
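
Since each ARL entry carries a destination port bitmap, mdb membership updates reduce to bit operations on port_mask between the SEARCH and the re-LOAD; a minimal sketch of the invariant (illustrative, not part of the patch):

    fdb.port_mask |= BIT(port);   /* mdb add: subscribe this port */
    fdb.port_mask &= ~BIT(port);  /* mdb del: unsubscribe this port */
    /* when the mask would become empty, the entry is purged rather than
     * re-loaded (the "rule is removed" case from the changelog) */
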
--- /dev/null
+From 2c1bdbc7e7560d7de754cad277d968d56bb1899e Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Tue, 23 Nov 2021 03:59:10 +0100
+Subject: net: dsa: qca8k: add support for mirror mode
+
+The switch supports mirror mode. Only one port can be set as the mirror
+port, and every other port can be set to both ingress and egress mode. The
+mirror port is disabled and reverted to normal operation once every port is
+removed from sending packets to it.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 95 +++++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/dsa/qca8k.h | 4 +++
+ 2 files changed, 99 insertions(+)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -2046,6 +2046,99 @@ qca8k_port_mdb_del(struct dsa_switch *ds
+ }
+
+ static int
++qca8k_port_mirror_add(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror,
++ bool ingress)
++{
++ struct qca8k_priv *priv = ds->priv;
++ int monitor_port, ret;
++ u32 reg, val;
++
++ /* Check for existent entry */
++ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
++ return -EEXIST;
++
++ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
++ if (ret)
++ return ret;
++
++ /* QCA83xx can have only one port set to mirror mode.
++ * Check that the correct port is requested and return error otherwise.
++	 * When no mirror port is set, the value is set to 0xF
++ */
++ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
++ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
++ return -EEXIST;
++
++ /* Set the monitor port */
++ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
++ mirror->to_local_port);
++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
++ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
++ if (ret)
++ return ret;
++
++ if (ingress) {
++ reg = QCA8K_PORT_LOOKUP_CTRL(port);
++ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
++ } else {
++ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
++ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
++ }
++
++ ret = regmap_update_bits(priv->regmap, reg, val, val);
++ if (ret)
++ return ret;
++
++ /* Track mirror port for tx and rx to decide when the
++ * mirror port has to be disabled.
++ */
++ if (ingress)
++ priv->mirror_rx |= BIT(port);
++ else
++ priv->mirror_tx |= BIT(port);
++
++ return 0;
++}
++
++static void
++qca8k_port_mirror_del(struct dsa_switch *ds, int port,
++ struct dsa_mall_mirror_tc_entry *mirror)
++{
++ struct qca8k_priv *priv = ds->priv;
++ u32 reg, val;
++ int ret;
++
++ if (mirror->ingress) {
++ reg = QCA8K_PORT_LOOKUP_CTRL(port);
++ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
++ } else {
++ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
++ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
++ }
++
++ ret = regmap_clear_bits(priv->regmap, reg, val);
++ if (ret)
++ goto err;
++
++ if (mirror->ingress)
++ priv->mirror_rx &= ~BIT(port);
++ else
++ priv->mirror_tx &= ~BIT(port);
++
++ /* No port set to send packet to mirror port. Disable mirror port */
++ if (!priv->mirror_rx && !priv->mirror_tx) {
++ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
++ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
++ if (ret)
++ goto err;
++ }
++err:
++ dev_err(priv->dev, "Failed to del mirror port from %d", port);
++}
++
++static int
+ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+ {
+@@ -2155,6 +2248,8 @@ static const struct dsa_switch_ops qca8k
+ .port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
++ .port_mirror_add = qca8k_port_mirror_add,
++ .port_mirror_del = qca8k_port_mirror_del,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -180,6 +180,7 @@
+ #define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+ #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+ #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
++#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
+ #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+ #define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+ #define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+@@ -201,6 +202,7 @@
+ #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+ #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+ #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
++#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+ #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+ #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+@@ -305,6 +307,8 @@ struct qca8k_ports_config {
+ struct qca8k_priv {
+ u8 switch_id;
+ u8 switch_revision;
++ u8 mirror_rx;
++ u8 mirror_tx;
+ bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
+ struct regmap *regmap;
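
MIRROR_PORT_NUM is a 4-bit field, so 0xF lies outside the valid 0-6 port range and doubles as the "no monitor port configured" sentinel that both paths above test and restore. The core check, restated in isolation:

    monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
    if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
            return -EEXIST; /* a different monitor port is already active */
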
--- /dev/null
+From def975307c01191b6f0170048c3724b0ed3348af Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Tue, 23 Nov 2021 03:59:11 +0100
+Subject: net: dsa: qca8k: add LAG support
+
+Add LAG support to this switch. In the documentation this is described as
+trunk mode. A maximum of 4 LAGs are supported and each can contain up to 4
+ports. The only tx mode currently supported is hash mode, with both L2 and
+L2+3 variants.
+When no ports are present in the trunk, the trunk is disabled in the
+switch.
+When a port is disconnected, the traffic is redirected to the other
+available ports.
+The hash mode is global, and every LAG is required to have the same hash
+mode set. To change the hash mode when multiple LAGs are configured, it's
+required to remove each LAG and set the desired hash mode on the last one.
+An error is printed when asked to set an unsupported hash mode.
+
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/dsa/qca8k.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/dsa/qca8k.h | 33 +++++++++
+ 2 files changed, 210 insertions(+)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1340,6 +1340,9 @@ qca8k_setup(struct dsa_switch *ds)
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
++ /* Set max number of LAGs supported */
++ ds->num_lag_ids = QCA8K_NUM_LAGS;
++
+ return 0;
+ }
+
+@@ -2226,6 +2229,178 @@ qca8k_get_tag_protocol(struct dsa_switch
+ return DSA_TAG_PROTO_QCA;
+ }
+
++static bool
++qca8k_lag_can_offload(struct dsa_switch *ds,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ struct dsa_port *dp;
++ int id, members = 0;
++
++ id = dsa_lag_id(ds->dst, lag);
++ if (id < 0 || id >= ds->num_lag_ids)
++ return false;
++
++ dsa_lag_foreach_port(dp, ds->dst, lag)
++ /* Includes the port joining the LAG */
++ members++;
++
++ if (members > QCA8K_NUM_PORTS_FOR_LAG)
++ return false;
++
++ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
++ return false;
++
++ if (info->hash_type != NETDEV_LAG_HASH_L2 ||
++ info->hash_type != NETDEV_LAG_HASH_L23)
++ return false;
++
++ return true;
++}
++
++static int
++qca8k_lag_setup_hash(struct dsa_switch *ds,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ struct qca8k_priv *priv = ds->priv;
++ bool unique_lag = true;
++ int i, id;
++ u32 hash;
++
++ id = dsa_lag_id(ds->dst, lag);
++
++ switch (info->hash_type) {
++ case NETDEV_LAG_HASH_L23:
++ hash |= QCA8K_TRUNK_HASH_SIP_EN;
++ hash |= QCA8K_TRUNK_HASH_DIP_EN;
++ fallthrough;
++ case NETDEV_LAG_HASH_L2:
++ hash |= QCA8K_TRUNK_HASH_SA_EN;
++ hash |= QCA8K_TRUNK_HASH_DA_EN;
++ break;
++ default: /* We should NEVER reach this */
++ return -EOPNOTSUPP;
++ }
++
++ /* Check if we are the unique configured LAG */
++ dsa_lags_foreach_id(i, ds->dst)
++ if (i != id && dsa_lag_dev(ds->dst, i)) {
++ unique_lag = false;
++ break;
++ }
++
++ /* Hash Mode is global. Make sure the same Hash Mode
++ * is set to all the 4 possible lag.
++ * If we are the unique LAG we can set whatever hash
++ * mode we want.
++ * To change hash mode it's needed to remove all LAG
++ * and change the mode with the latest.
++ */
++ if (unique_lag) {
++ priv->lag_hash_mode = hash;
++ } else if (priv->lag_hash_mode != hash) {
++ netdev_err(lag, "Error: Mismateched Hash Mode across different lag is not supported\n");
++ return -EOPNOTSUPP;
++ }
++
++ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
++ QCA8K_TRUNK_HASH_MASK, hash);
++}
++
++static int
++qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
++ struct net_device *lag, bool delete)
++{
++ struct qca8k_priv *priv = ds->priv;
++ int ret, id, i;
++ u32 val;
++
++ id = dsa_lag_id(ds->dst, lag);
++
++ /* Read current port member */
++ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
++ if (ret)
++ return ret;
++
++ /* Shift val to the correct trunk */
++ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
++ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
++ if (delete)
++ val &= ~BIT(port);
++ else
++ val |= BIT(port);
++
++ /* Update port member. With empty portmap disable trunk */
++ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
++ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
++ QCA8K_REG_GOL_TRUNK_EN(id),
++ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
++ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
++
++	/* Search for an empty member slot when adding, or for the port when deleting */
++ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
++ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
++ if (ret)
++ return ret;
++
++ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
++ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
++
++ if (delete) {
++ /* If port flagged to be disabled assume this member is
++ * empty
++ */
++ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
++ continue;
++
++ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
++ if (val != port)
++ continue;
++ } else {
++ /* If port flagged to be enabled assume this member is
++ * already set
++ */
++ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
++ continue;
++ }
++
++ /* We have found the member to add/remove */
++ break;
++ }
++
++ /* Set port in the correct port mask or disable port if in delete mode */
++ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
++ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
++ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
++ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
++ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
++}
++
++static int
++qca8k_port_lag_join(struct dsa_switch *ds, int port,
++ struct net_device *lag,
++ struct netdev_lag_upper_info *info)
++{
++ int ret;
++
++ if (!qca8k_lag_can_offload(ds, lag, info))
++ return -EOPNOTSUPP;
++
++ ret = qca8k_lag_setup_hash(ds, lag, info);
++ if (ret)
++ return ret;
++
++ return qca8k_lag_refresh_portmap(ds, port, lag, false);
++}
++
++static int
++qca8k_port_lag_leave(struct dsa_switch *ds, int port,
++ struct net_device *lag)
++{
++ return qca8k_lag_refresh_portmap(ds, port, lag, true);
++}
++
+ static const struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+@@ -2259,6 +2434,8 @@ static const struct dsa_switch_ops qca8k
+ .phylink_mac_link_down = qca8k_phylink_mac_link_down,
+ .phylink_mac_link_up = qca8k_phylink_mac_link_up,
+ .get_phy_flags = qca8k_get_phy_flags,
++ .port_lag_join = qca8k_port_lag_join,
++ .port_lag_leave = qca8k_port_lag_leave,
+ };
+
+ static int qca8k_read_switch_id(struct qca8k_priv *priv)
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -15,6 +15,8 @@
+ #define QCA8K_NUM_PORTS 7
+ #define QCA8K_NUM_CPU_PORTS 2
+ #define QCA8K_MAX_MTU 9000
++#define QCA8K_NUM_LAGS 4
++#define QCA8K_NUM_PORTS_FOR_LAG 4
+
+ #define PHY_ID_QCA8327 0x004dd034
+ #define QCA8K_ID_QCA8327 0x12
+@@ -122,6 +124,14 @@
+ #define QCA8K_REG_EEE_CTRL 0x100
+ #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
++/* TRUNK_HASH_EN registers */
++#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
++#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
++#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
++#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
++#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
++#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
++
+ /* ACL registers */
+ #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+ #define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+@@ -204,6 +214,28 @@
+ #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+ #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
++#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
++/* 4 trunks max, one byte of the register per trunk:
++ * bits 6:0 are the member bitmap
++ * bit 7 enables the trunk port
++ */
++#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
++#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
++#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
++#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
++#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
++/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
++#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
++#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
++#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
++/* Complex shift: FIRST shift for port THEN shift for trunk */
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
++#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
++
+ #define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+ #define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+ #define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+@@ -309,6 +341,7 @@ struct qca8k_priv {
+ u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
++ u8 lag_hash_mode;
+ bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
+ struct regmap *regmap;
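
The "complex shift" in the trunk member macros is easier to follow with a worked example derived from the definitions above, here for trunk 2, member slot 1:

    /* reg   = QCA8K_REG_GOL_TRUNK_CTRL(2) = 0x704 + (2 / 2) * 4 = 0x708
     * shift = GOL_MEM_ID_SHIFT(1) + GOL_TRUNK_ID_SHIFT(2) = 4 + 16 = 20
     * so the 4-bit member descriptor sits at bits 23:20 of 0x708:
     * bit 23 is the enable flag, bits 22:20 hold the port number.
     */
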
--- /dev/null
+From 0898ca67b86e14207d4feb3f3fea8b87cec5aab1 Mon Sep 17 00:00:00 2001
+From: Ansuel Smith <ansuelsmth@gmail.com>
+Date: Tue, 23 Nov 2021 16:44:46 +0100
+Subject: net: dsa: qca8k: fix warning in LAG feature
+
+Fix a warning reported by the kernel test robot.
+Make sure hash is initialized to 0 and fix the wrong logic for hash_type in
+qca8k_lag_can_offload.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Fixes: def975307c01 ("net: dsa: qca8k: add LAG support")
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Link: https://lore.kernel.org/r/20211123154446.31019-1-ansuelsmth@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/dsa/qca8k.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -2251,7 +2251,7 @@ qca8k_lag_can_offload(struct dsa_switch
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+- if (info->hash_type != NETDEV_LAG_HASH_L2 ||
++ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+@@ -2265,8 +2265,8 @@ qca8k_lag_setup_hash(struct dsa_switch *
+ {
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
++ u32 hash = 0;
+ int i, id;
+- u32 hash;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+++ /dev/null
-From 0898ca67b86e14207d4feb3f3fea8b87cec5aab1 Mon Sep 17 00:00:00 2001
-From: Ansuel Smith <ansuelsmth@gmail.com>
-Date: Tue, 23 Nov 2021 16:44:46 +0100
-Subject: net: dsa: qca8k: fix warning in LAG feature
-
-Fix warning reported by bot.
-Make sure hash is init to 0 and fix wrong logic for hash_type in
-qca8k_lag_can_offload.
-
-Reported-by: kernel test robot <lkp@intel.com>
-Fixes: def975307c01 ("net: dsa: qca8k: add LAG support")
-Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
-Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
-Link: https://lore.kernel.org/r/20211123154446.31019-1-ansuelsmth@gmail.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
----
- drivers/net/dsa/qca8k.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/net/dsa/qca8k.c
-+++ b/drivers/net/dsa/qca8k.c
-@@ -2251,7 +2251,7 @@ qca8k_lag_can_offload(struct dsa_switch
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
- return false;
-
-- if (info->hash_type != NETDEV_LAG_HASH_L2 ||
-+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
- return false;
-
-@@ -2265,8 +2265,8 @@ qca8k_lag_setup_hash(struct dsa_switch *
- {
- struct qca8k_priv *priv = ds->priv;
- bool unique_lag = true;
-+ u32 hash = 0;
- int i, id;
-- u32 hash;
-
- id = dsa_lag_id(ds->dst, lag);
-
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1642,7 +1642,6 @@ struct net_device_ops {
+@@ -1643,7 +1643,6 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
*/
-@@ -1677,7 +1676,7 @@ enum netdev_priv_flags {
+@@ -1678,7 +1677,7 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
-@@ -1711,7 +1710,6 @@ enum netdev_priv_flags {
+@@ -1712,7 +1711,6 @@ enum netdev_priv_flags {
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1681,6 +1681,10 @@ enum netdev_priv_flags {
+@@ -1682,6 +1682,10 @@ enum netdev_priv_flags {
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
#define IFF_BONDING IFF_BONDING
-@@ -1712,6 +1716,7 @@ enum netdev_priv_flags {
+@@ -1713,6 +1717,7 @@ enum netdev_priv_flags {
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
-@@ -2012,6 +2017,7 @@ struct net_device {
+@@ -2013,6 +2018,7 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
unsigned int priv_flags;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
-@@ -2072,6 +2078,11 @@ struct net_device {
+@@ -2073,6 +2079,11 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
const struct header_ops *header_ops;
unsigned char operstate;
-@@ -2143,6 +2154,10 @@ struct net_device {
+@@ -2144,6 +2155,10 @@ struct net_device {
struct mctp_dev __rcu *mctp_ptr;
#endif
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2098,6 +2098,8 @@ struct net_device {
+@@ -2099,6 +2099,8 @@ struct net_device {
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3097,8 +3097,8 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -3098,8 +3098,8 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(ð->rx_napi))) {
}
return IRQ_HANDLED;
-@@ -3110,8 +3110,8 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -3111,8 +3111,8 @@ static irqreturn_t mtk_handle_irq_tx(int
eth->tx_events++;
if (likely(napi_schedule_prep(ð->tx_napi))) {
}
return IRQ_HANDLED;
-@@ -4885,6 +4885,8 @@ static int mtk_probe(struct platform_dev
+@@ -4886,6 +4886,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(ð->dummy_dev);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1515,12 +1515,28 @@ static void mtk_wake_queue(struct mtk_et
+@@ -1516,12 +1516,28 @@ static void mtk_wake_queue(struct mtk_et
}
}
bool gso = false;
int tx_num;
-@@ -1542,6 +1558,18 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1543,6 +1559,18 @@ static netdev_tx_t mtk_start_xmit(struct
return NETDEV_TX_BUSY;
}
/* TSO: fill MSS info in tcp checksum field */
if (skb_is_gso(skb)) {
if (skb_cow_head(skb, 0)) {
-@@ -1557,8 +1585,14 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1558,8 +1586,14 @@ static netdev_tx_t mtk_start_xmit(struct
}
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -722,6 +722,7 @@ static void mtk_mac_link_up(struct phyli
+@@ -723,6 +723,7 @@ static void mtk_mac_link_up(struct phyli
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
switch (speed) {
case SPEED_2500:
case SPEED_1000:
-@@ -3290,6 +3291,9 @@ found:
+@@ -3291,6 +3292,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
-@@ -781,7 +782,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -835,7 +836,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Mon, 20 Mar 2023 11:44:30 +0100
-Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows
- from wlan devices
-
-WED version 2 (on MT7986 and later) can offload flows originating from wireless
-devices. In order to make that work, ndo_setup_tc needs to be implemented on
-the netdevs. This adds the required code to offload flows coming in from WED,
-while keeping track of the incoming wed index used for selecting the correct
-PPE device.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1448,6 +1448,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
- int mtk_eth_offload_init(struct mtk_eth *eth);
- int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data);
-+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
-+ int ppe_index);
-+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
- void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
-
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
-@@ -235,7 +235,8 @@ out:
- }
-
- static int
--mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
-+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
-+ int ppe_index)
- {
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct flow_action_entry *act;
-@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth
- entry->cookie = f->cookie;
- memcpy(&entry->data, &foe, sizeof(entry->data));
- entry->wed_index = wed_index;
-+ entry->ppe_index = ppe_index;
-
- err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
- if (err < 0)
-@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *e
-
- static DEFINE_MUTEX(mtk_flow_offload_mutex);
-
--static int
--mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
-+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
-+ int ppe_index)
- {
-- struct flow_cls_offload *cls = type_data;
-- struct net_device *dev = cb_priv;
-- struct mtk_mac *mac = netdev_priv(dev);
-- struct mtk_eth *eth = mac->hw;
- int err;
-
-- if (!tc_can_offload(dev))
-- return -EOPNOTSUPP;
--
-- if (type != TC_SETUP_CLSFLOWER)
-- return -EOPNOTSUPP;
--
- mutex_lock(&mtk_flow_offload_mutex);
- switch (cls->command) {
- case FLOW_CLS_REPLACE:
-- err = mtk_flow_offload_replace(eth, cls);
-+ err = mtk_flow_offload_replace(eth, cls, ppe_index);
- break;
- case FLOW_CLS_DESTROY:
- err = mtk_flow_offload_destroy(eth, cls);
-@@ -556,6 +548,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
- }
-
- static int
-+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
-+{
-+ struct flow_cls_offload *cls = type_data;
-+ struct net_device *dev = cb_priv;
-+ struct mtk_mac *mac = netdev_priv(dev);
-+ struct mtk_eth *eth = mac->hw;
-+
-+ if (!tc_can_offload(dev))
-+ return -EOPNOTSUPP;
-+
-+ if (type != TC_SETUP_CLSFLOWER)
-+ return -EOPNOTSUPP;
-+
-+ return mtk_flow_offload_cmd(eth, cls, 0);
-+}
-+
-+static int
- mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
- {
- struct mtk_mac *mac = netdev_priv(dev);
---- a/drivers/net/ethernet/mediatek/mtk_wed.c
-+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -13,6 +13,8 @@
- #include <linux/mfd/syscon.h>
- #include <linux/debugfs.h>
- #include <linux/soc/mediatek/mtk_wed.h>
-+#include <net/flow_offload.h>
-+#include <net/pkt_cls.h>
- #include "mtk_eth_soc.h"
- #include "mtk_wed_regs.h"
- #include "mtk_wed.h"
-@@ -41,6 +43,11 @@
- static struct mtk_wed_hw *hw_list[2];
- static DEFINE_MUTEX(hw_lock);
-
-+struct mtk_wed_flow_block_priv {
-+ struct mtk_wed_hw *hw;
-+ struct net_device *dev;
-+};
-+
- static void
- wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
- {
-@@ -1760,6 +1767,99 @@ out:
- mutex_unlock(&hw_lock);
- }
-
-+static int
-+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
-+{
-+ struct mtk_wed_flow_block_priv *priv = cb_priv;
-+ struct flow_cls_offload *cls = type_data;
-+ struct mtk_wed_hw *hw = priv->hw;
-+
-+ if (!tc_can_offload(priv->dev))
-+ return -EOPNOTSUPP;
-+
-+ if (type != TC_SETUP_CLSFLOWER)
-+ return -EOPNOTSUPP;
-+
-+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
-+}
-+
-+static int
-+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
-+ struct flow_block_offload *f)
-+{
-+ struct mtk_wed_flow_block_priv *priv;
-+ static LIST_HEAD(block_cb_list);
-+ struct flow_block_cb *block_cb;
-+ struct mtk_eth *eth = hw->eth;
-+ flow_setup_cb_t *cb;
-+
-+ if (!eth->soc->offload_version)
-+ return -EOPNOTSUPP;
-+
-+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-+ return -EOPNOTSUPP;
-+
-+ cb = mtk_wed_setup_tc_block_cb;
-+ f->driver_block_list = &block_cb_list;
-+
-+ switch (f->command) {
-+ case FLOW_BLOCK_BIND:
-+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
-+ if (block_cb) {
-+ flow_block_cb_incref(block_cb);
-+ return 0;
-+ }
-+
-+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-+ if (!priv)
-+ return -ENOMEM;
-+
-+ priv->hw = hw;
-+ priv->dev = dev;
-+ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
-+ if (IS_ERR(block_cb)) {
-+ kfree(priv);
-+ return PTR_ERR(block_cb);
-+ }
-+
-+ flow_block_cb_incref(block_cb);
-+ flow_block_cb_add(block_cb, f);
-+ list_add_tail(&block_cb->driver_list, &block_cb_list);
-+ return 0;
-+ case FLOW_BLOCK_UNBIND:
-+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
-+ if (!block_cb)
-+ return -ENOENT;
-+
-+ if (!flow_block_cb_decref(block_cb)) {
-+ flow_block_cb_remove(block_cb, f);
-+ list_del(&block_cb->driver_list);
-+ kfree(block_cb->cb_priv);
-+ }
-+ return 0;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static int
-+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
-+ enum tc_setup_type type, void *type_data)
-+{
-+ struct mtk_wed_hw *hw = wed->hw;
-+
-+ if (hw->version < 2)
-+ return -EOPNOTSUPP;
-+
-+ switch (type) {
-+ case TC_SETUP_BLOCK:
-+ case TC_SETUP_FT:
-+ return mtk_wed_setup_tc_block(hw, dev, type_data);
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
- void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, phys_addr_t wdma_phy,
- int index)
-@@ -1779,6 +1879,7 @@ void mtk_wed_add_hw(struct device_node *
- .irq_set_mask = mtk_wed_irq_set_mask,
- .detach = mtk_wed_detach,
- .ppe_check = mtk_wed_ppe_check,
-+ .setup_tc = mtk_wed_setup_tc,
- };
- struct device_node *eth_np = eth->dev->of_node;
- struct platform_device *pdev;
---- a/include/linux/soc/mediatek/mtk_wed.h
-+++ b/include/linux/soc/mediatek/mtk_wed.h
-@@ -6,6 +6,7 @@
- #include <linux/regmap.h>
- #include <linux/pci.h>
- #include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-
- #define MTK_WED_TX_QUEUES 2
- #define MTK_WED_RX_QUEUES 2
-@@ -180,6 +181,8 @@ struct mtk_wed_ops {
-
- u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
- void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
-+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
-+ enum tc_setup_type type, void *type_data);
- };
-
- extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
-@@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
- (_dev)->ops->msg_update(_dev, _id, _msg, _len)
- #define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
- #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
-+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
-+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
- #else
- static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
- {
-@@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
- #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
- #define mtk_wed_device_stop(_dev) do {} while (0)
- #define mtk_wed_device_dma_reset(_dev) do {} while (0)
-+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
- #endif
-
- #endif
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Mon, 20 Mar 2023 15:37:55 +0100
-Subject: [PATCH] net: ethernet: mediatek: mtk_ppe: prefer newly added l2
- flows over existing ones
-
-When a device is roaming between interfaces and a new flow entry is created,
-we should assume that its output device is more up to date than whatever
-entry existed already.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -663,10 +663,20 @@ void mtk_foe_entry_clear(struct mtk_ppe
- static int
- mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
- {
-+ struct mtk_flow_entry *prev;
-+
- entry->type = MTK_FLOW_TYPE_L2;
-
-- return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
-- mtk_flow_l2_ht_params);
-+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
-+ mtk_flow_l2_ht_params);
-+ if (likely(!prev))
-+ return 0;
-+
-+ if (IS_ERR(prev))
-+ return PTR_ERR(prev);
-+
-+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
-+ &entry->l2_node, mtk_flow_l2_ht_params);
- }
-
- int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
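
The patch above boils down to one rhashtable idiom: an atomic lookup-or-insert, followed by an in-place replace when an older entry already holds the slot, so the most recently created flow wins. A reduced sketch of that idiom (the my_* names are illustrative):

#include <linux/err.h>
#include <linux/rhashtable.h>

struct my_entry {
	struct rhash_head node;
	u32 key;
};

/* Insert @new; if an entry with the same key already exists (e.g. a
 * stale flow left over after roaming), replace it rather than failing
 * with -EEXIST. */
static int my_insert_or_replace(struct rhashtable *ht, struct my_entry *new,
				const struct rhashtable_params params)
{
	struct my_entry *prev;

	prev = rhashtable_lookup_get_insert_fast(ht, &new->node, params);
	if (likely(!prev))
		return 0;		/* inserted cleanly, no conflict */

	if (IS_ERR(prev))
		return PTR_ERR(prev);	/* table error */

	return rhashtable_replace_fast(ht, &prev->node, &new->node, params);
}
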
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Thu, 23 Mar 2023 10:24:11 +0100
-Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
- offloaded flows
-
-Unify tracking of L2 and L3 flows. Use the generic list field in struct
-mtk_flow_entry for tracking L2 subflows. Preparation for improving
-flow accounting support.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -483,42 +483,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
- return 0;
- }
-
-+static int
-+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
-+{
-+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
-+
-+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-+ return offsetof(struct mtk_foe_entry, ipv6._rsv);
-+ else
-+ return offsetof(struct mtk_foe_entry, ipv4.ib2);
-+}
-+
- static bool
- mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
-- struct mtk_foe_entry *data)
-+ struct mtk_foe_entry *data, int len)
- {
-- int type, len;
--
- if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
- return false;
-
-- type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
-- if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-- len = offsetof(struct mtk_foe_entry, ipv6._rsv);
-- else
-- len = offsetof(struct mtk_foe_entry, ipv4.ib2);
--
- return !memcmp(&entry->data.data, &data->data, len - 4);
- }
-
- static void
--__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
-+ bool set_state)
- {
-- struct hlist_head *head;
- struct hlist_node *tmp;
-
- if (entry->type == MTK_FLOW_TYPE_L2) {
- rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
- mtk_flow_l2_ht_params);
-
-- head = &entry->l2_flows;
-- hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
-- __mtk_foe_entry_clear(ppe, entry);
-+ hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
-+ __mtk_foe_entry_clear(ppe, entry, set_state);
- return;
- }
-
-- hlist_del_init(&entry->list);
-- if (entry->hash != 0xffff) {
-+ if (entry->hash != 0xffff && set_state) {
- struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
-
- hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-@@ -538,7 +539,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
- if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
- return;
-
-- hlist_del_init(&entry->l2_data.list);
-+ hlist_del_init(&entry->l2_list);
-+ hlist_del_init(&entry->list);
- kfree(entry);
- }
-
-@@ -554,66 +556,55 @@ static int __mtk_foe_entry_idle_time(str
- return now - timestamp;
- }
-
-+static bool
-+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-+{
-+ struct mtk_foe_entry foe = {};
-+ struct mtk_foe_entry *hwe;
-+ u16 hash = entry->hash;
-+ int len;
-+
-+ if (hash == 0xffff)
-+ return false;
-+
-+ hwe = mtk_foe_get_entry(ppe, hash);
-+ len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
-+ memcpy(&foe, hwe, len);
-+
-+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
-+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
-+ return false;
-+
-+ entry->data.ib1 = foe.ib1;
-+
-+ return true;
-+}
-+
- static void
- mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
- {
- u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
- struct mtk_flow_entry *cur;
-- struct mtk_foe_entry *hwe;
- struct hlist_node *tmp;
- int idle;
-
- idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
-+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
- int cur_idle;
-- u32 ib1;
--
-- hwe = mtk_foe_get_entry(ppe, cur->hash);
-- ib1 = READ_ONCE(hwe->ib1);
-
-- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
-- cur->hash = 0xffff;
-- __mtk_foe_entry_clear(ppe, cur);
-+ if (!mtk_flow_entry_update(ppe, cur)) {
-+ __mtk_foe_entry_clear(ppe, entry, false);
- continue;
- }
-
-- cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
-+ cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
- if (cur_idle >= idle)
- continue;
-
- idle = cur_idle;
- entry->data.ib1 &= ~ib1_ts_mask;
-- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
-- }
--}
--
--static void
--mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
--{
-- struct mtk_foe_entry foe = {};
-- struct mtk_foe_entry *hwe;
--
-- spin_lock_bh(&ppe_lock);
--
-- if (entry->type == MTK_FLOW_TYPE_L2) {
-- mtk_flow_entry_update_l2(ppe, entry);
-- goto out;
-+ entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
- }
--
-- if (entry->hash == 0xffff)
-- goto out;
--
-- hwe = mtk_foe_get_entry(ppe, entry->hash);
-- memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
-- if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
-- entry->hash = 0xffff;
-- goto out;
-- }
--
-- entry->data.ib1 = foe.ib1;
--
--out:
-- spin_unlock_bh(&ppe_lock);
- }
-
- static void
-@@ -656,7 +647,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
- void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
- {
- spin_lock_bh(&ppe_lock);
-- __mtk_foe_entry_clear(ppe, entry);
-+ __mtk_foe_entry_clear(ppe, entry, true);
-+ hlist_del_init(&entry->list);
- spin_unlock_bh(&ppe_lock);
- }
-
-@@ -703,8 +695,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
- {
- const struct mtk_soc_data *soc = ppe->eth->soc;
- struct mtk_flow_entry *flow_info;
-- struct mtk_foe_entry foe = {}, *hwe;
- struct mtk_foe_mac_info *l2;
-+ struct mtk_foe_entry *hwe;
- u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
- int type;
-
-@@ -712,30 +704,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
- if (!flow_info)
- return;
-
-- flow_info->l2_data.base_flow = entry;
- flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
- flow_info->hash = hash;
- hlist_add_head(&flow_info->list,
- &ppe->foe_flow[hash / soc->hash_offset]);
-- hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
-+ hlist_add_head(&flow_info->l2_list, &entry->l2_flows);
-
- hwe = mtk_foe_get_entry(ppe, hash);
-- memcpy(&foe, hwe, soc->foe_entry_size);
-- foe.ib1 &= ib1_mask;
-- foe.ib1 |= entry->data.ib1 & ~ib1_mask;
-+ memcpy(&flow_info->data, hwe, soc->foe_entry_size);
-+ flow_info->data.ib1 &= ib1_mask;
-+ flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
-
-- l2 = mtk_foe_entry_l2(ppe->eth, &foe);
-+ l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
- memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
-
-- type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
-+ type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
- if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
-- memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
-+ memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
-+ sizeof(flow_info->data.ipv4.new));
- else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
- l2->etype = ETH_P_IPV6;
-
-- *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
-+ *mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
-
-- __mtk_foe_entry_commit(ppe, &foe, hash);
-+ __mtk_foe_entry_commit(ppe, &flow_info->data, hash);
- }
-
- void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
-@@ -745,9 +737,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
- struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
- struct mtk_flow_entry *entry;
- struct mtk_foe_bridge key = {};
-+ struct mtk_foe_entry foe = {};
- struct hlist_node *n;
- struct ethhdr *eh;
- bool found = false;
-+ int entry_len;
- u8 *tag;
-
- spin_lock_bh(&ppe_lock);
-@@ -755,20 +749,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
- if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
- goto out;
-
-- hlist_for_each_entry_safe(entry, n, head, list) {
-- if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
-- if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
-- MTK_FOE_STATE_BIND))
-- continue;
--
-- entry->hash = 0xffff;
-- __mtk_foe_entry_clear(ppe, entry);
-- continue;
-- }
-+ entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
-+ memcpy(&foe, hwe, entry_len);
-
-- if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
-+ hlist_for_each_entry_safe(entry, n, head, list) {
-+ if (found ||
-+ !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
- if (entry->hash != 0xffff)
-- entry->hash = 0xffff;
-+ __mtk_foe_entry_clear(ppe, entry, false);
- continue;
- }
-
-@@ -819,9 +807,17 @@ out:
-
- int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
- {
-- mtk_flow_entry_update(ppe, entry);
-+ int idle;
-+
-+ spin_lock_bh(&ppe_lock);
-+ if (entry->type == MTK_FLOW_TYPE_L2)
-+ mtk_flow_entry_update_l2(ppe, entry);
-+ else
-+ mtk_flow_entry_update(ppe, entry);
-+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-+ spin_unlock_bh(&ppe_lock);
-
-- return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-+ return idle;
- }
-
- int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
---- a/drivers/net/ethernet/mediatek/mtk_ppe.h
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
-@@ -286,7 +286,12 @@ enum {
-
- struct mtk_flow_entry {
- union {
-- struct hlist_node list;
-+ /* regular flows + L2 subflows */
-+ struct {
-+ struct hlist_node list;
-+ struct hlist_node l2_list;
-+ };
-+ /* L2 flows */
- struct {
- struct rhash_head l2_node;
- struct hlist_head l2_flows;
-@@ -296,13 +301,7 @@ struct mtk_flow_entry {
- s8 wed_index;
- u8 ppe_index;
- u16 hash;
-- union {
-- struct mtk_foe_entry data;
-- struct {
-- struct mtk_flow_entry *base_flow;
-- struct hlist_node list;
-- } l2_data;
-- };
-+ struct mtk_foe_entry data;
- struct rhash_head node;
- unsigned long cookie;
- };
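
The struct rework above hinges on a union: an entry is either a plain flow or L2 subflow (two hlist nodes, so it can sit on the per-hash bucket list and on its parent's subflow list at once), or an L2 parent flow (an rhashtable node plus the head of its subflow list). A compact sketch of that shape and the traversal it enables (flow_entry_sketch is an illustrative name):

#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/types.h>

struct flow_entry_sketch {
	union {
		struct {	/* regular flows and L2 subflows */
			struct hlist_node list;		/* per-hash bucket */
			struct hlist_node l2_list;	/* parent's subflows */
		};
		struct {	/* L2 parent flows */
			struct rhash_head l2_node;
			struct hlist_head l2_flows;
		};
	};
	u16 hash;
};

/* With l2_list embedded directly, walking a parent's subflows becomes a
 * plain (deletion-safe) hlist traversal: */
static void mark_subflows_unhashed(struct flow_entry_sketch *parent)
{
	struct flow_entry_sketch *cur;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(cur, tmp, &parent->l2_flows, l2_list)
		cur->hash = 0xffff;	/* 0xffff == "no hardware entry" */
}
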
+++ /dev/null
-From: Felix Fietkau <nbd@nbd.name>
-Date: Thu, 23 Mar 2023 11:05:22 +0100
-Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2
- flows
-
-For L2 flows, the packet/byte counters should report the sum of the
-counters of their subflows, both current and expired.
-In order to make this work, change the way that accounting data is tracked.
-Reset counters when a flow enters bind. Once it expires (or enters unbind),
-store the last counter value in struct mtk_flow_entry.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
- int ret;
- u32 val;
-
-- ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
-- !(val & MTK_PPE_MIB_SER_CR_ST),
-- 20, MTK_PPE_WAIT_TIMEOUT_US);
-+ ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
-+ !(val & MTK_PPE_MIB_SER_CR_ST),
-+ 20, MTK_PPE_WAIT_TIMEOUT_US);
-
- if (ret)
- dev_err(ppe->dev, "MIB table busy");
-@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
- return ret;
- }
-
--static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
-+static inline struct mtk_foe_accounting *
-+mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
-+{
-+ if (!ppe->acct_table)
-+ return NULL;
-+
-+ return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
-+}
-+
-+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
- {
- u32 val, cnt_r0, cnt_r1, cnt_r2;
-+ struct mtk_foe_accounting *acct;
- int ret;
-
- val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
- ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
-
-+ acct = mtk_ppe_acct_data(ppe, index);
-+ if (!acct)
-+ return NULL;
-+
- ret = mtk_ppe_mib_wait_busy(ppe);
- if (ret)
-- return ret;
-+ return acct;
-
- cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
- cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
-@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
- if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
- /* 64 bit for each counter */
- u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
-- *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
-- *packets = ((u64)cnt_r3 << 32) | cnt_r2;
-+ acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0;
-+ acct->packets += ((u64)cnt_r3 << 32) | cnt_r2;
- } else {
- /* 48 bit byte counter, 40 bit packet counter */
- u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
- u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
- u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
- u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
-- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
-- *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
-+ acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
-+ acct->packets += (pkt_cnt_high << 16) | pkt_cnt_low;
- }
-
-- return 0;
-+ return acct;
- }
-
- static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
-@@ -526,13 +540,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
- hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
- dma_wmb();
- mtk_ppe_cache_clear(ppe);
-- if (ppe->accounting) {
-- struct mtk_foe_accounting *acct;
--
-- acct = ppe->acct_table + entry->hash * sizeof(*acct);
-- acct->packets = 0;
-- acct->bytes = 0;
-- }
- }
- entry->hash = 0xffff;
-
-@@ -557,11 +564,14 @@ static int __mtk_foe_entry_idle_time(str
- }
-
- static bool
--mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
-+ u64 *packets, u64 *bytes)
- {
-+ struct mtk_foe_accounting *acct;
- struct mtk_foe_entry foe = {};
- struct mtk_foe_entry *hwe;
- u16 hash = entry->hash;
-+ bool ret = false;
- int len;
-
- if (hash == 0xffff)
-@@ -572,18 +582,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
- memcpy(&foe, hwe, len);
-
- if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
-- FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
-- return false;
-+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
-+ acct = mtk_ppe_acct_data(ppe, hash);
-+ if (acct) {
-+ entry->prev_packets += acct->packets;
-+ entry->prev_bytes += acct->bytes;
-+ }
-+
-+ goto out;
-+ }
-
- entry->data.ib1 = foe.ib1;
-+ acct = mtk_ppe_mib_entry_read(ppe, hash);
-+ ret = true;
-+
-+out:
-+ if (acct) {
-+ *packets += acct->packets;
-+ *bytes += acct->bytes;
-+ }
-
-- return true;
-+ return ret;
- }
-
- static void
- mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
- {
- u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
-+ u64 *packets = &entry->packets;
-+ u64 *bytes = &entry->bytes;
- struct mtk_flow_entry *cur;
- struct hlist_node *tmp;
- int idle;
-@@ -592,7 +619,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
- int cur_idle;
-
-- if (!mtk_flow_entry_update(ppe, cur)) {
-+ if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
-+ entry->prev_packets += cur->prev_packets;
-+ entry->prev_bytes += cur->prev_bytes;
- __mtk_foe_entry_clear(ppe, entry, false);
- continue;
- }
-@@ -607,10 +636,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
- }
- }
-
-+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
-+ int *idle)
-+{
-+ entry->packets = entry->prev_packets;
-+ entry->bytes = entry->prev_bytes;
-+
-+ spin_lock_bh(&ppe_lock);
-+
-+ if (entry->type == MTK_FLOW_TYPE_L2)
-+ mtk_flow_entry_update_l2(ppe, entry);
-+ else
-+ mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes);
-+
-+ *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-+
-+ spin_unlock_bh(&ppe_lock);
-+}
-+
- static void
- __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 hash)
- {
-+ struct mtk_foe_accounting *acct;
- struct mtk_eth *eth = ppe->eth;
- u16 timestamp = mtk_eth_timestamp(eth);
- struct mtk_foe_entry *hwe;
-@@ -641,6 +689,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
-
- dma_wmb();
-
-+ acct = mtk_ppe_mib_entry_read(ppe, hash);
-+ if (acct) {
-+ acct->packets = 0;
-+ acct->bytes = 0;
-+ }
-+
- mtk_ppe_cache_clear(ppe);
- }
-
-@@ -805,21 +859,6 @@ out:
- spin_unlock_bh(&ppe_lock);
- }
-
--int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
--{
-- int idle;
--
-- spin_lock_bh(&ppe_lock);
-- if (entry->type == MTK_FLOW_TYPE_L2)
-- mtk_flow_entry_update_l2(ppe, entry);
-- else
-- mtk_flow_entry_update(ppe, entry);
-- idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-- spin_unlock_bh(&ppe_lock);
--
-- return idle;
--}
--
- int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
- {
- if (!ppe)
-@@ -847,32 +886,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
- return mtk_ppe_wait_busy(ppe);
- }
-
--struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
-- struct mtk_foe_accounting *diff)
--{
-- struct mtk_foe_accounting *acct;
-- int size = sizeof(struct mtk_foe_accounting);
-- u64 bytes, packets;
--
-- if (!ppe->accounting)
-- return NULL;
--
-- if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
-- return NULL;
--
-- acct = ppe->acct_table + index * size;
--
-- acct->bytes += bytes;
-- acct->packets += packets;
--
-- if (diff) {
-- diff->bytes = bytes;
-- diff->packets = packets;
-- }
--
-- return acct;
--}
--
- struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
- {
- bool accounting = eth->soc->has_accounting;
---- a/drivers/net/ethernet/mediatek/mtk_ppe.h
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
-@@ -304,6 +304,8 @@ struct mtk_flow_entry {
- struct mtk_foe_entry data;
- struct rhash_head node;
- unsigned long cookie;
-+ u64 prev_packets, prev_bytes;
-+ u64 packets, bytes;
- };
-
- struct mtk_mib_entry {
-@@ -347,6 +349,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
- void mtk_ppe_start(struct mtk_ppe *ppe);
- int mtk_ppe_stop(struct mtk_ppe *ppe);
- int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
-+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
-
- void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
-
-@@ -395,9 +398,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
- unsigned int queue);
- int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
- void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
--int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
- int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
--struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
-- struct mtk_foe_accounting *diff);
-+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
-+ int *idle);
-
- #endif
---- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
-@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
- if (bind && state != MTK_FOE_STATE_BIND)
- continue;
-
-- acct = mtk_foe_entry_get_mib(ppe, i, NULL);
-+ acct = mtk_ppe_mib_entry_read(ppe, i);
-
- type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
- seq_printf(m, "%05x %s %7s", i,
---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
-@@ -499,24 +499,21 @@ static int
- mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
- {
- struct mtk_flow_entry *entry;
-- struct mtk_foe_accounting diff;
-- u32 idle;
-+ u64 packets, bytes;
-+ int idle;
-
- entry = rhashtable_lookup(ð->flow_table, &f->cookie,
- mtk_flow_ht_params);
- if (!entry)
- return -ENOENT;
-
-- idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
-+ packets = entry->packets;
-+ bytes = entry->bytes;
-+ mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
-+ f->stats.pkts += entry->packets - packets;
-+ f->stats.bytes += entry->bytes - bytes;
- f->stats.lastused = jiffies - idle * HZ;
-
-- if (entry->hash != 0xFFFF &&
-- mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
-- &diff)) {
-- f->stats.pkts += diff.packets;
-- f->stats.bytes += diff.bytes;
-- }
--
- return 0;
- }
-
-
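
For pre-NETSYSv3 hardware, mtk_ppe_mib_entry_read() above reassembles a 48-bit byte counter and a 40-bit packet counter from three 32-bit MIB registers. A standalone demonstration of that bit-splicing, with made-up register values (plain userspace C; field layout as described in the patch comments):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* sample raw reads of the three MIB serial registers (made up) */
	uint32_t cnt_r0 = 0x89abcdef;	/* byte count bits 31:0 */
	uint32_t cnt_r1 = 0x00124567;	/* 15:0 byte count 47:32; 31:16 pkt count 15:0 */
	uint32_t cnt_r2 = 0x000000ff;	/* 23:0 pkt count 39:16 */

	uint64_t bytes   = ((uint64_t)(cnt_r1 & 0xffff) << 32) | cnt_r0;
	uint64_t packets = ((uint64_t)(cnt_r2 & 0xffffff) << 16) |
			   (cnt_r1 >> 16);

	/* expect bytes=0x456789abcdef, packets=0xff0012 */
	printf("bytes=%#llx packets=%#llx\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}
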
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -431,6 +431,30 @@ static void mtk_setup_bridge_switch(stru
+@@ -432,6 +432,30 @@ static void mtk_setup_bridge_switch(stru
MTK_GSW_CFG);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
-@@ -439,12 +463,20 @@ static struct phylink_pcs *mtk_mac_selec
+@@ -440,12 +464,20 @@ static struct phylink_pcs *mtk_mac_selec
struct mtk_eth *eth = mac->hw;
unsigned int sid;
}
return NULL;
-@@ -500,7 +532,22 @@ static void mtk_mac_config(struct phylin
+@@ -501,7 +533,22 @@ static void mtk_mac_config(struct phylin
goto init_err;
}
break;
break;
default:
goto err_phy;
-@@ -555,8 +602,6 @@ static void mtk_mac_config(struct phylin
+@@ -556,8 +603,6 @@ static void mtk_mac_config(struct phylin
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
}
/* SGMII */
-@@ -573,21 +618,40 @@ static void mtk_mac_config(struct phylin
+@@ -574,21 +619,40 @@ static void mtk_mac_config(struct phylin
/* Save the syscfg0 value for mac_finish */
mac->syscfg0 = val;
return;
err_phy:
-@@ -632,10 +696,14 @@ static void mtk_mac_link_down(struct phy
+@@ -633,10 +697,14 @@ static void mtk_mac_link_down(struct phy
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
-@@ -707,13 +775,11 @@ static void mtk_set_queue_speed(struct m
+@@ -708,13 +776,11 @@ static void mtk_set_queue_speed(struct m
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
-@@ -747,6 +813,55 @@ static void mtk_mac_link_up(struct phyli
+@@ -748,6 +814,55 @@ static void mtk_mac_link_up(struct phyli
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
-@@ -4560,8 +4675,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4561,8 +4676,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4754,6 +4882,13 @@ static int mtk_probe(struct platform_dev
+@@ -4755,6 +4883,13 @@ static int mtk_probe(struct platform_dev
if (err)
return err;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4830,7 +4830,10 @@ static int mtk_probe(struct platform_dev
+@@ -4831,7 +4831,10 @@ static int mtk_probe(struct platform_dev
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
/**
* napi_disable - prevent NAPI from scheduling
-@@ -3363,6 +3364,7 @@ struct softnet_data {
+@@ -3364,6 +3365,7 @@ struct softnet_data {
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -323,13 +323,35 @@ static int _mtk_mdio_write(struct mtk_et
+@@ -324,13 +324,35 @@ static int _mtk_mdio_write(struct mtk_et
if (ret < 0)
return ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -346,12 +368,33 @@ static int _mtk_mdio_read(struct mtk_eth
+@@ -347,12 +369,33 @@ static int _mtk_mdio_read(struct mtk_eth
if (ret < 0)
return ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -898,6 +941,7 @@ static int mtk_mdio_init(struct mtk_eth
+@@ -899,6 +942,7 @@ static int mtk_mdio_init(struct mtk_eth
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -215,8 +215,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+@@ -234,8 +234,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
}
static int
--mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
- struct mtk_wed_wo_memory_region *region)
-+mtk_wed_get_reserved_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_reserved_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
-@@ -311,13 +311,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+@@ -321,7 +321,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* load firmware region metadata */
- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
+- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
++ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
-
- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_memory_region(wo, &wo->boot);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
- if (ret)
- return ret;
-
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -18,12 +18,23 @@
+@@ -34,12 +34,23 @@ static struct mtk_wed_wo_memory_region m
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
-- return readl(wo->boot.addr + reg);
+- return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ u32 val;
+
+ if (!wo->boot_regmap)
-+ return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+
+ if (regmap_read(wo->boot_regmap, reg, &val))
+ val = ~0;
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
-- writel(val, wo->boot.addr + reg);
+- writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ if (wo->boot_regmap)
+ regmap_write(wo->boot_regmap, reg, val);
+ else
-+ writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
-@@ -316,10 +327,21 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
- return ret;
- }
+@@ -313,6 +324,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ u32 val, boot_cr;
+ int ret, i;
-- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-- if (ret)
-- return ret;
+ wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
+ "mediatek,wo-cpuboot");
-+ if (IS_ERR(wo->boot_regmap)) {
-+ if (wo->boot_regmap != ERR_PTR(-ENODEV))
-+ return PTR_ERR(wo->boot_regmap);
+
-+ /* For backward compatibility, we need to check if cpu_boot
-+ * is defined through reserved memory property.
-+ */
-+ wo->boot_regmap = NULL;
-+ wo->boot.name = "wo-boot";
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-+ if (ret)
-+ return ret;
-+ }
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ int index = of_property_match_string(wo->hw->node,
+@@ -321,6 +335,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* set dummy cr */
- wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
++ if (index == MTK_WED_WO_REGION_BOOT && !IS_ERR(wo->boot_regmap))
++ continue;
++
+ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
-@@ -228,7 +228,8 @@ struct mtk_wed_wo_queue {
-
+@@ -231,6 +231,7 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
-- struct mtk_wed_wo_memory_region boot;
-+ struct mtk_wed_wo_memory_region boot; /* backward compatibility */
-+ struct regmap *boot_regmap;
++ struct regmap *boot_regmap;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
+
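
The wo_r32()/wo_w32() rework above makes register access dual-path: use the syscon regmap when the mediatek,wo-cpuboot phandle exists, otherwise keep the legacy ioremapped boot region. A minimal sketch of that lookup-with-fallback, treating -ENODEV (phandle absent) as "use the old path"; the helper names are illustrative:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static u32 boot_read(struct regmap *boot_regmap, void __iomem *addr, u32 reg)
{
	u32 val;

	if (!boot_regmap)			/* legacy reserved-memory path */
		return readl(addr + reg);

	if (regmap_read(boot_regmap, reg, &val))
		val = ~0;			/* read error: all-ones, as above */

	return val;
}

/* Probe-time lookup: -ENODEV only means the phandle is absent, so keep
 * the old reserved-memory path for backward compatibility. */
static struct regmap *boot_regmap_lookup(struct device_node *np)
{
	struct regmap *map;

	map = syscon_regmap_lookup_by_phandle(np, "mediatek,wo-cpuboot");
	if (map == ERR_PTR(-ENODEV))
		return NULL;

	return map;	/* a valid regmap, or a fatal ERR_PTR for the caller */
}
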
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -300,6 +300,52 @@ next:
+@@ -316,6 +316,39 @@ next:
}
static int
-+mtk_wed_mcu_load_memory_regions(struct mtk_wed_wo *wo,
-+ struct mtk_wed_wo_memory_region *region)
++mtk_wed_mcu_load_ilm(struct mtk_wed_wo *wo)
+{
++ struct mtk_wed_wo_memory_region *ilm_region;
++ struct resource res;
+ struct device_node *np;
+ int ret;
+
-+ /* firmware EMI memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ ®ion[MTK_WED_WO_REGION_EMI]);
-+ if (ret)
-+ return ret;
-+
-+ /* firmware DATA memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ ®ion[MTK_WED_WO_REGION_DATA]);
-+ if (ret)
-+ return ret;
-+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ilm", 0);
-+ if (np) {
-+ struct mtk_wed_wo_memory_region *ilm_region;
-+ struct resource res;
++ if (!np)
++ return 0;
++
++ ret = of_address_to_resource(np, 0, &res);
++ of_node_put(np);
+
-+ ret = of_address_to_resource(np, 0, &res);
-+ of_node_put(np);
++ if (ret < 0)
++ return ret;
+
-+ if (ret < 0)
-+ return ret;
++ ilm_region = &mem_region[MTK_WED_WO_REGION_ILM];
++ ilm_region->phy_addr = res.start;
++ ilm_region->size = resource_size(&res);
++ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
++ resource_size(&res));
+
-+ ilm_region = ®ion[MTK_WED_WO_REGION_ILM];
-+ ilm_region->phy_addr = res.start;
-+ ilm_region->size = resource_size(&res);
-+ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
-+ resource_size(&res));
++ if (!IS_ERR(ilm_region->addr))
++ return 0;
+
-+ return IS_ERR(ilm_region->addr) ? PTR_ERR(ilm_region->addr) : 0;
-+ }
++ ret = PTR_ERR(ilm_region->addr);
++ ilm_region->addr = NULL;
+
-+ /* For backward compatibility, we need to check if ILM
-+ * node is defined through reserved memory property.
-+ */
-+ return mtk_wed_get_reserved_memory_region(wo,
-+ ®ion[MTK_WED_WO_REGION_ILM]);
++ return ret;
+}
+
+static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
- static struct mtk_wed_wo_memory_region mem_region[] = {
-@@ -320,12 +366,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ const struct mtk_wed_fw_trailer *trailer;
+@@ -324,14 +357,20 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
u32 val, boot_cr;
int ret, i;
-- /* load firmware region metadata */
-- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
-- if (ret)
-- return ret;
-- }
-+ ret = mtk_wed_mcu_load_memory_regions(wo, mem_region);
-+ if (ret)
-+ return ret;
-
++ mtk_wed_mcu_load_ilm(wo);
wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
"mediatek,wo-cpuboot");
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- int index = of_property_match_string(wo->hw->node,
+- "memory-region-names",
+- mem_region[i].name);
++ int index;
++
++ if (mem_region[i].addr)
++ continue;
++
++ index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
+ if (index < 0)
+ continue;
+
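
mtk_wed_mcu_load_ilm() above follows a common OF pattern: resolve a phandle property to a node, convert the node to a resource, and ioremap it, leaving the caller to fall back to memory-region-names when the property is absent. A condensed sketch of that pattern (helper name and error convention are illustrative; note that devm_ioremap() reports mapping failure with NULL, not an ERR_PTR):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Map the region referenced by @prop (e.g. "mediatek,wo-ilm"); returns
 * NULL when the property is absent so the caller can fall back. */
static void __iomem *map_phandle_region(struct device *dev,
					struct device_node *np,
					const char *prop,
					resource_size_t *size)
{
	struct device_node *target;
	struct resource res;
	void __iomem *addr;
	int ret;

	target = of_parse_phandle(np, prop, 0);
	if (!target)
		return NULL;		/* property absent: use fallback */

	ret = of_address_to_resource(target, 0, &res);
	of_node_put(target);
	if (ret < 0)
		return ERR_PTR(ret);

	addr = devm_ioremap(dev, res.start, resource_size(&res));
	if (!addr)
		return ERR_PTR(-ENOMEM);

	*size = resource_size(&res);
	return addr;
}
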
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -821,6 +821,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1320,6 +1320,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
struct device_node *np;
int index;
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
-@@ -837,6 +855,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1336,6 +1354,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
return -ENODEV;
dev->rro.miod_phys = rmem->base;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1238,7 +1238,7 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -1239,7 +1239,7 @@ static int mtk_init_fq_dma(struct mtk_et
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
ð->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
-@@ -1254,16 +1254,16 @@ static int mtk_init_fq_dma(struct mtk_et
+@@ -1255,16 +1255,16 @@ static int mtk_init_fq_dma(struct mtk_et
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
-@@ -1510,7 +1510,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1511,7 +1511,7 @@ static int mtk_tx_map(struct sk_buff *sk
if (itxd == ring->last_free)
return -ENOMEM;
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
-@@ -1551,7 +1551,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1552,7 +1552,7 @@ static int mtk_tx_map(struct sk_buff *sk
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
-@@ -1564,7 +1564,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1565,7 +1565,7 @@ static int mtk_tx_map(struct sk_buff *sk
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
-@@ -1607,7 +1607,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1608,7 +1608,7 @@ static int mtk_tx_map(struct sk_buff *sk
} else {
int next_idx;
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
-@@ -1616,7 +1616,7 @@ static int mtk_tx_map(struct sk_buff *sk
+@@ -1617,7 +1617,7 @@ static int mtk_tx_map(struct sk_buff *sk
err_dma:
do {
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, false);
-@@ -1641,7 +1641,7 @@ static int mtk_cal_txd_req(struct mtk_et
+@@ -1642,7 +1642,7 @@ static int mtk_cal_txd_req(struct mtk_et
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
-@@ -1782,7 +1782,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+@@ -1783,7 +1783,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
ring = ð->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
-@@ -1950,7 +1950,7 @@ static int mtk_xdp_submit_frame(struct m
+@@ -1951,7 +1951,7 @@ static int mtk_xdp_submit_frame(struct m
}
htxd = txd;
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
-@@ -1970,7 +1970,7 @@ static int mtk_xdp_submit_frame(struct m
+@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
-@@ -2007,7 +2007,7 @@ static int mtk_xdp_submit_frame(struct m
+@@ -2008,7 +2008,7 @@ static int mtk_xdp_submit_frame(struct m
} else {
int idx;
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
-@@ -2019,7 +2019,7 @@ static int mtk_xdp_submit_frame(struct m
+@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
unmap:
while (htxd != txd) {
txd_pdma = qdma_to_pdma(ring, htxd);
mtk_tx_unmap(eth, tx_buf, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-@@ -2147,7 +2147,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2148,7 +2148,7 @@ static int mtk_poll_rx(struct napi_struc
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
-@@ -2282,7 +2282,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2283,7 +2283,7 @@ static int mtk_poll_rx(struct napi_struc
rxdcsum = &trxd.rxd4;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
-@@ -2403,7 +2403,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+@@ -2404,7 +2404,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
if (!tx_buf->data)
break;
-@@ -2451,7 +2451,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+@@ -2452,7 +2452,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
}
mtk_tx_unmap(eth, tx_buf, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
-@@ -2540,7 +2540,7 @@ static int mtk_napi_rx(struct napi_struc
+@@ -2541,7 +2541,7 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
-@@ -2556,10 +2556,10 @@ static int mtk_napi_rx(struct napi_struc
+@@ -2557,10 +2557,10 @@ static int mtk_napi_rx(struct napi_struc
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
return rx_done_total;
}
-@@ -2568,7 +2568,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+@@ -2569,7 +2569,7 @@ static int mtk_tx_alloc(struct mtk_eth *
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = ð->tx_ring;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
-@@ -2691,14 +2691,14 @@ static void mtk_tx_clean(struct mtk_eth
+@@ -2692,14 +2692,14 @@ static void mtk_tx_clean(struct mtk_eth
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
-@@ -2753,15 +2753,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -2754,15 +2754,15 @@ static int mtk_rx_alloc(struct mtk_eth *
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
}
if (!ring->dma)
-@@ -2772,7 +2772,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -2773,7 +2773,7 @@ static int mtk_rx_alloc(struct mtk_eth *
dma_addr_t dma_addr;
void *data;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
-@@ -2861,7 +2861,7 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -2862,7 +2862,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!ring->data[i])
continue;
if (!rxd->rxd1)
continue;
-@@ -2878,7 +2878,7 @@ static void mtk_rx_clean(struct mtk_eth
+@@ -2879,7 +2879,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma, ring->phys);
ring->dma = NULL;
}
-@@ -3241,7 +3241,7 @@ static void mtk_dma_free(struct mtk_eth
+@@ -3242,7 +3242,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
-@@ -3291,7 +3291,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -3292,7 +3292,7 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(ð->rx_napi))) {
__napi_schedule(ð->rx_napi);
}
-@@ -3317,9 +3317,9 @@ static irqreturn_t mtk_handle_irq(int ir
+@@ -3318,9 +3318,9 @@ static irqreturn_t mtk_handle_irq(int ir
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
-@@ -3337,10 +3337,10 @@ static void mtk_poll_controller(struct n
+@@ -3338,10 +3338,10 @@ static void mtk_poll_controller(struct n
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
}
#endif
-@@ -3505,7 +3505,7 @@ static int mtk_open(struct net_device *d
+@@ -3506,7 +3506,7 @@ static int mtk_open(struct net_device *d
napi_enable(ð->tx_napi);
napi_enable(ð->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
refcount_set(ð->dma_refcnt, 1);
}
else
-@@ -3588,7 +3588,7 @@ static int mtk_stop(struct net_device *d
+@@ -3589,7 +3589,7 @@ static int mtk_stop(struct net_device *d
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
napi_disable(ð->tx_napi);
napi_disable(ð->rx_napi);
-@@ -4064,9 +4064,9 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -4065,9 +4065,9 @@ static int mtk_hw_init(struct mtk_eth *e
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
-@@ -5166,11 +5166,15 @@ static const struct mtk_soc_data mt2701_
+@@ -5167,11 +5167,15 @@ static const struct mtk_soc_data mt2701_
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5186,11 +5190,15 @@ static const struct mtk_soc_data mt7621_
+@@ -5187,11 +5191,15 @@ static const struct mtk_soc_data mt7621_
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5208,11 +5216,15 @@ static const struct mtk_soc_data mt7622_
+@@ -5209,11 +5217,15 @@ static const struct mtk_soc_data mt7622_
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5229,11 +5241,15 @@ static const struct mtk_soc_data mt7623_
+@@ -5230,11 +5242,15 @@ static const struct mtk_soc_data mt7623_
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5248,11 +5264,15 @@ static const struct mtk_soc_data mt7629_
+@@ -5249,11 +5265,15 @@ static const struct mtk_soc_data mt7629_
.required_pctl = false,
.has_accounting = true,
.version = 1,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5270,11 +5290,15 @@ static const struct mtk_soc_data mt7981_
+@@ -5271,11 +5291,15 @@ static const struct mtk_soc_data mt7981_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
-@@ -5292,11 +5316,15 @@ static const struct mtk_soc_data mt7986_
+@@ -5293,11 +5317,15 @@ static const struct mtk_soc_data mt7986_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
-@@ -5314,11 +5342,15 @@ static const struct mtk_soc_data mt7988_
+@@ -5315,11 +5343,15 @@ static const struct mtk_soc_data mt7988_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
-@@ -5331,11 +5363,15 @@ static const struct mtk_soc_data rt5350_
+@@ -5332,11 +5364,15 @@ static const struct mtk_soc_data rt5350_
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
},
.qdma = {
.qtx_cfg = 0x4400,
-@@ -1206,7 +1206,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+@@ -1207,7 +1207,7 @@ static bool mtk_rx_get_desc(struct mtk_e
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
-@@ -2154,7 +2154,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2155,7 +2155,7 @@ static int mtk_poll_rx(struct napi_struc
break;
/* find out which mac the packet come from. values start at 1 */
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
-@@ -2266,7 +2266,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2267,7 +2267,7 @@ static int mtk_poll_rx(struct napi_struc
skb->dev = netdev;
bytes += skb->len;
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
-@@ -2807,7 +2807,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+@@ -2808,7 +2808,7 @@ static int mtk_rx_alloc(struct mtk_eth *
rxd->rxd3 = 0;
rxd->rxd4 = 0;
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
-@@ -4010,7 +4010,7 @@ static int mtk_hw_init(struct mtk_eth *e
+@@ -4011,7 +4011,7 @@ static int mtk_hw_init(struct mtk_eth *e
else
mtk_hw_reset(eth);
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
-@@ -5296,11 +5296,11 @@ static const struct mtk_soc_data mt7981_
+@@ -5297,11 +5297,11 @@ static const struct mtk_soc_data mt7981_
.dma_len_offset = 8,
},
.rx = {
},
};
-@@ -5322,11 +5322,11 @@ static const struct mtk_soc_data mt7986_
+@@ -5323,11 +5323,11 @@ static const struct mtk_soc_data mt7986_
.dma_len_offset = 8,
},
.rx = {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4556,6 +4556,7 @@ static const struct net_device_ops mtk_n
+@@ -4557,6 +4557,7 @@ static const struct net_device_ops mtk_n
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
phy_interface_t phy_mode;
struct phylink *phylink;
-@@ -4727,6 +4728,9 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4728,6 +4729,9 @@ static int mtk_add_mac(struct mtk_eth *e
register_netdevice_notifier(&mac->device_notifier);
}