From: Felix Fietkau Date: Tue, 30 Nov 2021 10:55:24 +0000 (+0100) Subject: mediatek: add patches for MT7622 WED (wireless ethernet dispatch) X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=0f029b3d2b505b40aca9a24a002838ed1060f83d;p=openwrt%2Fstaging%2Fnbd.git mediatek: add patches for MT7622 WED (wireless ethernet dispatch) This series also contains other improvement for hardware flow offload support Signed-off-by: Felix Fietkau --- diff --git a/target/linux/generic/hack-5.10/721-net-add-packet-mangeling.patch b/target/linux/generic/hack-5.10/721-net-add-packet-mangeling.patch index 67808c35f8..530322eb06 100644 --- a/target/linux/generic/hack-5.10/721-net-add-packet-mangeling.patch +++ b/target/linux/generic/hack-5.10/721-net-add-packet-mangeling.patch @@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -1618,6 +1618,7 @@ enum netdev_priv_flags { +@@ -1625,6 +1625,7 @@ enum netdev_priv_flags { IFF_FAILOVER_SLAVE = 1<<28, IFF_L3MDEV_RX_HANDLER = 1<<29, IFF_LIVE_RENAME_OK = 1<<30, @@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN -@@ -1650,6 +1651,7 @@ enum netdev_priv_flags { +@@ -1657,6 +1658,7 @@ enum netdev_priv_flags { #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK @@ -35,7 +35,7 @@ Signed-off-by: Felix Fietkau /* Specifies the type of the struct net_device::ml_priv pointer */ enum netdev_ml_priv_type { -@@ -1990,6 +1992,11 @@ struct net_device { +@@ -1997,6 +1999,11 @@ struct net_device { const struct tlsdev_ops *tlsdev_ops; #endif @@ -47,7 +47,7 @@ Signed-off-by: Felix Fietkau const struct header_ops *header_ops; unsigned int flags; -@@ -2080,6 +2087,10 @@ struct net_device { +@@ -2087,6 +2094,10 @@ struct net_device { struct mpls_dev __rcu *mpls_ptr; #endif @@ -105,7 +105,7 @@ Signed-off-by: Felix Fietkau help --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3646,6 +3646,11 @@ static int xmit_one(struct sk_buff *skb, +@@ -3650,6 +3650,11 @@ static int xmit_one(struct sk_buff *skb, if (dev_nit_active(dev)) dev_queue_xmit_nit(skb, dev); diff --git a/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch b/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch index abc0735e36..6abd1ec55f 100644 --- a/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch +++ b/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch @@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -1648,6 +1648,10 @@ enum netdev_priv_flags { +@@ -1655,6 +1655,10 @@ enum netdev_priv_flags { IFF_TX_SKB_NO_LINEAR = 1<<31, }; @@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN #define IFF_EBRIDGE IFF_EBRIDGE #define IFF_BONDING IFF_BONDING -@@ -1680,6 +1684,7 @@ enum netdev_priv_flags { +@@ -1687,6 +1691,7 @@ enum netdev_priv_flags { #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK #define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR @@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau /* Specifies the type of the struct net_device::ml_priv pointer */ enum netdev_ml_priv_type { -@@ -1981,6 +1986,7 @@ struct net_device { +@@ -1988,6 +1993,7 @@ struct net_device { /* Read-mostly cache-line for fast-path access */ unsigned int flags; unsigned int priv_flags; @@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau const struct net_device_ops 
*netdev_ops; int ifindex; unsigned short gflags; -@@ -2041,6 +2047,11 @@ struct net_device { +@@ -2048,6 +2054,11 @@ struct net_device { const struct tlsdev_ops *tlsdev_ops; #endif @@ -58,7 +58,7 @@ Signed-off-by: Felix Fietkau const struct header_ops *header_ops; unsigned char operstate; -@@ -2115,6 +2126,10 @@ struct net_device { +@@ -2122,6 +2133,10 @@ struct net_device { struct mctp_dev __rcu *mctp_ptr; #endif @@ -116,7 +116,7 @@ Signed-off-by: Felix Fietkau help --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3578,6 +3578,11 @@ static int xmit_one(struct sk_buff *skb, +@@ -3582,6 +3582,11 @@ static int xmit_one(struct sk_buff *skb, if (dev_nit_active(dev)) dev_queue_xmit_nit(skb, dev); diff --git a/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch new file mode 100644 index 0000000000..245a996c96 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch @@ -0,0 +1,327 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 17:59:07 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent + DMA + +It improves performance by eliminating the need for a cache flush on rx and tx +In preparation for supporting WED (Wireless Ethernet Dispatch), also add a +function for disabling coherent DMA at runtime. + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -821,7 +822,7 @@ static int mtk_init_fq_dma(struct mtk_et + dma_addr_t dma_addr; + int i; + +- eth->scratch_ring = dma_alloc_coherent(eth->dev, ++ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, + cnt * sizeof(struct mtk_tx_dma), + ð->phy_scratch_ring, + GFP_ATOMIC); +@@ -833,10 +834,10 @@ static int mtk_init_fq_dma(struct mtk_et + if (unlikely(!eth->scratch_head)) + return -ENOMEM; + +- dma_addr = dma_map_single(eth->dev, ++ dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; + + phy_ring_tail = eth->phy_scratch_ring + +@@ -890,26 +891,26 @@ static void mtk_tx_unmap(struct mtk_eth + { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { +- dma_unmap_single(eth->dev, ++ dma_unmap_single(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); +@@ -987,9 +988,9 @@ static int mtk_tx_map(struct sk_buff *sk + if (skb_vlan_tag_present(skb)) + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); + +- mapped_addr = dma_map_single(eth->dev, skb->data, ++ mapped_addr = 
dma_map_single(eth->dma_dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + return -ENOMEM; + + WRITE_ONCE(itxd->txd1, mapped_addr); +@@ -1028,10 +1029,10 @@ static int mtk_tx_map(struct sk_buff *sk + + + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); +- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, ++ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + goto err_dma; + + if (i == nr_frags - 1 && +@@ -1309,18 +1310,18 @@ static int mtk_poll_rx(struct napi_struc + netdev->stats.rx_dropped++; + goto release_desc; + } +- dma_addr = dma_map_single(eth->dev, ++ dma_addr = dma_map_single(eth->dma_dev, + new_data + NET_SKB_PAD + + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { + skb_free_frag(new_data); + netdev->stats.rx_dropped++; + goto release_desc; + } + +- dma_unmap_single(eth->dev, trxd.rxd1, ++ dma_unmap_single(eth->dma_dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + + /* receive data */ +@@ -1593,7 +1594,7 @@ static int mtk_tx_alloc(struct mtk_eth * + if (!ring->buf) + goto no_tx_mem; + +- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, ++ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); + if (!ring->dma) + goto no_tx_mem; +@@ -1611,7 +1612,7 @@ static int mtk_tx_alloc(struct mtk_eth * + * descriptors in ring->dma_pdma. + */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { +- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, ++ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, + GFP_ATOMIC); + if (!ring->dma_pdma) +@@ -1670,7 +1671,7 @@ static void mtk_tx_clean(struct mtk_eth + } + + if (ring->dma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); +@@ -1678,7 +1679,7 @@ static void mtk_tx_clean(struct mtk_eth + } + + if (ring->dma_pdma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(*ring->dma_pdma), + ring->dma_pdma, + ring->phys_pdma); +@@ -1723,18 +1724,18 @@ static int mtk_rx_alloc(struct mtk_eth * + return -ENOMEM; + } + +- ring->dma = dma_alloc_coherent(eth->dev, ++ ring->dma = dma_alloc_coherent(eth->dma_dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < rx_dma_size; i++) { +- dma_addr_t dma_addr = dma_map_single(eth->dev, ++ dma_addr_t dma_addr = dma_map_single(eth->dma_dev, + ring->data[i] + NET_SKB_PAD + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; + ring->dma[i].rxd1 = (unsigned int)dma_addr; + +@@ -1770,7 +1771,7 @@ static void mtk_rx_clean(struct mtk_eth + continue; + if (!ring->dma[i].rxd1) + continue; +- dma_unmap_single(eth->dev, ++ dma_unmap_single(eth->dma_dev, + ring->dma[i].rxd1, + ring->buf_size, + DMA_FROM_DEVICE); +@@ -1781,7 +1782,7 @@ static void mtk_rx_clean(struct mtk_eth + } + + if (ring->dma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + ring->dma_size * 
sizeof(*ring->dma), + ring->dma, + ring->phys); +@@ -2134,7 +2135,7 @@ static void mtk_dma_free(struct mtk_eth + if (eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + if (eth->scratch_ring) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), + eth->scratch_ring, + eth->phy_scratch_ring); +@@ -2482,6 +2483,8 @@ static void mtk_dim_tx(struct work_struc + + static int mtk_hw_init(struct mtk_eth *eth) + { ++ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | ++ ETHSYS_DMA_AG_MAP_PPE; + int i, val, ret; + + if (test_and_set_bit(MTK_HW_INIT, ð->state)) +@@ -2494,6 +2497,10 @@ static int mtk_hw_init(struct mtk_eth *e + if (ret) + goto err_disable_pm; + ++ if (eth->ethsys) ++ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, ++ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = device_reset(eth->dev); + if (ret) { +@@ -3043,6 +3050,35 @@ free_netdev: + return err; + } + ++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) ++{ ++ struct net_device *dev, *tmp; ++ LIST_HEAD(dev_list); ++ int i; ++ ++ rtnl_lock(); ++ ++ for (i = 0; i < MTK_MAC_COUNT; i++) { ++ dev = eth->netdev[i]; ++ ++ if (!dev || !(dev->flags & IFF_UP)) ++ continue; ++ ++ list_add_tail(&dev->close_list, &dev_list); ++ } ++ ++ dev_close_many(&dev_list, false); ++ ++ eth->dma_dev = dma_dev; ++ ++ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { ++ list_del_init(&dev->close_list); ++ dev_open(dev, NULL); ++ } ++ ++ rtnl_unlock(); ++} ++ + static int mtk_probe(struct platform_device *pdev) + { + struct device_node *mac_np; +@@ -3056,6 +3092,7 @@ static int mtk_probe(struct platform_dev + eth->soc = of_device_get_match_data(&pdev->dev); + + eth->dev = &pdev->dev; ++ eth->dma_dev = &pdev->dev; + eth->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eth->base)) + return PTR_ERR(eth->base); +@@ -3104,6 +3141,16 @@ static int mtk_probe(struct platform_dev + } + } + ++ if (of_dma_is_coherent(pdev->dev.of_node)) { ++ struct regmap *cci; ++ ++ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, ++ "mediatek,cci-control"); ++ /* enable CPU/bus coherency */ ++ if (!IS_ERR(cci)) ++ regmap_write(cci, 0, 3); ++ } ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), + GFP_KERNEL); +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -456,6 +456,12 @@ + #define RSTCTRL_FE BIT(6) + #define RSTCTRL_PPE BIT(31) + ++/* ethernet dma channel agent map */ ++#define ETHSYS_DMA_AG_MAP 0x408 ++#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) ++#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) ++#define ETHSYS_DMA_AG_MAP_PPE BIT(2) ++ + /* SGMII subsystem config registers */ + /* Register to auto-negotiation restart */ + #define SGMSYS_PCS_CONTROL_1 0x0 +@@ -873,6 +879,7 @@ struct mtk_sgmii { + /* struct mtk_eth - This is the main datasructure for holding the state + * of the driver + * @dev: The device pointer ++ * @dev: The device pointer used for dma mapping/alloc + * @base: The mapped register i/o base + * @page_lock: Make sure that register operations are atomic + * @tx_irq__lock: Make sure that IRQ register operations are atomic +@@ -916,6 +923,7 @@ struct mtk_sgmii { + + struct mtk_eth { + struct device *dev; ++ struct device *dma_dev; + void __iomem *base; + spinlock_t page_lock; + spinlock_t tx_irq_lock; +@@ -1014,6 +1022,7 @@ int 
mtk_gmac_rgmii_path_setup(struct mtk + int mtk_eth_offload_init(struct mtk_eth *eth); + int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); ++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); + + + #endif /* MTK_ETH_H */ diff --git a/target/linux/generic/pending-5.10/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch b/target/linux/generic/pending-5.10/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch new file mode 100644 index 0000000000..892e01aece --- /dev/null +++ b/target/linux/generic/pending-5.10/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch @@ -0,0 +1,30 @@ +From: Felix Fietkau +Date: Mon, 7 Feb 2022 10:27:22 +0100 +Subject: [PATCH] arm64: dts: mediatek: mt7622: add support for coherent + DMA + +It improves performance by eliminating the need for a cache flush on rx and tx + +Signed-off-by: Felix Fietkau +--- + +--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi +@@ -357,7 +357,7 @@ + }; + + cci_control2: slave-if@5000 { +- compatible = "arm,cci-400-ctrl-if"; ++ compatible = "arm,cci-400-ctrl-if", "syscon"; + interface-type = "ace"; + reg = <0x5000 0x1000>; + }; +@@ -936,6 +936,8 @@ + power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; + mediatek,ethsys = <ðsys>; + mediatek,sgmiisys = <&sgmiisys>; ++ mediatek,cci-control = <&cci_control2>; ++ dma-coherent; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; diff --git a/target/linux/generic/pending-5.10/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch b/target/linux/generic/pending-5.10/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch new file mode 100644 index 0000000000..874f9a8a04 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch @@ -0,0 +1,1679 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 17:56:08 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for Wireless + Ethernet Dispatch (WED) + +The Wireless Ethernet Dispatch subsystem on the MT7622 SoC can be +configured to intercept and handle access to the DMA queues and +PCIe interrupts for a MT7615/MT7915 wireless card. +It can manage the internal WDMA (Wireless DMA) controller, which allows +ethernet packets to be passed from the packet switch engine (PSE) to the +wireless card, bypassing the CPU entirely. +This can be used to implement hardware flow offloading from ethernet to +WLAN. 
+ +Signed-off-by: Felix Fietkau +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h + create mode 100644 include/linux/soc/mediatek/mtk_wed.h + +--- a/drivers/net/ethernet/mediatek/Kconfig ++++ b/drivers/net/ethernet/mediatek/Kconfig +@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK + + if NET_VENDOR_MEDIATEK + ++config NET_MEDIATEK_SOC_WED ++ depends on ARCH_MEDIATEK || COMPILE_TEST ++ def_bool NET_MEDIATEK_SOC != n ++ + config NET_MEDIATEK_SOC + tristate "MediaTek SoC Gigabit Ethernet support" + select PHYLINK +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -5,4 +5,9 @@ + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o + mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o ++ifdef CONFIG_DEBUG_FS ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o ++endif ++obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o + obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -24,6 +24,7 @@ + #include + + #include "mtk_eth_soc.h" ++#include "mtk_wed.h" + + static int mtk_msg_level = -1; + module_param_named(msg_level, mtk_msg_level, int, 0); +@@ -3173,6 +3174,22 @@ static int mtk_probe(struct platform_dev + } + } + ++ for (i = 0;; i++) { ++ struct device_node *np = of_parse_phandle(pdev->dev.of_node, ++ "mediatek,wed", i); ++ static const u32 wdma_regs[] = { ++ MTK_WDMA0_BASE, ++ MTK_WDMA1_BASE ++ }; ++ void __iomem *wdma; ++ ++ if (!np || i >= ARRAY_SIZE(wdma_regs)) ++ break; ++ ++ wdma = eth->base + wdma_regs[i]; ++ mtk_wed_add_hw(np, eth, wdma, i); ++ } ++ + for (i = 0; i < 3; i++) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) + eth->irq[i] = eth->irq[0]; +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -295,6 +295,9 @@ + #define MTK_GDM1_TX_GPCNT 0x2438 + #define MTK_STAT_OFFSET 0x40 + ++#define MTK_WDMA0_BASE 0x2800 ++#define MTK_WDMA1_BASE 0x2c00 ++ + /* QDMA descriptor txd4 */ + #define TX_DMA_CHKSUM (0x7 << 29) + #define TX_DMA_TSO BIT(28) +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -0,0 +1,875 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mtk_eth_soc.h" ++#include "mtk_wed_regs.h" ++#include "mtk_wed.h" ++#include "mtk_ppe.h" ++ ++#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) ++ ++#define MTK_WED_PKT_SIZE 1900 ++#define MTK_WED_BUF_SIZE 2048 ++#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) ++ ++#define MTK_WED_TX_RING_SIZE 2048 ++#define MTK_WED_WDMA_RING_SIZE 1024 ++ ++static struct mtk_wed_hw *hw_list[2]; ++static DEFINE_MUTEX(hw_lock); ++ ++static void ++wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) ++{ ++ regmap_update_bits(dev->hw->regs, reg, mask | val, val); ++} ++ ++static void ++wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ return wed_m32(dev, reg, 0, mask); ++} ++ ++static void ++wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ return 
wed_m32(dev, reg, mask, 0); ++} ++ ++static void ++wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) ++{ ++ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); ++} ++ ++static void ++wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ wdma_m32(dev, reg, 0, mask); ++} ++ ++static u32 ++mtk_wed_read_reset(struct mtk_wed_device *dev) ++{ ++ return wed_r32(dev, MTK_WED_RESET); ++} ++ ++static void ++mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) ++{ ++ u32 status; ++ ++ wed_w32(dev, MTK_WED_RESET, mask); ++ if (readx_poll_timeout(mtk_wed_read_reset, dev, status, ++ !(status & mask), 0, 1000)) ++ WARN_ON_ONCE(1); ++} ++ ++static struct mtk_wed_hw * ++mtk_wed_assign(struct mtk_wed_device *dev) ++{ ++ struct mtk_wed_hw *hw; ++ ++ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; ++ if (!hw || hw->wed_dev) ++ return NULL; ++ ++ hw->wed_dev = dev; ++ return hw; ++} ++ ++static int ++mtk_wed_buffer_alloc(struct mtk_wed_device *dev) ++{ ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ void **page_list; ++ int token = dev->wlan.token_start; ++ int ring_size; ++ int n_pages; ++ int i, page_idx; ++ ++ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); ++ n_pages = ring_size / MTK_WED_BUF_PER_PAGE; ++ ++ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); ++ if (!page_list) ++ return -ENOMEM; ++ ++ dev->buf_ring.size = ring_size; ++ dev->buf_ring.pages = page_list; ++ ++ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), ++ &desc_phys, GFP_KERNEL); ++ if (!desc) ++ return -ENOMEM; ++ ++ dev->buf_ring.desc = desc; ++ dev->buf_ring.desc_phys = desc_phys; ++ ++ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { ++ dma_addr_t page_phys, buf_phys; ++ struct page *page; ++ void *buf; ++ int s; ++ ++ page = __dev_alloc_pages(GFP_KERNEL, 0); ++ if (!page) ++ return -ENOMEM; ++ ++ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ if (dma_mapping_error(dev->hw->dev, page_phys)) { ++ __free_page(page); ++ return -ENOMEM; ++ } ++ ++ page_list[page_idx++] = page; ++ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ ++ buf = page_to_virt(page); ++ buf_phys = page_phys; ++ ++ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { ++ u32 txd_size; ++ ++ txd_size = dev->wlan.init_buf(buf, buf_phys, token++); ++ ++ desc->buf0 = buf_phys; ++ desc->buf1 = buf_phys + txd_size; ++ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, ++ txd_size) | ++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, ++ MTK_WED_BUF_SIZE - txd_size) | ++ MTK_WDMA_DESC_CTRL_LAST_SEG1; ++ desc->info = 0; ++ desc++; ++ ++ buf += MTK_WED_BUF_SIZE; ++ buf_phys += MTK_WED_BUF_SIZE; ++ } ++ ++ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ } ++ ++ return 0; ++} ++ ++static void ++mtk_wed_free_buffer(struct mtk_wed_device *dev) ++{ ++ struct mtk_wdma_desc *desc = dev->buf_ring.desc; ++ void **page_list = dev->buf_ring.pages; ++ int page_idx; ++ int i; ++ ++ if (!page_list) ++ return; ++ ++ if (!desc) ++ goto free_pagelist; ++ ++ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { ++ void *page = page_list[page_idx++]; ++ ++ if (!page) ++ break; ++ ++ dma_unmap_page(dev->hw->dev, desc[i].buf0, ++ PAGE_SIZE, DMA_BIDIRECTIONAL); ++ __free_page(page); ++ } ++ ++ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), ++ desc, dev->buf_ring.desc_phys); ++ ++free_pagelist: ++ kfree(page_list); ++} ++ ++static void 
++mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) ++{ ++ if (!ring->desc) ++ return; ++ ++ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), ++ ring->desc, ring->desc_phys); ++} ++ ++static void ++mtk_wed_free_tx_rings(struct mtk_wed_device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) ++ mtk_wed_free_ring(dev, &dev->tx_ring[i]); ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) ++ mtk_wed_free_ring(dev, &dev->tx_wdma[i]); ++} ++ ++static void ++mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) ++{ ++ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; ++ ++ if (!dev->hw->num_flows) ++ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; ++ ++ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); ++ wed_r32(dev, MTK_WED_EXT_INT_MASK); ++} ++ ++static void ++mtk_wed_stop(struct mtk_wed_device *dev) ++{ ++ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); ++ mtk_wed_set_ext_int(dev, false); ++ ++ wed_clr(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); ++ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); ++ wdma_w32(dev, MTK_WDMA_INT_MASK, 0); ++ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); ++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); ++ ++ wed_clr(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_TX_DMA_EN | ++ MTK_WED_GLO_CFG_RX_DMA_EN); ++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); ++} ++ ++static void ++mtk_wed_detach(struct mtk_wed_device *dev) ++{ ++ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; ++ struct mtk_wed_hw *hw = dev->hw; ++ ++ mutex_lock(&hw_lock); ++ ++ mtk_wed_stop(dev); ++ ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ ++ mtk_wed_free_buffer(dev); ++ mtk_wed_free_tx_rings(dev); ++ ++ if (of_dma_is_coherent(wlan_node)) ++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, ++ BIT(hw->index), BIT(hw->index)); ++ ++ if (!hw_list[!hw->index]->wed_dev && ++ hw->eth->dma_dev != hw->eth->dev) ++ mtk_eth_set_dma_device(hw->eth, hw->eth->dev); ++ ++ memset(dev, 0, sizeof(*dev)); ++ module_put(THIS_MODULE); ++ ++ hw->wed_dev = NULL; ++ mutex_unlock(&hw_lock); ++} ++ ++static void ++mtk_wed_hw_init_early(struct mtk_wed_device *dev) ++{ ++ u32 mask, set; ++ u32 offset; ++ ++ mtk_wed_stop(dev); ++ mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ ++ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | ++ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | ++ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; ++ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | ++ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | ++ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; ++ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); ++ ++ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); ++ ++ offset = dev->hw->index ? 
0x04000400 : 0; ++ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); ++ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); ++ ++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); ++} ++ ++static void ++mtk_wed_hw_init(struct mtk_wed_device *dev) ++{ ++ if (dev->init_done) ++ return; ++ ++ dev->init_done = true; ++ mtk_wed_set_ext_int(dev, false); ++ wed_w32(dev, MTK_WED_TX_BM_CTRL, ++ MTK_WED_TX_BM_CTRL_PAUSE | ++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, ++ dev->buf_ring.size / 128) | ++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, ++ MTK_WED_TX_RING_SIZE / 256)); ++ ++ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); ++ ++ wed_w32(dev, MTK_WED_TX_BM_TKID, ++ FIELD_PREP(MTK_WED_TX_BM_TKID_START, ++ dev->wlan.token_start) | ++ FIELD_PREP(MTK_WED_TX_BM_TKID_END, ++ dev->wlan.token_start + dev->wlan.nbuf - 1)); ++ ++ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); ++ ++ wed_w32(dev, MTK_WED_TX_BM_DYN_THR, ++ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | ++ MTK_WED_TX_BM_DYN_THR_HI); ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); ++ ++ wed_set(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ ++ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); ++} ++ ++static void ++mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) ++{ ++ int i; ++ ++ for (i = 0; i < size; i++) { ++ desc[i].buf0 = 0; ++ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); ++ desc[i].buf1 = 0; ++ desc[i].info = 0; ++ } ++} ++ ++static u32 ++mtk_wed_check_busy(struct mtk_wed_device *dev) ++{ ++ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) ++ return true; ++ ++ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_CTRL) & ++ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) ++ return true; ++ ++ return false; ++} ++ ++static int ++mtk_wed_poll_busy(struct mtk_wed_device *dev) ++{ ++ int sleep = 15000; ++ int timeout = 100 * sleep; ++ u32 val; ++ ++ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, ++ timeout, false, dev); ++} ++ ++static void ++mtk_wed_reset_dma(struct mtk_wed_device *dev) ++{ ++ bool busy = false; ++ u32 val; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { ++ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; ++ ++ if (!desc) ++ continue; ++ ++ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); ++ } ++ ++ if (mtk_wed_poll_busy(dev)) ++ busy = mtk_wed_check_busy(dev); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); ++ } else { ++ wed_w32(dev, MTK_WED_RESET_IDX, ++ MTK_WED_RESET_IDX_TX | ++ MTK_WED_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_RESET_IDX, 0); ++ } ++ ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); ++ } else { ++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, ++ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); ++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); ++ ++ wed_set(dev, MTK_WED_WDMA_GLO_CFG, ++ 
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); ++ ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); ++ } ++ ++ for (i = 0; i < 100; i++) { ++ val = wed_r32(dev, MTK_WED_TX_BM_INTF); ++ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) ++ break; ++ } ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); ++ } else { ++ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, ++ MTK_WED_WPDMA_RESET_IDX_TX | ++ MTK_WED_WPDMA_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); ++ } ++ ++} ++ ++static int ++mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, ++ int size) ++{ ++ ring->desc = dma_alloc_coherent(dev->hw->dev, ++ size * sizeof(*ring->desc), ++ &ring->desc_phys, GFP_KERNEL); ++ if (!ring->desc) ++ return -ENOMEM; ++ ++ ring->size = size; ++ mtk_wed_ring_reset(ring->desc, size); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++{ ++ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; ++ ++ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) ++ return -ENOMEM; ++ ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, ++ size); ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, ++ size); ++ ++ return 0; ++} ++ ++static void ++mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) ++{ ++ u32 wdma_mask; ++ u32 val; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) ++ if (!dev->tx_wdma[i].desc) ++ mtk_wed_wdma_ring_setup(dev, i, 16); ++ ++ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); ++ ++ mtk_wed_hw_init(dev); ++ ++ wed_set(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ ++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); ++ ++ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, ++ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | ++ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); ++ ++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, ++ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); ++ ++ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); ++ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); ++ ++ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); ++ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); ++ ++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); ++ wed_w32(dev, MTK_WED_INT_MASK, irq_mask); ++ ++ wed_set(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_TX_DMA_EN | ++ MTK_WED_GLO_CFG_RX_DMA_EN); ++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); ++ wed_set(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); ++ ++ mtk_wed_set_ext_int(dev, true); ++ val = dev->wlan.wpdma_phys | ++ MTK_PCIE_MIRROR_MAP_EN | ++ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); ++ ++ if (dev->hw->index) ++ val |= BIT(1); ++ val |= BIT(0); ++ regmap_write(dev->hw->mirror, dev->hw->index * 4, val); ++ ++ dev->running = true; ++} ++ ++static int ++mtk_wed_attach(struct mtk_wed_device *dev) ++ __releases(RCU) ++{ ++ struct mtk_wed_hw *hw; ++ int ret 
= 0; ++ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), ++ "mtk_wed_attach without holding the RCU read lock"); ++ ++ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || ++ !try_module_get(THIS_MODULE)) ++ ret = -ENODEV; ++ ++ rcu_read_unlock(); ++ ++ if (ret) ++ return ret; ++ ++ mutex_lock(&hw_lock); ++ ++ hw = mtk_wed_assign(dev); ++ if (!hw) { ++ module_put(THIS_MODULE); ++ ret = -ENODEV; ++ goto out; ++ } ++ ++ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); ++ ++ dev->hw = hw; ++ dev->dev = hw->dev; ++ dev->irq = hw->irq; ++ dev->wdma_idx = hw->index; ++ ++ if (hw->eth->dma_dev == hw->eth->dev && ++ of_dma_is_coherent(hw->eth->dev->of_node)) ++ mtk_eth_set_dma_device(hw->eth, hw->dev); ++ ++ ret = mtk_wed_buffer_alloc(dev); ++ if (ret) { ++ mtk_wed_detach(dev); ++ goto out; ++ } ++ ++ mtk_wed_hw_init_early(dev); ++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); ++ ++out: ++ mutex_unlock(&hw_lock); ++ ++ return ret; ++} ++ ++static int ++mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) ++{ ++ struct mtk_wed_ring *ring = &dev->tx_ring[idx]; ++ ++ /* ++ * Tx ring redirection: ++ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN ++ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) ++ * registers. ++ * ++ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it ++ * into MTK_WED_WPDMA_RING_TX(n) registers. ++ * It gets filled with packets picked up from WED TX ring and from ++ * WDMA RX. ++ */ ++ ++ BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); ++ ++ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) ++ return -ENOMEM; ++ ++ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ return -ENOMEM; ++ ++ ring->reg_base = MTK_WED_RING_TX(idx); ++ ring->wpdma = regs; ++ ++ /* WED -> WPDMA */ ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, ++ ring->desc_phys); ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, ++ MTK_WED_TX_RING_SIZE); ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) ++{ ++ struct mtk_wed_ring *ring = &dev->txfree_ring; ++ int i; ++ ++ /* ++ * For txfree event handling, the same DMA ring is shared between WED ++ * and WLAN. 
The WLAN driver accesses the ring index registers through ++ * WED ++ */ ++ ring->reg_base = MTK_WED_RING_RX(1); ++ ring->wpdma = regs; ++ ++ for (i = 0; i < 12; i += 4) { ++ u32 val = readl(regs + i); ++ ++ wed_w32(dev, MTK_WED_RING_RX(1) + i, val); ++ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); ++ } ++ ++ return 0; ++} ++ ++static u32 ++mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) ++{ ++ u32 val; ++ ++ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); ++ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); ++ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; ++ if (!dev->hw->num_flows) ++ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; ++ if (val && net_ratelimit()) ++ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); ++ ++ val = wed_r32(dev, MTK_WED_INT_STATUS); ++ val &= mask; ++ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ ++ ++ return val; ++} ++ ++static void ++mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) ++{ ++ if (!dev->running) ++ return; ++ ++ mtk_wed_set_ext_int(dev, !!mask); ++ wed_w32(dev, MTK_WED_INT_MASK, mask); ++} ++ ++int mtk_wed_flow_add(int index) ++{ ++ struct mtk_wed_hw *hw = hw_list[index]; ++ int ret; ++ ++ if (!hw || !hw->wed_dev) ++ return -ENODEV; ++ ++ if (hw->num_flows) { ++ hw->num_flows++; ++ return 0; ++ } ++ ++ mutex_lock(&hw_lock); ++ if (!hw->wed_dev) { ++ ret = -ENODEV; ++ goto out; ++ } ++ ++ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); ++ if (!ret) ++ hw->num_flows++; ++ mtk_wed_set_ext_int(hw->wed_dev, true); ++ ++out: ++ mutex_unlock(&hw_lock); ++ ++ return ret; ++} ++ ++void mtk_wed_flow_remove(int index) ++{ ++ struct mtk_wed_hw *hw = hw_list[index]; ++ ++ if (!hw) ++ return; ++ ++ if (--hw->num_flows) ++ return; ++ ++ mutex_lock(&hw_lock); ++ if (!hw->wed_dev) ++ goto out; ++ ++ hw->wed_dev->wlan.offload_disable(hw->wed_dev); ++ mtk_wed_set_ext_int(hw->wed_dev, true); ++ ++out: ++ mutex_unlock(&hw_lock); ++} ++ ++void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index) ++{ ++ static const struct mtk_wed_ops wed_ops = { ++ .attach = mtk_wed_attach, ++ .tx_ring_setup = mtk_wed_tx_ring_setup, ++ .txfree_ring_setup = mtk_wed_txfree_ring_setup, ++ .start = mtk_wed_start, ++ .stop = mtk_wed_stop, ++ .reset_dma = mtk_wed_reset_dma, ++ .reg_read = wed_r32, ++ .reg_write = wed_w32, ++ .irq_get = mtk_wed_irq_get, ++ .irq_set_mask = mtk_wed_irq_set_mask, ++ .detach = mtk_wed_detach, ++ }; ++ struct device_node *eth_np = eth->dev->of_node; ++ struct platform_device *pdev; ++ struct mtk_wed_hw *hw; ++ struct regmap *regs; ++ int irq; ++ ++ if (!np) ++ return; ++ ++ pdev = of_find_device_by_node(np); ++ if (!pdev) ++ return; ++ ++ get_device(&pdev->dev); ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return; ++ ++ regs = syscon_regmap_lookup_by_phandle(np, NULL); ++ if (!regs) ++ return; ++ ++ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); ++ ++ mutex_lock(&hw_lock); ++ ++ if (WARN_ON(hw_list[index])) ++ goto unlock; ++ ++ hw = kzalloc(sizeof(*hw), GFP_KERNEL); ++ hw->node = np; ++ hw->regs = regs; ++ hw->eth = eth; ++ hw->dev = &pdev->dev; ++ hw->wdma = wdma; ++ hw->index = index; ++ hw->irq = irq; ++ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, ++ "mediatek,pcie-mirror"); ++ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, ++ "mediatek,hifsys"); ++ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { ++ kfree(hw); ++ goto unlock; ++ } ++ ++ if (!index) { ++ regmap_write(hw->mirror, 0, 0); ++ regmap_write(hw->mirror, 4, 0); ++ } ++ mtk_wed_hw_add_debugfs(hw); ++ ++ 
hw_list[index] = hw; ++ ++unlock: ++ mutex_unlock(&hw_lock); ++} ++ ++void mtk_wed_exit(void) ++{ ++ int i; ++ ++ rcu_assign_pointer(mtk_soc_wed_ops, NULL); ++ ++ synchronize_rcu(); ++ ++ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { ++ struct mtk_wed_hw *hw; ++ ++ hw = hw_list[i]; ++ if (!hw) ++ continue; ++ ++ hw_list[i] = NULL; ++ debugfs_remove(hw->debugfs_dir); ++ put_device(hw->dev); ++ kfree(hw); ++ } ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -0,0 +1,128 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#ifndef __MTK_WED_PRIV_H ++#define __MTK_WED_PRIV_H ++ ++#include ++#include ++#include ++ ++struct mtk_eth; ++ ++struct mtk_wed_hw { ++ struct device_node *node; ++ struct mtk_eth *eth; ++ struct regmap *regs; ++ struct regmap *hifsys; ++ struct device *dev; ++ void __iomem *wdma; ++ struct regmap *mirror; ++ struct dentry *debugfs_dir; ++ struct mtk_wed_device *wed_dev; ++ u32 debugfs_reg; ++ u32 num_flows; ++ char dirname[5]; ++ int irq; ++ int index; ++}; ++ ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++static inline void ++wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ regmap_write(dev->hw->regs, reg, val); ++} ++ ++static inline u32 ++wed_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ unsigned int val; ++ ++ regmap_read(dev->hw->regs, reg, &val); ++ ++ return val; ++} ++ ++static inline void ++wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ writel(val, dev->hw->wdma + reg); ++} ++ ++static inline u32 ++wdma_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ return readl(dev->hw->wdma + reg); ++} ++ ++static inline u32 ++wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) ++{ ++ if (!dev->tx_ring[ring].wpdma) ++ return 0; ++ ++ return readl(dev->tx_ring[ring].wpdma + reg); ++} ++ ++static inline void ++wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) ++{ ++ if (!dev->tx_ring[ring].wpdma) ++ return; ++ ++ writel(val, dev->tx_ring[ring].wpdma + reg); ++} ++ ++static inline u32 ++wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ if (!dev->txfree_ring.wpdma) ++ return 0; ++ ++ return readl(dev->txfree_ring.wpdma + reg); ++} ++ ++static inline void ++wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ if (!dev->txfree_ring.wpdma) ++ return; ++ ++ writel(val, dev->txfree_ring.wpdma + reg); ++} ++ ++void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index); ++void mtk_wed_exit(void); ++int mtk_wed_flow_add(int index); ++void mtk_wed_flow_remove(int index); ++#else ++static inline void ++mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index) ++{ ++} ++static inline void ++mtk_wed_exit(void) ++{ ++} ++static inline int mtk_wed_flow_add(int index) ++{ ++ return -EINVAL; ++} ++static inline void mtk_wed_flow_remove(int index) ++{ ++} ++#endif ++ ++#ifdef CONFIG_DEBUG_FS ++void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); ++#else ++static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) ++{ ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +@@ -0,0 +1,175 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#include ++#include "mtk_wed.h" ++#include "mtk_wed_regs.h" ++ ++struct reg_dump { ++ const char *name; ++ u16 offset; ++ u8 type; ++ u8 base; ++}; ++ ++enum { ++ DUMP_TYPE_STRING, ++ DUMP_TYPE_WED, ++ DUMP_TYPE_WDMA, ++ DUMP_TYPE_WPDMA_TX, ++ DUMP_TYPE_WPDMA_TXFREE, ++}; 
++ ++#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } ++#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } ++#define DUMP_RING(_prefix, _base, ...) \ ++ { _prefix " BASE", _base, __VA_ARGS__ }, \ ++ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ ++ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ ++ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } ++ ++#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) ++#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) ++ ++#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) ++#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) ++ ++#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) ++#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) ++ ++static void ++print_reg_val(struct seq_file *s, const char *name, u32 val) ++{ ++ seq_printf(s, "%-32s %08x\n", name, val); ++} ++ ++static void ++dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, ++ const struct reg_dump *regs, int n_regs) ++{ ++ const struct reg_dump *cur; ++ u32 val; ++ ++ for (cur = regs; cur < ®s[n_regs]; cur++) { ++ switch (cur->type) { ++ case DUMP_TYPE_STRING: ++ seq_printf(s, "%s======== %s:\n", ++ cur > regs ? "\n" : "", ++ cur->name); ++ continue; ++ case DUMP_TYPE_WED: ++ val = wed_r32(dev, cur->offset); ++ break; ++ case DUMP_TYPE_WDMA: ++ val = wdma_r32(dev, cur->offset); ++ break; ++ case DUMP_TYPE_WPDMA_TX: ++ val = wpdma_tx_r32(dev, cur->base, cur->offset); ++ break; ++ case DUMP_TYPE_WPDMA_TXFREE: ++ val = wpdma_txfree_r32(dev, cur->offset); ++ break; ++ } ++ print_reg_val(s, cur->name, val); ++ } ++} ++ ++ ++static int ++wed_txinfo_show(struct seq_file *s, void *data) ++{ ++ static const struct reg_dump regs[] = { ++ DUMP_STR("WED TX"), ++ DUMP_WED(WED_TX_MIB(0)), ++ DUMP_WED_RING(WED_RING_TX(0)), ++ ++ DUMP_WED(WED_TX_MIB(1)), ++ DUMP_WED_RING(WED_RING_TX(1)), ++ ++ DUMP_STR("WPDMA TX"), ++ DUMP_WED(WED_WPDMA_TX_MIB(0)), ++ DUMP_WED_RING(WED_WPDMA_RING_TX(0)), ++ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), ++ ++ DUMP_WED(WED_WPDMA_TX_MIB(1)), ++ DUMP_WED_RING(WED_WPDMA_RING_TX(1)), ++ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), ++ ++ DUMP_STR("WPDMA TX"), ++ DUMP_WPDMA_TX_RING(0), ++ DUMP_WPDMA_TX_RING(1), ++ ++ DUMP_STR("WED WDMA RX"), ++ DUMP_WED(WED_WDMA_RX_MIB(0)), ++ DUMP_WED_RING(WED_WDMA_RING_RX(0)), ++ DUMP_WED(WED_WDMA_RX_THRES(0)), ++ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), ++ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), ++ ++ DUMP_WED(WED_WDMA_RX_MIB(1)), ++ DUMP_WED_RING(WED_WDMA_RING_RX(1)), ++ DUMP_WED(WED_WDMA_RX_THRES(1)), ++ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), ++ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), ++ ++ DUMP_STR("WDMA RX"), ++ DUMP_WDMA(WDMA_GLO_CFG), ++ DUMP_WDMA_RING(WDMA_RING_RX(0)), ++ DUMP_WDMA_RING(WDMA_RING_RX(1)), ++ }; ++ struct mtk_wed_hw *hw = s->private; ++ struct mtk_wed_device *dev = hw->wed_dev; ++ ++ if (!dev) ++ return 0; ++ ++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); ++ ++ return 0; ++} ++DEFINE_SHOW_ATTRIBUTE(wed_txinfo); ++ ++ ++static int ++mtk_wed_reg_set(void *data, u64 val) ++{ ++ struct mtk_wed_hw *hw = data; ++ ++ regmap_write(hw->regs, hw->debugfs_reg, val); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_reg_get(void *data, u64 *val) ++{ ++ struct mtk_wed_hw *hw = data; ++ unsigned int regval; ++ int ret; ++ ++ ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); ++ if (ret) ++ return ret; ++ ++ *val = regval; ++ ++ return 0; ++} ++ ++DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, 
mtk_wed_reg_set, ++ "0x%08llx\n"); ++ ++void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) ++{ ++ struct dentry *dir; ++ ++ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); ++ dir = debugfs_create_dir(hw->dirname, NULL); ++ if (!dir) ++ return; ++ ++ hw->debugfs_dir = dir; ++ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); ++ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); ++ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c +@@ -0,0 +1,8 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau */ ++ ++#include ++#include ++ ++const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; ++EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -0,0 +1,251 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau */ ++ ++#ifndef __MTK_WED_REGS_H ++#define __MTK_WED_REGS_H ++ ++#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) ++#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) ++#define MTK_WDMA_DESC_CTRL_BURST BIT(16) ++#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) ++#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) ++#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) ++ ++struct mtk_wdma_desc { ++ __le32 buf0; ++ __le32 ctrl; ++ __le32 buf1; ++ __le32 info; ++} __packed __aligned(4); ++ ++#define MTK_WED_RESET 0x008 ++#define MTK_WED_RESET_TX_BM BIT(0) ++#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) ++#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) ++#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) ++#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) ++#define MTK_WED_RESET_WED_TX_DMA BIT(12) ++#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) ++#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) ++#define MTK_WED_RESET_WED BIT(31) ++ ++#define MTK_WED_CTRL 0x00c ++#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) ++#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) ++#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) ++#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) ++#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) ++#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) ++#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) ++#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) ++#define MTK_WED_CTRL_RESERVE_EN BIT(12) ++#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) ++#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) ++#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) ++ ++#define MTK_WED_EXT_INT_STATUS 0x020 ++#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) ++#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) ++#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) ++#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) ++#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) ++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) ++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) ++#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) ++#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) ++#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ ++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ ++ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ ++ 
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ ++ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) ++ ++#define MTK_WED_EXT_INT_MASK 0x028 ++ ++#define MTK_WED_STATUS 0x060 ++#define MTK_WED_STATUS_TX GENMASK(15, 8) ++ ++#define MTK_WED_TX_BM_CTRL 0x080 ++#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) ++#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) ++#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) ++ ++#define MTK_WED_TX_BM_BASE 0x084 ++ ++#define MTK_WED_TX_BM_TKID 0x088 ++#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) ++#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) ++ ++#define MTK_WED_TX_BM_BUF_LEN 0x08c ++ ++#define MTK_WED_TX_BM_INTF 0x09c ++#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) ++#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) ++#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) ++#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) ++ ++#define MTK_WED_TX_BM_DYN_THR 0x0a0 ++#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) ++#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) ++ ++#define MTK_WED_INT_STATUS 0x200 ++#define MTK_WED_INT_MASK 0x204 ++ ++#define MTK_WED_GLO_CFG 0x208 ++#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) ++#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) ++#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) ++#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) ++#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) ++#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) ++#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) ++#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) ++#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) ++#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) ++#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) ++#define MTK_WED_GLO_CFG_SW_RESET BIT(24) ++#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) ++#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) ++#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) ++#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) ++#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) ++ ++#define MTK_WED_RESET_IDX 0x20c ++#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) ++ ++#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) ++ ++#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) ++ ++#define MTK_WED_WPDMA_INT_TRIGGER 0x504 ++#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) ++#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) ++ ++#define MTK_WED_WPDMA_GLO_CFG 0x508 ++#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) ++#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) ++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) ++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) ++#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) ++#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) ++#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) ++#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) ++#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) ++#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) ++#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) ++#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) ++#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) ++#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) ++#define 
MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) ++#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) ++#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) ++ ++#define MTK_WED_WPDMA_RESET_IDX 0x50c ++#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WED_WPDMA_INT_CTRL 0x520 ++#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) ++ ++#define MTK_WED_WPDMA_INT_MASK 0x524 ++ ++#define MTK_WED_PCIE_CFG_BASE 0x560 ++ ++#define MTK_WED_PCIE_INT_TRIGGER 0x570 ++#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) ++ ++#define MTK_WED_WPDMA_CFG_BASE 0x580 ++ ++#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) ++#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) ++ ++#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) ++#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) ++#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) ++#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) ++ ++#define MTK_WED_WDMA_GLO_CFG 0xa04 ++#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) ++#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) ++#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) ++#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) ++#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) ++#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) ++#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) ++#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) ++#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) ++#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) ++#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) ++#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) ++#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) ++#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) ++#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) ++#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) ++ ++#define MTK_WED_WDMA_RESET_IDX 0xa08 ++#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) ++#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) ++ ++#define MTK_WED_WDMA_INT_TRIGGER 0xa28 ++#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) ++ ++#define MTK_WED_WDMA_INT_CTRL 0xa2c ++#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) ++ ++#define MTK_WED_WDMA_OFFSET0 0xaa4 ++#define MTK_WED_WDMA_OFFSET1 0xaa8 ++ ++#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) ++#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) ++#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) ++ ++#define MTK_WED_RING_OFS_BASE 0x00 ++#define MTK_WED_RING_OFS_COUNT 0x04 ++#define MTK_WED_RING_OFS_CPU_IDX 0x08 ++#define MTK_WED_RING_OFS_DMA_IDX 0x0c ++ ++#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) ++ ++#define MTK_WDMA_GLO_CFG 0x204 ++#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) ++ ++#define MTK_WDMA_RESET_IDX 0x208 ++#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WDMA_INT_MASK 0x228 ++#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) ++#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) ++#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) ++#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) ++#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) ++#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) ++ ++#define MTK_WDMA_INT_GRP1 0x250 ++#define MTK_WDMA_INT_GRP2 0x254 ++ ++#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 
0x4 : 0x0) ++#define MTK_PCIE_MIRROR_MAP_EN BIT(0) ++#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) ++ ++/* DMA channel mapping */ ++#define HIFSYS_DMA_AG_MAP 0x008 ++ ++#endif +--- /dev/null ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -0,0 +1,131 @@ ++#ifndef __MTK_WED_H ++#define __MTK_WED_H ++ ++#include ++#include ++#include ++#include ++ ++#define MTK_WED_TX_QUEUES 2 ++ ++struct mtk_wed_hw; ++struct mtk_wdma_desc; ++ ++struct mtk_wed_ring { ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ int size; ++ ++ u32 reg_base; ++ void __iomem *wpdma; ++}; ++ ++struct mtk_wed_device { ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++ const struct mtk_wed_ops *ops; ++ struct device *dev; ++ struct mtk_wed_hw *hw; ++ bool init_done, running; ++ int wdma_idx; ++ int irq; ++ ++ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; ++ struct mtk_wed_ring txfree_ring; ++ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; ++ ++ struct { ++ int size; ++ void **pages; ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ } buf_ring; ++ ++ /* filled by driver: */ ++ struct { ++ struct pci_dev *pci_dev; ++ ++ u32 wpdma_phys; ++ ++ u16 token_start; ++ unsigned int nbuf; ++ ++ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); ++ int (*offload_enable)(struct mtk_wed_device *wed); ++ void (*offload_disable)(struct mtk_wed_device *wed); ++ } wlan; ++#endif ++}; ++ ++struct mtk_wed_ops { ++ int (*attach)(struct mtk_wed_device *dev); ++ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, ++ void __iomem *regs); ++ int (*txfree_ring_setup)(struct mtk_wed_device *dev, ++ void __iomem *regs); ++ void (*detach)(struct mtk_wed_device *dev); ++ ++ void (*stop)(struct mtk_wed_device *dev); ++ void (*start)(struct mtk_wed_device *dev, u32 irq_mask); ++ void (*reset_dma)(struct mtk_wed_device *dev); ++ ++ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg); ++ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val); ++ ++ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); ++ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); ++}; ++ ++extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; ++ ++static inline int ++mtk_wed_device_attach(struct mtk_wed_device *dev) ++{ ++ int ret = -ENODEV; ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++ rcu_read_lock(); ++ dev->ops = rcu_dereference(mtk_soc_wed_ops); ++ if (dev->ops) ++ ret = dev->ops->attach(dev); ++ else ++ rcu_read_unlock(); ++ ++ if (ret) ++ dev->ops = NULL; ++#endif ++ ++ return ret; ++} ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++#define mtk_wed_device_active(_dev) !!(_dev)->ops ++#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) ++#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) ++#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ ++ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ ++ (_dev)->ops->txfree_ring_setup(_dev, _regs) ++#define mtk_wed_device_reg_read(_dev, _reg) \ ++ (_dev)->ops->reg_read(_dev, _reg) ++#define mtk_wed_device_reg_write(_dev, _reg, _val) \ ++ (_dev)->ops->reg_write(_dev, _reg, _val) ++#define mtk_wed_device_irq_get(_dev, _mask) \ ++ (_dev)->ops->irq_get(_dev, _mask) ++#define mtk_wed_device_irq_set_mask(_dev, _mask) \ ++ (_dev)->ops->irq_set_mask(_dev, _mask) ++#else ++static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) ++{ ++ return false; ++} ++#define mtk_wed_device_detach(_dev) do {} while (0) ++#define mtk_wed_device_start(_dev, _mask) do {} while (0) ++#define 
mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_reg_read(_dev, _reg) 0 ++#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) ++#define mtk_wed_device_irq_get(_dev, _mask) 0 ++#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) ++#endif ++ ++#endif diff --git a/target/linux/generic/pending-5.10/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch b/target/linux/generic/pending-5.10/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch new file mode 100644 index 0000000000..1193cb0f9d --- /dev/null +++ b/target/linux/generic/pending-5.10/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch @@ -0,0 +1,269 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 18:29:22 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: implement flow offloading + to WED devices + +This allows hardware flow offloading from Ethernet to WLAN on MT7622 SoC + +Co-developed-by: Lorenzo Bianconi +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -329,6 +329,24 @@ int mtk_foe_entry_set_pppoe(struct mtk_f + return 0; + } + ++int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, ++ int bss, int wcid) ++{ ++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); ++ u32 *ib2 = mtk_foe_entry_ib2(entry); ++ ++ *ib2 &= ~MTK_FOE_IB2_PORT_MG; ++ *ib2 |= MTK_FOE_IB2_WDMA_WINFO; ++ if (wdma_idx) ++ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; ++ ++ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | ++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | ++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); ++ ++ return 0; ++} ++ + static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) + { + return !(entry->ib1 & MTK_FOE_IB1_STATIC) && +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -48,9 +48,9 @@ enum { + #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) + #define MTK_FOE_IB2_MULTICAST BIT(8) + +-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) +-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) +-#define MTK_FOE_IB2_WHNAT_NAT BIT(17) ++#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) ++#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) ++#define MTK_FOE_IB2_WDMA_WINFO BIT(17) + + #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) + +@@ -58,9 +58,9 @@ enum { + + #define MTK_FOE_IB2_DSCP GENMASK(31, 24) + +-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) +-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) +-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) ++#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) ++#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) ++#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) + + enum { + MTK_FOE_STATE_INVALID, +@@ -281,6 +281,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct + int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); + int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); + int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); ++int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, ++ int bss, int wcid); + int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp); + int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -11,6 +11,7 @@ + #include + #include + #include "mtk_eth_soc.h" ++#include 
"mtk_wed.h" + + struct mtk_flow_data { + struct ethhdr eth; +@@ -40,6 +41,7 @@ struct mtk_flow_entry { + struct rhash_head node; + unsigned long cookie; + u16 hash; ++ s8 wed_index; + }; + + static const struct rhashtable_params mtk_flow_ht_params = { +@@ -81,6 +83,35 @@ mtk_flow_offload_mangle_eth(const struct + memcpy(dest, src, act->mangle.mask ? 2 : 4); + } + ++static int ++mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) ++{ ++ struct net_device_path_ctx ctx = { ++ .dev = dev, ++ .daddr = addr, ++ }; ++ struct net_device_path path = {}; ++ ++ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) ++ return -1; ++ ++ if (!dev->netdev_ops->ndo_fill_forward_path) ++ return -1; ++ ++ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) ++ return -1; ++ ++ if (path.type != DEV_PATH_MTK_WDMA) ++ return -1; ++ ++ info->wdma_idx = path.mtk_wdma.wdma_idx; ++ info->queue = path.mtk_wdma.queue; ++ info->bss = path.mtk_wdma.bss; ++ info->wcid = path.mtk_wdma.wcid; ++ ++ return 0; ++} ++ + + static int + mtk_flow_mangle_ports(const struct flow_action_entry *act, +@@ -150,10 +181,20 @@ mtk_flow_get_dsa_port(struct net_device + + static int + mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, +- struct net_device *dev) ++ struct net_device *dev, const u8 *dest_mac, ++ int *wed_index) + { ++ struct mtk_wdma_info info = {}; + int pse_port, dsa_port; + ++ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { ++ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, ++ info.wcid); ++ pse_port = 3; ++ *wed_index = info.wdma_idx; ++ goto out; ++ } ++ + dsa_port = mtk_flow_get_dsa_port(&dev); + if (dsa_port >= 0) + mtk_foe_entry_set_dsa(foe, dsa_port); +@@ -165,6 +206,7 @@ mtk_flow_set_output_device(struct mtk_et + else + return -EOPNOTSUPP; + ++out: + mtk_foe_entry_set_pse_port(foe, pse_port); + + return 0; +@@ -180,6 +222,7 @@ mtk_flow_offload_replace(struct mtk_eth + struct net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; ++ int wed_index = -1; + u16 addr_type = 0; + u32 timestamp; + u8 l4proto = 0; +@@ -327,10 +370,14 @@ mtk_flow_offload_replace(struct mtk_eth + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); + +- err = mtk_flow_set_output_device(eth, &foe, odev); ++ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, ++ &wed_index); + if (err) + return err; + ++ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) ++ return err; ++ + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; +@@ -344,6 +391,7 @@ mtk_flow_offload_replace(struct mtk_eth + } + + entry->hash = hash; ++ entry->wed_index = wed_index; + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) +@@ -354,6 +402,8 @@ clear_flow: + mtk_foe_entry_clear(ð->ppe, hash); + free: + kfree(entry); ++ if (wed_index >= 0) ++ mtk_wed_flow_remove(wed_index); + return err; + } + +@@ -370,6 +420,8 @@ mtk_flow_offload_destroy(struct mtk_eth + mtk_foe_entry_clear(ð->ppe, entry->hash); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); ++ if (entry->wed_index >= 0) ++ mtk_wed_flow_remove(entry->wed_index); + kfree(entry); + + return 0; +--- a/drivers/net/ethernet/mediatek/mtk_wed.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + struct mtk_eth; + +@@ -27,6 +28,12 @@ struct mtk_wed_hw { + int index; + }; + ++struct mtk_wdma_info { ++ u8 wdma_idx; ++ 
u8 queue; ++ u16 wcid; ++ u8 bss; ++}; + + #ifdef CONFIG_NET_MEDIATEK_SOC_WED + static inline void +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -833,6 +833,7 @@ enum net_device_path_type { + DEV_PATH_BRIDGE, + DEV_PATH_PPPOE, + DEV_PATH_DSA, ++ DEV_PATH_MTK_WDMA, + }; + + struct net_device_path { +@@ -858,6 +859,12 @@ struct net_device_path { + int port; + u16 proto; + } dsa; ++ struct { ++ u8 wdma_idx; ++ u8 queue; ++ u16 wcid; ++ u8 bss; ++ } mtk_wdma; + }; + }; + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -883,6 +883,10 @@ int dev_fill_forward_path(const struct n + if (WARN_ON_ONCE(last_dev == ctx.dev)) + return -1; + } ++ ++ if (!ctx.dev) ++ return ret; ++ + path = dev_fwd_path(stack); + if (!path) + return -1; diff --git a/target/linux/generic/pending-5.10/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch b/target/linux/generic/pending-5.10/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch new file mode 100644 index 0000000000..91d706f85b --- /dev/null +++ b/target/linux/generic/pending-5.10/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch @@ -0,0 +1,62 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 18:36:36 +0100 +Subject: [PATCH] arm64: dts: mediatek: mt7622: introduce nodes for + Wireless Ethernet Dispatch + +Introduce wed0 and wed1 nodes in order to enable offloading forwarding +between ethernet and wireless devices on the mt7622 chipset. + +Signed-off-by: Felix Fietkau +--- + +--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi +@@ -892,6 +892,11 @@ + }; + }; + ++ hifsys: syscon@1af00000 { ++ compatible = "mediatek,mt7622-hifsys", "syscon"; ++ reg = <0 0x1af00000 0 0x70>; ++ }; ++ + ethsys: syscon@1b000000 { + compatible = "mediatek,mt7622-ethsys", + "syscon"; +@@ -910,6 +915,26 @@ + #dma-cells = <1>; + }; + ++ pcie_mirror: pcie-mirror@10000400 { ++ compatible = "mediatek,mt7622-pcie-mirror", ++ "syscon"; ++ reg = <0 0x10000400 0 0x10>; ++ }; ++ ++ wed0: wed@1020a000 { ++ compatible = "mediatek,mt7622-wed", ++ "syscon"; ++ reg = <0 0x1020a000 0 0x1000>; ++ interrupts = ; ++ }; ++ ++ wed1: wed@1020b000 { ++ compatible = "mediatek,mt7622-wed", ++ "syscon"; ++ reg = <0 0x1020b000 0 0x1000>; ++ interrupts = ; ++ }; ++ + eth: ethernet@1b100000 { + compatible = "mediatek,mt7622-eth", + "mediatek,mt2701-eth", +@@ -937,6 +962,9 @@ + mediatek,ethsys = <ðsys>; + mediatek,sgmiisys = <&sgmiisys>; + mediatek,cci-control = <&cci_control2>; ++ mediatek,wed = <&wed0>, <&wed1>; ++ mediatek,pcie-mirror = <&pcie_mirror>; ++ mediatek,hifsys = <&hifsys>; + dma-coherent; + #address-cells = <1>; + #size-cells = <0>; diff --git a/target/linux/generic/pending-5.10/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch b/target/linux/generic/pending-5.10/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch new file mode 100644 index 0000000000..925c16ac69 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch @@ -0,0 +1,79 @@ +From: David Bentham +Date: Mon, 21 Feb 2022 15:36:16 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add ipv6 flow offload + support + +Add the missing IPv6 flow offloading support for routing only. +Hardware flow offloading is done by the packet processing engine (PPE) +of the Ethernet MAC and as it doesn't support mangling of IPv6 packets, +IPv6 NAT cannot be supported. 
+ +Signed-off-by: David Bentham +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -21,6 +22,11 @@ struct mtk_flow_data { + __be32 src_addr; + __be32 dst_addr; + } v4; ++ ++ struct { ++ struct in6_addr src_addr; ++ struct in6_addr dst_addr; ++ } v6; + }; + + __be16 src_port; +@@ -66,6 +72,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en + data->v4.dst_addr, data->dst_port); + } + ++static int ++mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) ++{ ++ return mtk_foe_entry_set_ipv6_tuple(foe, ++ data->v6.src_addr.s6_addr32, data->src_port, ++ data->v6.dst_addr.s6_addr32, data->dst_port); ++} ++ + static void + mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) + { +@@ -297,6 +311,9 @@ mtk_flow_offload_replace(struct mtk_eth + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; ++ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; ++ break; + default: + return -EOPNOTSUPP; + } +@@ -332,6 +349,17 @@ mtk_flow_offload_replace(struct mtk_eth + mtk_flow_set_ipv4_addr(&foe, &data, false); + } + ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { ++ struct flow_match_ipv6_addrs addrs; ++ ++ flow_rule_match_ipv6_addrs(rule, &addrs); ++ ++ data.v6.src_addr = addrs.key->src; ++ data.v6.dst_addr = addrs.key->dst; ++ ++ mtk_flow_set_ipv6_addr(&foe, &data); ++ } ++ + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; diff --git a/target/linux/generic/pending-5.10/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch b/target/linux/generic/pending-5.10/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch new file mode 100644 index 0000000000..1950d81ebb --- /dev/null +++ b/target/linux/generic/pending-5.10/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch @@ -0,0 +1,29 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:37:21 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: support TC_SETUP_BLOCK for + PPE offload + +This allows offload entries to be created from user space + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -564,10 +564,13 @@ mtk_eth_setup_tc_block(struct net_device + int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) + { +- if (type == TC_SETUP_FT) ++ switch (type) { ++ case TC_SETUP_BLOCK: ++ case TC_SETUP_FT: + return mtk_eth_setup_tc_block(dev, type_data); +- +- return -EOPNOTSUPP; ++ default: ++ return -EOPNOTSUPP; ++ } + } + + int mtk_eth_offload_init(struct mtk_eth *eth) diff --git a/target/linux/generic/pending-5.10/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch b/target/linux/generic/pending-5.10/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch new file mode 100644 index 0000000000..a1c4fbfc55 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch @@ -0,0 +1,159 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:38:20 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: allocate struct mtk_ppe + separately + +Preparation for adding more data to it, which will increase its size. 
+ +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2305,7 +2305,7 @@ static int mtk_open(struct net_device *d + if (err) + return err; + +- if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) ++ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) + gdm_config = MTK_GDMA_TO_PPE; + + mtk_gdm_config(eth, gdm_config); +@@ -2379,7 +2379,7 @@ static int mtk_stop(struct net_device *d + mtk_dma_free(eth); + + if (eth->soc->offload_version) +- mtk_ppe_stop(ð->ppe); ++ mtk_ppe_stop(eth->ppe); + + return 0; + } +@@ -3265,10 +3265,11 @@ static int mtk_probe(struct platform_dev + } + + if (eth->soc->offload_version) { +- err = mtk_ppe_init(ð->ppe, eth->dev, +- eth->base + MTK_ETH_PPE_BASE, 2); +- if (err) ++ eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); ++ if (!eth->ppe) { ++ err = -ENOMEM; + goto err_free_dev; ++ } + + err = mtk_eth_offload_init(eth); + if (err) +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -976,7 +976,7 @@ struct mtk_eth { + u32 rx_dma_l4_valid; + int ip_align; + +- struct mtk_ppe ppe; ++ struct mtk_ppe *ppe; + struct rhashtable flow_table; + }; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -384,10 +384,15 @@ int mtk_foe_entry_commit(struct mtk_ppe + return hash; + } + +-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, ++struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, + int version) + { + struct mtk_foe_entry *foe; ++ struct mtk_ppe *ppe; ++ ++ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); ++ if (!ppe) ++ return NULL; + + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. 
+@@ -399,13 +404,13 @@ int mtk_ppe_init(struct mtk_ppe *ppe, st + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), + &ppe->foe_phys, GFP_KERNEL); + if (!foe) +- return -ENOMEM; ++ return NULL; + + ppe->foe_table = foe; + + mtk_ppe_debugfs_init(ppe); + +- return 0; ++ return ppe; + } + + static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -246,8 +246,7 @@ struct mtk_ppe { + void *acct_table; + }; + +-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, +- int version); ++struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); + int mtk_ppe_start(struct mtk_ppe *ppe); + int mtk_ppe_stop(struct mtk_ppe *ppe); + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -412,7 +412,7 @@ mtk_flow_offload_replace(struct mtk_eth + + entry->cookie = f->cookie; + timestamp = mtk_eth_timestamp(eth); +- hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); ++ hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); + if (hash < 0) { + err = hash; + goto free; +@@ -427,7 +427,7 @@ mtk_flow_offload_replace(struct mtk_eth + + return 0; + clear_flow: +- mtk_foe_entry_clear(ð->ppe, hash); ++ mtk_foe_entry_clear(eth->ppe, hash); + free: + kfree(entry); + if (wed_index >= 0) +@@ -445,7 +445,7 @@ mtk_flow_offload_destroy(struct mtk_eth + if (!entry) + return -ENOENT; + +- mtk_foe_entry_clear(ð->ppe, entry->hash); ++ mtk_foe_entry_clear(eth->ppe, entry->hash); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (entry->wed_index >= 0) +@@ -467,7 +467,7 @@ mtk_flow_offload_stats(struct mtk_eth *e + if (!entry) + return -ENOENT; + +- timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); ++ timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); + if (timestamp < 0) + return -ETIMEDOUT; + +@@ -523,7 +523,7 @@ mtk_eth_setup_tc_block(struct net_device + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + +- if (!eth->ppe.foe_table) ++ if (!eth->ppe || !eth->ppe->foe_table) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) +@@ -575,7 +575,7 @@ int mtk_eth_setup_tc(struct net_device * + + int mtk_eth_offload_init(struct mtk_eth *eth) + { +- if (!eth->ppe.foe_table) ++ if (!eth->ppe || !eth->ppe->foe_table) + return 0; + + return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); diff --git a/target/linux/generic/pending-5.10/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch b/target/linux/generic/pending-5.10/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch new file mode 100644 index 0000000000..ac67f8078c --- /dev/null +++ b/target/linux/generic/pending-5.10/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch @@ -0,0 +1,424 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:39:18 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: rework hardware flow table + management + +The hardware was designed to handle flow detection and creation of flow entries +by itself, relying on the software primarily for filling in egress routing +information. +When there is a hash collision between multiple flows, this allows the hardware +to maintain the entry for the most active flow. +Additionally, the hardware only keeps offloading active for entries with at +least 30 packets per second. + +With this rework, the code no longer creates a hardware entries directly. 
+Instead, the hardware entry is only created when the PPE reports a matching +unbound flow with the minimum target rate. +In order to reduce CPU overhead, looking for flows belonging to a hash entry +is rate limited to once every 100ms. + +This rework is also used as preparation for emulating bridge offload by +managing L4 offload entries on demand. + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + + #include "mtk_eth_soc.h" +@@ -1274,7 +1275,7 @@ static int mtk_poll_rx(struct napi_struc + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; +- u32 hash; ++ u32 hash, reason; + int mac; + + ring = mtk_get_rx_ring(eth); +@@ -1350,6 +1351,11 @@ static int mtk_poll_rx(struct napi_struc + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + } + ++ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); ++ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) ++ mtk_ppe_check_skb(eth->ppe, skb, ++ trxd.rxd4 & MTK_RXD4_FOE_ENTRY); ++ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + (trxd.rxd2 & RX_DMA_VTAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), +@@ -3265,7 +3271,7 @@ static int mtk_probe(struct platform_dev + } + + if (eth->soc->offload_version) { +- eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); ++ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); + if (!eth->ppe) { + err = -ENOMEM; + goto err_free_dev; +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -6,9 +6,12 @@ + #include + #include + #include ++#include "mtk_eth_soc.h" + #include "mtk_ppe.h" + #include "mtk_ppe_regs.h" + ++static DEFINE_SPINLOCK(ppe_lock); ++ + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) + { + writel(val, ppe->base + reg); +@@ -41,6 +44,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe + return ppe_m32(ppe, reg, val, 0); + } + ++static u32 mtk_eth_timestamp(struct mtk_eth *eth) ++{ ++ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; ++} ++ + static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) + { + int ret; +@@ -353,26 +361,59 @@ static inline bool mtk_foe_entry_usable( + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; + } + +-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, +- u16 timestamp) ++static bool ++mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) ++{ ++ int type, len; ++ ++ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) ++ return false; ++ ++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); ++ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) ++ len = offsetof(struct mtk_foe_entry, ipv6._rsv); ++ else ++ len = offsetof(struct mtk_foe_entry, ipv4.ib2); ++ ++ return !memcmp(&entry->data.data, &data->data, len - 4); ++} ++ ++static void ++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + struct mtk_foe_entry *hwe; +- u32 hash; ++ struct mtk_foe_entry foe; + ++ spin_lock_bh(&ppe_lock); ++ if (entry->hash == 0xffff) ++ goto out; ++ ++ hwe = &ppe->foe_table[entry->hash]; ++ memcpy(&foe, hwe, sizeof(foe)); ++ if (!mtk_flow_entry_match(entry, &foe)) { ++ entry->hash = 0xffff; ++ goto out; ++ } ++ ++ entry->data.ib1 = foe.ib1; ++ ++out: ++ spin_unlock_bh(&ppe_lock); ++} ++ ++static void ++__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, ++ u16 hash) ++{ ++ struct mtk_foe_entry *hwe; ++ u16 
timestamp; ++ ++ timestamp = mtk_eth_timestamp(ppe->eth); + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); + +- hash = mtk_ppe_hash_entry(entry); + hwe = &ppe->foe_table[hash]; +- if (!mtk_foe_entry_usable(hwe)) { +- hwe++; +- hash++; +- +- if (!mtk_foe_entry_usable(hwe)) +- return -ENOSPC; +- } +- + memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); + wmb(); + hwe->ib1 = entry->ib1; +@@ -380,13 +421,77 @@ int mtk_foe_entry_commit(struct mtk_ppe + dma_wmb(); + + mtk_ppe_cache_clear(ppe); ++} + +- return hash; ++void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ spin_lock_bh(&ppe_lock); ++ hlist_del_init(&entry->list); ++ if (entry->hash != 0xffff) { ++ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; ++ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, ++ MTK_FOE_STATE_BIND); ++ dma_wmb(); ++ } ++ entry->hash = 0xffff; ++ spin_unlock_bh(&ppe_lock); ++} ++ ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ u32 hash = mtk_ppe_hash_entry(&entry->data); ++ ++ entry->hash = 0xffff; ++ spin_lock_bh(&ppe_lock); ++ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); ++ spin_unlock_bh(&ppe_lock); ++ ++ return 0; ++} ++ ++void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) ++{ ++ struct hlist_head *head = &ppe->foe_flow[hash / 2]; ++ struct mtk_flow_entry *entry; ++ struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; ++ bool found = false; ++ ++ if (hlist_empty(head)) ++ return; ++ ++ spin_lock_bh(&ppe_lock); ++ hlist_for_each_entry(entry, head, list) { ++ if (found || !mtk_flow_entry_match(entry, hwe)) { ++ if (entry->hash != 0xffff) ++ entry->hash = 0xffff; ++ continue; ++ } ++ ++ entry->hash = hash; ++ __mtk_foe_entry_commit(ppe, &entry->data, hash); ++ found = true; ++ } ++ spin_unlock_bh(&ppe_lock); ++} ++ ++int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; ++ u16 timestamp; ++ ++ mtk_flow_entry_update(ppe, entry); ++ timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ ++ if (timestamp > now) ++ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; ++ else ++ return now - timestamp; + } + +-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, ++struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, + int version) + { ++ struct device *dev = eth->dev; + struct mtk_foe_entry *foe; + struct mtk_ppe *ppe; + +@@ -398,6 +503,7 @@ struct mtk_ppe *mtk_ppe_init(struct devi + * not coherent. 
+ */ + ppe->base = base; ++ ppe->eth = eth; + ppe->dev = dev; + ppe->version = version; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -235,7 +235,17 @@ enum { + MTK_PPE_CPU_REASON_INVALID = 0x1f, + }; + ++struct mtk_flow_entry { ++ struct rhash_head node; ++ struct hlist_node list; ++ unsigned long cookie; ++ struct mtk_foe_entry data; ++ u16 hash; ++ s8 wed_index; ++}; ++ + struct mtk_ppe { ++ struct mtk_eth *eth; + struct device *dev; + void __iomem *base; + int version; +@@ -243,18 +253,33 @@ struct mtk_ppe { + struct mtk_foe_entry *foe_table; + dma_addr_t foe_phys; + ++ u16 foe_check_time[MTK_PPE_ENTRIES]; ++ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; ++ + void *acct_table; + }; + +-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); ++struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); + int mtk_ppe_start(struct mtk_ppe *ppe); + int mtk_ppe_stop(struct mtk_ppe *ppe); + ++void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); ++ + static inline void +-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) ++mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) + { +- ppe->foe_table[hash].ib1 = 0; +- dma_wmb(); ++ u16 now, diff; ++ ++ if (!ppe) ++ return; ++ ++ now = (u16)jiffies; ++ diff = now - ppe->foe_check_time[hash]; ++ if (diff < HZ / 10) ++ return; ++ ++ ppe->foe_check_time[hash] = now; ++ __mtk_ppe_check_skb(ppe, skb, hash); + } + + static inline int +@@ -282,8 +307,9 @@ int mtk_foe_entry_set_vlan(struct mtk_fo + int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); + int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid); +-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, +- u16 timestamp); ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); ++void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); ++int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); + int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); + + #endif +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -43,13 +43,6 @@ struct mtk_flow_data { + } pppoe; + }; + +-struct mtk_flow_entry { +- struct rhash_head node; +- unsigned long cookie; +- u16 hash; +- s8 wed_index; +-}; +- + static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), + .key_offset = offsetof(struct mtk_flow_entry, cookie), +@@ -57,12 +50,6 @@ static const struct rhashtable_params mt + .automatic_shrinking = true, + }; + +-static u32 +-mtk_eth_timestamp(struct mtk_eth *eth) +-{ +- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +-} +- + static int + mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, + bool egress) +@@ -238,10 +225,8 @@ mtk_flow_offload_replace(struct mtk_eth + int offload_type = 0; + int wed_index = -1; + u16 addr_type = 0; +- u32 timestamp; + u8 l4proto = 0; + int err = 0; +- int hash; + int i; + + if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) +@@ -411,23 +396,21 @@ mtk_flow_offload_replace(struct mtk_eth + return -ENOMEM; + + entry->cookie = f->cookie; +- timestamp = mtk_eth_timestamp(eth); +- hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); +- if (hash < 0) { +- err = hash; ++ memcpy(&entry->data, &foe, sizeof(entry->data)); ++ 
entry->wed_index = wed_index; ++ ++ if (mtk_foe_entry_commit(eth->ppe, entry) < 0) + goto free; +- } + +- entry->hash = hash; +- entry->wed_index = wed_index; + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) +- goto clear_flow; ++ goto clear; + + return 0; +-clear_flow: +- mtk_foe_entry_clear(eth->ppe, hash); ++ ++clear: ++ mtk_foe_entry_clear(eth->ppe, entry); + free: + kfree(entry); + if (wed_index >= 0) +@@ -445,7 +428,7 @@ mtk_flow_offload_destroy(struct mtk_eth + if (!entry) + return -ENOENT; + +- mtk_foe_entry_clear(eth->ppe, entry->hash); ++ mtk_foe_entry_clear(eth->ppe, entry); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (entry->wed_index >= 0) +@@ -459,7 +442,6 @@ static int + mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) + { + struct mtk_flow_entry *entry; +- int timestamp; + u32 idle; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, +@@ -467,11 +449,7 @@ mtk_flow_offload_stats(struct mtk_eth *e + if (!entry) + return -ENOENT; + +- timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); +- if (timestamp < 0) +- return -ETIMEDOUT; +- +- idle = mtk_eth_timestamp(eth) - timestamp; ++ idle = mtk_foe_entry_idle_time(eth->ppe, entry); + f->stats.lastused = jiffies - idle * HZ; + + return 0; diff --git a/target/linux/generic/pending-5.10/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch b/target/linux/generic/pending-5.10/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch new file mode 100644 index 0000000000..2ff0b341f9 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch @@ -0,0 +1,44 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:55:19 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: remove bridge flow offload + type entry support + +According to MediaTek, this feature is not supported in current hardware + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -84,13 +84,6 @@ static u32 mtk_ppe_hash_entry(struct mtk + u32 hash; + + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { +- case MTK_PPE_PKT_TYPE_BRIDGE: +- hv1 = e->bridge.src_mac_lo; +- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); +- hv2 = e->bridge.src_mac_hi >> 16; +- hv2 ^= e->bridge.dest_mac_lo; +- hv3 = e->bridge.dest_mac_hi; +- break; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; +@@ -572,7 +565,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | +- MTK_PPE_FLOW_CFG_L2_BRIDGE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str( + static const char * const type_str[] = { + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", +- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", diff --git a/target/linux/generic/pending-5.10/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch b/target/linux/generic/pending-5.10/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch new file mode 100644 index 
0000000000..99e597a3d1 --- /dev/null +++ b/target/linux/generic/pending-5.10/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch @@ -0,0 +1,553 @@ +From: Felix Fietkau +Date: Wed, 23 Feb 2022 10:56:34 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: support creating mac + address based offload entries + +This will be used to implement a limited form of bridge offloading. +Since the hardware does not support flow table entries with just source +and destination MAC address, the driver has to emulate it. + +The hardware automatically creates entries entries for incoming flows, even +when they are bridged instead of routed, and reports when packets for these +flows have reached the minimum PPS rate for offloading. + +After this happens, we look up the L2 flow offload entry based on the MAC +header and fill in the output routing information in the flow table. +The dynamically created per-flow entries are automatically removed when +either the hardware flowtable entry expires, is replaced, or if the offload +rule they belong to is removed + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -6,12 +6,22 @@ + #include + #include + #include ++#include ++#include ++#include + #include "mtk_eth_soc.h" + #include "mtk_ppe.h" + #include "mtk_ppe_regs.h" + + static DEFINE_SPINLOCK(ppe_lock); + ++static const struct rhashtable_params mtk_flow_l2_ht_params = { ++ .head_offset = offsetof(struct mtk_flow_entry, l2_node), ++ .key_offset = offsetof(struct mtk_flow_entry, data.bridge), ++ .key_len = offsetof(struct mtk_foe_bridge, key_end), ++ .automatic_shrinking = true, ++}; ++ + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) + { + writel(val, ppe->base + reg); +@@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e + { + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return &entry->bridge.l2; ++ + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + +@@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry * + { + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return &entry->bridge.ib2; ++ + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + +@@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + +- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) { ++ ether_addr_copy(entry->bridge.src_mac, src_mac); ++ ether_addr_copy(entry->bridge.dest_mac, dest_mac); ++ entry->bridge.ib2 = val; ++ l2 = &entry->bridge.l2; ++ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { +@@ -372,12 +393,96 @@ mtk_flow_entry_match(struct mtk_flow_ent + } + + static void ++__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ struct hlist_head *head; ++ struct hlist_node *tmp; ++ ++ if (entry->type == MTK_FLOW_TYPE_L2) { ++ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, ++ mtk_flow_l2_ht_params); ++ ++ head = &entry->l2_flows; ++ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) ++ __mtk_foe_entry_clear(ppe, entry); ++ return; ++ } ++ ++ hlist_del_init(&entry->list); ++ if (entry->hash != 0xffff) { ++ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; ++ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, ++ MTK_FOE_STATE_BIND); 
++ dma_wmb(); ++ } ++ entry->hash = 0xffff; ++ ++ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) ++ return; ++ ++ hlist_del_init(&entry->l2_data.list); ++ kfree(entry); ++} ++ ++static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) ++{ ++ u16 timestamp; ++ u16 now; ++ ++ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; ++ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ ++ if (timestamp > now) ++ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; ++ else ++ return now - timestamp; ++} ++ ++static void ++mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ struct mtk_flow_entry *cur; ++ struct mtk_foe_entry *hwe; ++ struct hlist_node *tmp; ++ int idle; ++ ++ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); ++ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { ++ int cur_idle; ++ u32 ib1; ++ ++ hwe = &ppe->foe_table[cur->hash]; ++ ib1 = READ_ONCE(hwe->ib1); ++ ++ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { ++ cur->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, cur); ++ continue; ++ } ++ ++ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); ++ if (cur_idle >= idle) ++ continue; ++ ++ idle = cur_idle; ++ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; ++ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ } ++} ++ ++static void + mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + struct mtk_foe_entry *hwe; + struct mtk_foe_entry foe; + + spin_lock_bh(&ppe_lock); ++ ++ if (entry->type == MTK_FLOW_TYPE_L2) { ++ mtk_flow_entry_update_l2(ppe, entry); ++ goto out; ++ } ++ + if (entry->hash == 0xffff) + goto out; + +@@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *p + void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + spin_lock_bh(&ppe_lock); +- hlist_del_init(&entry->list); +- if (entry->hash != 0xffff) { +- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; +- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, +- MTK_FOE_STATE_BIND); +- dma_wmb(); +- } +- entry->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, entry); + spin_unlock_bh(&ppe_lock); + } + ++static int ++mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ entry->type = MTK_FLOW_TYPE_L2; ++ ++ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, ++ mtk_flow_l2_ht_params); ++} ++ + int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { +- u32 hash = mtk_ppe_hash_entry(&entry->data); ++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); ++ u32 hash; ++ ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return mtk_foe_entry_commit_l2(ppe, entry); + ++ hash = mtk_ppe_hash_entry(&entry->data); + entry->hash = 0xffff; + spin_lock_bh(&ppe_lock); + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); +@@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe + return 0; + } + ++static void ++mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, ++ u16 hash) ++{ ++ struct mtk_flow_entry *flow_info; ++ struct mtk_foe_entry foe, *hwe; ++ struct mtk_foe_mac_info *l2; ++ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; ++ int type; ++ ++ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), ++ GFP_ATOMIC); ++ if (!flow_info) ++ return; ++ ++ flow_info->l2_data.base_flow = entry; ++ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; ++ flow_info->hash = hash; ++ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); ++ 
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); ++ ++ hwe = &ppe->foe_table[hash]; ++ memcpy(&foe, hwe, sizeof(foe)); ++ foe.ib1 &= ib1_mask; ++ foe.ib1 |= entry->data.ib1 & ~ib1_mask; ++ ++ l2 = mtk_foe_entry_l2(&foe); ++ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); ++ ++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); ++ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) ++ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); ++ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) ++ l2->etype = ETH_P_IPV6; ++ ++ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; ++ ++ __mtk_foe_entry_commit(ppe, &foe, hash); ++} ++ + void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) + { + struct hlist_head *head = &ppe->foe_flow[hash / 2]; +- struct mtk_flow_entry *entry; + struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; ++ struct mtk_flow_entry *entry; ++ struct mtk_foe_bridge key = {}; ++ struct ethhdr *eh; + bool found = false; +- +- if (hlist_empty(head)) +- return; ++ u8 *tag; + + spin_lock_bh(&ppe_lock); ++ ++ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) ++ goto out; ++ + hlist_for_each_entry(entry, head, list) { ++ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { ++ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == ++ MTK_FOE_STATE_BIND)) ++ continue; ++ ++ entry->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, entry); ++ continue; ++ } ++ + if (found || !mtk_flow_entry_match(entry, hwe)) { + if (entry->hash != 0xffff) + entry->hash = 0xffff; +@@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe + __mtk_foe_entry_commit(ppe, &entry->data, hash); + found = true; + } ++ ++ if (found) ++ goto out; ++ ++ eh = eth_hdr(skb); ++ ether_addr_copy(key.dest_mac, eh->h_dest); ++ ether_addr_copy(key.src_mac, eh->h_source); ++ tag = skb->data - 2; ++ key.vlan = 0; ++ switch (skb->protocol) { ++#if IS_ENABLED(CONFIG_NET_DSA) ++ case htons(ETH_P_XDSA): ++ if (!netdev_uses_dsa(skb->dev) || ++ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) ++ goto out; ++ ++ tag += 4; ++ if (get_unaligned_be16(tag) != ETH_P_8021Q) ++ break; ++ ++ fallthrough; ++#endif ++ case htons(ETH_P_8021Q): ++ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; ++ break; ++ default: ++ break; ++ } ++ ++ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); ++ if (!entry) ++ goto out; ++ ++ mtk_foe_entry_commit_subflow(ppe, entry, hash); ++ ++out: + spin_unlock_bh(&ppe_lock); + } + + int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { +- u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; +- u16 timestamp; +- + mtk_flow_entry_update(ppe, entry); +- timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + +- if (timestamp > now) +- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; +- else +- return now - timestamp; ++ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + } + + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, +@@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ + if (!ppe) + return NULL; + ++ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); ++ + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. 
+ */ +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -6,6 +6,7 @@ + + #include + #include ++#include + + #define MTK_ETH_PPE_BASE 0xc00 + +@@ -84,19 +85,16 @@ struct mtk_foe_mac_info { + u16 src_mac_lo; + }; + ++/* software-only entry type */ + struct mtk_foe_bridge { +- u32 dest_mac_hi; +- +- u16 src_mac_lo; +- u16 dest_mac_lo; ++ u8 dest_mac[ETH_ALEN]; ++ u8 src_mac[ETH_ALEN]; ++ u16 vlan; + +- u32 src_mac_hi; ++ struct {} key_end; + + u32 ib2; + +- u32 _rsv[5]; +- +- u32 udf_tsid; + struct mtk_foe_mac_info l2; + }; + +@@ -235,13 +233,33 @@ enum { + MTK_PPE_CPU_REASON_INVALID = 0x1f, + }; + ++enum { ++ MTK_FLOW_TYPE_L4, ++ MTK_FLOW_TYPE_L2, ++ MTK_FLOW_TYPE_L2_SUBFLOW, ++}; ++ + struct mtk_flow_entry { ++ union { ++ struct hlist_node list; ++ struct { ++ struct rhash_head l2_node; ++ struct hlist_head l2_flows; ++ }; ++ }; ++ u8 type; ++ s8 wed_index; ++ u16 hash; ++ union { ++ struct mtk_foe_entry data; ++ struct { ++ struct mtk_flow_entry *base_flow; ++ struct hlist_node list; ++ struct {} end; ++ } l2_data; ++ }; + struct rhash_head node; +- struct hlist_node list; + unsigned long cookie; +- struct mtk_foe_entry data; +- u16 hash; +- s8 wed_index; + }; + + struct mtk_ppe { +@@ -256,6 +274,8 @@ struct mtk_ppe { + u16 foe_check_time[MTK_PPE_ENTRIES]; + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; + ++ struct rhashtable l2_flows; ++ + void *acct_table; + }; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -32,6 +32,8 @@ struct mtk_flow_data { + __be16 src_port; + __be16 dst_port; + ++ u16 vlan_in; ++ + struct { + u16 id; + __be16 proto; +@@ -258,9 +260,45 @@ mtk_flow_offload_replace(struct mtk_eth + return -EOPNOTSUPP; + } + ++ switch (addr_type) { ++ case 0: ++ offload_type = MTK_PPE_PKT_TYPE_BRIDGE; ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { ++ struct flow_match_eth_addrs match; ++ ++ flow_rule_match_eth_addrs(rule, &match); ++ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); ++ memcpy(data.eth.h_source, match.key->src, ETH_ALEN); ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { ++ struct flow_match_vlan match; ++ ++ flow_rule_match_vlan(rule, &match); ++ ++ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) ++ return -EOPNOTSUPP; ++ ++ data.vlan_in = match.key->vlan_id; ++ } ++ break; ++ case FLOW_DISSECTOR_KEY_IPV4_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; ++ break; ++ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + mtk_flow_offload_mangle_eth(act, &data.eth); + break; +@@ -292,17 +330,6 @@ mtk_flow_offload_replace(struct mtk_eth + } + } + +- switch (addr_type) { +- case FLOW_DISSECTOR_KEY_IPV4_ADDRS: +- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; +- break; +- case FLOW_DISSECTOR_KEY_IPV6_ADDRS: +- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; +- break; +- default: +- return -EOPNOTSUPP; +- } +- + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; +@@ -316,10 +343,13 @@ mtk_flow_offload_replace(struct mtk_eth + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + ++ 
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; ++ + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; +- } else { ++ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { + return -EOPNOTSUPP; + } + +@@ -349,6 +379,9 @@ mtk_flow_offload_replace(struct mtk_eth + if (act->id != FLOW_ACTION_MANGLE) + continue; + ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; ++ + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: +@@ -374,6 +407,9 @@ mtk_flow_offload_replace(struct mtk_eth + return err; + } + ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ foe.bridge.vlan = data.vlan_in; ++ + if (data.vlan.num == 1) { + if (data.vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; diff --git a/target/linux/generic/pending-5.10/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch b/target/linux/generic/pending-5.10/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch deleted file mode 100644 index 7140c6877e..0000000000 --- a/target/linux/generic/pending-5.10/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch +++ /dev/null @@ -1,65 +0,0 @@ ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -20,6 +21,11 @@ struct mtk_flow_data { - __be32 src_addr; - __be32 dst_addr; - } v4; -+ -+ struct { -+ struct in6_addr src_addr; -+ struct in6_addr dst_addr; -+ } v6; - }; - - __be16 src_port; -@@ -64,6 +70,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en - data->v4.dst_addr, data->dst_port); - } - -+static int -+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) -+{ -+ return mtk_foe_entry_set_ipv6_tuple(foe, -+ data->v6.src_addr.s6_addr32, data->src_port, -+ data->v6.dst_addr.s6_addr32, data->dst_port); -+} -+ - static void - mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) - { -@@ -254,6 +268,9 @@ mtk_flow_offload_replace(struct mtk_eth - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; - break; -+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: -+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; -+ break; - default: - return -EOPNOTSUPP; - } -@@ -289,6 +306,17 @@ mtk_flow_offload_replace(struct mtk_eth - mtk_flow_set_ipv4_addr(&foe, &data, false); - } - -+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { -+ struct flow_match_ipv6_addrs addrs; -+ -+ flow_rule_match_ipv6_addrs(rule, &addrs); -+ -+ data.v6.src_addr = addrs.key->src; -+ data.v6.dst_addr = addrs.key->dst; -+ -+ mtk_flow_set_ipv6_addr(&foe, &data); -+ } -+ - flow_action_for_each(i, act, &rule->action) { - if (act->id != FLOW_ACTION_MANGLE) - continue; diff --git a/target/linux/generic/pending-5.15/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/generic/pending-5.15/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch new file mode 100644 index 0000000000..ebecbfb067 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch @@ -0,0 +1,327 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 17:59:07 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent + DMA + +It improves performance by eliminating the need for a cache flush on rx and tx +In preparation for supporting WED (Wireless Ethernet Dispatch), also add a 
+function for disabling coherent DMA at runtime. + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -828,7 +829,7 @@ static int mtk_init_fq_dma(struct mtk_et + dma_addr_t dma_addr; + int i; + +- eth->scratch_ring = dma_alloc_coherent(eth->dev, ++ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, + cnt * sizeof(struct mtk_tx_dma), + ð->phy_scratch_ring, + GFP_ATOMIC); +@@ -840,10 +841,10 @@ static int mtk_init_fq_dma(struct mtk_et + if (unlikely(!eth->scratch_head)) + return -ENOMEM; + +- dma_addr = dma_map_single(eth->dev, ++ dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; + + phy_ring_tail = eth->phy_scratch_ring + +@@ -897,26 +898,26 @@ static void mtk_tx_unmap(struct mtk_eth + { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { +- dma_unmap_single(eth->dev, ++ dma_unmap_single(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { +- dma_unmap_page(eth->dev, ++ dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); +@@ -994,9 +995,9 @@ static int mtk_tx_map(struct sk_buff *sk + if (skb_vlan_tag_present(skb)) + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); + +- mapped_addr = dma_map_single(eth->dev, skb->data, ++ mapped_addr = dma_map_single(eth->dma_dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + return -ENOMEM; + + WRITE_ONCE(itxd->txd1, mapped_addr); +@@ -1035,10 +1036,10 @@ static int mtk_tx_map(struct sk_buff *sk + + + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); +- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, ++ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + goto err_dma; + + if (i == nr_frags - 1 && +@@ -1316,18 +1317,18 @@ static int mtk_poll_rx(struct napi_struc + netdev->stats.rx_dropped++; + goto release_desc; + } +- dma_addr = dma_map_single(eth->dev, ++ dma_addr = dma_map_single(eth->dma_dev, + new_data + NET_SKB_PAD + + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { + skb_free_frag(new_data); + netdev->stats.rx_dropped++; + goto release_desc; + } + +- dma_unmap_single(eth->dev, trxd.rxd1, ++ dma_unmap_single(eth->dma_dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + + /* receive data */ +@@ -1600,7 +1601,7 @@ static int mtk_tx_alloc(struct mtk_eth * + if 
(!ring->buf) + goto no_tx_mem; + +- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, ++ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); + if (!ring->dma) + goto no_tx_mem; +@@ -1618,7 +1619,7 @@ static int mtk_tx_alloc(struct mtk_eth * + * descriptors in ring->dma_pdma. + */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { +- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, ++ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, + GFP_ATOMIC); + if (!ring->dma_pdma) +@@ -1677,7 +1678,7 @@ static void mtk_tx_clean(struct mtk_eth + } + + if (ring->dma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); +@@ -1685,7 +1686,7 @@ static void mtk_tx_clean(struct mtk_eth + } + + if (ring->dma_pdma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(*ring->dma_pdma), + ring->dma_pdma, + ring->phys_pdma); +@@ -1730,18 +1731,18 @@ static int mtk_rx_alloc(struct mtk_eth * + return -ENOMEM; + } + +- ring->dma = dma_alloc_coherent(eth->dev, ++ ring->dma = dma_alloc_coherent(eth->dma_dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < rx_dma_size; i++) { +- dma_addr_t dma_addr = dma_map_single(eth->dev, ++ dma_addr_t dma_addr = dma_map_single(eth->dma_dev, + ring->data[i] + NET_SKB_PAD + eth->ip_align, + ring->buf_size, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) ++ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; + ring->dma[i].rxd1 = (unsigned int)dma_addr; + +@@ -1777,7 +1778,7 @@ static void mtk_rx_clean(struct mtk_eth + continue; + if (!ring->dma[i].rxd1) + continue; +- dma_unmap_single(eth->dev, ++ dma_unmap_single(eth->dma_dev, + ring->dma[i].rxd1, + ring->buf_size, + DMA_FROM_DEVICE); +@@ -1788,7 +1789,7 @@ static void mtk_rx_clean(struct mtk_eth + } + + if (ring->dma) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + ring->dma_size * sizeof(*ring->dma), + ring->dma, + ring->phys); +@@ -2141,7 +2142,7 @@ static void mtk_dma_free(struct mtk_eth + if (eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + if (eth->scratch_ring) { +- dma_free_coherent(eth->dev, ++ dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), + eth->scratch_ring, + eth->phy_scratch_ring); +@@ -2491,6 +2492,8 @@ static void mtk_dim_tx(struct work_struc + + static int mtk_hw_init(struct mtk_eth *eth) + { ++ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | ++ ETHSYS_DMA_AG_MAP_PPE; + int i, val, ret; + + if (test_and_set_bit(MTK_HW_INIT, ð->state)) +@@ -2503,6 +2506,10 @@ static int mtk_hw_init(struct mtk_eth *e + if (ret) + goto err_disable_pm; + ++ if (eth->ethsys) ++ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, ++ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = device_reset(eth->dev); + if (ret) { +@@ -3056,6 +3063,35 @@ free_netdev: + return err; + } + ++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) ++{ ++ struct net_device *dev, *tmp; ++ LIST_HEAD(dev_list); ++ int i; ++ ++ rtnl_lock(); ++ ++ for (i = 0; i < MTK_MAC_COUNT; i++) { ++ dev = eth->netdev[i]; ++ ++ if (!dev || !(dev->flags & IFF_UP)) ++ continue; ++ ++ list_add_tail(&dev->close_list, &dev_list); ++ } ++ ++ dev_close_many(&dev_list, 
false); ++ ++ eth->dma_dev = dma_dev; ++ ++ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { ++ list_del_init(&dev->close_list); ++ dev_open(dev, NULL); ++ } ++ ++ rtnl_unlock(); ++} ++ + static int mtk_probe(struct platform_device *pdev) + { + struct device_node *mac_np; +@@ -3069,6 +3105,7 @@ static int mtk_probe(struct platform_dev + eth->soc = of_device_get_match_data(&pdev->dev); + + eth->dev = &pdev->dev; ++ eth->dma_dev = &pdev->dev; + eth->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eth->base)) + return PTR_ERR(eth->base); +@@ -3117,6 +3154,16 @@ static int mtk_probe(struct platform_dev + } + } + ++ if (of_dma_is_coherent(pdev->dev.of_node)) { ++ struct regmap *cci; ++ ++ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, ++ "mediatek,cci-control"); ++ /* enable CPU/bus coherency */ ++ if (!IS_ERR(cci)) ++ regmap_write(cci, 0, 3); ++ } ++ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), + GFP_KERNEL); +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -462,6 +462,12 @@ + #define RSTCTRL_FE BIT(6) + #define RSTCTRL_PPE BIT(31) + ++/* ethernet dma channel agent map */ ++#define ETHSYS_DMA_AG_MAP 0x408 ++#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) ++#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) ++#define ETHSYS_DMA_AG_MAP_PPE BIT(2) ++ + /* SGMII subsystem config registers */ + /* Register to auto-negotiation restart */ + #define SGMSYS_PCS_CONTROL_1 0x0 +@@ -879,6 +885,7 @@ struct mtk_sgmii { + /* struct mtk_eth - This is the main datasructure for holding the state + * of the driver + * @dev: The device pointer ++ * @dev: The device pointer used for dma mapping/alloc + * @base: The mapped register i/o base + * @page_lock: Make sure that register operations are atomic + * @tx_irq__lock: Make sure that IRQ register operations are atomic +@@ -922,6 +929,7 @@ struct mtk_sgmii { + + struct mtk_eth { + struct device *dev; ++ struct device *dma_dev; + void __iomem *base; + spinlock_t page_lock; + spinlock_t tx_irq_lock; +@@ -1020,6 +1028,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk + int mtk_eth_offload_init(struct mtk_eth *eth); + int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); ++void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); + + + #endif /* MTK_ETH_H */ diff --git a/target/linux/generic/pending-5.15/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch b/target/linux/generic/pending-5.15/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch new file mode 100644 index 0000000000..d9015d4805 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-01-arm64-dts-mediatek-mt7622-add-support-for-coherent-D.patch @@ -0,0 +1,30 @@ +From: Felix Fietkau +Date: Mon, 7 Feb 2022 10:27:22 +0100 +Subject: [PATCH] arm64: dts: mediatek: mt7622: add support for coherent + DMA + +It improves performance by eliminating the need for a cache flush on rx and tx + +Signed-off-by: Felix Fietkau +--- + +--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi +@@ -357,7 +357,7 @@ + }; + + cci_control2: slave-if@5000 { +- compatible = "arm,cci-400-ctrl-if"; ++ compatible = "arm,cci-400-ctrl-if", "syscon"; + interface-type = "ace"; + reg = <0x5000 0x1000>; + }; +@@ -937,6 +937,8 @@ + power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; + mediatek,ethsys = <ðsys>; + mediatek,sgmiisys = <&sgmiisys>; ++ mediatek,cci-control = <&cci_control2>; ++ 
dma-coherent; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; diff --git a/target/linux/generic/pending-5.15/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch b/target/linux/generic/pending-5.15/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch new file mode 100644 index 0000000000..84642ff146 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch @@ -0,0 +1,1679 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 17:56:08 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for Wireless + Ethernet Dispatch (WED) + +The Wireless Ethernet Dispatch subsystem on the MT7622 SoC can be +configured to intercept and handle access to the DMA queues and +PCIe interrupts for a MT7615/MT7915 wireless card. +It can manage the internal WDMA (Wireless DMA) controller, which allows +ethernet packets to be passed from the packet switch engine (PSE) to the +wireless card, bypassing the CPU entirely. +This can be used to implement hardware flow offloading from ethernet to +WLAN. + +Signed-off-by: Felix Fietkau +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h + create mode 100644 include/linux/soc/mediatek/mtk_wed.h + +--- a/drivers/net/ethernet/mediatek/Kconfig ++++ b/drivers/net/ethernet/mediatek/Kconfig +@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK + + if NET_VENDOR_MEDIATEK + ++config NET_MEDIATEK_SOC_WED ++ depends on ARCH_MEDIATEK || COMPILE_TEST ++ def_bool NET_MEDIATEK_SOC != n ++ + config NET_MEDIATEK_SOC + tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -5,4 +5,9 @@ + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o + mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o ++ifdef CONFIG_DEBUG_FS ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o ++endif ++obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o + obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -24,6 +24,7 @@ + #include + + #include "mtk_eth_soc.h" ++#include "mtk_wed.h" + + static int mtk_msg_level = -1; + module_param_named(msg_level, mtk_msg_level, int, 0); +@@ -3186,6 +3187,22 @@ static int mtk_probe(struct platform_dev + } + } + ++ for (i = 0;; i++) { ++ struct device_node *np = of_parse_phandle(pdev->dev.of_node, ++ "mediatek,wed", i); ++ static const u32 wdma_regs[] = { ++ MTK_WDMA0_BASE, ++ MTK_WDMA1_BASE ++ }; ++ void __iomem *wdma; ++ ++ if (!np || i >= ARRAY_SIZE(wdma_regs)) ++ break; ++ ++ wdma = eth->base + wdma_regs[i]; ++ mtk_wed_add_hw(np, eth, wdma, i); ++ } ++ + for (i = 0; i < 3; i++) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) + eth->irq[i] = eth->irq[0]; +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -295,6 +295,9 @@ + #define MTK_GDM1_TX_GPCNT 0x2438 + #define MTK_STAT_OFFSET 0x40 + ++#define MTK_WDMA0_BASE 0x2800 ++#define MTK_WDMA1_BASE 0x2c00 ++ + /* QDMA descriptor txd4 */ + #define TX_DMA_CHKSUM 
(0x7 << 29) + #define TX_DMA_TSO BIT(28) +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -0,0 +1,875 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mtk_eth_soc.h" ++#include "mtk_wed_regs.h" ++#include "mtk_wed.h" ++#include "mtk_ppe.h" ++ ++#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) ++ ++#define MTK_WED_PKT_SIZE 1900 ++#define MTK_WED_BUF_SIZE 2048 ++#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) ++ ++#define MTK_WED_TX_RING_SIZE 2048 ++#define MTK_WED_WDMA_RING_SIZE 1024 ++ ++static struct mtk_wed_hw *hw_list[2]; ++static DEFINE_MUTEX(hw_lock); ++ ++static void ++wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) ++{ ++ regmap_update_bits(dev->hw->regs, reg, mask | val, val); ++} ++ ++static void ++wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ return wed_m32(dev, reg, 0, mask); ++} ++ ++static void ++wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ return wed_m32(dev, reg, mask, 0); ++} ++ ++static void ++wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) ++{ ++ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); ++} ++ ++static void ++wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ wdma_m32(dev, reg, 0, mask); ++} ++ ++static u32 ++mtk_wed_read_reset(struct mtk_wed_device *dev) ++{ ++ return wed_r32(dev, MTK_WED_RESET); ++} ++ ++static void ++mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) ++{ ++ u32 status; ++ ++ wed_w32(dev, MTK_WED_RESET, mask); ++ if (readx_poll_timeout(mtk_wed_read_reset, dev, status, ++ !(status & mask), 0, 1000)) ++ WARN_ON_ONCE(1); ++} ++ ++static struct mtk_wed_hw * ++mtk_wed_assign(struct mtk_wed_device *dev) ++{ ++ struct mtk_wed_hw *hw; ++ ++ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; ++ if (!hw || hw->wed_dev) ++ return NULL; ++ ++ hw->wed_dev = dev; ++ return hw; ++} ++ ++static int ++mtk_wed_buffer_alloc(struct mtk_wed_device *dev) ++{ ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ void **page_list; ++ int token = dev->wlan.token_start; ++ int ring_size; ++ int n_pages; ++ int i, page_idx; ++ ++ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); ++ n_pages = ring_size / MTK_WED_BUF_PER_PAGE; ++ ++ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); ++ if (!page_list) ++ return -ENOMEM; ++ ++ dev->buf_ring.size = ring_size; ++ dev->buf_ring.pages = page_list; ++ ++ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), ++ &desc_phys, GFP_KERNEL); ++ if (!desc) ++ return -ENOMEM; ++ ++ dev->buf_ring.desc = desc; ++ dev->buf_ring.desc_phys = desc_phys; ++ ++ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { ++ dma_addr_t page_phys, buf_phys; ++ struct page *page; ++ void *buf; ++ int s; ++ ++ page = __dev_alloc_pages(GFP_KERNEL, 0); ++ if (!page) ++ return -ENOMEM; ++ ++ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ if (dma_mapping_error(dev->hw->dev, page_phys)) { ++ __free_page(page); ++ return -ENOMEM; ++ } ++ ++ page_list[page_idx++] = page; ++ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ ++ buf = page_to_virt(page); ++ buf_phys = page_phys; ++ ++ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { ++ u32 txd_size; ++ ++ txd_size = dev->wlan.init_buf(buf, buf_phys, token++); ++ ++ desc->buf0 = buf_phys; ++ desc->buf1 = buf_phys + 
txd_size; ++ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, ++ txd_size) | ++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, ++ MTK_WED_BUF_SIZE - txd_size) | ++ MTK_WDMA_DESC_CTRL_LAST_SEG1; ++ desc->info = 0; ++ desc++; ++ ++ buf += MTK_WED_BUF_SIZE; ++ buf_phys += MTK_WED_BUF_SIZE; ++ } ++ ++ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ } ++ ++ return 0; ++} ++ ++static void ++mtk_wed_free_buffer(struct mtk_wed_device *dev) ++{ ++ struct mtk_wdma_desc *desc = dev->buf_ring.desc; ++ void **page_list = dev->buf_ring.pages; ++ int page_idx; ++ int i; ++ ++ if (!page_list) ++ return; ++ ++ if (!desc) ++ goto free_pagelist; ++ ++ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { ++ void *page = page_list[page_idx++]; ++ ++ if (!page) ++ break; ++ ++ dma_unmap_page(dev->hw->dev, desc[i].buf0, ++ PAGE_SIZE, DMA_BIDIRECTIONAL); ++ __free_page(page); ++ } ++ ++ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), ++ desc, dev->buf_ring.desc_phys); ++ ++free_pagelist: ++ kfree(page_list); ++} ++ ++static void ++mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) ++{ ++ if (!ring->desc) ++ return; ++ ++ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), ++ ring->desc, ring->desc_phys); ++} ++ ++static void ++mtk_wed_free_tx_rings(struct mtk_wed_device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) ++ mtk_wed_free_ring(dev, &dev->tx_ring[i]); ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) ++ mtk_wed_free_ring(dev, &dev->tx_wdma[i]); ++} ++ ++static void ++mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) ++{ ++ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; ++ ++ if (!dev->hw->num_flows) ++ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; ++ ++ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); ++ wed_r32(dev, MTK_WED_EXT_INT_MASK); ++} ++ ++static void ++mtk_wed_stop(struct mtk_wed_device *dev) ++{ ++ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); ++ mtk_wed_set_ext_int(dev, false); ++ ++ wed_clr(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); ++ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); ++ wdma_w32(dev, MTK_WDMA_INT_MASK, 0); ++ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); ++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); ++ ++ wed_clr(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_TX_DMA_EN | ++ MTK_WED_GLO_CFG_RX_DMA_EN); ++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); ++} ++ ++static void ++mtk_wed_detach(struct mtk_wed_device *dev) ++{ ++ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; ++ struct mtk_wed_hw *hw = dev->hw; ++ ++ mutex_lock(&hw_lock); ++ ++ mtk_wed_stop(dev); ++ ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ ++ mtk_wed_free_buffer(dev); ++ mtk_wed_free_tx_rings(dev); ++ ++ if (of_dma_is_coherent(wlan_node)) ++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, ++ BIT(hw->index), BIT(hw->index)); ++ ++ if (!hw_list[!hw->index]->wed_dev && ++ hw->eth->dma_dev != hw->eth->dev) ++ mtk_eth_set_dma_device(hw->eth, hw->eth->dev); ++ ++ memset(dev, 0, sizeof(*dev)); ++ module_put(THIS_MODULE); ++ ++ hw->wed_dev = NULL; ++ mutex_unlock(&hw_lock); ++} ++ ++static void ++mtk_wed_hw_init_early(struct mtk_wed_device *dev) ++{ ++ u32 mask, set; ++ u32 offset; ++ ++ mtk_wed_stop(dev); ++ mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ ++ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | ++ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | ++ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; ++ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | ++ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | ++ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; ++ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); ++ ++ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); ++ ++ offset = dev->hw->index ? 
0x04000400 : 0; ++ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); ++ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); ++ ++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); ++} ++ ++static void ++mtk_wed_hw_init(struct mtk_wed_device *dev) ++{ ++ if (dev->init_done) ++ return; ++ ++ dev->init_done = true; ++ mtk_wed_set_ext_int(dev, false); ++ wed_w32(dev, MTK_WED_TX_BM_CTRL, ++ MTK_WED_TX_BM_CTRL_PAUSE | ++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, ++ dev->buf_ring.size / 128) | ++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, ++ MTK_WED_TX_RING_SIZE / 256)); ++ ++ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); ++ ++ wed_w32(dev, MTK_WED_TX_BM_TKID, ++ FIELD_PREP(MTK_WED_TX_BM_TKID_START, ++ dev->wlan.token_start) | ++ FIELD_PREP(MTK_WED_TX_BM_TKID_END, ++ dev->wlan.token_start + dev->wlan.nbuf - 1)); ++ ++ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); ++ ++ wed_w32(dev, MTK_WED_TX_BM_DYN_THR, ++ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | ++ MTK_WED_TX_BM_DYN_THR_HI); ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); ++ ++ wed_set(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ ++ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); ++} ++ ++static void ++mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) ++{ ++ int i; ++ ++ for (i = 0; i < size; i++) { ++ desc[i].buf0 = 0; ++ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); ++ desc[i].buf1 = 0; ++ desc[i].info = 0; ++ } ++} ++ ++static u32 ++mtk_wed_check_busy(struct mtk_wed_device *dev) ++{ ++ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) ++ return true; ++ ++ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) ++ return true; ++ ++ if (wed_r32(dev, MTK_WED_CTRL) & ++ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) ++ return true; ++ ++ return false; ++} ++ ++static int ++mtk_wed_poll_busy(struct mtk_wed_device *dev) ++{ ++ int sleep = 15000; ++ int timeout = 100 * sleep; ++ u32 val; ++ ++ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, ++ timeout, false, dev); ++} ++ ++static void ++mtk_wed_reset_dma(struct mtk_wed_device *dev) ++{ ++ bool busy = false; ++ u32 val; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { ++ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; ++ ++ if (!desc) ++ continue; ++ ++ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); ++ } ++ ++ if (mtk_wed_poll_busy(dev)) ++ busy = mtk_wed_check_busy(dev); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); ++ } else { ++ wed_w32(dev, MTK_WED_RESET_IDX, ++ MTK_WED_RESET_IDX_TX | ++ MTK_WED_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_RESET_IDX, 0); ++ } ++ ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); ++ } else { ++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, ++ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); ++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); ++ ++ wed_set(dev, MTK_WED_WDMA_GLO_CFG, ++ 
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); ++ ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); ++ } ++ ++ for (i = 0; i < 100; i++) { ++ val = wed_r32(dev, MTK_WED_TX_BM_INTF); ++ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) ++ break; ++ } ++ ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); ++ ++ if (busy) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); ++ } else { ++ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, ++ MTK_WED_WPDMA_RESET_IDX_TX | ++ MTK_WED_WPDMA_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); ++ } ++ ++} ++ ++static int ++mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, ++ int size) ++{ ++ ring->desc = dma_alloc_coherent(dev->hw->dev, ++ size * sizeof(*ring->desc), ++ &ring->desc_phys, GFP_KERNEL); ++ if (!ring->desc) ++ return -ENOMEM; ++ ++ ring->size = size; ++ mtk_wed_ring_reset(ring->desc, size); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++{ ++ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; ++ ++ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) ++ return -ENOMEM; ++ ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, ++ size); ++ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, ++ size); ++ ++ return 0; ++} ++ ++static void ++mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) ++{ ++ u32 wdma_mask; ++ u32 val; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) ++ if (!dev->tx_wdma[i].desc) ++ mtk_wed_wdma_ring_setup(dev, i, 16); ++ ++ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); ++ ++ mtk_wed_hw_init(dev); ++ ++ wed_set(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ ++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); ++ ++ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, ++ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | ++ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); ++ ++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, ++ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); ++ ++ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); ++ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); ++ ++ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); ++ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); ++ ++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); ++ wed_w32(dev, MTK_WED_INT_MASK, irq_mask); ++ ++ wed_set(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_TX_DMA_EN | ++ MTK_WED_GLO_CFG_RX_DMA_EN); ++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); ++ wed_set(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); ++ ++ mtk_wed_set_ext_int(dev, true); ++ val = dev->wlan.wpdma_phys | ++ MTK_PCIE_MIRROR_MAP_EN | ++ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); ++ ++ if (dev->hw->index) ++ val |= BIT(1); ++ val |= BIT(0); ++ regmap_write(dev->hw->mirror, dev->hw->index * 4, val); ++ ++ dev->running = true; ++} ++ ++static int ++mtk_wed_attach(struct mtk_wed_device *dev) ++ __releases(RCU) ++{ ++ struct mtk_wed_hw *hw; ++ int ret 
= 0; ++ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), ++ "mtk_wed_attach without holding the RCU read lock"); ++ ++ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || ++ !try_module_get(THIS_MODULE)) ++ ret = -ENODEV; ++ ++ rcu_read_unlock(); ++ ++ if (ret) ++ return ret; ++ ++ mutex_lock(&hw_lock); ++ ++ hw = mtk_wed_assign(dev); ++ if (!hw) { ++ module_put(THIS_MODULE); ++ ret = -ENODEV; ++ goto out; ++ } ++ ++ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); ++ ++ dev->hw = hw; ++ dev->dev = hw->dev; ++ dev->irq = hw->irq; ++ dev->wdma_idx = hw->index; ++ ++ if (hw->eth->dma_dev == hw->eth->dev && ++ of_dma_is_coherent(hw->eth->dev->of_node)) ++ mtk_eth_set_dma_device(hw->eth, hw->dev); ++ ++ ret = mtk_wed_buffer_alloc(dev); ++ if (ret) { ++ mtk_wed_detach(dev); ++ goto out; ++ } ++ ++ mtk_wed_hw_init_early(dev); ++ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); ++ ++out: ++ mutex_unlock(&hw_lock); ++ ++ return ret; ++} ++ ++static int ++mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) ++{ ++ struct mtk_wed_ring *ring = &dev->tx_ring[idx]; ++ ++ /* ++ * Tx ring redirection: ++ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN ++ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) ++ * registers. ++ * ++ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it ++ * into MTK_WED_WPDMA_RING_TX(n) registers. ++ * It gets filled with packets picked up from WED TX ring and from ++ * WDMA RX. ++ */ ++ ++ BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); ++ ++ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) ++ return -ENOMEM; ++ ++ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ return -ENOMEM; ++ ++ ring->reg_base = MTK_WED_RING_TX(idx); ++ ring->wpdma = regs; ++ ++ /* WED -> WPDMA */ ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); ++ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, ++ ring->desc_phys); ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, ++ MTK_WED_TX_RING_SIZE); ++ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) ++{ ++ struct mtk_wed_ring *ring = &dev->txfree_ring; ++ int i; ++ ++ /* ++ * For txfree event handling, the same DMA ring is shared between WED ++ * and WLAN. 
The WLAN driver accesses the ring index registers through ++ * WED ++ */ ++ ring->reg_base = MTK_WED_RING_RX(1); ++ ring->wpdma = regs; ++ ++ for (i = 0; i < 12; i += 4) { ++ u32 val = readl(regs + i); ++ ++ wed_w32(dev, MTK_WED_RING_RX(1) + i, val); ++ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); ++ } ++ ++ return 0; ++} ++ ++static u32 ++mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) ++{ ++ u32 val; ++ ++ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); ++ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); ++ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; ++ if (!dev->hw->num_flows) ++ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; ++ if (val && net_ratelimit()) ++ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); ++ ++ val = wed_r32(dev, MTK_WED_INT_STATUS); ++ val &= mask; ++ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ ++ ++ return val; ++} ++ ++static void ++mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) ++{ ++ if (!dev->running) ++ return; ++ ++ mtk_wed_set_ext_int(dev, !!mask); ++ wed_w32(dev, MTK_WED_INT_MASK, mask); ++} ++ ++int mtk_wed_flow_add(int index) ++{ ++ struct mtk_wed_hw *hw = hw_list[index]; ++ int ret; ++ ++ if (!hw || !hw->wed_dev) ++ return -ENODEV; ++ ++ if (hw->num_flows) { ++ hw->num_flows++; ++ return 0; ++ } ++ ++ mutex_lock(&hw_lock); ++ if (!hw->wed_dev) { ++ ret = -ENODEV; ++ goto out; ++ } ++ ++ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); ++ if (!ret) ++ hw->num_flows++; ++ mtk_wed_set_ext_int(hw->wed_dev, true); ++ ++out: ++ mutex_unlock(&hw_lock); ++ ++ return ret; ++} ++ ++void mtk_wed_flow_remove(int index) ++{ ++ struct mtk_wed_hw *hw = hw_list[index]; ++ ++ if (!hw) ++ return; ++ ++ if (--hw->num_flows) ++ return; ++ ++ mutex_lock(&hw_lock); ++ if (!hw->wed_dev) ++ goto out; ++ ++ hw->wed_dev->wlan.offload_disable(hw->wed_dev); ++ mtk_wed_set_ext_int(hw->wed_dev, true); ++ ++out: ++ mutex_unlock(&hw_lock); ++} ++ ++void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index) ++{ ++ static const struct mtk_wed_ops wed_ops = { ++ .attach = mtk_wed_attach, ++ .tx_ring_setup = mtk_wed_tx_ring_setup, ++ .txfree_ring_setup = mtk_wed_txfree_ring_setup, ++ .start = mtk_wed_start, ++ .stop = mtk_wed_stop, ++ .reset_dma = mtk_wed_reset_dma, ++ .reg_read = wed_r32, ++ .reg_write = wed_w32, ++ .irq_get = mtk_wed_irq_get, ++ .irq_set_mask = mtk_wed_irq_set_mask, ++ .detach = mtk_wed_detach, ++ }; ++ struct device_node *eth_np = eth->dev->of_node; ++ struct platform_device *pdev; ++ struct mtk_wed_hw *hw; ++ struct regmap *regs; ++ int irq; ++ ++ if (!np) ++ return; ++ ++ pdev = of_find_device_by_node(np); ++ if (!pdev) ++ return; ++ ++ get_device(&pdev->dev); ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return; ++ ++ regs = syscon_regmap_lookup_by_phandle(np, NULL); ++ if (!regs) ++ return; ++ ++ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); ++ ++ mutex_lock(&hw_lock); ++ ++ if (WARN_ON(hw_list[index])) ++ goto unlock; ++ ++ hw = kzalloc(sizeof(*hw), GFP_KERNEL); ++ hw->node = np; ++ hw->regs = regs; ++ hw->eth = eth; ++ hw->dev = &pdev->dev; ++ hw->wdma = wdma; ++ hw->index = index; ++ hw->irq = irq; ++ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, ++ "mediatek,pcie-mirror"); ++ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, ++ "mediatek,hifsys"); ++ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { ++ kfree(hw); ++ goto unlock; ++ } ++ ++ if (!index) { ++ regmap_write(hw->mirror, 0, 0); ++ regmap_write(hw->mirror, 4, 0); ++ } ++ mtk_wed_hw_add_debugfs(hw); ++ ++ 
hw_list[index] = hw; ++ ++unlock: ++ mutex_unlock(&hw_lock); ++} ++ ++void mtk_wed_exit(void) ++{ ++ int i; ++ ++ rcu_assign_pointer(mtk_soc_wed_ops, NULL); ++ ++ synchronize_rcu(); ++ ++ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { ++ struct mtk_wed_hw *hw; ++ ++ hw = hw_list[i]; ++ if (!hw) ++ continue; ++ ++ hw_list[i] = NULL; ++ debugfs_remove(hw->debugfs_dir); ++ put_device(hw->dev); ++ kfree(hw); ++ } ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -0,0 +1,128 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#ifndef __MTK_WED_PRIV_H ++#define __MTK_WED_PRIV_H ++ ++#include ++#include ++#include ++ ++struct mtk_eth; ++ ++struct mtk_wed_hw { ++ struct device_node *node; ++ struct mtk_eth *eth; ++ struct regmap *regs; ++ struct regmap *hifsys; ++ struct device *dev; ++ void __iomem *wdma; ++ struct regmap *mirror; ++ struct dentry *debugfs_dir; ++ struct mtk_wed_device *wed_dev; ++ u32 debugfs_reg; ++ u32 num_flows; ++ char dirname[5]; ++ int irq; ++ int index; ++}; ++ ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++static inline void ++wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ regmap_write(dev->hw->regs, reg, val); ++} ++ ++static inline u32 ++wed_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ unsigned int val; ++ ++ regmap_read(dev->hw->regs, reg, &val); ++ ++ return val; ++} ++ ++static inline void ++wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ writel(val, dev->hw->wdma + reg); ++} ++ ++static inline u32 ++wdma_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ return readl(dev->hw->wdma + reg); ++} ++ ++static inline u32 ++wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) ++{ ++ if (!dev->tx_ring[ring].wpdma) ++ return 0; ++ ++ return readl(dev->tx_ring[ring].wpdma + reg); ++} ++ ++static inline void ++wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) ++{ ++ if (!dev->tx_ring[ring].wpdma) ++ return; ++ ++ writel(val, dev->tx_ring[ring].wpdma + reg); ++} ++ ++static inline u32 ++wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ if (!dev->txfree_ring.wpdma) ++ return 0; ++ ++ return readl(dev->txfree_ring.wpdma + reg); ++} ++ ++static inline void ++wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ if (!dev->txfree_ring.wpdma) ++ return; ++ ++ writel(val, dev->txfree_ring.wpdma + reg); ++} ++ ++void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index); ++void mtk_wed_exit(void); ++int mtk_wed_flow_add(int index); ++void mtk_wed_flow_remove(int index); ++#else ++static inline void ++mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, ++ void __iomem *wdma, int index) ++{ ++} ++static inline void ++mtk_wed_exit(void) ++{ ++} ++static inline int mtk_wed_flow_add(int index) ++{ ++ return -EINVAL; ++} ++static inline void mtk_wed_flow_remove(int index) ++{ ++} ++#endif ++ ++#ifdef CONFIG_DEBUG_FS ++void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); ++#else ++static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) ++{ ++} ++#endif ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +@@ -0,0 +1,175 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2021 Felix Fietkau */ ++ ++#include ++#include "mtk_wed.h" ++#include "mtk_wed_regs.h" ++ ++struct reg_dump { ++ const char *name; ++ u16 offset; ++ u8 type; ++ u8 base; ++}; ++ ++enum { ++ DUMP_TYPE_STRING, ++ DUMP_TYPE_WED, ++ DUMP_TYPE_WDMA, ++ DUMP_TYPE_WPDMA_TX, ++ DUMP_TYPE_WPDMA_TXFREE, ++}; 
++ ++#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } ++#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } ++#define DUMP_RING(_prefix, _base, ...) \ ++ { _prefix " BASE", _base, __VA_ARGS__ }, \ ++ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ ++ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ ++ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } ++ ++#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) ++#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) ++ ++#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) ++#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) ++ ++#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) ++#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) ++ ++static void ++print_reg_val(struct seq_file *s, const char *name, u32 val) ++{ ++ seq_printf(s, "%-32s %08x\n", name, val); ++} ++ ++static void ++dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, ++ const struct reg_dump *regs, int n_regs) ++{ ++ const struct reg_dump *cur; ++ u32 val; ++ ++ for (cur = regs; cur < ®s[n_regs]; cur++) { ++ switch (cur->type) { ++ case DUMP_TYPE_STRING: ++ seq_printf(s, "%s======== %s:\n", ++ cur > regs ? "\n" : "", ++ cur->name); ++ continue; ++ case DUMP_TYPE_WED: ++ val = wed_r32(dev, cur->offset); ++ break; ++ case DUMP_TYPE_WDMA: ++ val = wdma_r32(dev, cur->offset); ++ break; ++ case DUMP_TYPE_WPDMA_TX: ++ val = wpdma_tx_r32(dev, cur->base, cur->offset); ++ break; ++ case DUMP_TYPE_WPDMA_TXFREE: ++ val = wpdma_txfree_r32(dev, cur->offset); ++ break; ++ } ++ print_reg_val(s, cur->name, val); ++ } ++} ++ ++ ++static int ++wed_txinfo_show(struct seq_file *s, void *data) ++{ ++ static const struct reg_dump regs[] = { ++ DUMP_STR("WED TX"), ++ DUMP_WED(WED_TX_MIB(0)), ++ DUMP_WED_RING(WED_RING_TX(0)), ++ ++ DUMP_WED(WED_TX_MIB(1)), ++ DUMP_WED_RING(WED_RING_TX(1)), ++ ++ DUMP_STR("WPDMA TX"), ++ DUMP_WED(WED_WPDMA_TX_MIB(0)), ++ DUMP_WED_RING(WED_WPDMA_RING_TX(0)), ++ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), ++ ++ DUMP_WED(WED_WPDMA_TX_MIB(1)), ++ DUMP_WED_RING(WED_WPDMA_RING_TX(1)), ++ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), ++ ++ DUMP_STR("WPDMA TX"), ++ DUMP_WPDMA_TX_RING(0), ++ DUMP_WPDMA_TX_RING(1), ++ ++ DUMP_STR("WED WDMA RX"), ++ DUMP_WED(WED_WDMA_RX_MIB(0)), ++ DUMP_WED_RING(WED_WDMA_RING_RX(0)), ++ DUMP_WED(WED_WDMA_RX_THRES(0)), ++ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), ++ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), ++ ++ DUMP_WED(WED_WDMA_RX_MIB(1)), ++ DUMP_WED_RING(WED_WDMA_RING_RX(1)), ++ DUMP_WED(WED_WDMA_RX_THRES(1)), ++ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), ++ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), ++ ++ DUMP_STR("WDMA RX"), ++ DUMP_WDMA(WDMA_GLO_CFG), ++ DUMP_WDMA_RING(WDMA_RING_RX(0)), ++ DUMP_WDMA_RING(WDMA_RING_RX(1)), ++ }; ++ struct mtk_wed_hw *hw = s->private; ++ struct mtk_wed_device *dev = hw->wed_dev; ++ ++ if (!dev) ++ return 0; ++ ++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); ++ ++ return 0; ++} ++DEFINE_SHOW_ATTRIBUTE(wed_txinfo); ++ ++ ++static int ++mtk_wed_reg_set(void *data, u64 val) ++{ ++ struct mtk_wed_hw *hw = data; ++ ++ regmap_write(hw->regs, hw->debugfs_reg, val); ++ ++ return 0; ++} ++ ++static int ++mtk_wed_reg_get(void *data, u64 *val) ++{ ++ struct mtk_wed_hw *hw = data; ++ unsigned int regval; ++ int ret; ++ ++ ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); ++ if (ret) ++ return ret; ++ ++ *val = regval; ++ ++ return 0; ++} ++ ++DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, 
mtk_wed_reg_set, ++ "0x%08llx\n"); ++ ++void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) ++{ ++ struct dentry *dir; ++ ++ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); ++ dir = debugfs_create_dir(hw->dirname, NULL); ++ if (!dir) ++ return; ++ ++ hw->debugfs_dir = dir; ++ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); ++ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); ++ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); ++} +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c +@@ -0,0 +1,8 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau */ ++ ++#include ++#include ++ ++const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; ++EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -0,0 +1,251 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2020 Felix Fietkau */ ++ ++#ifndef __MTK_WED_REGS_H ++#define __MTK_WED_REGS_H ++ ++#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) ++#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) ++#define MTK_WDMA_DESC_CTRL_BURST BIT(16) ++#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) ++#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) ++#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) ++ ++struct mtk_wdma_desc { ++ __le32 buf0; ++ __le32 ctrl; ++ __le32 buf1; ++ __le32 info; ++} __packed __aligned(4); ++ ++#define MTK_WED_RESET 0x008 ++#define MTK_WED_RESET_TX_BM BIT(0) ++#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) ++#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) ++#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) ++#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) ++#define MTK_WED_RESET_WED_TX_DMA BIT(12) ++#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) ++#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) ++#define MTK_WED_RESET_WED BIT(31) ++ ++#define MTK_WED_CTRL 0x00c ++#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) ++#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) ++#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) ++#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) ++#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) ++#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) ++#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) ++#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) ++#define MTK_WED_CTRL_RESERVE_EN BIT(12) ++#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) ++#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) ++#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) ++ ++#define MTK_WED_EXT_INT_STATUS 0x020 ++#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) ++#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) ++#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) ++#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) ++#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) ++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) ++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) ++#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) ++#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) ++#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ ++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ ++ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ ++ 
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ ++ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ ++ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) ++ ++#define MTK_WED_EXT_INT_MASK 0x028 ++ ++#define MTK_WED_STATUS 0x060 ++#define MTK_WED_STATUS_TX GENMASK(15, 8) ++ ++#define MTK_WED_TX_BM_CTRL 0x080 ++#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) ++#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) ++#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) ++ ++#define MTK_WED_TX_BM_BASE 0x084 ++ ++#define MTK_WED_TX_BM_TKID 0x088 ++#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) ++#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) ++ ++#define MTK_WED_TX_BM_BUF_LEN 0x08c ++ ++#define MTK_WED_TX_BM_INTF 0x09c ++#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) ++#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) ++#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) ++#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) ++ ++#define MTK_WED_TX_BM_DYN_THR 0x0a0 ++#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) ++#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) ++ ++#define MTK_WED_INT_STATUS 0x200 ++#define MTK_WED_INT_MASK 0x204 ++ ++#define MTK_WED_GLO_CFG 0x208 ++#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) ++#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) ++#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) ++#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) ++#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) ++#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) ++#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) ++#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) ++#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) ++#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) ++#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) ++#define MTK_WED_GLO_CFG_SW_RESET BIT(24) ++#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) ++#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) ++#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) ++#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) ++#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) ++ ++#define MTK_WED_RESET_IDX 0x20c ++#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) ++ ++#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) ++ ++#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) ++ ++#define MTK_WED_WPDMA_INT_TRIGGER 0x504 ++#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) ++#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) ++ ++#define MTK_WED_WPDMA_GLO_CFG 0x508 ++#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) ++#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) ++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) ++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) ++#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) ++#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) ++#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) ++#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) ++#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) ++#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) ++#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) ++#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) ++#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) ++#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) ++#define 
MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) ++#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) ++#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) ++ ++#define MTK_WED_WPDMA_RESET_IDX 0x50c ++#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WED_WPDMA_INT_CTRL 0x520 ++#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) ++ ++#define MTK_WED_WPDMA_INT_MASK 0x524 ++ ++#define MTK_WED_PCIE_CFG_BASE 0x560 ++ ++#define MTK_WED_PCIE_INT_TRIGGER 0x570 ++#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) ++ ++#define MTK_WED_WPDMA_CFG_BASE 0x580 ++ ++#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) ++#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) ++ ++#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) ++#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) ++#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) ++#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) ++ ++#define MTK_WED_WDMA_GLO_CFG 0xa04 ++#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) ++#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) ++#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) ++#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) ++#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) ++#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) ++#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) ++#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) ++#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) ++#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) ++#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) ++#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) ++#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) ++#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) ++#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) ++#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) ++#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) ++#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) ++ ++#define MTK_WED_WDMA_RESET_IDX 0xa08 ++#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) ++#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) ++ ++#define MTK_WED_WDMA_INT_TRIGGER 0xa28 ++#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) ++ ++#define MTK_WED_WDMA_INT_CTRL 0xa2c ++#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) ++ ++#define MTK_WED_WDMA_OFFSET0 0xaa4 ++#define MTK_WED_WDMA_OFFSET1 0xaa8 ++ ++#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) ++#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) ++#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) ++ ++#define MTK_WED_RING_OFS_BASE 0x00 ++#define MTK_WED_RING_OFS_COUNT 0x04 ++#define MTK_WED_RING_OFS_CPU_IDX 0x08 ++#define MTK_WED_RING_OFS_DMA_IDX 0x0c ++ ++#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) ++ ++#define MTK_WDMA_GLO_CFG 0x204 ++#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) ++ ++#define MTK_WDMA_RESET_IDX 0x208 ++#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) ++#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) ++ ++#define MTK_WDMA_INT_MASK 0x228 ++#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) ++#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) ++#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) ++#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) ++#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) ++#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) ++ ++#define MTK_WDMA_INT_GRP1 0x250 ++#define MTK_WDMA_INT_GRP2 0x254 ++ ++#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 
0x4 : 0x0) ++#define MTK_PCIE_MIRROR_MAP_EN BIT(0) ++#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) ++ ++/* DMA channel mapping */ ++#define HIFSYS_DMA_AG_MAP 0x008 ++ ++#endif +--- /dev/null ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -0,0 +1,131 @@ ++#ifndef __MTK_WED_H ++#define __MTK_WED_H ++ ++#include ++#include ++#include ++#include ++ ++#define MTK_WED_TX_QUEUES 2 ++ ++struct mtk_wed_hw; ++struct mtk_wdma_desc; ++ ++struct mtk_wed_ring { ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ int size; ++ ++ u32 reg_base; ++ void __iomem *wpdma; ++}; ++ ++struct mtk_wed_device { ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++ const struct mtk_wed_ops *ops; ++ struct device *dev; ++ struct mtk_wed_hw *hw; ++ bool init_done, running; ++ int wdma_idx; ++ int irq; ++ ++ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; ++ struct mtk_wed_ring txfree_ring; ++ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; ++ ++ struct { ++ int size; ++ void **pages; ++ struct mtk_wdma_desc *desc; ++ dma_addr_t desc_phys; ++ } buf_ring; ++ ++ /* filled by driver: */ ++ struct { ++ struct pci_dev *pci_dev; ++ ++ u32 wpdma_phys; ++ ++ u16 token_start; ++ unsigned int nbuf; ++ ++ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); ++ int (*offload_enable)(struct mtk_wed_device *wed); ++ void (*offload_disable)(struct mtk_wed_device *wed); ++ } wlan; ++#endif ++}; ++ ++struct mtk_wed_ops { ++ int (*attach)(struct mtk_wed_device *dev); ++ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, ++ void __iomem *regs); ++ int (*txfree_ring_setup)(struct mtk_wed_device *dev, ++ void __iomem *regs); ++ void (*detach)(struct mtk_wed_device *dev); ++ ++ void (*stop)(struct mtk_wed_device *dev); ++ void (*start)(struct mtk_wed_device *dev, u32 irq_mask); ++ void (*reset_dma)(struct mtk_wed_device *dev); ++ ++ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg); ++ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val); ++ ++ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); ++ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); ++}; ++ ++extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; ++ ++static inline int ++mtk_wed_device_attach(struct mtk_wed_device *dev) ++{ ++ int ret = -ENODEV; ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++ rcu_read_lock(); ++ dev->ops = rcu_dereference(mtk_soc_wed_ops); ++ if (dev->ops) ++ ret = dev->ops->attach(dev); ++ else ++ rcu_read_unlock(); ++ ++ if (ret) ++ dev->ops = NULL; ++#endif ++ ++ return ret; ++} ++ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++#define mtk_wed_device_active(_dev) !!(_dev)->ops ++#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) ++#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) ++#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ ++ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ ++ (_dev)->ops->txfree_ring_setup(_dev, _regs) ++#define mtk_wed_device_reg_read(_dev, _reg) \ ++ (_dev)->ops->reg_read(_dev, _reg) ++#define mtk_wed_device_reg_write(_dev, _reg, _val) \ ++ (_dev)->ops->reg_write(_dev, _reg, _val) ++#define mtk_wed_device_irq_get(_dev, _mask) \ ++ (_dev)->ops->irq_get(_dev, _mask) ++#define mtk_wed_device_irq_set_mask(_dev, _mask) \ ++ (_dev)->ops->irq_set_mask(_dev, _mask) ++#else ++static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) ++{ ++ return false; ++} ++#define mtk_wed_device_detach(_dev) do {} while (0) ++#define mtk_wed_device_start(_dev, _mask) do {} while (0) ++#define 
mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_reg_read(_dev, _reg) 0 ++#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) ++#define mtk_wed_device_irq_get(_dev, _mask) 0 ++#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) ++#endif ++ ++#endif diff --git a/target/linux/generic/pending-5.15/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch b/target/linux/generic/pending-5.15/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch new file mode 100644 index 0000000000..9abb68c356 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-03-net-ethernet-mtk_eth_soc-implement-flow-offloading-t.patch @@ -0,0 +1,269 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 18:29:22 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: implement flow offloading + to WED devices + +This allows hardware flow offloading from Ethernet to WLAN on MT7622 SoC + +Co-developed-by: Lorenzo Bianconi +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -329,6 +329,24 @@ int mtk_foe_entry_set_pppoe(struct mtk_f + return 0; + } + ++int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, ++ int bss, int wcid) ++{ ++ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); ++ u32 *ib2 = mtk_foe_entry_ib2(entry); ++ ++ *ib2 &= ~MTK_FOE_IB2_PORT_MG; ++ *ib2 |= MTK_FOE_IB2_WDMA_WINFO; ++ if (wdma_idx) ++ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; ++ ++ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | ++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | ++ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); ++ ++ return 0; ++} ++ + static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) + { + return !(entry->ib1 & MTK_FOE_IB1_STATIC) && +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -48,9 +48,9 @@ enum { + #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) + #define MTK_FOE_IB2_MULTICAST BIT(8) + +-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) +-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) +-#define MTK_FOE_IB2_WHNAT_NAT BIT(17) ++#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) ++#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) ++#define MTK_FOE_IB2_WDMA_WINFO BIT(17) + + #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) + +@@ -58,9 +58,9 @@ enum { + + #define MTK_FOE_IB2_DSCP GENMASK(31, 24) + +-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) +-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) +-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) ++#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) ++#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) ++#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) + + enum { + MTK_FOE_STATE_INVALID, +@@ -281,6 +281,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct + int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); + int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); + int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); ++int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, ++ int bss, int wcid); + int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp); + int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -10,6 +10,7 @@ + #include + #include + #include "mtk_eth_soc.h" ++#include 
"mtk_wed.h" + + struct mtk_flow_data { + struct ethhdr eth; +@@ -39,6 +40,7 @@ struct mtk_flow_entry { + struct rhash_head node; + unsigned long cookie; + u16 hash; ++ s8 wed_index; + }; + + static const struct rhashtable_params mtk_flow_ht_params = { +@@ -80,6 +82,35 @@ mtk_flow_offload_mangle_eth(const struct + memcpy(dest, src, act->mangle.mask ? 2 : 4); + } + ++static int ++mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) ++{ ++ struct net_device_path_ctx ctx = { ++ .dev = dev, ++ .daddr = addr, ++ }; ++ struct net_device_path path = {}; ++ ++ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) ++ return -1; ++ ++ if (!dev->netdev_ops->ndo_fill_forward_path) ++ return -1; ++ ++ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) ++ return -1; ++ ++ if (path.type != DEV_PATH_MTK_WDMA) ++ return -1; ++ ++ info->wdma_idx = path.mtk_wdma.wdma_idx; ++ info->queue = path.mtk_wdma.queue; ++ info->bss = path.mtk_wdma.bss; ++ info->wcid = path.mtk_wdma.wcid; ++ ++ return 0; ++} ++ + + static int + mtk_flow_mangle_ports(const struct flow_action_entry *act, +@@ -149,10 +180,20 @@ mtk_flow_get_dsa_port(struct net_device + + static int + mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, +- struct net_device *dev) ++ struct net_device *dev, const u8 *dest_mac, ++ int *wed_index) + { ++ struct mtk_wdma_info info = {}; + int pse_port, dsa_port; + ++ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { ++ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, ++ info.wcid); ++ pse_port = 3; ++ *wed_index = info.wdma_idx; ++ goto out; ++ } ++ + dsa_port = mtk_flow_get_dsa_port(&dev); + if (dsa_port >= 0) + mtk_foe_entry_set_dsa(foe, dsa_port); +@@ -164,6 +205,7 @@ mtk_flow_set_output_device(struct mtk_et + else + return -EOPNOTSUPP; + ++out: + mtk_foe_entry_set_pse_port(foe, pse_port); + + return 0; +@@ -179,6 +221,7 @@ mtk_flow_offload_replace(struct mtk_eth + struct net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; ++ int wed_index = -1; + u16 addr_type = 0; + u32 timestamp; + u8 l4proto = 0; +@@ -329,10 +372,14 @@ mtk_flow_offload_replace(struct mtk_eth + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); + +- err = mtk_flow_set_output_device(eth, &foe, odev); ++ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, ++ &wed_index); + if (err) + return err; + ++ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) ++ return err; ++ + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; +@@ -346,6 +393,7 @@ mtk_flow_offload_replace(struct mtk_eth + } + + entry->hash = hash; ++ entry->wed_index = wed_index; + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) +@@ -356,6 +404,8 @@ clear_flow: + mtk_foe_entry_clear(ð->ppe, hash); + free: + kfree(entry); ++ if (wed_index >= 0) ++ mtk_wed_flow_remove(wed_index); + return err; + } + +@@ -372,6 +422,8 @@ mtk_flow_offload_destroy(struct mtk_eth + mtk_foe_entry_clear(ð->ppe, entry->hash); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); ++ if (entry->wed_index >= 0) ++ mtk_wed_flow_remove(entry->wed_index); + kfree(entry); + + return 0; +--- a/drivers/net/ethernet/mediatek/mtk_wed.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + struct mtk_eth; + +@@ -27,6 +28,12 @@ struct mtk_wed_hw { + int index; + }; + ++struct mtk_wdma_info { ++ u8 wdma_idx; ++ 
u8 queue; ++ u16 wcid; ++ u8 bss; ++}; + + #ifdef CONFIG_NET_MEDIATEK_SOC_WED + static inline void +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -849,6 +849,7 @@ enum net_device_path_type { + DEV_PATH_BRIDGE, + DEV_PATH_PPPOE, + DEV_PATH_DSA, ++ DEV_PATH_MTK_WDMA, + }; + + struct net_device_path { +@@ -874,6 +875,12 @@ struct net_device_path { + int port; + u16 proto; + } dsa; ++ struct { ++ u8 wdma_idx; ++ u8 queue; ++ u16 wcid; ++ u8 bss; ++ } mtk_wdma; + }; + }; + +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -761,6 +761,10 @@ int dev_fill_forward_path(const struct n + if (WARN_ON_ONCE(last_dev == ctx.dev)) + return -1; + } ++ ++ if (!ctx.dev) ++ return ret; ++ + path = dev_fwd_path(stack); + if (!path) + return -1; diff --git a/target/linux/generic/pending-5.15/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch b/target/linux/generic/pending-5.15/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch new file mode 100644 index 0000000000..f59a364a73 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-04-arm64-dts-mediatek-mt7622-introduce-nodes-for-Wirele.patch @@ -0,0 +1,62 @@ +From: Felix Fietkau +Date: Sat, 5 Feb 2022 18:36:36 +0100 +Subject: [PATCH] arm64: dts: mediatek: mt7622: introduce nodes for + Wireless Ethernet Dispatch + +Introduce wed0 and wed1 nodes in order to enable offloading forwarding +between ethernet and wireless devices on the mt7622 chipset. + +Signed-off-by: Felix Fietkau +--- + +--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi +@@ -893,6 +893,11 @@ + }; + }; + ++ hifsys: syscon@1af00000 { ++ compatible = "mediatek,mt7622-hifsys", "syscon"; ++ reg = <0 0x1af00000 0 0x70>; ++ }; ++ + ethsys: syscon@1b000000 { + compatible = "mediatek,mt7622-ethsys", + "syscon"; +@@ -911,6 +916,26 @@ + #dma-cells = <1>; + }; + ++ pcie_mirror: pcie-mirror@10000400 { ++ compatible = "mediatek,mt7622-pcie-mirror", ++ "syscon"; ++ reg = <0 0x10000400 0 0x10>; ++ }; ++ ++ wed0: wed@1020a000 { ++ compatible = "mediatek,mt7622-wed", ++ "syscon"; ++ reg = <0 0x1020a000 0 0x1000>; ++ interrupts = ; ++ }; ++ ++ wed1: wed@1020b000 { ++ compatible = "mediatek,mt7622-wed", ++ "syscon"; ++ reg = <0 0x1020b000 0 0x1000>; ++ interrupts = ; ++ }; ++ + eth: ethernet@1b100000 { + compatible = "mediatek,mt7622-eth", + "mediatek,mt2701-eth", +@@ -938,6 +963,9 @@ + mediatek,ethsys = <ðsys>; + mediatek,sgmiisys = <&sgmiisys>; + mediatek,cci-control = <&cci_control2>; ++ mediatek,wed = <&wed0>, <&wed1>; ++ mediatek,pcie-mirror = <&pcie_mirror>; ++ mediatek,hifsys = <&hifsys>; + dma-coherent; + #address-cells = <1>; + #size-cells = <0>; diff --git a/target/linux/generic/pending-5.15/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch b/target/linux/generic/pending-5.15/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch new file mode 100644 index 0000000000..b2114eb02d --- /dev/null +++ b/target/linux/generic/pending-5.15/701-05-net-ethernet-mtk_eth_soc-add-ipv6-flow-offload-suppo.patch @@ -0,0 +1,79 @@ +From: David Bentham +Date: Mon, 21 Feb 2022 15:36:16 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add ipv6 flow offload + support + +Add the missing IPv6 flow offloading support for routing only. +Hardware flow offloading is done by the packet processing engine (PPE) +of the Ethernet MAC and as it doesn't support mangling of IPv6 packets, +IPv6 NAT cannot be supported. 
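
Since the PPE entry for IPv6 carries only the match tuple and has no rewrite fields, the helper added by the hunks below simply copies the flow's addresses and ports into the entry. The following self-contained C sketch models that routing-only behaviour; the struct and function names are illustrative stand-ins, not the driver code itself, which appears in the patch hunks that follow.

/* A minimal, self-contained model of the routing-only IPv6 path added in
 * this patch: the flow's 5-tuple is copied verbatim into the hardware
 * entry, and no rewritten ("new") addresses or ports exist for IPv6, so
 * plain forwarding can be offloaded but NAT cannot. All names here are
 * illustrative stand-ins for the driver's mtk_foe_* structures. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct foe_ipv6_tuple {			/* stand-in for the PPE's IPv6 5-tuple */
	uint32_t src_ip[4], dst_ip[4];
	uint16_t src_port, dst_port;
};

static void set_ipv6_tuple(struct foe_ipv6_tuple *foe,
			   const struct in6_addr *src, uint16_t src_port,
			   const struct in6_addr *dst, uint16_t dst_port)
{
	/* match-only: store the addresses and ports exactly as seen */
	memcpy(foe->src_ip, src->s6_addr, sizeof(foe->src_ip));
	memcpy(foe->dst_ip, dst->s6_addr, sizeof(foe->dst_ip));
	foe->src_port = src_port;
	foe->dst_port = dst_port;
	/* no "new" tuple is written: the PPE cannot mangle IPv6 headers,
	 * which is why IPv6 NAT stays on the normal CPU path */
}

int main(void)
{
	struct foe_ipv6_tuple foe = { 0 };
	struct in6_addr src, dst;

	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);
	set_ipv6_tuple(&foe, &src, 40000, &dst, 443);
	printf("offloaded match-only flow, ports %u -> %u\n",
	       foe.src_port, foe.dst_port);
	return 0;
}

For comparison, the IPv4 HNAPT entry keeps both an original and a new tuple (see the ipv4.orig/ipv4.new copies elsewhere in this series), which is what makes IPv4 NAT offloadable.
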
+ +Signed-off-by: David Bentham +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -6,6 +6,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -20,6 +21,11 @@ struct mtk_flow_data { + __be32 src_addr; + __be32 dst_addr; + } v4; ++ ++ struct { ++ struct in6_addr src_addr; ++ struct in6_addr dst_addr; ++ } v6; + }; + + __be16 src_port; +@@ -65,6 +71,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en + data->v4.dst_addr, data->dst_port); + } + ++static int ++mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) ++{ ++ return mtk_foe_entry_set_ipv6_tuple(foe, ++ data->v6.src_addr.s6_addr32, data->src_port, ++ data->v6.dst_addr.s6_addr32, data->dst_port); ++} ++ + static void + mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) + { +@@ -299,6 +313,9 @@ mtk_flow_offload_replace(struct mtk_eth + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; ++ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; ++ break; + default: + return -EOPNOTSUPP; + } +@@ -334,6 +351,17 @@ mtk_flow_offload_replace(struct mtk_eth + mtk_flow_set_ipv4_addr(&foe, &data, false); + } + ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { ++ struct flow_match_ipv6_addrs addrs; ++ ++ flow_rule_match_ipv6_addrs(rule, &addrs); ++ ++ data.v6.src_addr = addrs.key->src; ++ data.v6.dst_addr = addrs.key->dst; ++ ++ mtk_flow_set_ipv6_addr(&foe, &data); ++ } ++ + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; diff --git a/target/linux/generic/pending-5.15/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch b/target/linux/generic/pending-5.15/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch new file mode 100644 index 0000000000..a9fc70533d --- /dev/null +++ b/target/linux/generic/pending-5.15/701-06-net-ethernet-mtk_eth_soc-support-TC_SETUP_BLOCK-for-.patch @@ -0,0 +1,29 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:37:21 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: support TC_SETUP_BLOCK for + PPE offload + +This allows offload entries to be created from user space + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -566,10 +566,13 @@ mtk_eth_setup_tc_block(struct net_device + int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) + { +- if (type == TC_SETUP_FT) ++ switch (type) { ++ case TC_SETUP_BLOCK: ++ case TC_SETUP_FT: + return mtk_eth_setup_tc_block(dev, type_data); +- +- return -EOPNOTSUPP; ++ default: ++ return -EOPNOTSUPP; ++ } + } + + int mtk_eth_offload_init(struct mtk_eth *eth) diff --git a/target/linux/generic/pending-5.15/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch b/target/linux/generic/pending-5.15/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch new file mode 100644 index 0000000000..7c6931d333 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-07-net-ethernet-mtk_eth_soc-allocate-struct-mtk_ppe-sep.patch @@ -0,0 +1,159 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:38:20 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: allocate struct mtk_ppe + separately + +Preparation for adding more data to it, which will increase its size. 
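
The patch below changes mtk_ppe_init() so that it allocates struct mtk_ppe itself (via devm_kzalloc) and returns a pointer, NULL on failure, instead of filling a struct embedded in mtk_eth; callers such as mtk_probe() then store and NULL-check eth->ppe. A rough userspace sketch of that allocate-and-return pattern, with hypothetical names rather than driver code, looks like this:

/* Illustrative model of the pattern mtk_ppe_init() switches to below: the
 * init routine owns the allocation and returns NULL on failure, so the
 * struct can grow in later patches without touching its embedder or any
 * caller. Names are stand-ins, not the driver's. */
#include <stdio.h>
#include <stdlib.h>

struct ppe {
	void *base;
	int version;
	/* new members can be appended here without changing any caller */
};

static struct ppe *ppe_init(void *base, int version)
{
	struct ppe *ppe = calloc(1, sizeof(*ppe));

	if (!ppe)
		return NULL;
	ppe->base = base;
	ppe->version = version;
	return ppe;
}

int main(void)
{
	struct ppe *ppe = ppe_init(NULL, 2);

	if (!ppe)
		return 1;	/* callers now test for NULL instead of an errno */
	printf("ppe version %d initialised\n", ppe->version);
	free(ppe);
	return 0;
}

In the kernel code the allocation is devm-managed, so the driver does not need an explicit free on teardown.
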
+ +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2312,7 +2312,7 @@ static int mtk_open(struct net_device *d + if (err) + return err; + +- if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) ++ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) + gdm_config = MTK_GDMA_TO_PPE; + + mtk_gdm_config(eth, gdm_config); +@@ -2386,7 +2386,7 @@ static int mtk_stop(struct net_device *d + mtk_dma_free(eth); + + if (eth->soc->offload_version) +- mtk_ppe_stop(ð->ppe); ++ mtk_ppe_stop(eth->ppe); + + return 0; + } +@@ -3278,10 +3278,11 @@ static int mtk_probe(struct platform_dev + } + + if (eth->soc->offload_version) { +- err = mtk_ppe_init(ð->ppe, eth->dev, +- eth->base + MTK_ETH_PPE_BASE, 2); +- if (err) ++ eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); ++ if (!eth->ppe) { ++ err = -ENOMEM; + goto err_free_dev; ++ } + + err = mtk_eth_offload_init(eth); + if (err) +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -982,7 +982,7 @@ struct mtk_eth { + u32 rx_dma_l4_valid; + int ip_align; + +- struct mtk_ppe ppe; ++ struct mtk_ppe *ppe; + struct rhashtable flow_table; + }; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -384,10 +384,15 @@ int mtk_foe_entry_commit(struct mtk_ppe + return hash; + } + +-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, ++struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, + int version) + { + struct mtk_foe_entry *foe; ++ struct mtk_ppe *ppe; ++ ++ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); ++ if (!ppe) ++ return NULL; + + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. 
+@@ -399,13 +404,13 @@ int mtk_ppe_init(struct mtk_ppe *ppe, st + foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), + &ppe->foe_phys, GFP_KERNEL); + if (!foe) +- return -ENOMEM; ++ return NULL; + + ppe->foe_table = foe; + + mtk_ppe_debugfs_init(ppe); + +- return 0; ++ return ppe; + } + + static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -246,8 +246,7 @@ struct mtk_ppe { + void *acct_table; + }; + +-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, +- int version); ++struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); + int mtk_ppe_start(struct mtk_ppe *ppe); + int mtk_ppe_stop(struct mtk_ppe *ppe); + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -414,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth + + entry->cookie = f->cookie; + timestamp = mtk_eth_timestamp(eth); +- hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); ++ hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); + if (hash < 0) { + err = hash; + goto free; +@@ -429,7 +429,7 @@ mtk_flow_offload_replace(struct mtk_eth + + return 0; + clear_flow: +- mtk_foe_entry_clear(ð->ppe, hash); ++ mtk_foe_entry_clear(eth->ppe, hash); + free: + kfree(entry); + if (wed_index >= 0) +@@ -447,7 +447,7 @@ mtk_flow_offload_destroy(struct mtk_eth + if (!entry) + return -ENOENT; + +- mtk_foe_entry_clear(ð->ppe, entry->hash); ++ mtk_foe_entry_clear(eth->ppe, entry->hash); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (entry->wed_index >= 0) +@@ -469,7 +469,7 @@ mtk_flow_offload_stats(struct mtk_eth *e + if (!entry) + return -ENOENT; + +- timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); ++ timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); + if (timestamp < 0) + return -ETIMEDOUT; + +@@ -525,7 +525,7 @@ mtk_eth_setup_tc_block(struct net_device + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + +- if (!eth->ppe.foe_table) ++ if (!eth->ppe || !eth->ppe->foe_table) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) +@@ -577,7 +577,7 @@ int mtk_eth_setup_tc(struct net_device * + + int mtk_eth_offload_init(struct mtk_eth *eth) + { +- if (!eth->ppe.foe_table) ++ if (!eth->ppe || !eth->ppe->foe_table) + return 0; + + return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); diff --git a/target/linux/generic/pending-5.15/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch b/target/linux/generic/pending-5.15/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch new file mode 100644 index 0000000000..5c726c60a1 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-08-net-ethernet-mtk_eth_soc-rework-hardware-flow-table-.patch @@ -0,0 +1,424 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:39:18 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: rework hardware flow table + management + +The hardware was designed to handle flow detection and creation of flow entries +by itself, relying on the software primarily for filling in egress routing +information. +When there is a hash collision between multiple flows, this allows the hardware +to maintain the entry for the most active flow. +Additionally, the hardware only keeps offloading active for entries with at +least 30 packets per second. + +With this rework, the code no longer creates a hardware entries directly. 
+Instead, the hardware entry is only created when the PPE reports a matching +unbound flow with the minimum target rate. +In order to reduce CPU overhead, looking for flows belonging to a hash entry +is rate limited to once every 100ms. + +This rework is also used as preparation for emulating bridge offload by +managing L4 offload entries on demand. + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + + #include "mtk_eth_soc.h" +@@ -1281,7 +1282,7 @@ static int mtk_poll_rx(struct napi_struc + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; +- u32 hash; ++ u32 hash, reason; + int mac; + + ring = mtk_get_rx_ring(eth); +@@ -1357,6 +1358,11 @@ static int mtk_poll_rx(struct napi_struc + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + } + ++ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); ++ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) ++ mtk_ppe_check_skb(eth->ppe, skb, ++ trxd.rxd4 & MTK_RXD4_FOE_ENTRY); ++ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + (trxd.rxd2 & RX_DMA_VTAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), +@@ -3278,7 +3284,7 @@ static int mtk_probe(struct platform_dev + } + + if (eth->soc->offload_version) { +- eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); ++ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); + if (!eth->ppe) { + err = -ENOMEM; + goto err_free_dev; +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -6,9 +6,12 @@ + #include + #include + #include ++#include "mtk_eth_soc.h" + #include "mtk_ppe.h" + #include "mtk_ppe_regs.h" + ++static DEFINE_SPINLOCK(ppe_lock); ++ + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) + { + writel(val, ppe->base + reg); +@@ -41,6 +44,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe + return ppe_m32(ppe, reg, val, 0); + } + ++static u32 mtk_eth_timestamp(struct mtk_eth *eth) ++{ ++ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; ++} ++ + static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) + { + int ret; +@@ -353,26 +361,59 @@ static inline bool mtk_foe_entry_usable( + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; + } + +-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, +- u16 timestamp) ++static bool ++mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) ++{ ++ int type, len; ++ ++ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) ++ return false; ++ ++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); ++ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) ++ len = offsetof(struct mtk_foe_entry, ipv6._rsv); ++ else ++ len = offsetof(struct mtk_foe_entry, ipv4.ib2); ++ ++ return !memcmp(&entry->data.data, &data->data, len - 4); ++} ++ ++static void ++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + struct mtk_foe_entry *hwe; +- u32 hash; ++ struct mtk_foe_entry foe; + ++ spin_lock_bh(&ppe_lock); ++ if (entry->hash == 0xffff) ++ goto out; ++ ++ hwe = &ppe->foe_table[entry->hash]; ++ memcpy(&foe, hwe, sizeof(foe)); ++ if (!mtk_flow_entry_match(entry, &foe)) { ++ entry->hash = 0xffff; ++ goto out; ++ } ++ ++ entry->data.ib1 = foe.ib1; ++ ++out: ++ spin_unlock_bh(&ppe_lock); ++} ++ ++static void ++__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, ++ u16 hash) ++{ ++ struct mtk_foe_entry *hwe; ++ u16 
timestamp; ++ ++ timestamp = mtk_eth_timestamp(ppe->eth); + timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); + +- hash = mtk_ppe_hash_entry(entry); + hwe = &ppe->foe_table[hash]; +- if (!mtk_foe_entry_usable(hwe)) { +- hwe++; +- hash++; +- +- if (!mtk_foe_entry_usable(hwe)) +- return -ENOSPC; +- } +- + memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); + wmb(); + hwe->ib1 = entry->ib1; +@@ -380,13 +421,77 @@ int mtk_foe_entry_commit(struct mtk_ppe + dma_wmb(); + + mtk_ppe_cache_clear(ppe); ++} + +- return hash; ++void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ spin_lock_bh(&ppe_lock); ++ hlist_del_init(&entry->list); ++ if (entry->hash != 0xffff) { ++ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; ++ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, ++ MTK_FOE_STATE_BIND); ++ dma_wmb(); ++ } ++ entry->hash = 0xffff; ++ spin_unlock_bh(&ppe_lock); ++} ++ ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ u32 hash = mtk_ppe_hash_entry(&entry->data); ++ ++ entry->hash = 0xffff; ++ spin_lock_bh(&ppe_lock); ++ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); ++ spin_unlock_bh(&ppe_lock); ++ ++ return 0; ++} ++ ++void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) ++{ ++ struct hlist_head *head = &ppe->foe_flow[hash / 2]; ++ struct mtk_flow_entry *entry; ++ struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; ++ bool found = false; ++ ++ if (hlist_empty(head)) ++ return; ++ ++ spin_lock_bh(&ppe_lock); ++ hlist_for_each_entry(entry, head, list) { ++ if (found || !mtk_flow_entry_match(entry, hwe)) { ++ if (entry->hash != 0xffff) ++ entry->hash = 0xffff; ++ continue; ++ } ++ ++ entry->hash = hash; ++ __mtk_foe_entry_commit(ppe, &entry->data, hash); ++ found = true; ++ } ++ spin_unlock_bh(&ppe_lock); ++} ++ ++int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; ++ u16 timestamp; ++ ++ mtk_flow_entry_update(ppe, entry); ++ timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ ++ if (timestamp > now) ++ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; ++ else ++ return now - timestamp; + } + +-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, ++struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, + int version) + { ++ struct device *dev = eth->dev; + struct mtk_foe_entry *foe; + struct mtk_ppe *ppe; + +@@ -398,6 +503,7 @@ struct mtk_ppe *mtk_ppe_init(struct devi + * not coherent. 
+ */ + ppe->base = base; ++ ppe->eth = eth; + ppe->dev = dev; + ppe->version = version; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -235,7 +235,17 @@ enum { + MTK_PPE_CPU_REASON_INVALID = 0x1f, + }; + ++struct mtk_flow_entry { ++ struct rhash_head node; ++ struct hlist_node list; ++ unsigned long cookie; ++ struct mtk_foe_entry data; ++ u16 hash; ++ s8 wed_index; ++}; ++ + struct mtk_ppe { ++ struct mtk_eth *eth; + struct device *dev; + void __iomem *base; + int version; +@@ -243,18 +253,33 @@ struct mtk_ppe { + struct mtk_foe_entry *foe_table; + dma_addr_t foe_phys; + ++ u16 foe_check_time[MTK_PPE_ENTRIES]; ++ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; ++ + void *acct_table; + }; + +-struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); ++struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); + int mtk_ppe_start(struct mtk_ppe *ppe); + int mtk_ppe_stop(struct mtk_ppe *ppe); + ++void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); ++ + static inline void +-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) ++mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) + { +- ppe->foe_table[hash].ib1 = 0; +- dma_wmb(); ++ u16 now, diff; ++ ++ if (!ppe) ++ return; ++ ++ now = (u16)jiffies; ++ diff = now - ppe->foe_check_time[hash]; ++ if (diff < HZ / 10) ++ return; ++ ++ ppe->foe_check_time[hash] = now; ++ __mtk_ppe_check_skb(ppe, skb, hash); + } + + static inline int +@@ -282,8 +307,9 @@ int mtk_foe_entry_set_vlan(struct mtk_fo + int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); + int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid); +-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, +- u16 timestamp); ++int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); ++void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); ++int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); + int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); + + #endif +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -42,13 +42,6 @@ struct mtk_flow_data { + } pppoe; + }; + +-struct mtk_flow_entry { +- struct rhash_head node; +- unsigned long cookie; +- u16 hash; +- s8 wed_index; +-}; +- + static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), + .key_offset = offsetof(struct mtk_flow_entry, cookie), +@@ -56,12 +49,6 @@ static const struct rhashtable_params mt + .automatic_shrinking = true, + }; + +-static u32 +-mtk_eth_timestamp(struct mtk_eth *eth) +-{ +- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +-} +- + static int + mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, + bool egress) +@@ -237,10 +224,8 @@ mtk_flow_offload_replace(struct mtk_eth + int offload_type = 0; + int wed_index = -1; + u16 addr_type = 0; +- u32 timestamp; + u8 l4proto = 0; + int err = 0; +- int hash; + int i; + + if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) +@@ -413,23 +398,21 @@ mtk_flow_offload_replace(struct mtk_eth + return -ENOMEM; + + entry->cookie = f->cookie; +- timestamp = mtk_eth_timestamp(eth); +- hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); +- if (hash < 0) { +- err = hash; ++ memcpy(&entry->data, &foe, sizeof(entry->data)); ++ 
entry->wed_index = wed_index; ++ ++ if (mtk_foe_entry_commit(eth->ppe, entry) < 0) + goto free; +- } + +- entry->hash = hash; +- entry->wed_index = wed_index; + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) +- goto clear_flow; ++ goto clear; + + return 0; +-clear_flow: +- mtk_foe_entry_clear(eth->ppe, hash); ++ ++clear: ++ mtk_foe_entry_clear(eth->ppe, entry); + free: + kfree(entry); + if (wed_index >= 0) +@@ -447,7 +430,7 @@ mtk_flow_offload_destroy(struct mtk_eth + if (!entry) + return -ENOENT; + +- mtk_foe_entry_clear(eth->ppe, entry->hash); ++ mtk_foe_entry_clear(eth->ppe, entry); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (entry->wed_index >= 0) +@@ -461,7 +444,6 @@ static int + mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) + { + struct mtk_flow_entry *entry; +- int timestamp; + u32 idle; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, +@@ -469,11 +451,7 @@ mtk_flow_offload_stats(struct mtk_eth *e + if (!entry) + return -ENOENT; + +- timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); +- if (timestamp < 0) +- return -ETIMEDOUT; +- +- idle = mtk_eth_timestamp(eth) - timestamp; ++ idle = mtk_foe_entry_idle_time(eth->ppe, entry); + f->stats.lastused = jiffies - idle * HZ; + + return 0; diff --git a/target/linux/generic/pending-5.15/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch b/target/linux/generic/pending-5.15/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch new file mode 100644 index 0000000000..2ff0b341f9 --- /dev/null +++ b/target/linux/generic/pending-5.15/701-09-net-ethernet-mtk_eth_soc-remove-bridge-flow-offload-.patch @@ -0,0 +1,44 @@ +From: Felix Fietkau +Date: Mon, 21 Feb 2022 15:55:19 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: remove bridge flow offload + type entry support + +According to MediaTek, this feature is not supported in current hardware + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -84,13 +84,6 @@ static u32 mtk_ppe_hash_entry(struct mtk + u32 hash; + + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { +- case MTK_PPE_PKT_TYPE_BRIDGE: +- hv1 = e->bridge.src_mac_lo; +- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); +- hv2 = e->bridge.src_mac_hi >> 16; +- hv2 ^= e->bridge.dest_mac_lo; +- hv3 = e->bridge.dest_mac_hi; +- break; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; +@@ -572,7 +565,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | +- MTK_PPE_FLOW_CFG_L2_BRIDGE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str( + static const char * const type_str[] = { + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", +- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", diff --git a/target/linux/generic/pending-5.15/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch b/target/linux/generic/pending-5.15/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch new file mode 100644 index 
0000000000..67c02d6dbc --- /dev/null +++ b/target/linux/generic/pending-5.15/701-10-net-ethernet-mtk_eth_soc-support-creating-mac-addres.patch @@ -0,0 +1,553 @@ +From: Felix Fietkau +Date: Wed, 23 Feb 2022 10:56:34 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: support creating mac + address based offload entries + +This will be used to implement a limited form of bridge offloading. +Since the hardware does not support flow table entries with just source +and destination MAC address, the driver has to emulate it. + +The hardware automatically creates entries entries for incoming flows, even +when they are bridged instead of routed, and reports when packets for these +flows have reached the minimum PPS rate for offloading. + +After this happens, we look up the L2 flow offload entry based on the MAC +header and fill in the output routing information in the flow table. +The dynamically created per-flow entries are automatically removed when +either the hardware flowtable entry expires, is replaced, or if the offload +rule they belong to is removed + +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -6,12 +6,22 @@ + #include + #include + #include ++#include ++#include ++#include + #include "mtk_eth_soc.h" + #include "mtk_ppe.h" + #include "mtk_ppe_regs.h" + + static DEFINE_SPINLOCK(ppe_lock); + ++static const struct rhashtable_params mtk_flow_l2_ht_params = { ++ .head_offset = offsetof(struct mtk_flow_entry, l2_node), ++ .key_offset = offsetof(struct mtk_flow_entry, data.bridge), ++ .key_len = offsetof(struct mtk_foe_bridge, key_end), ++ .automatic_shrinking = true, ++}; ++ + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) + { + writel(val, ppe->base + reg); +@@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e + { + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return &entry->bridge.l2; ++ + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + +@@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry * + { + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return &entry->bridge.ib2; ++ + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + +@@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + +- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) { ++ ether_addr_copy(entry->bridge.src_mac, src_mac); ++ ether_addr_copy(entry->bridge.dest_mac, dest_mac); ++ entry->bridge.ib2 = val; ++ l2 = &entry->bridge.l2; ++ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { +@@ -372,12 +393,96 @@ mtk_flow_entry_match(struct mtk_flow_ent + } + + static void ++__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ struct hlist_head *head; ++ struct hlist_node *tmp; ++ ++ if (entry->type == MTK_FLOW_TYPE_L2) { ++ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, ++ mtk_flow_l2_ht_params); ++ ++ head = &entry->l2_flows; ++ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) ++ __mtk_foe_entry_clear(ppe, entry); ++ return; ++ } ++ ++ hlist_del_init(&entry->list); ++ if (entry->hash != 0xffff) { ++ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; ++ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, ++ MTK_FOE_STATE_BIND); 
++ dma_wmb(); ++ } ++ entry->hash = 0xffff; ++ ++ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) ++ return; ++ ++ hlist_del_init(&entry->l2_data.list); ++ kfree(entry); ++} ++ ++static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) ++{ ++ u16 timestamp; ++ u16 now; ++ ++ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; ++ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ ++ if (timestamp > now) ++ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; ++ else ++ return now - timestamp; ++} ++ ++static void ++mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ struct mtk_flow_entry *cur; ++ struct mtk_foe_entry *hwe; ++ struct hlist_node *tmp; ++ int idle; ++ ++ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); ++ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { ++ int cur_idle; ++ u32 ib1; ++ ++ hwe = &ppe->foe_table[cur->hash]; ++ ib1 = READ_ONCE(hwe->ib1); ++ ++ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { ++ cur->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, cur); ++ continue; ++ } ++ ++ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); ++ if (cur_idle >= idle) ++ continue; ++ ++ idle = cur_idle; ++ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; ++ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; ++ } ++} ++ ++static void + mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + struct mtk_foe_entry *hwe; + struct mtk_foe_entry foe; + + spin_lock_bh(&ppe_lock); ++ ++ if (entry->type == MTK_FLOW_TYPE_L2) { ++ mtk_flow_entry_update_l2(ppe, entry); ++ goto out; ++ } ++ + if (entry->hash == 0xffff) + goto out; + +@@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *p + void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + spin_lock_bh(&ppe_lock); +- hlist_del_init(&entry->list); +- if (entry->hash != 0xffff) { +- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; +- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, +- MTK_FOE_STATE_BIND); +- dma_wmb(); +- } +- entry->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, entry); + spin_unlock_bh(&ppe_lock); + } + ++static int ++mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ entry->type = MTK_FLOW_TYPE_L2; ++ ++ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, ++ mtk_flow_l2_ht_params); ++} ++ + int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { +- u32 hash = mtk_ppe_hash_entry(&entry->data); ++ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); ++ u32 hash; ++ ++ if (type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return mtk_foe_entry_commit_l2(ppe, entry); + ++ hash = mtk_ppe_hash_entry(&entry->data); + entry->hash = 0xffff; + spin_lock_bh(&ppe_lock); + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); +@@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe + return 0; + } + ++static void ++mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, ++ u16 hash) ++{ ++ struct mtk_flow_entry *flow_info; ++ struct mtk_foe_entry foe, *hwe; ++ struct mtk_foe_mac_info *l2; ++ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; ++ int type; ++ ++ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), ++ GFP_ATOMIC); ++ if (!flow_info) ++ return; ++ ++ flow_info->l2_data.base_flow = entry; ++ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; ++ flow_info->hash = hash; ++ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); ++ 
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); ++ ++ hwe = &ppe->foe_table[hash]; ++ memcpy(&foe, hwe, sizeof(foe)); ++ foe.ib1 &= ib1_mask; ++ foe.ib1 |= entry->data.ib1 & ~ib1_mask; ++ ++ l2 = mtk_foe_entry_l2(&foe); ++ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); ++ ++ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); ++ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) ++ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); ++ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) ++ l2->etype = ETH_P_IPV6; ++ ++ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; ++ ++ __mtk_foe_entry_commit(ppe, &foe, hash); ++} ++ + void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) + { + struct hlist_head *head = &ppe->foe_flow[hash / 2]; +- struct mtk_flow_entry *entry; + struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; ++ struct mtk_flow_entry *entry; ++ struct mtk_foe_bridge key = {}; ++ struct ethhdr *eh; + bool found = false; +- +- if (hlist_empty(head)) +- return; ++ u8 *tag; + + spin_lock_bh(&ppe_lock); ++ ++ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) ++ goto out; ++ + hlist_for_each_entry(entry, head, list) { ++ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { ++ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == ++ MTK_FOE_STATE_BIND)) ++ continue; ++ ++ entry->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, entry); ++ continue; ++ } ++ + if (found || !mtk_flow_entry_match(entry, hwe)) { + if (entry->hash != 0xffff) + entry->hash = 0xffff; +@@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe + __mtk_foe_entry_commit(ppe, &entry->data, hash); + found = true; + } ++ ++ if (found) ++ goto out; ++ ++ eh = eth_hdr(skb); ++ ether_addr_copy(key.dest_mac, eh->h_dest); ++ ether_addr_copy(key.src_mac, eh->h_source); ++ tag = skb->data - 2; ++ key.vlan = 0; ++ switch (skb->protocol) { ++#if IS_ENABLED(CONFIG_NET_DSA) ++ case htons(ETH_P_XDSA): ++ if (!netdev_uses_dsa(skb->dev) || ++ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) ++ goto out; ++ ++ tag += 4; ++ if (get_unaligned_be16(tag) != ETH_P_8021Q) ++ break; ++ ++ fallthrough; ++#endif ++ case htons(ETH_P_8021Q): ++ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; ++ break; ++ default: ++ break; ++ } ++ ++ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); ++ if (!entry) ++ goto out; ++ ++ mtk_foe_entry_commit_subflow(ppe, entry, hash); ++ ++out: + spin_unlock_bh(&ppe_lock); + } + + int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { +- u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; +- u16 timestamp; +- + mtk_flow_entry_update(ppe, entry); +- timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + +- if (timestamp > now) +- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; +- else +- return now - timestamp; ++ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + } + + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, +@@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ + if (!ppe) + return NULL; + ++ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); ++ + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. 
+ */ +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -6,6 +6,7 @@ + + #include + #include ++#include + + #define MTK_ETH_PPE_BASE 0xc00 + +@@ -84,19 +85,16 @@ struct mtk_foe_mac_info { + u16 src_mac_lo; + }; + ++/* software-only entry type */ + struct mtk_foe_bridge { +- u32 dest_mac_hi; +- +- u16 src_mac_lo; +- u16 dest_mac_lo; ++ u8 dest_mac[ETH_ALEN]; ++ u8 src_mac[ETH_ALEN]; ++ u16 vlan; + +- u32 src_mac_hi; ++ struct {} key_end; + + u32 ib2; + +- u32 _rsv[5]; +- +- u32 udf_tsid; + struct mtk_foe_mac_info l2; + }; + +@@ -235,13 +233,33 @@ enum { + MTK_PPE_CPU_REASON_INVALID = 0x1f, + }; + ++enum { ++ MTK_FLOW_TYPE_L4, ++ MTK_FLOW_TYPE_L2, ++ MTK_FLOW_TYPE_L2_SUBFLOW, ++}; ++ + struct mtk_flow_entry { ++ union { ++ struct hlist_node list; ++ struct { ++ struct rhash_head l2_node; ++ struct hlist_head l2_flows; ++ }; ++ }; ++ u8 type; ++ s8 wed_index; ++ u16 hash; ++ union { ++ struct mtk_foe_entry data; ++ struct { ++ struct mtk_flow_entry *base_flow; ++ struct hlist_node list; ++ struct {} end; ++ } l2_data; ++ }; + struct rhash_head node; +- struct hlist_node list; + unsigned long cookie; +- struct mtk_foe_entry data; +- u16 hash; +- s8 wed_index; + }; + + struct mtk_ppe { +@@ -256,6 +274,8 @@ struct mtk_ppe { + u16 foe_check_time[MTK_PPE_ENTRIES]; + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; + ++ struct rhashtable l2_flows; ++ + void *acct_table; + }; + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +@@ -31,6 +31,8 @@ struct mtk_flow_data { + __be16 src_port; + __be16 dst_port; + ++ u16 vlan_in; ++ + struct { + u16 id; + __be16 proto; +@@ -260,9 +262,45 @@ mtk_flow_offload_replace(struct mtk_eth + return -EOPNOTSUPP; + } + ++ switch (addr_type) { ++ case 0: ++ offload_type = MTK_PPE_PKT_TYPE_BRIDGE; ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { ++ struct flow_match_eth_addrs match; ++ ++ flow_rule_match_eth_addrs(rule, &match); ++ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); ++ memcpy(data.eth.h_source, match.key->src, ETH_ALEN); ++ } else { ++ return -EOPNOTSUPP; ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { ++ struct flow_match_vlan match; ++ ++ flow_rule_match_vlan(rule, &match); ++ ++ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) ++ return -EOPNOTSUPP; ++ ++ data.vlan_in = match.key->vlan_id; ++ } ++ break; ++ case FLOW_DISSECTOR_KEY_IPV4_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; ++ break; ++ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: ++ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + mtk_flow_offload_mangle_eth(act, &data.eth); + break; +@@ -294,17 +332,6 @@ mtk_flow_offload_replace(struct mtk_eth + } + } + +- switch (addr_type) { +- case FLOW_DISSECTOR_KEY_IPV4_ADDRS: +- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; +- break; +- case FLOW_DISSECTOR_KEY_IPV6_ADDRS: +- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; +- break; +- default: +- return -EOPNOTSUPP; +- } +- + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; +@@ -318,10 +345,13 @@ mtk_flow_offload_replace(struct mtk_eth + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + ++ 
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; ++ + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; +- } else { ++ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { + return -EOPNOTSUPP; + } + +@@ -351,6 +381,9 @@ mtk_flow_offload_replace(struct mtk_eth + if (act->id != FLOW_ACTION_MANGLE) + continue; + ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ return -EOPNOTSUPP; ++ + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: +@@ -376,6 +409,9 @@ mtk_flow_offload_replace(struct mtk_eth + return err; + } + ++ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) ++ foe.bridge.vlan = data.vlan_in; ++ + if (data.vlan.num == 1) { + if (data.vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; diff --git a/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch b/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch deleted file mode 100644 index a36ae0e6e4..0000000000 --- a/target/linux/generic/pending-5.15/701-net-ethernet-mtk_eth_soc-add-ipv6-flow-offloading-support.patch +++ /dev/null @@ -1,65 +0,0 @@ ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -6,6 +6,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -19,6 +20,11 @@ struct mtk_flow_data { - __be32 src_addr; - __be32 dst_addr; - } v4; -+ -+ struct { -+ struct in6_addr src_addr; -+ struct in6_addr dst_addr; -+ } v6; - }; - - __be16 src_port; -@@ -63,6 +69,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_en - data->v4.dst_addr, data->dst_port); - } - -+static int -+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) -+{ -+ return mtk_foe_entry_set_ipv6_tuple(foe, -+ data->v6.src_addr.s6_addr32, data->src_port, -+ data->v6.dst_addr.s6_addr32, data->dst_port); -+} -+ - static void - mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) - { -@@ -256,6 +270,9 @@ mtk_flow_offload_replace(struct mtk_eth - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; - break; -+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS: -+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; -+ break; - default: - return -EOPNOTSUPP; - } -@@ -291,6 +308,17 @@ mtk_flow_offload_replace(struct mtk_eth - mtk_flow_set_ipv4_addr(&foe, &data, false); - } - -+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { -+ struct flow_match_ipv6_addrs addrs; -+ -+ flow_rule_match_ipv6_addrs(rule, &addrs); -+ -+ data.v6.src_addr = addrs.key->src; -+ data.v6.dst_addr = addrs.key->dst; -+ -+ mtk_flow_set_ipv6_addr(&foe, &data); -+ } -+ - flow_action_for_each(i, act, &rule->action) { - if (act->id != FLOW_ACTION_MANGLE) - continue; diff --git a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch index d5ce94d74a..4365359e00 100644 --- a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch +++ b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2178,8 +2178,8 @@ static irqreturn_t mtk_handle_irq_rx(int +@@ -2186,8 +2186,8 @@ static irqreturn_t 
mtk_handle_irq_rx(int eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { @@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau } return IRQ_HANDLED; -@@ -2191,8 +2191,8 @@ static irqreturn_t mtk_handle_irq_tx(int +@@ -2199,8 +2199,8 @@ static irqreturn_t mtk_handle_irq_tx(int eth->tx_events++; if (likely(napi_schedule_prep(ð->tx_napi))) { @@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau } return IRQ_HANDLED; -@@ -3242,6 +3242,8 @@ static int mtk_probe(struct platform_dev +@@ -3313,6 +3313,8 @@ static int mtk_probe(struct platform_dev * for NAPI to work */ init_dummy_netdev(ð->dummy_dev); diff --git a/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch b/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch index dd2b70aa10..43464b27d5 100644 --- a/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch +++ b/target/linux/mediatek/patches-5.10/510-net-mediatek-add-flow-offload-for-mt7623.patch @@ -14,7 +14,7 @@ Signed-off-by: Frank Wunderlich --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3304,6 +3304,7 @@ static const struct mtk_soc_data mt7623_ +@@ -3373,6 +3373,7 @@ static const struct mtk_soc_data mt7623_ .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, diff --git a/target/linux/mediatek/patches-5.10/700-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/mediatek/patches-5.10/700-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch deleted file mode 100644 index a0c892d706..0000000000 --- a/target/linux/mediatek/patches-5.10/700-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch +++ /dev/null @@ -1,85 +0,0 @@ -From: Felix Fietkau -Date: Fri, 4 Sep 2020 18:36:06 +0200 -Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent DMA - -It improves performance by eliminating the need for a cache flush on rx and tx - -Signed-off-by: Felix Fietkau ---- - ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -364,7 +364,7 @@ - }; - - cci_control2: slave-if@5000 { -- compatible = "arm,cci-400-ctrl-if"; -+ compatible = "arm,cci-400-ctrl-if", "syscon"; - interface-type = "ace"; - reg = <0x5000 0x1000>; - }; -@@ -977,6 +977,8 @@ - power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; - mediatek,ethsys = <ðsys>; - mediatek,sgmiisys = <&sgmiisys>; -+ mediatek,cci-control = <&cci_control2>; -+ dma-coherent; - #address-cells = <1>; - #size-cells = <0>; - status = "disabled"; ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -2494,6 +2495,13 @@ static int mtk_hw_init(struct mtk_eth *e - if (ret) - goto err_disable_pm; - -+ if (of_dma_is_coherent(eth->dev->of_node)) { -+ u32 mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | -+ ETHSYS_DMA_AG_MAP_PPE; -+ -+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, mask, mask); -+ } -+ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - ret = device_reset(eth->dev); - if (ret) { -@@ -3104,6 +3112,16 @@ static int mtk_probe(struct platform_dev - } - } - -+ if (of_dma_is_coherent(pdev->dev.of_node)) { -+ struct regmap *cci; -+ -+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, -+ "mediatek,cci-control"); -+ /* enable CPU/bus coherency */ -+ if (!IS_ERR(cci)) -+ regmap_write(cci, 0, 3); -+ } -+ - if 
(MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { - eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), - GFP_KERNEL); ---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h -+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -456,6 +456,12 @@ - #define RSTCTRL_FE BIT(6) - #define RSTCTRL_PPE BIT(31) - -+/* ethernet dma channel agent map */ -+#define ETHSYS_DMA_AG_MAP 0x408 -+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) -+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) -+#define ETHSYS_DMA_AG_MAP_PPE BIT(2) -+ - /* SGMII subsystem config registers */ - /* Register to auto-negotiation restart */ - #define SGMSYS_PCS_CONTROL_1 0x0 diff --git a/target/linux/mediatek/patches-5.10/701-v5.17-net-ethernet-mtk_eth_soc-fix-return-values-and-refac.patch b/target/linux/mediatek/patches-5.10/701-v5.17-net-ethernet-mtk_eth_soc-fix-return-values-and-refac.patch index 56b293257e..be9dcfc3ed 100644 --- a/target/linux/mediatek/patches-5.10/701-v5.17-net-ethernet-mtk_eth_soc-fix-return-values-and-refac.patch +++ b/target/linux/mediatek/patches-5.10/701-v5.17-net-ethernet-mtk_eth_soc-fix-return-values-and-refac.patch @@ -22,7 +22,7 @@ Signed-off-by: David S. Miller --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -92,46 +92,53 @@ static int mtk_mdio_busy_wait(struct mtk +@@ -94,46 +94,53 @@ static int mtk_mdio_busy_wait(struct mtk } dev_err(eth->dev, "mdio: MDIO timeout\n"); @@ -103,7 +103,7 @@ Signed-off-by: David S. Miller static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -341,11 +341,17 @@ +@@ -344,11 +344,17 @@ /* PHY Indirect Access Control registers */ #define MTK_PHY_IAC 0x10004 #define PHY_IAC_ACCESS BIT(31) diff --git a/target/linux/mediatek/patches-5.10/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch b/target/linux/mediatek/patches-5.10/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch index 65e6bd2cbf..289398ce3a 100644 --- a/target/linux/mediatek/patches-5.10/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch +++ b/target/linux/mediatek/patches-5.10/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch @@ -20,7 +20,7 @@ Signed-off-by: David S. Miller --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -104,13 +104,35 @@ static int _mtk_mdio_write(struct mtk_et +@@ -106,13 +106,35 @@ static int _mtk_mdio_write(struct mtk_et if (ret < 0) return ret; @@ -63,7 +63,7 @@ Signed-off-by: David S. Miller ret = mtk_mdio_busy_wait(eth); if (ret < 0) -@@ -127,12 +149,33 @@ static int _mtk_mdio_read(struct mtk_eth +@@ -129,12 +151,33 @@ static int _mtk_mdio_read(struct mtk_eth if (ret < 0) return ret; @@ -103,7 +103,7 @@ Signed-off-by: David S. Miller ret = mtk_mdio_busy_wait(eth); if (ret < 0) -@@ -591,6 +634,7 @@ static int mtk_mdio_init(struct mtk_eth +@@ -593,6 +636,7 @@ static int mtk_mdio_init(struct mtk_eth eth->mii_bus->name = "mdio"; eth->mii_bus->read = mtk_mdio_read; eth->mii_bus->write = mtk_mdio_write; @@ -113,7 +113,7 @@ Signed-off-by: David S. 
Miller --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -346,9 +346,12 @@ +@@ -349,9 +349,12 @@ #define PHY_IAC_ADDR_MASK GENMASK(24, 20) #define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x)) #define PHY_IAC_CMD_MASK GENMASK(19, 18) diff --git a/target/linux/mediatek/patches-5.10/704-net-ethernet-mtk_eth_soc-announce-2500baseT.patch b/target/linux/mediatek/patches-5.10/704-net-ethernet-mtk_eth_soc-announce-2500baseT.patch index 3afbc01017..e9d4188a45 100644 --- a/target/linux/mediatek/patches-5.10/704-net-ethernet-mtk_eth_soc-announce-2500baseT.patch +++ b/target/linux/mediatek/patches-5.10/704-net-ethernet-mtk_eth_soc-announce-2500baseT.patch @@ -1,6 +1,6 @@ --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -575,6 +575,7 @@ static void mtk_validate(struct phylink_ +@@ -577,6 +577,7 @@ static void mtk_validate(struct phylink_ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); diff --git a/target/linux/mediatek/patches-5.10/710-pci-pcie-mediatek-add-support-for-coherent-DMA.patch b/target/linux/mediatek/patches-5.10/710-pci-pcie-mediatek-add-support-for-coherent-DMA.patch index ec5f13b4ea..32fc0a9ad5 100644 --- a/target/linux/mediatek/patches-5.10/710-pci-pcie-mediatek-add-support-for-coherent-DMA.patch +++ b/target/linux/mediatek/patches-5.10/710-pci-pcie-mediatek-add-support-for-coherent-DMA.patch @@ -44,18 +44,6 @@ Signed-off-by: Felix Fietkau slot1: pcie@1,0 { reg = <0x0800 0 0 0 0>; -@@ -933,6 +939,11 @@ - }; - }; - -+ hifsys: syscon@1af00000 { -+ compatible = "mediatek,mt7622-hifsys", "syscon"; -+ reg = <0 0x1af00000 0 0x70>; -+ }; -+ - ethsys: syscon@1b000000 { - compatible = "mediatek,mt7622-ethsys", - "syscon"; --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -20,6 +20,7 @@ diff --git a/target/linux/ramips/patches-5.10/700-net-ethernet-mediatek-support-net-labels.patch b/target/linux/ramips/patches-5.10/700-net-ethernet-mediatek-support-net-labels.patch index 9a4971d117..65b61e7720 100644 --- a/target/linux/ramips/patches-5.10/700-net-ethernet-mediatek-support-net-labels.patch +++ b/target/linux/ramips/patches-5.10/700-net-ethernet-mediatek-support-net-labels.patch @@ -14,7 +14,7 @@ Signed-off-by: René van Dorst --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2941,6 +2941,7 @@ static const struct net_device_ops mtk_n +@@ -2949,6 +2949,7 @@ static const struct net_device_ops mtk_n static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { @@ -22,7 +22,7 @@ Signed-off-by: René van Dorst const __be32 *_id = of_get_property(np, "reg", NULL); phy_interface_t phy_mode; struct phylink *phylink; -@@ -3036,6 +3037,9 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -3044,6 +3045,9 @@ static int mtk_add_mac(struct mtk_eth *e else eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;