From: Felix Fietkau
Date: Thu, 2 Feb 2023 10:08:09 +0000 (+0100)
Subject: mt76: update to the latest version, import WED related mtk_eth_soc patches
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=521efb62ebb55dcc67dc618757aa2e3ccbdf3774;p=openwrt%2Fstaging%2Fthess.git

mt76: update to the latest version, import WED related mtk_eth_soc patches

6c256218e59e wifi: mt76: dma: use napi_build_skb
679254c50f27 mt7915: add CONFIG_MT76_LEDS to cflags
15b9dd6b1b6a wifi: mt76: mt7915: call mt7915_mcu_set_thermal_throttling() only after init_work
8e5c21fe7c5c wifi: mt76: mt7915: rework mt7915_mcu_set_thermal_throttling
87cb74fe42d9 wifi: mt76: mt7915: rework mt7915_thermal_temp_store()
c6f24b83eba5 wifi: mt76: mt7915: add error message in mt7915_thermal_set_cur_throttle_state()
99e96b89ee4d wifi: mt76: mt7915: add chip id condition in mt7915_check_eeprom()
833cd420480f wifi: mt76: mt7921: fix channel switch fail in monitor mode
f1f8bae6092d wifi: mt76: mt7921: add ack signal support
f47087a6dd62 wifi: mt76: mt7996: fix chainmask calculation in mt7996_set_antenna()
2f3b0acc1588 wifi: mt76: mt7996: update register for CFEND_RATE
7e9540dcbd70 wifi: mt76: mt7996: do not hardcode vht beamform cap
a37e427d0959 wifi: mt76: connac: fix POWER_CTRL command name typo
98aa346042bd wifi: mt76: mt7915: remove BW160 and BW80+80 support
94fed6a43541 wifi: mt76: mt7921: fix invalid remain_on_channel duration
3c162384d80a wifi: mt76: introduce mt76_queue_is_wed_rx utility routine
a409a9454587 wifi: mt76: mt7915: fix memory leak in mt7915_mcu_exit
8b27ecd3a684 wifi: mt76: mt7996: fix memory leak in mt7996_mcu_exit
683760461dd0 wifi: mt76: dma: free rx_head in mt76_dma_rx_cleanup
0c750cf08f85 wifi: mt76: dma: fix memory leak running mt76_dma_tx_cleanup
5de9ae29bea2 wifi: mt76: mt7915: avoid mcu_restart function pointer
dad96dd3e62d wifi: mt76: mt7603: avoid mcu_restart function pointer
19d36dd9c8ea wifi: mt76: mt7615: avoid mcu_restart function pointer
6fe2c2383d3d wifi: mt76: mt7921: avoid mcu_restart function pointer
9df89143bf71 wifi: mt76: mt7915: get rid of wed rx_buf_ring page_frag_cache
8d51d11760cb wifi: mt76: fix switch default case in mt7996_reverse_frag0_hdr_trans
0d8057dbd51c wifi: mt76: mt7921u: add support for Comfast CF-952AX
ddbf4e933d54 wifi: mt76: mt7915: set sku initial value to zero
06a8904e954e wifi: mt76: mt7915: wed: enable red per-band token drop
724a337caef9 wifi: mt76: mt7915: fix WED TxS reporting
747ca943a5bb wifi: mt76: add flexible polling wait-interval support
133d7859977a wifi: mt76: mt7921: reduce polling time in pmctrl
5fe319a0550e wifi: mt76: add memory barrier to SDIO queue kick
822f060b9d19 wifi: mt76: mt7921: fix rx filter incorrect by drv/fw inconsistent
c6794954a723 wifi: mt76: mt7915: fix memory leak in mt7915_mmio_wed_init_rx_buf
9686cd7cc65c wifi: mt76: switch to page_pool allocator
04da4eaa8235 wifi: mt76: enable page_pool stats
1af4a911ebcb wifi: mt76: mt7915: release rxwi in mt7915_wed_release_rx_buf
e8c10835cf06 wifi: mt76: fix compile error without CONFIG_PAGE_POOL_STATS
0cf0ede7cc42 net: ethernet: mtk_wed: add reset to rx_ring_setup callback
715b3ed9708a net: ethernet: mtk_wed: add reset to tx_ring_setup callback
9107381d0ff3 wifi: mt76: mt7921: fix error code of return in mt7921_acpi_read
36d2a5bf7802 wifi: mt76: mt7996: rely on mt76_connac2_mac_tx_rate_val
c67f57d2cda2 wifi: mt76: dma: add reset to mt76_dma_wed_setup signature
3dace36e2941 wifi: mt76: dma: reset wed queues in mt76_dma_rx_reset
4b229d2da562 wifi: mt76: mt7915: add mt7915 wed reset callbacks
f83958376085 wifi: mt76: mt7915: complete wed reset support
321edbb414dc wifi: mt76: mt7996: rely on mt76_connac_txp_common structure
bdb7dc38a6d1 wifi: mt76: mt7996: rely on mt76_connac_txp_skb_unmap
8688756305c6 wifi: mt76: mt7996: rely on mt76_connac_tx_complete_skb
fbf986dbd4c0 wifi: mt76: mt7996: rely on mt76_connac2_mac_decode_he_radiotap
adc556cbce37 wifi: mt76: mt7996: avoid mcu_restart function pointer
5eb4e2303be4 wifi: mt76: remove __mt76_mcu_restart macro
e7a61c5f70f5 wifi: mt76: add EHT phy type
b375845abc10 wifi: mt76: connac: add CMD_CBW_320MHZ
68b17a243332 wifi: mt76: connac: add helpers for EHT capability
02ec1f61b3a2 wifi: mt76: connac: add cmd id related to EHT support
9209294cd81b wifi: mt76: increase wcid size to 1088
5e85136c9b2f wifi: mt76: add EHT rate stats for ethtool
a171f672fdeb wifi: mt76: mt7996: add variants support
eda8fd62c105 wifi: mt76: mt7996: add helpers for wtbl and interface limit
4a5a9f4cdc3b wifi: mt76: mt7996: rework capability init
06b73c155680 wifi: mt76: mt7996: add EHT capability init
ae71a1b8294f wifi: mt76: mt7996: add support for EHT rate report
65bdfae2991d wifi: mt76: mt7996: enable EHT support in firmware
b2360d59747c wifi: mt76: mt7996: add EHT beamforming support

Signed-off-by: Felix Fietkau
---

diff --git a/package/kernel/mt76/Makefile b/package/kernel/mt76/Makefile
index 6e852bc03f..03c59cf0c1 100644
--- a/package/kernel/mt76/Makefile
+++ b/package/kernel/mt76/Makefile
@@ -8,9 +8,9 @@ PKG_LICENSE_FILES:=
 
 PKG_SOURCE_URL:=https://github.com/openwrt/mt76
 PKG_SOURCE_PROTO:=git
-PKG_SOURCE_DATE:=2022-12-22
-PKG_SOURCE_VERSION:=5b509e80384ab019ac11aa90c81ec0dbb5b0d7f2
-PKG_MIRROR_HASH:=6fc25df4d28becd010ff4971b23731c08b53e69381a9e4c868091899712f78a9
+PKG_SOURCE_DATE:=2023-02-02
+PKG_SOURCE_VERSION:=b2360d59747c6fed2b65bc1c3563c10593c83f3e
+PKG_MIRROR_HASH:=a89353a4e42c3b44c5a9a6a5f2248c5e985f91620b08cbee7b1cbd0dd53865d7
 PKG_MAINTAINER:=Felix Fietkau
 PKG_USE_NINJA:=0
 
@@ -40,7 +40,8 @@ define KernelPackage/mt76-default
   SUBMENU:=Wireless Drivers
   DEPENDS:= \
 	+kmod-mac80211 \
-	+@DRIVER_11AC_SUPPORT
+	+@DRIVER_11AC_SUPPORT \
+	+@KERNEL_PAGE_POOL
 endef
 
 define KernelPackage/mt76
diff --git a/target/linux/generic/backport-5.15/729-01-v6.1-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch b/target/linux/generic/backport-5.15/729-01-v6.1-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch
new file mode 100644
index 0000000000..c48613929d
--- /dev/null
+++ b/target/linux/generic/backport-5.15/729-01-v6.1-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch
@@ -0,0 +1,591 @@
+From: Sujuan Chen
+Date: Sat, 5 Nov 2022 23:36:18 +0100
+Subject: [PATCH] net: ethernet: mtk_wed: introduce wed mcu support
+
+Introduce WED mcu support used to configure WED WO chip.
+This is a preliminary patch in order to add RX Wireless
+Ethernet Dispatch available on MT7986 SoC.
+
+Tested-by: Daniel Golle
+Co-developed-by: Lorenzo Bianconi
+Signed-off-by: Lorenzo Bianconi
+Signed-off-by: Sujuan Chen
+Signed-off-by: David S.
Miller +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h + +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -5,7 +5,7 @@ + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o + mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o + ifdef CONFIG_DEBUG_FS + mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o + endif +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +@@ -0,0 +1,359 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2022 MediaTek Inc. ++ * ++ * Author: Lorenzo Bianconi ++ * Sujuan Chen ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "mtk_wed_regs.h" ++#include "mtk_wed_wo.h" ++#include "mtk_wed.h" ++ ++static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg) ++{ ++ return readl(wo->boot.addr + reg); ++} ++ ++static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) ++{ ++ writel(val, wo->boot.addr + reg); ++} ++ ++static struct sk_buff * ++mtk_wed_mcu_msg_alloc(const void *data, int data_len) ++{ ++ int length = sizeof(struct mtk_wed_mcu_hdr) + data_len; ++ struct sk_buff *skb; ++ ++ skb = alloc_skb(length, GFP_KERNEL); ++ if (!skb) ++ return NULL; ++ ++ memset(skb->head, 0, length); ++ skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr)); ++ if (data && data_len) ++ skb_put_data(skb, data, data_len); ++ ++ return skb; ++} ++ ++static struct sk_buff * ++mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires) ++{ ++ if (!time_is_after_jiffies(expires)) ++ return NULL; ++ ++ wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q), ++ expires - jiffies); ++ return skb_dequeue(&wo->mcu.res_q); ++} ++ ++void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb) ++{ ++ skb_queue_tail(&wo->mcu.res_q, skb); ++ wake_up(&wo->mcu.wait); ++} ++ ++void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, ++ struct sk_buff *skb) ++{ ++ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; ++ ++ switch (hdr->cmd) { ++ case MTK_WED_WO_EVT_LOG_DUMP: { ++ const char *msg = (const char *)(skb->data + sizeof(*hdr)); ++ ++ dev_notice(wo->hw->dev, "%s\n", msg); ++ break; ++ } ++ case MTK_WED_WO_EVT_PROFILING: { ++ struct mtk_wed_wo_log_info *info; ++ u32 count = (skb->len - sizeof(*hdr)) / sizeof(*info); ++ int i; ++ ++ info = (struct mtk_wed_wo_log_info *)(skb->data + sizeof(*hdr)); ++ for (i = 0 ; i < count ; i++) ++ dev_notice(wo->hw->dev, ++ "SN:%u latency: total=%u, rro:%u, mod:%u\n", ++ le32_to_cpu(info[i].sn), ++ le32_to_cpu(info[i].total), ++ le32_to_cpu(info[i].rro), ++ le32_to_cpu(info[i].mod)); ++ break; ++ } ++ case MTK_WED_WO_EVT_RXCNT_INFO: ++ break; ++ default: ++ break; ++ } ++ ++ dev_kfree_skb(skb); ++} ++ ++static int ++mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb, ++ int id, int cmd, u16 *wait_seq, bool wait_resp) ++{ ++ struct mtk_wed_mcu_hdr *hdr; ++ ++ /* TODO: make it dynamic based on cmd */ ++ wo->mcu.timeout = 20 * HZ; ++ ++ hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr)); ++ hdr->cmd = cmd; ++ hdr->length = cpu_to_le16(skb->len); ++ ++ if (wait_resp && wait_seq) { ++ u16 seq = ++wo->mcu.seq; ++ ++ if (!seq) ++ seq = ++wo->mcu.seq; ++ *wait_seq = seq; ++ ++ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP); ++ hdr->seq = 
cpu_to_le16(seq); ++ } ++ if (id == MTK_WED_MODULE_ID_WO) ++ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO); ++ ++ dev_kfree_skb(skb); ++ return 0; ++} ++ ++static int ++mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb, ++ int cmd, int seq) ++{ ++ struct mtk_wed_mcu_hdr *hdr; ++ ++ if (!skb) { ++ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n", ++ cmd, seq); ++ return -ETIMEDOUT; ++ } ++ ++ hdr = (struct mtk_wed_mcu_hdr *)skb->data; ++ if (le16_to_cpu(hdr->seq) != seq) ++ return -EAGAIN; ++ ++ skb_pull(skb, sizeof(*hdr)); ++ switch (cmd) { ++ case MTK_WED_WO_CMD_RXCNT_INFO: ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, ++ const void *data, int len, bool wait_resp) ++{ ++ unsigned long expires; ++ struct sk_buff *skb; ++ u16 seq; ++ int ret; ++ ++ skb = mtk_wed_mcu_msg_alloc(data, len); ++ if (!skb) ++ return -ENOMEM; ++ ++ mutex_lock(&wo->mcu.mutex); ++ ++ ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp); ++ if (ret || !wait_resp) ++ goto unlock; ++ ++ expires = jiffies + wo->mcu.timeout; ++ do { ++ skb = mtk_wed_mcu_get_response(wo, expires); ++ ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq); ++ dev_kfree_skb(skb); ++ } while (ret == -EAGAIN); ++ ++unlock: ++ mutex_unlock(&wo->mcu.mutex); ++ ++ return ret; ++} ++ ++static int ++mtk_wed_get_memory_region(struct mtk_wed_wo *wo, ++ struct mtk_wed_wo_memory_region *region) ++{ ++ struct reserved_mem *rmem; ++ struct device_node *np; ++ int index; ++ ++ index = of_property_match_string(wo->hw->node, "memory-region-names", ++ region->name); ++ if (index < 0) ++ return index; ++ ++ np = of_parse_phandle(wo->hw->node, "memory-region", index); ++ if (!np) ++ return -ENODEV; ++ ++ rmem = of_reserved_mem_lookup(np); ++ of_node_put(np); ++ ++ if (!rmem) ++ return -ENODEV; ++ ++ region->phy_addr = rmem->base; ++ region->size = rmem->size; ++ region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size); ++ ++ return !region->addr ? 
-EINVAL : 0; ++} ++ ++static int ++mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, ++ struct mtk_wed_wo_memory_region *region) ++{ ++ const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data; ++ const struct mtk_wed_fw_trailer *trailer; ++ const struct mtk_wed_fw_region *fw_region; ++ ++ trailer_ptr = fw->data + fw->size - sizeof(*trailer); ++ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr; ++ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region); ++ first_region_ptr = region_ptr; ++ ++ while (region_ptr < trailer_ptr) { ++ u32 length; ++ ++ fw_region = (const struct mtk_wed_fw_region *)region_ptr; ++ length = le32_to_cpu(fw_region->len); ++ ++ if (region->phy_addr != le32_to_cpu(fw_region->addr)) ++ goto next; ++ ++ if (region->size < length) ++ goto next; ++ ++ if (first_region_ptr < ptr + length) ++ goto next; ++ ++ if (region->shared && region->consumed) ++ return 0; ++ ++ if (!region->shared || !region->consumed) { ++ memcpy_toio(region->addr, ptr, length); ++ region->consumed = true; ++ return 0; ++ } ++next: ++ region_ptr += sizeof(*fw_region); ++ ptr += length; ++ } ++ ++ return -EINVAL; ++} ++ ++static int ++mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) ++{ ++ static struct mtk_wed_wo_memory_region mem_region[] = { ++ [MTK_WED_WO_REGION_EMI] = { ++ .name = "wo-emi", ++ }, ++ [MTK_WED_WO_REGION_ILM] = { ++ .name = "wo-ilm", ++ }, ++ [MTK_WED_WO_REGION_DATA] = { ++ .name = "wo-data", ++ .shared = true, ++ }, ++ }; ++ const struct mtk_wed_fw_trailer *trailer; ++ const struct firmware *fw; ++ const char *fw_name; ++ u32 val, boot_cr; ++ int ret, i; ++ ++ /* load firmware region metadata */ ++ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { ++ ret = mtk_wed_get_memory_region(wo, &mem_region[i]); ++ if (ret) ++ return ret; ++ } ++ ++ wo->boot.name = "wo-boot"; ++ ret = mtk_wed_get_memory_region(wo, &wo->boot); ++ if (ret) ++ return ret; ++ ++ /* set dummy cr */ ++ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL, ++ wo->hw->index + 1); ++ ++ /* load firmware */ ++ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0; ++ ret = request_firmware(&fw, fw_name, wo->hw->dev); ++ if (ret) ++ return ret; ++ ++ trailer = (void *)(fw->data + fw->size - ++ sizeof(struct mtk_wed_fw_trailer)); ++ dev_info(wo->hw->dev, ++ "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n", ++ trailer->fw_ver, trailer->build_date); ++ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n", ++ trailer->chip_id, trailer->num_region); ++ ++ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { ++ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]); ++ if (ret) ++ goto out; ++ } ++ ++ /* set the start address */ ++ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR ++ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; ++ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); ++ /* wo firmware reset */ ++ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); ++ ++ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); ++ val |= wo->hw->index ? 
MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK ++ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; ++ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); ++out: ++ release_firmware(fw); ++ ++ return ret; ++} ++ ++static u32 ++mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo) ++{ ++ return wed_r32(wo->hw->wed_dev, ++ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL); ++} ++ ++int mtk_wed_mcu_init(struct mtk_wed_wo *wo) ++{ ++ u32 val; ++ int ret; ++ ++ skb_queue_head_init(&wo->mcu.res_q); ++ init_waitqueue_head(&wo->mcu.wait); ++ mutex_init(&wo->mcu.mutex); ++ ++ ret = mtk_wed_mcu_load_firmware(wo); ++ if (ret) ++ return ret; ++ ++ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val, ++ 100, MTK_FW_DL_TIMEOUT); ++} ++ ++MODULE_FIRMWARE(MT7986_FIRMWARE_WO0); ++MODULE_FIRMWARE(MT7986_FIRMWARE_WO1); +--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -152,6 +152,7 @@ struct mtk_wdma_desc { + + #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) + ++#define MTK_WED_SCR0 0x3c0 + #define MTK_WED_WPDMA_INT_TRIGGER 0x504 + #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) + #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h +@@ -0,0 +1,150 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (C) 2022 Lorenzo Bianconi */ ++ ++#ifndef __MTK_WED_WO_H ++#define __MTK_WED_WO_H ++ ++#include ++#include ++ ++struct mtk_wed_hw; ++ ++struct mtk_wed_mcu_hdr { ++ /* DW0 */ ++ u8 version; ++ u8 cmd; ++ __le16 length; ++ ++ /* DW1 */ ++ __le16 seq; ++ __le16 flag; ++ ++ /* DW2 */ ++ __le32 status; ++ ++ /* DW3 */ ++ u8 rsv[20]; ++}; ++ ++struct mtk_wed_wo_log_info { ++ __le32 sn; ++ __le32 total; ++ __le32 rro; ++ __le32 mod; ++}; ++ ++enum mtk_wed_wo_event { ++ MTK_WED_WO_EVT_LOG_DUMP = 0x1, ++ MTK_WED_WO_EVT_PROFILING = 0x2, ++ MTK_WED_WO_EVT_RXCNT_INFO = 0x3, ++}; ++ ++#define MTK_WED_MODULE_ID_WO 1 ++#define MTK_FW_DL_TIMEOUT 4000000 /* us */ ++#define MTK_WOCPU_TIMEOUT 2000000 /* us */ ++ ++enum { ++ MTK_WED_WARP_CMD_FLAG_RSP = BIT(0), ++ MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1), ++ MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2), ++}; ++ ++enum { ++ MTK_WED_WO_REGION_EMI, ++ MTK_WED_WO_REGION_ILM, ++ MTK_WED_WO_REGION_DATA, ++ MTK_WED_WO_REGION_BOOT, ++ __MTK_WED_WO_REGION_MAX, ++}; ++ ++enum mtk_wed_dummy_cr_idx { ++ MTK_WED_DUMMY_CR_FWDL, ++ MTK_WED_DUMMY_CR_WO_STATUS, ++}; ++ ++#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin" ++#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin" ++ ++#define MTK_WO_MCU_CFG_LS_BASE 0 ++#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000) ++#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004) ++#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c) ++#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010) ++#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014) ++#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018) ++#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c) ++#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050) ++#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060) ++#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064) ++ ++#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5) ++#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0) ++ ++struct mtk_wed_wo_memory_region { ++ const char *name; ++ void __iomem 
*addr; ++ phys_addr_t phy_addr; ++ u32 size; ++ bool shared:1; ++ bool consumed:1; ++}; ++ ++struct mtk_wed_fw_region { ++ __le32 decomp_crc; ++ __le32 decomp_len; ++ __le32 decomp_blk_sz; ++ u8 rsv0[4]; ++ __le32 addr; ++ __le32 len; ++ u8 feature_set; ++ u8 rsv1[15]; ++} __packed; ++ ++struct mtk_wed_fw_trailer { ++ u8 chip_id; ++ u8 eco_code; ++ u8 num_region; ++ u8 format_ver; ++ u8 format_flag; ++ u8 rsv[2]; ++ char fw_ver[10]; ++ char build_date[15]; ++ u32 crc; ++}; ++ ++struct mtk_wed_wo { ++ struct mtk_wed_hw *hw; ++ struct mtk_wed_wo_memory_region boot; ++ ++ struct { ++ struct mutex mutex; ++ int timeout; ++ u16 seq; ++ ++ struct sk_buff_head res_q; ++ wait_queue_head_t wait; ++ } mcu; ++}; ++ ++static inline int ++mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb) ++{ ++ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; ++ ++ if (hdr->version) ++ return -EINVAL; ++ ++ if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb); ++void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, ++ struct sk_buff *skb); ++int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, ++ const void *data, int len, bool wait_resp); ++int mtk_wed_mcu_init(struct mtk_wed_wo *wo); ++ ++#endif /* __MTK_WED_WO_H */ +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -11,6 +11,35 @@ + struct mtk_wed_hw; + struct mtk_wdma_desc; + ++enum mtk_wed_wo_cmd { ++ MTK_WED_WO_CMD_WED_CFG, ++ MTK_WED_WO_CMD_WED_RX_STAT, ++ MTK_WED_WO_CMD_RRO_SER, ++ MTK_WED_WO_CMD_DBG_INFO, ++ MTK_WED_WO_CMD_DEV_INFO, ++ MTK_WED_WO_CMD_BSS_INFO, ++ MTK_WED_WO_CMD_STA_REC, ++ MTK_WED_WO_CMD_DEV_INFO_DUMP, ++ MTK_WED_WO_CMD_BSS_INFO_DUMP, ++ MTK_WED_WO_CMD_STA_REC_DUMP, ++ MTK_WED_WO_CMD_BA_INFO_DUMP, ++ MTK_WED_WO_CMD_FBCMD_Q_DUMP, ++ MTK_WED_WO_CMD_FW_LOG_CTRL, ++ MTK_WED_WO_CMD_LOG_FLUSH, ++ MTK_WED_WO_CMD_CHANGE_STATE, ++ MTK_WED_WO_CMD_CPU_STATS_ENABLE, ++ MTK_WED_WO_CMD_CPU_STATS_DUMP, ++ MTK_WED_WO_CMD_EXCEPTION_INIT, ++ MTK_WED_WO_CMD_PROF_CTRL, ++ MTK_WED_WO_CMD_STA_BA_DUMP, ++ MTK_WED_WO_CMD_BA_CTRL_DUMP, ++ MTK_WED_WO_CMD_RXCNT_CTRL, ++ MTK_WED_WO_CMD_RXCNT_INFO, ++ MTK_WED_WO_CMD_SET_CAP, ++ MTK_WED_WO_CMD_CCIF_RING_DUMP, ++ MTK_WED_WO_CMD_WED_END ++}; ++ + enum mtk_wed_bus_tye { + MTK_WED_BUS_PCIE, + MTK_WED_BUS_AXI, diff --git a/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch b/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch new file mode 100644 index 0000000000..dbd7e30fbb --- /dev/null +++ b/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch @@ -0,0 +1,737 @@ +From: Lorenzo Bianconi +Date: Sat, 5 Nov 2022 23:36:19 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support + +Introduce WO chip support to mtk wed driver. MTK WED WO is used to +implement RX Wireless Ethernet Dispatch and offload traffic received by +wlan nic to the wired interface. + +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +--- + create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c + +--- a/drivers/net/ethernet/mediatek/Makefile ++++ b/drivers/net/ethernet/mediatek/Makefile +@@ -5,7 +5,7 @@ + + obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o + mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o ++mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o + ifdef CONFIG_DEBUG_FS + mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o + endif +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -16,6 +16,7 @@ + #include "mtk_wed_regs.h" + #include "mtk_wed.h" + #include "mtk_ppe.h" ++#include "mtk_wed_wo.h" + + #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) + +@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de + + mtk_wed_free_buffer(dev); + mtk_wed_free_tx_rings(dev); ++ if (hw->version != 1) ++ mtk_wed_wo_deinit(hw); + + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { + struct device_node *wlan_node; +@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de + } + + mtk_wed_hw_init_early(dev); +- if (hw->hifsys) ++ if (hw->version == 1) + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), 0); ++ else ++ ret = mtk_wed_wo_init(hw); + + out: + mutex_unlock(&hw_lock); +--- a/drivers/net/ethernet/mediatek/mtk_wed.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -10,6 +10,7 @@ + #include + + struct mtk_eth; ++struct mtk_wed_wo; + + struct mtk_wed_hw { + struct device_node *node; +@@ -22,6 +23,7 @@ struct mtk_wed_hw { + struct regmap *mirror; + struct dentry *debugfs_dir; + struct mtk_wed_device *wed_dev; ++ struct mtk_wed_wo *wed_wo; + u32 debugfs_reg; + u32 num_flows; + u8 version; +--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_ + if (id == MTK_WED_MODULE_ID_WO) + hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO); + +- dev_kfree_skb(skb); +- return 0; ++ return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb); + } + + static int +--- /dev/null ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c +@@ -0,0 +1,508 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (C) 2022 MediaTek Inc. 
++ * ++ * Author: Lorenzo Bianconi ++ * Sujuan Chen ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mtk_wed.h" ++#include "mtk_wed_regs.h" ++#include "mtk_wed_wo.h" ++ ++static u32 ++mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg) ++{ ++ u32 val; ++ ++ if (regmap_read(wo->mmio.regs, reg, &val)) ++ val = ~0; ++ ++ return val; ++} ++ ++static void ++mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) ++{ ++ regmap_write(wo->mmio.regs, reg, val); ++} ++ ++static u32 ++mtk_wed_wo_get_isr(struct mtk_wed_wo *wo) ++{ ++ u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM); ++ ++ return val & MTK_WED_WO_CCIF_RCHNUM_MASK; ++} ++ ++static void ++mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask) ++{ ++ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask); ++} ++ ++static void ++mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask) ++{ ++ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask); ++} ++ ++static void ++mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&wo->mmio.lock, flags); ++ wo->mmio.irq_mask &= ~mask; ++ wo->mmio.irq_mask |= val; ++ if (set) ++ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask); ++ spin_unlock_irqrestore(&wo->mmio.lock, flags); ++} ++ ++static void ++mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask) ++{ ++ mtk_wed_wo_set_isr_mask(wo, 0, mask, false); ++ tasklet_schedule(&wo->mmio.irq_tasklet); ++} ++ ++static void ++mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask) ++{ ++ mtk_wed_wo_set_isr_mask(wo, mask, 0, true); ++} ++ ++static void ++mtk_wed_wo_kickout(struct mtk_wed_wo *wo) ++{ ++ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM); ++ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM); ++} ++ ++static void ++mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, ++ u32 val) ++{ ++ wmb(); ++ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val); ++} ++ ++static void * ++mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len, ++ bool flush) ++{ ++ int buf_len = SKB_WITH_OVERHEAD(q->buf_size); ++ int index = (q->tail + 1) % q->n_desc; ++ struct mtk_wed_wo_queue_entry *entry; ++ struct mtk_wed_wo_queue_desc *desc; ++ void *buf; ++ ++ if (!q->queued) ++ return NULL; ++ ++ if (flush) ++ q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE); ++ else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE))) ++ return NULL; ++ ++ q->tail = index; ++ q->queued--; ++ ++ desc = &q->desc[index]; ++ entry = &q->entry[index]; ++ buf = entry->buf; ++ if (len) ++ *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, ++ le32_to_cpu(READ_ONCE(desc->ctrl))); ++ if (buf) ++ dma_unmap_single(wo->hw->dev, entry->addr, buf_len, ++ DMA_FROM_DEVICE); ++ entry->buf = NULL; ++ ++ return buf; ++} ++ ++static int ++mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, ++ gfp_t gfp, bool rx) ++{ ++ enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ int n_buf = 0; ++ ++ spin_lock_bh(&q->lock); ++ while (q->queued < q->n_desc) { ++ void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp); ++ struct mtk_wed_wo_queue_entry *entry; ++ dma_addr_t addr; ++ ++ if (!buf) ++ break; ++ ++ addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir); ++ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) { ++ skb_free_frag(buf); ++ break; ++ } ++ ++ q->head = (q->head + 1) % q->n_desc; ++ entry = &q->entry[q->head]; ++ entry->addr = addr; ++ entry->len = q->buf_size; ++ q->entry[q->head].buf = buf; ++ ++ if (rx) { ++ struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head]; ++ u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 | ++ FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, ++ entry->len); ++ ++ WRITE_ONCE(desc->buf0, cpu_to_le32(addr)); ++ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); ++ } ++ q->queued++; ++ n_buf++; ++ } ++ spin_unlock_bh(&q->lock); ++ ++ return n_buf; ++} ++ ++static void ++mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo) ++{ ++ mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK); ++ mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK); ++} ++ ++static void ++mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) ++{ ++ for (;;) { ++ struct mtk_wed_mcu_hdr *hdr; ++ struct sk_buff *skb; ++ void *data; ++ u32 len; ++ ++ data = mtk_wed_wo_dequeue(wo, q, &len, false); ++ if (!data) ++ break; ++ ++ skb = build_skb(data, q->buf_size); ++ if (!skb) { ++ skb_free_frag(data); ++ continue; ++ } ++ ++ __skb_put(skb, len); ++ if (mtk_wed_mcu_check_msg(wo, skb)) { ++ dev_kfree_skb(skb); ++ continue; ++ } ++ ++ hdr = (struct mtk_wed_mcu_hdr *)skb->data; ++ if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP)) ++ mtk_wed_mcu_rx_event(wo, skb); ++ else ++ mtk_wed_mcu_rx_unsolicited_event(wo, skb); ++ } ++ ++ if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) { ++ u32 index = (q->head - 1) % q->n_desc; ++ ++ mtk_wed_wo_queue_kick(wo, q, index); ++ } ++} ++ ++static irqreturn_t ++mtk_wed_wo_irq_handler(int irq, void *data) ++{ ++ struct mtk_wed_wo *wo = data; ++ ++ mtk_wed_wo_set_isr(wo, 0); ++ tasklet_schedule(&wo->mmio.irq_tasklet); ++ ++ return IRQ_HANDLED; ++} ++ ++static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t) ++{ ++ struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet); ++ u32 intr, mask; ++ ++ /* disable interrupts */ ++ mtk_wed_wo_set_isr(wo, 0); ++ ++ intr = mtk_wed_wo_get_isr(wo); ++ intr &= wo->mmio.irq_mask; ++ mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK); ++ mtk_wed_wo_irq_disable(wo, mask); ++ ++ if (intr & MTK_WED_WO_RXCH_INT_MASK) { ++ mtk_wed_wo_rx_run_queue(wo, &wo->q_rx); ++ mtk_wed_wo_rx_complete(wo); ++ } ++} ++ ++/* mtk wed wo hw queues */ ++ ++static int ++mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, ++ int n_desc, int buf_size, int index, ++ struct mtk_wed_wo_queue_regs *regs) ++{ ++ spin_lock_init(&q->lock); ++ q->regs = *regs; ++ q->n_desc = n_desc; ++ q->buf_size = buf_size; ++ ++ q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc), ++ &q->desc_dma, GFP_KERNEL); ++ if (!q->desc) ++ return -ENOMEM; ++ ++ q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry), ++ GFP_KERNEL); ++ if (!q->entry) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void ++mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) ++{ ++ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); ++ dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc, ++ q->desc_dma); ++} ++ ++static void 
++mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) ++{ ++ struct page *page; ++ int i; ++ ++ spin_lock_bh(&q->lock); ++ for (i = 0; i < q->n_desc; i++) { ++ struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; ++ ++ dma_unmap_single(wo->hw->dev, entry->addr, entry->len, ++ DMA_TO_DEVICE); ++ skb_free_frag(entry->buf); ++ entry->buf = NULL; ++ } ++ spin_unlock_bh(&q->lock); ++ ++ if (!q->cache.va) ++ return; ++ ++ page = virt_to_page(q->cache.va); ++ __page_frag_cache_drain(page, q->cache.pagecnt_bias); ++ memset(&q->cache, 0, sizeof(q->cache)); ++} ++ ++static void ++mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) ++{ ++ struct page *page; ++ ++ spin_lock_bh(&q->lock); ++ for (;;) { ++ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); ++ ++ if (!buf) ++ break; ++ ++ skb_free_frag(buf); ++ } ++ spin_unlock_bh(&q->lock); ++ ++ if (!q->cache.va) ++ return; ++ ++ page = virt_to_page(q->cache.va); ++ __page_frag_cache_drain(page, q->cache.pagecnt_bias); ++ memset(&q->cache, 0, sizeof(q->cache)); ++} ++ ++static void ++mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) ++{ ++ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); ++ mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma); ++ mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc); ++} ++ ++int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, ++ struct sk_buff *skb) ++{ ++ struct mtk_wed_wo_queue_entry *entry; ++ struct mtk_wed_wo_queue_desc *desc; ++ int ret = 0, index; ++ u32 ctrl; ++ ++ spin_lock_bh(&q->lock); ++ ++ q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); ++ index = (q->head + 1) % q->n_desc; ++ if (q->tail == index) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ entry = &q->entry[index]; ++ if (skb->len > entry->len) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ desc = &q->desc[index]; ++ q->head = index; ++ ++ dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len, ++ DMA_TO_DEVICE); ++ memcpy(entry->buf, skb->data, skb->len); ++ dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len, ++ DMA_TO_DEVICE); ++ ++ ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) | ++ MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE; ++ WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr)); ++ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); ++ ++ mtk_wed_wo_queue_kick(wo, q, q->head); ++ mtk_wed_wo_kickout(wo); ++out: ++ spin_unlock_bh(&q->lock); ++ ++ dev_kfree_skb(skb); ++ ++ return ret; ++} ++ ++static int ++mtk_wed_wo_exception_init(struct mtk_wed_wo *wo) ++{ ++ return 0; ++} ++ ++static int ++mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo) ++{ ++ struct mtk_wed_wo_queue_regs regs; ++ struct device_node *np; ++ int ret; ++ ++ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0); ++ if (!np) ++ return -ENODEV; ++ ++ wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL); ++ if (IS_ERR_OR_NULL(wo->mmio.regs)) ++ return PTR_ERR(wo->mmio.regs); ++ ++ wo->mmio.irq = irq_of_parse_and_map(np, 0); ++ wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK; ++ spin_lock_init(&wo->mmio.lock); ++ tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet); ++ ++ ret = devm_request_irq(wo->hw->dev, wo->mmio.irq, ++ mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH, ++ KBUILD_MODNAME, wo); ++ if (ret) ++ goto error; ++ ++ regs.desc_base = MTK_WED_WO_CCIF_DUMMY1; ++ regs.ring_size = MTK_WED_WO_CCIF_DUMMY2; ++ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4; ++ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3; ++ ++ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE, ++ 
MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM, ++ ®s); ++ if (ret) ++ goto error; ++ ++ mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false); ++ mtk_wed_wo_queue_reset(wo, &wo->q_tx); ++ ++ regs.desc_base = MTK_WED_WO_CCIF_DUMMY5; ++ regs.ring_size = MTK_WED_WO_CCIF_DUMMY6; ++ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8; ++ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7; ++ ++ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE, ++ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM, ++ ®s); ++ if (ret) ++ goto error; ++ ++ mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true); ++ mtk_wed_wo_queue_reset(wo, &wo->q_rx); ++ ++ /* rx queue irqmask */ ++ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask); ++ ++ return 0; ++ ++error: ++ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo); ++ ++ return ret; ++} ++ ++static void ++mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo) ++{ ++ /* disable interrupts */ ++ mtk_wed_wo_set_isr(wo, 0); ++ ++ tasklet_disable(&wo->mmio.irq_tasklet); ++ ++ disable_irq(wo->mmio.irq); ++ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo); ++ ++ mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx); ++ mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx); ++ mtk_wed_wo_queue_free(wo, &wo->q_tx); ++ mtk_wed_wo_queue_free(wo, &wo->q_rx); ++} ++ ++int mtk_wed_wo_init(struct mtk_wed_hw *hw) ++{ ++ struct mtk_wed_wo *wo; ++ int ret; ++ ++ wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL); ++ if (!wo) ++ return -ENOMEM; ++ ++ hw->wed_wo = wo; ++ wo->hw = hw; ++ ++ ret = mtk_wed_wo_hardware_init(wo); ++ if (ret) ++ return ret; ++ ++ ret = mtk_wed_mcu_init(wo); ++ if (ret) ++ return ret; ++ ++ return mtk_wed_wo_exception_init(wo); ++} ++ ++void mtk_wed_wo_deinit(struct mtk_wed_hw *hw) ++{ ++ struct mtk_wed_wo *wo = hw->wed_wo; ++ ++ mtk_wed_wo_hw_deinit(wo); ++} +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h +@@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx { + #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5) + #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0) + ++#define MTK_WED_WO_RING_SIZE 256 ++#define MTK_WED_WO_CMD_LEN 1504 ++ ++#define MTK_WED_WO_TXCH_NUM 0 ++#define MTK_WED_WO_RXCH_NUM 1 ++#define MTK_WED_WO_RXCH_WO_EXCEPTION 7 ++ ++#define MTK_WED_WO_TXCH_INT_MASK BIT(0) ++#define MTK_WED_WO_RXCH_INT_MASK BIT(1) ++#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7) ++#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \ ++ MTK_WED_WO_EXCEPTION_INT_MASK) ++ ++#define MTK_WED_WO_CCIF_BUSY 0x004 ++#define MTK_WED_WO_CCIF_START 0x008 ++#define MTK_WED_WO_CCIF_TCHNUM 0x00c ++#define MTK_WED_WO_CCIF_RCHNUM 0x010 ++#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0) ++ ++#define MTK_WED_WO_CCIF_ACK 0x014 ++#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018 ++#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c ++#define MTK_WED_WO_CCIF_DUMMY1 0x020 ++#define MTK_WED_WO_CCIF_DUMMY2 0x024 ++#define MTK_WED_WO_CCIF_DUMMY3 0x028 ++#define MTK_WED_WO_CCIF_DUMMY4 0x02c ++#define MTK_WED_WO_CCIF_SHADOW1 0x030 ++#define MTK_WED_WO_CCIF_SHADOW2 0x034 ++#define MTK_WED_WO_CCIF_SHADOW3 0x038 ++#define MTK_WED_WO_CCIF_SHADOW4 0x03c ++#define MTK_WED_WO_CCIF_DUMMY5 0x050 ++#define MTK_WED_WO_CCIF_DUMMY6 0x054 ++#define MTK_WED_WO_CCIF_DUMMY7 0x058 ++#define MTK_WED_WO_CCIF_DUMMY8 0x05c ++#define MTK_WED_WO_CCIF_SHADOW5 0x060 ++#define MTK_WED_WO_CCIF_SHADOW6 0x064 ++#define MTK_WED_WO_CCIF_SHADOW7 0x068 ++#define MTK_WED_WO_CCIF_SHADOW8 0x06c ++ ++#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0) ++#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14) ++#define MTK_WED_WO_CTL_BURST BIT(15) ++#define 
MTK_WED_WO_CTL_SD_LEN0_SHIFT 16 ++#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16) ++#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30) ++#define MTK_WED_WO_CTL_DMA_DONE BIT(31) ++#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0) ++ + struct mtk_wed_wo_memory_region { + const char *name; + void __iomem *addr; +@@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer { + u32 crc; + }; + ++struct mtk_wed_wo_queue_regs { ++ u32 desc_base; ++ u32 ring_size; ++ u32 cpu_idx; ++ u32 dma_idx; ++}; ++ ++struct mtk_wed_wo_queue_desc { ++ __le32 buf0; ++ __le32 ctrl; ++ __le32 buf1; ++ __le32 info; ++ __le32 reserved[4]; ++} __packed __aligned(32); ++ ++struct mtk_wed_wo_queue_entry { ++ dma_addr_t addr; ++ void *buf; ++ u32 len; ++}; ++ ++struct mtk_wed_wo_queue { ++ struct mtk_wed_wo_queue_regs regs; ++ ++ struct page_frag_cache cache; ++ spinlock_t lock; ++ ++ struct mtk_wed_wo_queue_desc *desc; ++ dma_addr_t desc_dma; ++ ++ struct mtk_wed_wo_queue_entry *entry; ++ ++ u16 head; ++ u16 tail; ++ int n_desc; ++ int queued; ++ int buf_size; ++ ++}; ++ + struct mtk_wed_wo { + struct mtk_wed_hw *hw; + struct mtk_wed_wo_memory_region boot; + ++ struct mtk_wed_wo_queue q_tx; ++ struct mtk_wed_wo_queue q_rx; ++ + struct { + struct mutex mutex; + int timeout; +@@ -124,6 +215,15 @@ struct mtk_wed_wo { + struct sk_buff_head res_q; + wait_queue_head_t wait; + } mcu; ++ ++ struct { ++ struct regmap *regs; ++ ++ spinlock_t lock; ++ struct tasklet_struct irq_tasklet; ++ int irq; ++ u32 irq_mask; ++ } mmio; + }; + + static inline int +@@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st + int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, + const void *data, int len, bool wait_resp); + int mtk_wed_mcu_init(struct mtk_wed_wo *wo); ++int mtk_wed_wo_init(struct mtk_wed_hw *hw); ++void mtk_wed_wo_deinit(struct mtk_wed_hw *hw); ++int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q, ++ struct sk_buff *skb); + + #endif /* __MTK_WED_WO_H */ diff --git a/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch b/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch new file mode 100644 index 0000000000..ffd6bc3589 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch @@ -0,0 +1,79 @@ +From: Lorenzo Bianconi +Date: Sat, 5 Nov 2022 23:36:20 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: rename tx_wdma array in rx_wdma + +Rename tx_wdma queue array in rx_wdma since this is rx side of wdma soc. +Moreover rename mtk_wed_wdma_ring_setup routine in +mtk_wed_wdma_rx_ring_setup() + +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -253,8 +253,8 @@ mtk_wed_free_tx_rings(struct mtk_wed_dev + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) + mtk_wed_free_ring(dev, &dev->tx_ring[i]); +- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) +- mtk_wed_free_ring(dev, &dev->tx_wdma[i]); ++ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) ++ mtk_wed_free_ring(dev, &dev->rx_wdma[i]); + } + + static void +@@ -695,10 +695,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device + } + + static int +-mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) + { + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; +- struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; ++ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx]; + + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) + return -ENOMEM; +@@ -812,9 +812,9 @@ mtk_wed_start(struct mtk_wed_device *dev + { + int i; + +- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) +- if (!dev->tx_wdma[i].desc) +- mtk_wed_wdma_ring_setup(dev, i, 16); ++ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) ++ if (!dev->rx_wdma[i].desc) ++ mtk_wed_wdma_rx_ring_setup(dev, i, 16); + + mtk_wed_hw_init(dev); + mtk_wed_configure_irq(dev, irq_mask); +@@ -923,7 +923,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev + sizeof(*ring->desc))) + return -ENOMEM; + +- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_TX(idx); +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -7,6 +7,7 @@ + #include + + #define MTK_WED_TX_QUEUES 2 ++#define MTK_WED_RX_QUEUES 2 + + struct mtk_wed_hw; + struct mtk_wdma_desc; +@@ -66,7 +67,7 @@ struct mtk_wed_device { + + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring txfree_ring; +- struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; ++ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; + + struct { + int size; diff --git a/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch b/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch new file mode 100644 index 0000000000..4c34d0cb33 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch @@ -0,0 +1,1521 @@ +From: Lorenzo Bianconi +Date: Sat, 5 Nov 2022 23:36:21 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add configure wed wo support + +Enable RX Wireless Ethernet Dispatch available on MT7986 Soc. + +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -23,6 +24,7 @@ + #define MTK_WED_PKT_SIZE 1900 + #define MTK_WED_BUF_SIZE 2048 + #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) ++#define MTK_WED_RX_RING_SIZE 1536 + + #define MTK_WED_TX_RING_SIZE 2048 + #define MTK_WED_WDMA_RING_SIZE 1024 +@@ -31,6 +33,10 @@ + #define MTK_WED_PER_GROUP_PKT 128 + + #define MTK_WED_FBUF_SIZE 128 ++#define MTK_WED_MIOD_CNT 16 ++#define MTK_WED_FB_CMD_CNT 1024 ++#define MTK_WED_RRO_QUE_CNT 8192 ++#define MTK_WED_MIOD_ENTRY_CNT 128 + + static struct mtk_wed_hw *hw_list[2]; + static DEFINE_MUTEX(hw_lock); +@@ -65,12 +71,76 @@ wdma_set(struct mtk_wed_device *dev, u32 + wdma_m32(dev, reg, 0, mask); + } + ++static void ++wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) ++{ ++ wdma_m32(dev, reg, mask, 0); ++} ++ ++static u32 ++wifi_r32(struct mtk_wed_device *dev, u32 reg) ++{ ++ return readl(dev->wlan.base + reg); ++} ++ ++static void ++wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) ++{ ++ writel(val, dev->wlan.base + reg); ++} ++ + static u32 + mtk_wed_read_reset(struct mtk_wed_device *dev) + { + return wed_r32(dev, MTK_WED_RESET); + } + ++static u32 ++mtk_wdma_read_reset(struct mtk_wed_device *dev) ++{ ++ return wdma_r32(dev, MTK_WDMA_GLO_CFG); ++} ++ ++static void ++mtk_wdma_rx_reset(struct mtk_wed_device *dev) ++{ ++ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; ++ int i; ++ ++ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); ++ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, ++ !(status & mask), 0, 1000)) ++ dev_err(dev->hw->dev, "rx reset failed\n"); ++ ++ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { ++ if (dev->rx_wdma[i].desc) ++ continue; ++ ++ wdma_w32(dev, ++ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ } ++} ++ ++static void ++mtk_wdma_tx_reset(struct mtk_wed_device *dev) ++{ ++ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; ++ int i; ++ ++ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); ++ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, ++ !(status & mask), 0, 1000)) ++ dev_err(dev->hw->dev, "tx reset failed\n"); ++ ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) { ++ if (dev->tx_wdma[i].desc) ++ continue; ++ ++ wdma_w32(dev, ++ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ } ++} ++ + static void + mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) + { +@@ -82,6 +152,54 @@ mtk_wed_reset(struct mtk_wed_device *dev + WARN_ON_ONCE(1); + } + ++static u32 ++mtk_wed_wo_read_status(struct mtk_wed_device *dev) ++{ ++ return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); ++} ++ ++static void ++mtk_wed_wo_reset(struct mtk_wed_device *dev) ++{ ++ struct mtk_wed_wo *wo = dev->hw->wed_wo; ++ u8 state = MTK_WED_WO_STATE_DISABLE; ++ void __iomem *reg; ++ u32 val; ++ ++ mtk_wdma_tx_reset(dev); ++ mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ ++ mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, ++ MTK_WED_WO_CMD_CHANGE_STATE, &state, ++ sizeof(state), false); ++ ++ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, ++ val == MTK_WED_WOIF_DISABLE_DONE, ++ 100, MTK_WOCPU_TIMEOUT)) ++ dev_err(dev->hw->dev, "failed to disable wed-wo\n"); ++ ++ reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); ++ ++ val = readl(reg); ++ switch (dev->hw->index) { ++ case 0: ++ val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; ++ writel(val, reg); ++ val &= 
~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; ++ writel(val, reg); ++ break; ++ case 1: ++ val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; ++ writel(val, reg); ++ val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; ++ writel(val, reg); ++ break; ++ default: ++ break; ++ } ++ iounmap(reg); ++} ++ + static struct mtk_wed_hw * + mtk_wed_assign(struct mtk_wed_device *dev) + { +@@ -116,7 +234,7 @@ out: + } + + static int +-mtk_wed_buffer_alloc(struct mtk_wed_device *dev) ++mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) + { + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; +@@ -133,16 +251,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi + if (!page_list) + return -ENOMEM; + +- dev->buf_ring.size = ring_size; +- dev->buf_ring.pages = page_list; ++ dev->tx_buf_ring.size = ring_size; ++ dev->tx_buf_ring.pages = page_list; + + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + +- dev->buf_ring.desc = desc; +- dev->buf_ring.desc_phys = desc_phys; ++ dev->tx_buf_ring.desc = desc; ++ dev->tx_buf_ring.desc_phys = desc_phys; + + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; +@@ -203,10 +321,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi + } + + static void +-mtk_wed_free_buffer(struct mtk_wed_device *dev) ++mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) + { +- struct mtk_wdma_desc *desc = dev->buf_ring.desc; +- void **page_list = dev->buf_ring.pages; ++ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; ++ void **page_list = dev->tx_buf_ring.pages; + int page_idx; + int i; + +@@ -216,7 +334,8 @@ mtk_wed_free_buffer(struct mtk_wed_devic + if (!desc) + goto free_pagelist; + +- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { ++ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; ++ i += MTK_WED_BUF_PER_PAGE) { + void *page = page_list[page_idx++]; + dma_addr_t buf_addr; + +@@ -229,13 +348,59 @@ mtk_wed_free_buffer(struct mtk_wed_devic + __free_page(page); + } + +- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), +- desc, dev->buf_ring.desc_phys); ++ dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), ++ desc, dev->tx_buf_ring.desc_phys); + + free_pagelist: + kfree(page_list); + } + ++static int ++mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) ++{ ++ struct mtk_rxbm_desc *desc; ++ dma_addr_t desc_phys; ++ ++ dev->rx_buf_ring.size = dev->wlan.rx_nbuf; ++ desc = dma_alloc_coherent(dev->hw->dev, ++ dev->wlan.rx_nbuf * sizeof(*desc), ++ &desc_phys, GFP_KERNEL); ++ if (!desc) ++ return -ENOMEM; ++ ++ dev->rx_buf_ring.desc = desc; ++ dev->rx_buf_ring.desc_phys = desc_phys; ++ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); ++ ++ return 0; ++} ++ ++static void ++mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) ++{ ++ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; ++ ++ if (!desc) ++ return; ++ ++ dev->wlan.release_rx_buf(dev); ++ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), ++ desc, dev->rx_buf_ring.desc_phys); ++} ++ ++static void ++mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) ++{ ++ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, ++ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); ++ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); ++ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | ++ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); ++ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, ++ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); ++ 
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); ++} ++ + static void + mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) + { +@@ -247,6 +412,13 @@ mtk_wed_free_ring(struct mtk_wed_device + } + + static void ++mtk_wed_free_rx_rings(struct mtk_wed_device *dev) ++{ ++ mtk_wed_free_rx_buffer(dev); ++ mtk_wed_free_ring(dev, &dev->rro.ring); ++} ++ ++static void + mtk_wed_free_tx_rings(struct mtk_wed_device *dev) + { + int i; +@@ -291,6 +463,38 @@ mtk_wed_set_512_support(struct mtk_wed_d + } + } + ++#define MTK_WFMDA_RX_DMA_EN BIT(2) ++static void ++mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) ++{ ++ u32 val; ++ int i; ++ ++ if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) ++ return; /* queue is not configured by mt76 */ ++ ++ for (i = 0; i < 3; i++) { ++ u32 cur_idx; ++ ++ cur_idx = wed_r32(dev, ++ MTK_WED_WPDMA_RING_RX_DATA(idx) + ++ MTK_WED_RING_OFS_CPU_IDX); ++ if (cur_idx == MTK_WED_RX_RING_SIZE - 1) ++ break; ++ ++ usleep_range(100000, 200000); ++ } ++ ++ if (i == 3) { ++ dev_err(dev->hw->dev, "rx dma enable failed\n"); ++ return; ++ } ++ ++ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | ++ MTK_WFMDA_RX_DMA_EN; ++ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); ++} ++ + static void + mtk_wed_dma_disable(struct mtk_wed_device *dev) + { +@@ -304,20 +508,25 @@ mtk_wed_dma_disable(struct mtk_wed_devic + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + +- wdma_m32(dev, MTK_WDMA_GLO_CFG, ++ wdma_clr(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | +- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); ++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); + + if (dev->hw->version == 1) { + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); +- wdma_m32(dev, MTK_WDMA_GLO_CFG, +- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); ++ wdma_clr(dev, MTK_WDMA_GLO_CFG, ++ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); + } else { + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + ++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, ++ MTK_WED_WPDMA_RX_D_RX_DRV_EN); ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); ++ + mtk_wed_set_512_support(dev, false); + } + } +@@ -338,6 +547,13 @@ mtk_wed_stop(struct mtk_wed_device *dev) + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); ++ ++ if (dev->hw->version == 1) ++ return; ++ ++ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); ++ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); + } + + static void +@@ -353,11 +569,21 @@ mtk_wed_detach(struct mtk_wed_device *de + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + mtk_wed_reset(dev, MTK_WED_RESET_WED); ++ if (mtk_wed_get_rx_capa(dev)) { ++ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ } + +- mtk_wed_free_buffer(dev); ++ mtk_wed_free_tx_buffer(dev); + mtk_wed_free_tx_rings(dev); +- if (hw->version != 1) ++ ++ if (mtk_wed_get_rx_capa(dev)) { ++ mtk_wed_wo_reset(dev); ++ mtk_wed_free_rx_rings(dev); + mtk_wed_wo_deinit(hw); ++ mtk_wdma_rx_reset(dev); ++ } + + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { + struct device_node *wlan_node; +@@ -441,10 +667,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device + } else { + mtk_wed_bus_init(dev); + +- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); +- 
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); +- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); +- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); ++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); ++ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); ++ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); + } + } + +@@ -494,6 +722,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev + } + } + ++static int ++mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, ++ int size) ++{ ++ ring->desc = dma_alloc_coherent(dev->hw->dev, ++ size * sizeof(*ring->desc), ++ &ring->desc_phys, GFP_KERNEL); ++ if (!ring->desc) ++ return -ENOMEM; ++ ++ ring->desc_size = sizeof(*ring->desc); ++ ring->size = size; ++ memset(ring->desc, 0, size); ++ ++ return 0; ++} ++ ++#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) ++static int ++mtk_wed_rro_alloc(struct mtk_wed_device *dev) ++{ ++ struct reserved_mem *rmem; ++ struct device_node *np; ++ int index; ++ ++ index = of_property_match_string(dev->hw->node, "memory-region-names", ++ "wo-dlm"); ++ if (index < 0) ++ return index; ++ ++ np = of_parse_phandle(dev->hw->node, "memory-region", index); ++ if (!np) ++ return -ENODEV; ++ ++ rmem = of_reserved_mem_lookup(np); ++ of_node_put(np); ++ ++ if (!rmem) ++ return -ENODEV; ++ ++ dev->rro.miod_phys = rmem->base; ++ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; ++ ++ return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, ++ MTK_WED_RRO_QUE_CNT); ++} ++ ++static int ++mtk_wed_rro_cfg(struct mtk_wed_device *dev) ++{ ++ struct mtk_wed_wo *wo = dev->hw->wed_wo; ++ struct { ++ struct { ++ __le32 base; ++ __le32 cnt; ++ __le32 unit; ++ } ring[2]; ++ __le32 wed; ++ u8 version; ++ } req = { ++ .ring[0] = { ++ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), ++ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), ++ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), ++ }, ++ .ring[1] = { ++ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + ++ MTK_WED_MIOD_COUNT), ++ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), ++ .unit = cpu_to_le32(4), ++ }, ++ }; ++ ++ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, ++ MTK_WED_WO_CMD_WED_CFG, ++ &req, sizeof(req), true); ++} ++ ++static void ++mtk_wed_rro_hw_init(struct mtk_wed_device *dev) ++{ ++ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, ++ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | ++ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | ++ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, ++ MTK_WED_MIOD_ENTRY_CNT >> 2)); ++ ++ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); ++ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, ++ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); ++ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); ++ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, ++ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); ++ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); ++ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); ++ ++ wed_set(dev, MTK_WED_RROQM_RST_IDX, ++ MTK_WED_RROQM_RST_IDX_MIOD | ++ MTK_WED_RROQM_RST_IDX_FDBK); ++ ++ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); ++ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); ++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); ++} ++ ++static void ++mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) ++{ 
++ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); ++ ++ for (;;) { ++ usleep_range(100, 200); ++ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) ++ break; ++ } ++ ++ /* configure RX_ROUTE_QM */ ++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); ++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); ++ wed_set(dev, MTK_WED_RTQM_GLO_CFG, ++ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); ++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); ++ /* enable RX_ROUTE_QM */ ++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); ++} ++ + static void + mtk_wed_hw_init(struct mtk_wed_device *dev) + { +@@ -505,11 +859,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, +- dev->buf_ring.size / 128) | ++ dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + +- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); ++ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); + + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); + +@@ -536,9 +890,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d + wed_w32(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, +- dev->buf_ring.size / 128) | ++ dev->tx_buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, +- dev->buf_ring.size / 128)); ++ dev->tx_buf_ring.size / 128)); + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | + MTK_WED_TX_TKID_DYN_THR_HI); +@@ -546,18 +900,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d + + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + +- if (dev->hw->version == 1) ++ if (dev->hw->version == 1) { + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); +- else ++ } else { + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); ++ /* rx hw init */ ++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, ++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX | ++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX); ++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); ++ ++ mtk_wed_rx_buffer_hw_init(dev); ++ mtk_wed_rro_hw_init(dev); ++ mtk_wed_route_qm_hw_init(dev); ++ } + + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); + } + + static void +-mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) ++mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) + { + void *head = (void *)ring->desc; + int i; +@@ -567,7 +931,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring * + + desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); + desc->buf0 = 0; +- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); ++ if (tx) ++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); ++ else ++ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); + desc->buf1 = 0; + desc->info = 0; + } +@@ -623,7 +990,8 @@ mtk_wed_reset_dma(struct mtk_wed_device + if (!dev->tx_ring[i].desc) + continue; + +- mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE); ++ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, ++ true); + } + + if (mtk_wed_poll_busy(dev)) +@@ -641,6 +1009,9 @@ mtk_wed_reset_dma(struct mtk_wed_device + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + ++ if (mtk_wed_get_rx_capa(dev)) ++ mtk_wdma_rx_reset(dev); ++ + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); +@@ -675,12 
+1046,11 @@ mtk_wed_reset_dma(struct mtk_wed_device + MTK_WED_WPDMA_RESET_IDX_RX); + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); + } +- + } + + static int + mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, +- int size, u32 desc_size) ++ int size, u32 desc_size, bool tx) + { + ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, + &ring->desc_phys, GFP_KERNEL); +@@ -689,7 +1059,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device + + ring->desc_size = desc_size; + ring->size = size; +- mtk_wed_ring_reset(ring, size); ++ mtk_wed_ring_reset(ring, size, tx); + + return 0; + } +@@ -698,9 +1068,14 @@ static int + mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) + { + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; +- struct mtk_wed_ring *wdma = &dev->rx_wdma[idx]; ++ struct mtk_wed_ring *wdma; + +- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) ++ if (idx >= ARRAY_SIZE(dev->rx_wdma)) ++ return -EINVAL; ++ ++ wdma = &dev->rx_wdma[idx]; ++ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, ++ true)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, +@@ -717,6 +1092,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we + return 0; + } + ++static int ++mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++{ ++ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; ++ struct mtk_wed_ring *wdma; ++ ++ if (idx >= ARRAY_SIZE(dev->tx_wdma)) ++ return -EINVAL; ++ ++ wdma = &dev->tx_wdma[idx]; ++ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, ++ true)) ++ return -ENOMEM; ++ ++ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, ++ size); ++ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); ++ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); ++ ++ if (!idx) { ++ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, ++ wdma->desc_phys); ++ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, ++ size); ++ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, ++ 0); ++ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, ++ 0); ++ } ++ ++ return 0; ++} ++ ++static void ++mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, ++ u32 reason, u32 hash) ++{ ++ struct mtk_eth *eth = dev->hw->eth; ++ struct ethhdr *eh; ++ ++ if (!skb) ++ return; ++ ++ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) ++ return; ++ ++ skb_set_mac_header(skb, 0); ++ eh = eth_hdr(skb); ++ skb->protocol = eh->h_proto; ++ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); ++} ++ + static void + mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) + { +@@ -739,6 +1168,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev + + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); + } else { ++ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, ++ GENMASK(1, 0)); + /* initail tx interrupt trigger */ + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | +@@ -757,6 +1188,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, + dev->wlan.txfree_tbit)); + ++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, ++ MTK_WED_WPDMA_INT_CTRL_RX0_EN | ++ MTK_WED_WPDMA_INT_CTRL_RX0_CLR | ++ MTK_WED_WPDMA_INT_CTRL_RX1_EN | ++ MTK_WED_WPDMA_INT_CTRL_RX1_CLR | ++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, ++ 
dev->wlan.rx_tbit[0]) | ++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, ++ dev->wlan.rx_tbit[1])); ++ + wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); + wed_set(dev, MTK_WED_WDMA_INT_CTRL, + FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, +@@ -794,9 +1235,15 @@ mtk_wed_dma_enable(struct mtk_wed_device + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); + } else { ++ int i; ++ + wed_set(dev, MTK_WED_WPDMA_CTRL, + MTK_WED_WPDMA_CTRL_SDL1_FIXED); + ++ wed_set(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); ++ + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); +@@ -804,6 +1251,15 @@ mtk_wed_dma_enable(struct mtk_wed_device + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); ++ ++ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, ++ MTK_WED_WPDMA_RX_D_RX_DRV_EN | ++ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | ++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, ++ 0x2)); ++ ++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) ++ mtk_wed_check_wfdma_rx_fill(dev, i); + } + } + +@@ -829,7 +1285,19 @@ mtk_wed_start(struct mtk_wed_device *dev + val |= BIT(0) | (BIT(1) * !!dev->hw->index); + regmap_write(dev->hw->mirror, dev->hw->index * 4, val); + } else { +- mtk_wed_set_512_support(dev, true); ++ /* driver set mid ready and only once */ ++ wed_w32(dev, MTK_WED_EXT_INT_MASK1, ++ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); ++ wed_w32(dev, MTK_WED_EXT_INT_MASK2, ++ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); ++ ++ wed_r32(dev, MTK_WED_EXT_INT_MASK1); ++ wed_r32(dev, MTK_WED_EXT_INT_MASK2); ++ ++ if (mtk_wed_rro_cfg(dev)) ++ return; ++ ++ mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + } + + mtk_wed_dma_enable(dev); +@@ -863,7 +1331,7 @@ mtk_wed_attach(struct mtk_wed_device *de + if (!hw) { + module_put(THIS_MODULE); + ret = -ENODEV; +- goto out; ++ goto unlock; + } + + device = dev->wlan.bus_type == MTK_WED_BUS_PCIE +@@ -876,15 +1344,24 @@ mtk_wed_attach(struct mtk_wed_device *de + dev->dev = hw->dev; + dev->irq = hw->irq; + dev->wdma_idx = hw->index; ++ dev->version = hw->version; + + if (hw->eth->dma_dev == hw->eth->dev && + of_dma_is_coherent(hw->eth->dev->of_node)) + mtk_eth_set_dma_device(hw->eth, hw->dev); + +- ret = mtk_wed_buffer_alloc(dev); +- if (ret) { +- mtk_wed_detach(dev); ++ ret = mtk_wed_tx_buffer_alloc(dev); ++ if (ret) + goto out; ++ ++ if (mtk_wed_get_rx_capa(dev)) { ++ ret = mtk_wed_rx_buffer_alloc(dev); ++ if (ret) ++ goto out; ++ ++ ret = mtk_wed_rro_alloc(dev); ++ if (ret) ++ goto out; + } + + mtk_wed_hw_init_early(dev); +@@ -893,8 +1370,10 @@ mtk_wed_attach(struct mtk_wed_device *de + BIT(hw->index), 0); + else + ret = mtk_wed_wo_init(hw); +- + out: ++ if (ret) ++ mtk_wed_detach(dev); ++unlock: + mutex_unlock(&hw_lock); + + return ret; +@@ -917,10 +1396,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev + * WDMA RX. 
+ */ + +- BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); ++ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) ++ return -EINVAL; + + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, +- sizeof(*ring->desc))) ++ sizeof(*ring->desc), true)) + return -ENOMEM; + + if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) +@@ -967,6 +1447,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed + return 0; + } + ++static int ++mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) ++{ ++ struct mtk_wed_ring *ring = &dev->rx_ring[idx]; ++ ++ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) ++ return -EINVAL; ++ ++ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, ++ sizeof(*ring->desc), false)) ++ return -ENOMEM; ++ ++ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ return -ENOMEM; ++ ++ ring->reg_base = MTK_WED_RING_RX_DATA(idx); ++ ring->wpdma = regs; ++ ring->flags |= MTK_WED_RING_CONFIGURED; ++ ++ /* WPDMA -> WED */ ++ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); ++ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); ++ ++ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, ++ ring->desc_phys); ++ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, ++ MTK_WED_RX_RING_SIZE); ++ ++ return 0; ++} ++ + static u32 + mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) + { +@@ -1063,7 +1574,9 @@ void mtk_wed_add_hw(struct device_node * + static const struct mtk_wed_ops wed_ops = { + .attach = mtk_wed_attach, + .tx_ring_setup = mtk_wed_tx_ring_setup, ++ .rx_ring_setup = mtk_wed_rx_ring_setup, + .txfree_ring_setup = mtk_wed_txfree_ring_setup, ++ .msg_update = mtk_wed_mcu_msg_update, + .start = mtk_wed_start, + .stop = mtk_wed_stop, + .reset_dma = mtk_wed_reset_dma, +@@ -1072,6 +1585,7 @@ void mtk_wed_add_hw(struct device_node * + .irq_get = mtk_wed_irq_get, + .irq_set_mask = mtk_wed_irq_set_mask, + .detach = mtk_wed_detach, ++ .ppe_check = mtk_wed_ppe_check, + }; + struct device_node *eth_np = eth->dev->of_node; + struct platform_device *pdev; +--- a/drivers/net/ethernet/mediatek/mtk_wed.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -87,6 +87,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, + } + + static inline u32 ++wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg) ++{ ++ if (!dev->rx_ring[ring].wpdma) ++ return 0; ++ ++ return readl(dev->rx_ring[ring].wpdma + reg); ++} ++ ++static inline void ++wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) ++{ ++ if (!dev->rx_ring[ring].wpdma) ++ return; ++ ++ writel(val, dev->rx_ring[ring].wpdma + reg); ++} ++ ++static inline u32 + wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) + { + if (!dev->txfree_ring.wpdma) +@@ -128,6 +146,7 @@ static inline int mtk_wed_flow_add(int i + static inline void mtk_wed_flow_remove(int index) + { + } ++ + #endif + + #ifdef CONFIG_DEBUG_FS +--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + #include "mtk_wed_regs.h" + #include "mtk_wed_wo.h" +@@ -60,24 +61,37 @@ void mtk_wed_mcu_rx_event(struct mtk_wed + wake_up(&wo->mcu.wait); + } + ++static void ++mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) ++{ ++ u32 count = get_unaligned_le32(skb->data); ++ struct mtk_wed_wo_rx_stats *stats; ++ int i; ++ ++ if (count * sizeof(*stats) > skb->len - sizeof(u32)) ++ return; ++ ++ stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32)); ++ 
for (i = 0 ; i < count ; i++) ++ wed->wlan.update_wo_rx_stats(wed, &stats[i]); ++} ++ + void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, + struct sk_buff *skb) + { + struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; + +- switch (hdr->cmd) { +- case MTK_WED_WO_EVT_LOG_DUMP: { +- const char *msg = (const char *)(skb->data + sizeof(*hdr)); ++ skb_pull(skb, sizeof(*hdr)); + +- dev_notice(wo->hw->dev, "%s\n", msg); ++ switch (hdr->cmd) { ++ case MTK_WED_WO_EVT_LOG_DUMP: ++ dev_notice(wo->hw->dev, "%s\n", skb->data); + break; +- } + case MTK_WED_WO_EVT_PROFILING: { +- struct mtk_wed_wo_log_info *info; +- u32 count = (skb->len - sizeof(*hdr)) / sizeof(*info); ++ struct mtk_wed_wo_log_info *info = (void *)skb->data; ++ u32 count = skb->len / sizeof(*info); + int i; + +- info = (struct mtk_wed_wo_log_info *)(skb->data + sizeof(*hdr)); + for (i = 0 ; i < count ; i++) + dev_notice(wo->hw->dev, + "SN:%u latency: total=%u, rro:%u, mod:%u\n", +@@ -88,6 +102,7 @@ void mtk_wed_mcu_rx_unsolicited_event(st + break; + } + case MTK_WED_WO_EVT_RXCNT_INFO: ++ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); + break; + default: + break; +@@ -144,6 +159,8 @@ mtk_wed_mcu_parse_response(struct mtk_we + skb_pull(skb, sizeof(*hdr)); + switch (cmd) { + case MTK_WED_WO_CMD_RXCNT_INFO: ++ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); ++ break; + default: + break; + } +@@ -182,6 +199,18 @@ unlock: + return ret; + } + ++int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, ++ int len) ++{ ++ struct mtk_wed_wo *wo = dev->hw->wed_wo; ++ ++ if (dev->hw->version == 1) ++ return 0; ++ ++ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len, ++ true); ++} ++ + static int + mtk_wed_get_memory_region(struct mtk_wed_wo *wo, + struct mtk_wed_wo_memory_region *region) +--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -4,6 +4,7 @@ + #ifndef __MTK_WED_REGS_H + #define __MTK_WED_REGS_H + ++#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8) + #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) + #define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0) + #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) +@@ -28,6 +29,8 @@ struct mtk_wdma_desc { + #define MTK_WED_RESET_WED_TX_DMA BIT(12) + #define MTK_WED_RESET_WDMA_RX_DRV BIT(17) + #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) ++#define MTK_WED_RESET_RX_RRO_QM BIT(20) ++#define MTK_WED_RESET_RX_ROUTE_QM BIT(21) + #define MTK_WED_RESET_WED BIT(31) + + #define MTK_WED_CTRL 0x00c +@@ -39,8 +42,12 @@ struct mtk_wdma_desc { + #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) + #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) + #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) +-#define MTK_WED_CTRL_RESERVE_EN BIT(12) +-#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) ++#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12) ++#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13) ++#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14) ++#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) ++#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) ++#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) + #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) + #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) + #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) +@@ -62,6 +69,9 @@ struct mtk_wdma_desc { + #define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22) + #define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23) + #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) ++#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25) ++#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26) 
++#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27) + #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ + MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ +@@ -71,6 +81,8 @@ struct mtk_wdma_desc { + MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR) + + #define MTK_WED_EXT_INT_MASK 0x028 ++#define MTK_WED_EXT_INT_MASK1 0x02c ++#define MTK_WED_EXT_INT_MASK2 0x030 + + #define MTK_WED_STATUS 0x060 + #define MTK_WED_STATUS_TX GENMASK(15, 8) +@@ -151,6 +163,7 @@ struct mtk_wdma_desc { + #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) + + #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) ++#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) + + #define MTK_WED_SCR0 0x3c0 + #define MTK_WED_WPDMA_INT_TRIGGER 0x504 +@@ -213,6 +226,12 @@ struct mtk_wdma_desc { + #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) + + #define MTK_WED_WPDMA_INT_CTRL_RX 0x534 ++#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0) ++#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1) ++#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2) ++#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8) ++#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9) ++#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10) + + #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 + #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) +@@ -242,11 +261,34 @@ struct mtk_wdma_desc { + + #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) + #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) ++#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10) ++ ++#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c ++#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0) ++#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7) ++#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24) ++ ++#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 ++#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) ++#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) ++ ++#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c ++#define MTK_WED_WPDMA_RX_RING 0x770 ++ ++#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) ++#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) ++#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c ++ ++#define MTK_WED_WDMA_RING_TX 0x800 ++ ++#define MTK_WED_WDMA_TX_MIB 0x810 ++ + #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) + #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) + + #define MTK_WED_WDMA_GLO_CFG 0xa04 + #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) ++#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) + #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) + #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) + #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) +@@ -291,6 +333,20 @@ struct mtk_wdma_desc { + #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) + #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) + ++#define MTK_WED_RX_BM_RX_DMAD 0xd80 ++#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0) ++ ++#define MTK_WED_RX_BM_BASE 0xd84 ++#define MTK_WED_RX_BM_INIT_PTR 0xd88 ++#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0) ++#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16) ++ ++#define MTK_WED_RX_PTR 0xd8c ++ ++#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4 ++#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16) ++#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0) ++ + #define MTK_WED_RING_OFS_BASE 0x00 + #define MTK_WED_RING_OFS_COUNT 0x04 + #define MTK_WED_RING_OFS_CPU_IDX 0x08 +@@ -301,7 +357,9 @@ struct mtk_wdma_desc { + + #define MTK_WDMA_GLO_CFG 0x204 + #define MTK_WDMA_GLO_CFG_TX_DMA_EN 
BIT(0) ++#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1) + #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2) ++#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3) + #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26) + #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27) + #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28) +@@ -330,4 +388,70 @@ struct mtk_wdma_desc { + /* DMA channel mapping */ + #define HIFSYS_DMA_AG_MAP 0x008 + ++#define MTK_WED_RTQM_GLO_CFG 0xb00 ++#define MTK_WED_RTQM_BUSY BIT(1) ++#define MTK_WED_RTQM_Q_RST BIT(2) ++#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) ++#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) ++ ++#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) ++#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) ++#define MTK_WED_RTQM_Q2N_MIB 0xb80 ++#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4) ++ ++#define MTK_WED_RTQM_Q2B_MIB 0xb8c ++#define MTK_WED_RTQM_PFDBK_MIB 0xb90 ++ ++#define MTK_WED_RROQM_GLO_CFG 0xc04 ++#define MTK_WED_RROQM_RST_IDX 0xc08 ++#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) ++#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4) ++ ++#define MTK_WED_RROQM_MIOD_CTRL0 0xc40 ++#define MTK_WED_RROQM_MIOD_CTRL1 0xc44 ++#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0) ++ ++#define MTK_WED_RROQM_MIOD_CTRL2 0xc48 ++#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c ++ ++#define MTK_WED_RROQM_FDBK_CTRL0 0xc50 ++#define MTK_WED_RROQM_FDBK_CTRL1 0xc54 ++#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0) ++ ++#define MTK_WED_RROQM_FDBK_CTRL2 0xc58 ++ ++#define MTK_WED_RROQ_BASE_L 0xc80 ++#define MTK_WED_RROQ_BASE_H 0xc84 ++ ++#define MTK_WED_RROQM_MIOD_CFG 0xc8c ++#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0) ++#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8) ++#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16) ++ ++#define MTK_WED_RROQM_MID_MIB 0xcc0 ++#define MTK_WED_RROQM_MOD_MIB 0xcc4 ++#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8 ++#define MTK_WED_RROQM_FDBK_MIB 0xcd0 ++#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4 ++#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0 ++#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4 ++#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8 ++#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec ++ ++#define MTK_WED_RX_BM_RX_DMAD 0xd80 ++#define MTK_WED_RX_BM_BASE 0xd84 ++#define MTK_WED_RX_BM_INIT_PTR 0xd88 ++#define MTK_WED_RX_BM_PTR 0xd8c ++#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16) ++#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0) ++ ++#define MTK_WED_RX_BM_BLEN 0xd90 ++#define MTK_WED_RX_BM_STS 0xd94 ++#define MTK_WED_RX_BM_INTF2 0xd98 ++#define MTK_WED_RX_BM_INTF 0xd9c ++#define MTK_WED_RX_BM_ERR_STS 0xda8 ++ ++#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 ++#define MTK_WED_PCIE_INT_MASK 0x0 ++ + #endif +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h +@@ -49,6 +49,10 @@ enum { + MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2), + }; + ++#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050 ++#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20 ++#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1 ++ + enum { + MTK_WED_WO_REGION_EMI, + MTK_WED_WO_REGION_ILM, +@@ -57,6 +61,28 @@ enum { + __MTK_WED_WO_REGION_MAX, + }; + ++enum mtk_wed_wo_state { ++ MTK_WED_WO_STATE_UNDEFINED, ++ MTK_WED_WO_STATE_INIT, ++ MTK_WED_WO_STATE_ENABLE, ++ MTK_WED_WO_STATE_DISABLE, ++ MTK_WED_WO_STATE_HALT, ++ MTK_WED_WO_STATE_GATING, ++ MTK_WED_WO_STATE_SER_RESET, ++ MTK_WED_WO_STATE_WF_RESET, ++}; ++ ++enum mtk_wed_wo_done_state { ++ MTK_WED_WOIF_UNDEFINED, ++ MTK_WED_WOIF_DISABLE_DONE, ++ MTK_WED_WOIF_TRIGGER_ENABLE, ++ MTK_WED_WOIF_ENABLE_DONE, ++ MTK_WED_WOIF_TRIGGER_GATING, ++ 
MTK_WED_WOIF_GATING_DONE, ++ MTK_WED_WOIF_TRIGGER_HALT, ++ MTK_WED_WOIF_HALT_DONE, ++}; ++ + enum mtk_wed_dummy_cr_idx { + MTK_WED_DUMMY_CR_FWDL, + MTK_WED_DUMMY_CR_WO_STATUS, +@@ -245,6 +271,8 @@ void mtk_wed_mcu_rx_unsolicited_event(st + struct sk_buff *skb); + int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, + const void *data, int len, bool wait_resp); ++int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, ++ int len); + int mtk_wed_mcu_init(struct mtk_wed_wo *wo); + int mtk_wed_wo_init(struct mtk_wed_hw *hw); + void mtk_wed_wo_deinit(struct mtk_wed_hw *hw); +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -5,10 +5,13 @@ + #include + #include + #include ++#include + + #define MTK_WED_TX_QUEUES 2 + #define MTK_WED_RX_QUEUES 2 + ++#define WED_WO_STA_REC 0x6 ++ + struct mtk_wed_hw; + struct mtk_wdma_desc; + +@@ -41,21 +44,37 @@ enum mtk_wed_wo_cmd { + MTK_WED_WO_CMD_WED_END + }; + ++struct mtk_rxbm_desc { ++ __le32 buf0; ++ __le32 token; ++} __packed __aligned(4); ++ + enum mtk_wed_bus_tye { + MTK_WED_BUS_PCIE, + MTK_WED_BUS_AXI, + }; + ++#define MTK_WED_RING_CONFIGURED BIT(0) + struct mtk_wed_ring { + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + u32 desc_size; + int size; ++ u32 flags; + + u32 reg_base; + void __iomem *wpdma; + }; + ++struct mtk_wed_wo_rx_stats { ++ __le16 wlan_idx; ++ __le16 tid; ++ __le32 rx_pkt_cnt; ++ __le32 rx_byte_cnt; ++ __le32 rx_err_cnt; ++ __le32 rx_drop_cnt; ++}; ++ + struct mtk_wed_device { + #ifdef CONFIG_NET_MEDIATEK_SOC_WED + const struct mtk_wed_ops *ops; +@@ -64,9 +83,12 @@ struct mtk_wed_device { + bool init_done, running; + int wdma_idx; + int irq; ++ u8 version; + + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; ++ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES]; + struct mtk_wed_ring txfree_ring; ++ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; + + struct { +@@ -74,7 +96,20 @@ struct mtk_wed_device { + void **pages; + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; +- } buf_ring; ++ } tx_buf_ring; ++ ++ struct { ++ int size; ++ struct page_frag_cache rx_page; ++ struct mtk_rxbm_desc *desc; ++ dma_addr_t desc_phys; ++ } rx_buf_ring; ++ ++ struct { ++ struct mtk_wed_ring ring; ++ dma_addr_t miod_phys; ++ dma_addr_t fdbk_phys; ++ } rro; + + /* filled by driver: */ + struct { +@@ -83,22 +118,36 @@ struct mtk_wed_device { + struct pci_dev *pci_dev; + }; + enum mtk_wed_bus_tye bus_type; ++ void __iomem *base; ++ u32 phy_base; + + u32 wpdma_phys; + u32 wpdma_int; + u32 wpdma_mask; + u32 wpdma_tx; + u32 wpdma_txfree; ++ u32 wpdma_rx_glo; ++ u32 wpdma_rx; ++ ++ bool wcid_512; + + u16 token_start; + unsigned int nbuf; ++ unsigned int rx_nbuf; ++ unsigned int rx_npkt; ++ unsigned int rx_size; + + u8 tx_tbit[MTK_WED_TX_QUEUES]; ++ u8 rx_tbit[MTK_WED_RX_QUEUES]; + u8 txfree_tbit; + + u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); + int (*offload_enable)(struct mtk_wed_device *wed); + void (*offload_disable)(struct mtk_wed_device *wed); ++ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size); ++ void (*release_rx_buf)(struct mtk_wed_device *wed); ++ void (*update_wo_rx_stats)(struct mtk_wed_device *wed, ++ struct mtk_wed_wo_rx_stats *stats); + } wlan; + #endif + }; +@@ -107,9 +156,15 @@ struct mtk_wed_ops { + int (*attach)(struct mtk_wed_device *dev); + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); ++ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring, ++ void 
__iomem *regs); + int (*txfree_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); ++ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id, ++ void *data, int len); + void (*detach)(struct mtk_wed_device *dev); ++ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb, ++ u32 reason, u32 hash); + + void (*stop)(struct mtk_wed_device *dev); + void (*start)(struct mtk_wed_device *dev, u32 irq_mask); +@@ -144,6 +199,16 @@ mtk_wed_device_attach(struct mtk_wed_dev + return ret; + } + ++static inline bool ++mtk_wed_get_rx_capa(struct mtk_wed_device *dev) ++{ ++#ifdef CONFIG_NET_MEDIATEK_SOC_WED ++ return dev->version != 1; ++#else ++ return false; ++#endif ++} ++ + #ifdef CONFIG_NET_MEDIATEK_SOC_WED + #define mtk_wed_device_active(_dev) !!(_dev)->ops + #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) +@@ -160,6 +225,12 @@ mtk_wed_device_attach(struct mtk_wed_dev + (_dev)->ops->irq_get(_dev, _mask) + #define mtk_wed_device_irq_set_mask(_dev, _mask) \ + (_dev)->ops->irq_set_mask(_dev, _mask) ++#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \ ++ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \ ++ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash) ++#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \ ++ (_dev)->ops->msg_update(_dev, _id, _msg, _len) + #else + static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) + { +@@ -173,6 +244,9 @@ static inline bool mtk_wed_device_active + #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) + #define mtk_wed_device_irq_get(_dev, _mask) 0 + #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) ++#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0) ++#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV + #endif + + #endif diff --git a/target/linux/generic/backport-5.15/729-05-v6.1-net-ethernet-mtk_wed-add-rx-mib-counters.patch b/target/linux/generic/backport-5.15/729-05-v6.1-net-ethernet-mtk_wed-add-rx-mib-counters.patch new file mode 100644 index 0000000000..bb1066dece --- /dev/null +++ b/target/linux/generic/backport-5.15/729-05-v6.1-net-ethernet-mtk_wed-add-rx-mib-counters.patch @@ -0,0 +1,149 @@ +From: Lorenzo Bianconi +Date: Sat, 5 Nov 2022 23:36:22 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add rx mib counters + +Introduce WED RX MIB counters support available on MT7986a SoC. + +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +@@ -2,6 +2,7 @@ + /* Copyright (C) 2021 Felix Fietkau */ + + #include ++#include + #include "mtk_wed.h" + #include "mtk_wed_regs.h" + +@@ -18,6 +19,8 @@ enum { + DUMP_TYPE_WDMA, + DUMP_TYPE_WPDMA_TX, + DUMP_TYPE_WPDMA_TXFREE, ++ DUMP_TYPE_WPDMA_RX, ++ DUMP_TYPE_WED_RRO, + }; + + #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } +@@ -36,6 +39,9 @@ enum { + + #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) + #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) ++#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n) ++#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO) ++#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO) + + static void + print_reg_val(struct seq_file *s, const char *name, u32 val) +@@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct + cur > regs ? "\n" : "", + cur->name); + continue; ++ case DUMP_TYPE_WED_RRO: + case DUMP_TYPE_WED: + val = wed_r32(dev, cur->offset); + break; +@@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct + case DUMP_TYPE_WPDMA_TXFREE: + val = wpdma_txfree_r32(dev, cur->offset); + break; ++ case DUMP_TYPE_WPDMA_RX: ++ val = wpdma_rx_r32(dev, cur->base, cur->offset); ++ break; + } + print_reg_val(s, cur->name, val); + } +@@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void + } + DEFINE_SHOW_ATTRIBUTE(wed_txinfo); + ++static int ++wed_rxinfo_show(struct seq_file *s, void *data) ++{ ++ static const struct reg_dump regs[] = { ++ DUMP_STR("WPDMA RX"), ++ DUMP_WPDMA_RX_RING(0), ++ DUMP_WPDMA_RX_RING(1), ++ ++ DUMP_STR("WPDMA RX"), ++ DUMP_WED(WED_WPDMA_RX_D_MIB(0)), ++ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)), ++ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)), ++ DUMP_WED(WED_WPDMA_RX_D_MIB(1)), ++ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)), ++ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)), ++ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB), ++ ++ DUMP_STR("WED RX"), ++ DUMP_WED_RING(WED_RING_RX_DATA(0)), ++ DUMP_WED_RING(WED_RING_RX_DATA(1)), ++ ++ DUMP_STR("WED RRO"), ++ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), ++ DUMP_WED(WED_RROQM_MID_MIB), ++ DUMP_WED(WED_RROQM_MOD_MIB), ++ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB), ++ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0), ++ DUMP_WED(WED_RROQM_FDBK_IND_MIB), ++ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB), ++ DUMP_WED(WED_RROQM_FDBK_ANC_MIB), ++ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), ++ ++ DUMP_STR("WED Route QM"), ++ DUMP_WED(WED_RTQM_R2H_MIB(0)), ++ DUMP_WED(WED_RTQM_R2Q_MIB(0)), ++ DUMP_WED(WED_RTQM_Q2H_MIB(0)), ++ DUMP_WED(WED_RTQM_R2H_MIB(1)), ++ DUMP_WED(WED_RTQM_R2Q_MIB(1)), ++ DUMP_WED(WED_RTQM_Q2H_MIB(1)), ++ DUMP_WED(WED_RTQM_Q2N_MIB), ++ DUMP_WED(WED_RTQM_Q2B_MIB), ++ DUMP_WED(WED_RTQM_PFDBK_MIB), ++ ++ DUMP_STR("WED WDMA TX"), ++ DUMP_WED(WED_WDMA_TX_MIB), ++ DUMP_WED_RING(WED_WDMA_RING_TX), ++ ++ DUMP_STR("WDMA TX"), ++ DUMP_WDMA(WDMA_GLO_CFG), ++ DUMP_WDMA_RING(WDMA_RING_TX(0)), ++ DUMP_WDMA_RING(WDMA_RING_TX(1)), ++ ++ DUMP_STR("WED RX BM"), ++ DUMP_WED(WED_RX_BM_BASE), ++ DUMP_WED(WED_RX_BM_RX_DMAD), ++ DUMP_WED(WED_RX_BM_PTR), ++ DUMP_WED(WED_RX_BM_TKID_MIB), ++ DUMP_WED(WED_RX_BM_BLEN), ++ DUMP_WED(WED_RX_BM_STS), ++ DUMP_WED(WED_RX_BM_INTF2), ++ DUMP_WED(WED_RX_BM_INTF), ++ DUMP_WED(WED_RX_BM_ERR_STS), ++ }; ++ struct mtk_wed_hw *hw = s->private; ++ struct mtk_wed_device *dev = hw->wed_dev; ++ ++ if (!dev) ++ return 0; ++ 
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); ++ ++ return 0; ++} ++DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); + + static int + mtk_wed_reg_set(void *data, u64 val) +@@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w + debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); + debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); + debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); ++ if (hw->version != 1) ++ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, ++ &wed_rxinfo_fops); + } diff --git a/target/linux/generic/backport-5.15/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch b/target/linux/generic/backport-5.15/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch new file mode 100644 index 0000000000..f9e7caa27c --- /dev/null +++ b/target/linux/generic/backport-5.15/729-06-v6.1-net-ethernet-mtk_eth_soc-do-not-overwrite-mtu-config.patch @@ -0,0 +1,98 @@ +From: Lorenzo Bianconi +Date: Thu, 17 Nov 2022 00:35:04 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: do not overwrite mtu + configuration running reset routine + +Restore user configured MTU running mtk_hw_init() during tx timeout routine +since it will be overwritten after a hw reset. + +Reported-by: Felix Fietkau +Fixes: 9ea4d311509f ("net: ethernet: mediatek: add the whole ethernet reset into the reset process") +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3175,6 +3175,30 @@ static void mtk_dim_tx(struct work_struc + dim->state = DIM_START_MEASURE; + } + ++static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) ++{ ++ struct mtk_eth *eth = mac->hw; ++ u32 mcr_cur, mcr_new; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) ++ return; ++ ++ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); ++ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; ++ ++ if (val <= 1518) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); ++ else if (val <= 1536) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); ++ else if (val <= 1552) ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); ++ else ++ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); ++ ++ if (mcr_new != mcr_cur) ++ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); ++} ++ + static int mtk_hw_init(struct mtk_eth *eth) + { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | +@@ -3249,8 +3273,16 @@ static int mtk_hw_init(struct mtk_eth *e + * up with the more appropriate value when mtk_mac_config call is being + * invoked. + */ +- for (i = 0; i < MTK_MAC_COUNT; i++) ++ for (i = 0; i < MTK_MAC_COUNT; i++) { ++ struct net_device *dev = eth->netdev[i]; ++ + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); ++ if (dev) { ++ struct mtk_mac *mac = netdev_priv(dev); ++ ++ mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN); ++ } ++ } + + /* Indicates CDM to parse the MTK special tag from CPU + * which also is working out for untag packets. 
+@@ -3366,7 +3398,6 @@ static int mtk_change_mtu(struct net_dev + int length = new_mtu + MTK_RX_ETH_HLEN; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; +- u32 mcr_cur, mcr_new; + + if (rcu_access_pointer(eth->prog) && + length > MTK_PP_MAX_BUF_SIZE) { +@@ -3374,23 +3405,7 @@ static int mtk_change_mtu(struct net_dev + return -EINVAL; + } + +- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { +- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); +- mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; +- +- if (length <= 1518) +- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); +- else if (length <= 1536) +- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); +- else if (length <= 1552) +- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); +- else +- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); +- +- if (mcr_new != mcr_cur) +- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); +- } +- ++ mtk_set_mcr_max_rx(mac, length); + dev->mtu = new_mtu; + + return 0; diff --git a/target/linux/generic/backport-5.15/729-07-v6.1-net-ethernet-mtk_eth_soc-remove-cpu_relax-in-mtk_pen.patch b/target/linux/generic/backport-5.15/729-07-v6.1-net-ethernet-mtk_eth_soc-remove-cpu_relax-in-mtk_pen.patch new file mode 100644 index 0000000000..e51dd7b97a --- /dev/null +++ b/target/linux/generic/backport-5.15/729-07-v6.1-net-ethernet-mtk_eth_soc-remove-cpu_relax-in-mtk_pen.patch @@ -0,0 +1,36 @@ +From: Lorenzo Bianconi +Date: Thu, 17 Nov 2022 00:58:46 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: remove cpu_relax in + mtk_pending_work + +Get rid of cpu_relax in mtk_pending_work routine since MTK_RESETTING is +set only in mtk_pending_work() and it runs holding rtnl lock + +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3436,11 +3436,8 @@ static void mtk_pending_work(struct work + rtnl_lock(); + + dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); ++ set_bit(MTK_RESETTING, ð->state); + +- while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) +- cpu_relax(); +- +- dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) +@@ -3474,7 +3471,7 @@ static void mtk_pending_work(struct work + + dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); + +- clear_bit_unlock(MTK_RESETTING, ð->state); ++ clear_bit(MTK_RESETTING, ð->state); + + rtnl_unlock(); + } diff --git a/target/linux/generic/backport-5.15/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch b/target/linux/generic/backport-5.15/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch new file mode 100644 index 0000000000..69df58eed1 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-08-v6.2-net-ethernet-mtk_eth_soc-fix-RSTCTRL_PPE-0-1-definit.patch @@ -0,0 +1,63 @@ +From: Lorenzo Bianconi +Date: Thu, 17 Nov 2022 15:29:53 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: fix RSTCTRL_PPE{0,1} definitions + +Fix RSTCTRL_PPE0 and RSTCTRL_PPE1 register mask definitions for +MTK_NETSYS_V2. +Remove duplicated definitions. + +Fixes: 160d3a9b1929 ("net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support") +Signed-off-by: Lorenzo Bianconi +Signed-off-by: David S. 
Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3238,16 +3238,17 @@ static int mtk_hw_init(struct mtk_eth *e + return 0; + } + +- val = RSTCTRL_FE | RSTCTRL_PPE; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); +- +- val |= RSTCTRL_ETH; +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) +- val |= RSTCTRL_PPE1; ++ val = RSTCTRL_PPE0_V2; ++ } else { ++ val = RSTCTRL_PPE0; + } + +- ethsys_reset(eth, val); ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) ++ val |= RSTCTRL_PPE1; ++ ++ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -444,18 +444,14 @@ + /* ethernet reset control register */ + #define ETHSYS_RSTCTRL 0x34 + #define RSTCTRL_FE BIT(6) +-#define RSTCTRL_PPE BIT(31) +-#define RSTCTRL_PPE1 BIT(30) ++#define RSTCTRL_PPE0 BIT(31) ++#define RSTCTRL_PPE0_V2 BIT(30) ++#define RSTCTRL_PPE1 BIT(31) + #define RSTCTRL_ETH BIT(23) + + /* ethernet reset check idle register */ + #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 + +-/* ethernet reset control register */ +-#define ETHSYS_RSTCTRL 0x34 +-#define RSTCTRL_FE BIT(6) +-#define RSTCTRL_PPE BIT(31) +- + /* ethernet dma channel agent map */ + #define ETHSYS_DMA_AG_MAP 0x408 + #define ETHSYS_DMA_AG_MAP_PDMA BIT(0) diff --git a/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch b/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch new file mode 100644 index 0000000000..d91d829911 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch @@ -0,0 +1,80 @@ +From: Sujuan Chen +Date: Thu, 24 Nov 2022 11:18:14 +0800 +Subject: [PATCH] net: ethernet: mtk_wed: add wcid overwritten support for wed + v1 + +All wed versions should enable the wcid overwritten feature, +since the wcid size is controlled by the wlan driver. + +Tested-by: Sujuan Chen +Co-developed-by: Bo Jiao +Signed-off-by: Bo Jiao +Signed-off-by: Sujuan Chen +Signed-off-by: David S. 
Miller +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -526,9 +526,9 @@ mtk_wed_dma_disable(struct mtk_wed_devic + MTK_WED_WPDMA_RX_D_RX_DRV_EN); + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); +- +- mtk_wed_set_512_support(dev, false); + } ++ ++ mtk_wed_set_512_support(dev, false); + } + + static void +@@ -1297,9 +1297,10 @@ mtk_wed_start(struct mtk_wed_device *dev + if (mtk_wed_rro_cfg(dev)) + return; + +- mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + } + ++ mtk_wed_set_512_support(dev, dev->wlan.wcid_512); ++ + mtk_wed_dma_enable(dev); + dev->running = true; + } +@@ -1365,11 +1366,13 @@ mtk_wed_attach(struct mtk_wed_device *de + } + + mtk_wed_hw_init_early(dev); +- if (hw->version == 1) ++ if (hw->version == 1) { + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), 0); +- else ++ } else { ++ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); + ret = mtk_wed_wo_init(hw); ++ } + out: + if (ret) + mtk_wed_detach(dev); +--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -20,6 +20,8 @@ struct mtk_wdma_desc { + __le32 info; + } __packed __aligned(4); + ++#define MTK_WED_REV_ID 0x004 ++ + #define MTK_WED_RESET 0x008 + #define MTK_WED_RESET_TX_BM BIT(0) + #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -85,6 +85,9 @@ struct mtk_wed_device { + int irq; + u8 version; + ++ /* used by wlan driver */ ++ u32 rev_id; ++ + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES]; + struct mtk_wed_ring txfree_ring; diff --git a/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch b/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch new file mode 100644 index 0000000000..d97bb715e0 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch @@ -0,0 +1,85 @@ +From: Lorenzo Bianconi +Date: Thu, 24 Nov 2022 16:22:51 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: return status value in + mtk_wdma_rx_reset + +Move MTK_WDMA_RESET_IDX configuration in mtk_wdma_rx_reset routine. +Increase poll timeout to 10ms in order to be aligned with vendor sdk. +This is a preliminary patch to add Wireless Ethernet Dispatcher reset +support. 
+ +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -101,17 +101,21 @@ mtk_wdma_read_reset(struct mtk_wed_devic + return wdma_r32(dev, MTK_WDMA_GLO_CFG); + } + +-static void ++static int + mtk_wdma_rx_reset(struct mtk_wed_device *dev) + { + u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; +- int i; ++ int i, ret; + + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); +- if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, +- !(status & mask), 0, 1000)) ++ ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, ++ !(status & mask), 0, 10000); ++ if (ret) + dev_err(dev->hw->dev, "rx reset failed\n"); + ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); ++ + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { + if (dev->rx_wdma[i].desc) + continue; +@@ -119,6 +123,8 @@ mtk_wdma_rx_reset(struct mtk_wed_device + wdma_w32(dev, + MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); + } ++ ++ return ret; + } + + static void +@@ -565,9 +571,7 @@ mtk_wed_detach(struct mtk_wed_device *de + + mtk_wed_stop(dev); + +- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); +- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); +- ++ mtk_wdma_rx_reset(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + if (mtk_wed_get_rx_capa(dev)) { + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); +@@ -582,7 +586,6 @@ mtk_wed_detach(struct mtk_wed_device *de + mtk_wed_wo_reset(dev); + mtk_wed_free_rx_rings(dev); + mtk_wed_wo_deinit(hw); +- mtk_wdma_rx_reset(dev); + } + + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { +@@ -1006,11 +1009,7 @@ mtk_wed_reset_dma(struct mtk_wed_device + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + +- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); +- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); +- +- if (mtk_wed_get_rx_capa(dev)) +- mtk_wdma_rx_reset(dev); ++ mtk_wdma_rx_reset(dev); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); diff --git a/target/linux/generic/backport-5.15/729-11-v6.2-net-ethernet-mtk_wed-move-MTK_WDMA_RESET_IDX_TX-conf.patch b/target/linux/generic/backport-5.15/729-11-v6.2-net-ethernet-mtk_wed-move-MTK_WDMA_RESET_IDX_TX-conf.patch new file mode 100644 index 0000000000..10c1732c96 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-11-v6.2-net-ethernet-mtk_wed-move-MTK_WDMA_RESET_IDX_TX-conf.patch @@ -0,0 +1,52 @@ +From: Lorenzo Bianconi +Date: Thu, 24 Nov 2022 16:22:52 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: move MTK_WDMA_RESET_IDX_TX + configuration in mtk_wdma_tx_reset + +Remove duplicated code. Increase poll timeout to 10ms in order to be +aligned with vendor sdk. +This is a preliminary patch to add Wireless Ethernet Dispatcher reset +support. 
+ +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -135,16 +135,15 @@ mtk_wdma_tx_reset(struct mtk_wed_device + + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); + if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, +- !(status & mask), 0, 1000)) ++ !(status & mask), 0, 10000)) + dev_err(dev->hw->dev, "tx reset failed\n"); + +- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) { +- if (dev->tx_wdma[i].desc) +- continue; ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); ++ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + ++ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + wdma_w32(dev, + MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); +- } + } + + static void +@@ -573,12 +572,6 @@ mtk_wed_detach(struct mtk_wed_device *de + + mtk_wdma_rx_reset(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); +- if (mtk_wed_get_rx_capa(dev)) { +- wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); +- wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); +- wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); +- } +- + mtk_wed_free_tx_buffer(dev); + mtk_wed_free_tx_rings(dev); + diff --git a/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch b/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch new file mode 100644 index 0000000000..dfc0f8c3f3 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch @@ -0,0 +1,98 @@ +From: Lorenzo Bianconi +Date: Thu, 24 Nov 2022 16:22:53 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: update mtk_wed_stop + +Update mtk_wed_stop routine and rename old mtk_wed_stop() to +mtk_wed_deinit(). This is a preliminary patch to add Wireless Ethernet +Dispatcher reset support. 
+ +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -539,14 +539,8 @@ mtk_wed_dma_disable(struct mtk_wed_devic + static void + mtk_wed_stop(struct mtk_wed_device *dev) + { +- mtk_wed_dma_disable(dev); + mtk_wed_set_ext_int(dev, false); + +- wed_clr(dev, MTK_WED_CTRL, +- MTK_WED_CTRL_WDMA_INT_AGENT_EN | +- MTK_WED_CTRL_WPDMA_INT_AGENT_EN | +- MTK_WED_CTRL_WED_TX_BM_EN | +- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); +@@ -558,7 +552,27 @@ mtk_wed_stop(struct mtk_wed_device *dev) + + wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); + wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); +- wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); ++} ++ ++static void ++mtk_wed_deinit(struct mtk_wed_device *dev) ++{ ++ mtk_wed_stop(dev); ++ mtk_wed_dma_disable(dev); ++ ++ wed_clr(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WPDMA_INT_AGENT_EN | ++ MTK_WED_CTRL_WED_TX_BM_EN | ++ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ ++ if (dev->hw->version == 1) ++ return; ++ ++ wed_clr(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_RX_ROUTE_QM_EN | ++ MTK_WED_CTRL_WED_RX_BM_EN | ++ MTK_WED_CTRL_RX_RRO_QM_EN); + } + + static void +@@ -568,7 +582,7 @@ mtk_wed_detach(struct mtk_wed_device *de + + mutex_lock(&hw_lock); + +- mtk_wed_stop(dev); ++ mtk_wed_deinit(dev); + + mtk_wdma_rx_reset(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); +@@ -677,7 +691,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev + { + u32 mask, set; + +- mtk_wed_stop(dev); ++ mtk_wed_deinit(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_set_wpdma(dev); + +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -234,6 +234,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic + (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash) + #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \ + (_dev)->ops->msg_update(_dev, _id, _msg, _len) ++#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev) ++#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev) + #else + static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) + { +@@ -250,6 +252,8 @@ static inline bool mtk_wed_device_active + #define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV + #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0) + #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV ++#define mtk_wed_device_stop(_dev) do {} while (0) ++#define mtk_wed_device_dma_reset(_dev) do {} while (0) + #endif + + #endif diff --git a/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch b/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch new file mode 100644 index 0000000000..2205fea513 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch @@ -0,0 +1,309 @@ +From: Lorenzo Bianconi +Date: Thu, 24 Nov 2022 16:22:54 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_rx_reset routine + +Introduce mtk_wed_rx_reset routine in order to reset rx DMA for Wireless +Ethernet Dispatcher available on MT7986 SoC. 
+ +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring * + } + + static u32 +-mtk_wed_check_busy(struct mtk_wed_device *dev) ++mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) + { +- if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) +- return true; +- +- if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & +- MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) +- return true; +- +- if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) +- return true; +- +- if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & +- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) +- return true; +- +- if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & +- MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) +- return true; +- +- if (wed_r32(dev, MTK_WED_CTRL) & +- (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) +- return true; +- +- return false; ++ return !!(wed_r32(dev, reg) & mask); + } + + static int +-mtk_wed_poll_busy(struct mtk_wed_device *dev) ++mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) + { + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, +- timeout, false, dev); ++ timeout, false, dev, reg, mask); ++} ++ ++static int ++mtk_wed_rx_reset(struct mtk_wed_device *dev) ++{ ++ struct mtk_wed_wo *wo = dev->hw->wed_wo; ++ u8 val = MTK_WED_WO_STATE_SER_RESET; ++ int i, ret; ++ ++ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, ++ MTK_WED_WO_CMD_CHANGE_STATE, &val, ++ sizeof(val), true); ++ if (ret) ++ return ret; ++ ++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); ++ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, ++ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); ++ if (ret) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); ++ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); ++ } else { ++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, ++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX | ++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX); ++ ++ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, ++ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | ++ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); ++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, ++ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | ++ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); ++ ++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); ++ } ++ ++ /* reset rro qm */ ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); ++ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_RX_RRO_QM_BUSY); ++ if (ret) { ++ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); ++ } else { ++ wed_set(dev, MTK_WED_RROQM_RST_IDX, ++ MTK_WED_RROQM_RST_IDX_MIOD | ++ MTK_WED_RROQM_RST_IDX_FDBK); ++ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); ++ } ++ ++ /* reset route qm */ ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); ++ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_RX_ROUTE_QM_BUSY); ++ if (ret) ++ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); ++ else ++ wed_set(dev, MTK_WED_RTQM_GLO_CFG, ++ MTK_WED_RTQM_Q_RST); ++ ++ /* reset tx wdma */ ++ mtk_wdma_tx_reset(dev); ++ ++ /* reset tx wdma drv */ ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); ++ mtk_wed_poll_busy(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); ++ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); ++ ++ /* reset wed rx dma */ ++ ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_RX_DMA_BUSY); ++ 
wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); ++ if (ret) { ++ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); ++ } else { ++ struct mtk_eth *eth = dev->hw->eth; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ++ wed_set(dev, MTK_WED_RESET_IDX, ++ MTK_WED_RESET_IDX_RX_V2); ++ else ++ wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_RESET_IDX, 0); ++ } ++ ++ /* reset rx bm */ ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); ++ mtk_wed_poll_busy(dev, MTK_WED_CTRL, ++ MTK_WED_CTRL_WED_RX_BM_BUSY); ++ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); ++ ++ /* wo change to enable state */ ++ val = MTK_WED_WO_STATE_ENABLE; ++ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, ++ MTK_WED_WO_CMD_CHANGE_STATE, &val, ++ sizeof(val), true); ++ if (ret) ++ return ret; ++ ++ /* wed_rx_ring_reset */ ++ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { ++ if (!dev->rx_ring[i].desc) ++ continue; ++ ++ mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, ++ false); ++ } ++ mtk_wed_free_rx_buffer(dev); ++ ++ return 0; + } + + static void +@@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device + true); + } + +- if (mtk_wed_poll_busy(dev)) +- busy = mtk_wed_check_busy(dev); +- ++ /* 1. reset WED tx DMA */ ++ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); ++ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, ++ MTK_WED_GLO_CFG_TX_DMA_BUSY); + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); + } else { +- wed_w32(dev, MTK_WED_RESET_IDX, +- MTK_WED_RESET_IDX_TX | +- MTK_WED_RESET_IDX_RX); ++ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + +- mtk_wdma_rx_reset(dev); ++ /* 2. reset WDMA rx DMA */ ++ busy = !!mtk_wdma_rx_reset(dev); ++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); ++ if (!busy) ++ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, ++ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); +@@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + } + ++ /* 3. reset WED WPDMA tx */ ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); ++ + for (i = 0; i < 100; i++) { + val = wed_r32(dev, MTK_WED_TX_BM_INTF); + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) +@@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device + } + + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); ++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + ++ /* 4. 
reset WED WPDMA tx */ ++ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); ++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); ++ if (!busy) ++ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, ++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); ++ + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); +@@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device + MTK_WED_WPDMA_RESET_IDX_RX); + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); + } ++ ++ dev->init_done = false; ++ if (dev->hw->version == 1) ++ return; ++ ++ if (!busy) { ++ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); ++ wed_w32(dev, MTK_WED_RESET_IDX, 0); ++ } ++ ++ mtk_wed_rx_reset(dev); + } + + static int +@@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev + { + int i; + ++ if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) ++ return; ++ + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) + if (!dev->rx_wdma[i].desc) + mtk_wed_wdma_rx_ring_setup(dev, i, 16); +@@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de + goto out; + + if (mtk_wed_get_rx_capa(dev)) { +- ret = mtk_wed_rx_buffer_alloc(dev); +- if (ret) +- goto out; +- + ret = mtk_wed_rro_alloc(dev); + if (ret) + goto out; +--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h +@@ -24,11 +24,15 @@ struct mtk_wdma_desc { + + #define MTK_WED_RESET 0x008 + #define MTK_WED_RESET_TX_BM BIT(0) ++#define MTK_WED_RESET_RX_BM BIT(1) + #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) + #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) + #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) ++#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10) + #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) + #define MTK_WED_RESET_WED_TX_DMA BIT(12) ++#define MTK_WED_RESET_WED_RX_DMA BIT(13) ++#define MTK_WED_RESET_WDMA_TX_DRV BIT(16) + #define MTK_WED_RESET_WDMA_RX_DRV BIT(17) + #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) + #define MTK_WED_RESET_RX_RRO_QM BIT(20) +@@ -158,6 +162,8 @@ struct mtk_wdma_desc { + #define MTK_WED_RESET_IDX 0x20c + #define MTK_WED_RESET_IDX_TX GENMASK(3, 0) + #define MTK_WED_RESET_IDX_RX GENMASK(17, 16) ++#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6) ++#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30) + + #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) + #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4) +@@ -267,6 +273,9 @@ struct mtk_wdma_desc { + + #define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c + #define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0) ++#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1) ++#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3) ++#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4) + #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7) + #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24) + diff --git a/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch b/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch new file mode 100644 index 0000000000..602483bcb8 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch @@ -0,0 +1,103 @@ +From: Lorenzo Bianconi +Date: Thu, 24 Nov 2022 16:22:55 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add reset to tx_ring_setup callback + +Introduce reset parameter to mtk_wed_tx_ring_setup signature. 
+This is a preliminary patch to add Wireless Ethernet Dispatcher reset +support. + +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -1188,7 +1188,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device + } + + static int +-mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, ++ bool reset) + { + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; + struct mtk_wed_ring *wdma; +@@ -1197,8 +1198,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we + return -EINVAL; + + wdma = &dev->rx_wdma[idx]; +- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, +- true)) ++ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, ++ desc_size, true)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, +@@ -1396,7 +1397,7 @@ mtk_wed_start(struct mtk_wed_device *dev + + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) + if (!dev->rx_wdma[i].desc) +- mtk_wed_wdma_rx_ring_setup(dev, i, 16); ++ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); + + mtk_wed_hw_init(dev); + mtk_wed_configure_irq(dev, irq_mask); +@@ -1505,7 +1506,8 @@ unlock: + } + + static int +-mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) ++mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, ++ bool reset) + { + struct mtk_wed_ring *ring = &dev->tx_ring[idx]; + +@@ -1524,11 +1526,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev + if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) + return -EINVAL; + +- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, +- sizeof(*ring->desc), true)) ++ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, ++ sizeof(*ring->desc), true)) + return -ENOMEM; + +- if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, ++ reset)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_TX(idx); +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -158,7 +158,7 @@ struct mtk_wed_device { + struct mtk_wed_ops { + int (*attach)(struct mtk_wed_device *dev); + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, +- void __iomem *regs); ++ void __iomem *regs, bool reset); + int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + int (*txfree_ring_setup)(struct mtk_wed_device *dev, +@@ -216,8 +216,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic + #define mtk_wed_device_active(_dev) !!(_dev)->ops + #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) + #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) +-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ +- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \ ++ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset) + #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ + (_dev)->ops->txfree_ring_setup(_dev, _regs) + #define mtk_wed_device_reg_read(_dev, _reg) \ +@@ -243,7 +243,7 @@ static inline bool mtk_wed_device_active + } + #define mtk_wed_device_detach(_dev) do {} while (0) + #define mtk_wed_device_start(_dev, _mask) do {} while (0) +-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define 
mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV + #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV + #define mtk_wed_device_reg_read(_dev, _reg) 0 + #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) diff --git a/target/linux/generic/backport-5.15/729-15-v6.2-net-ethernet-mtk_wed-fix-sleep-while-atomic-in-mtk_w.patch b/target/linux/generic/backport-5.15/729-15-v6.2-net-ethernet-mtk_wed-fix-sleep-while-atomic-in-mtk_w.patch new file mode 100644 index 0000000000..f9b11326b1 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-15-v6.2-net-ethernet-mtk_wed-fix-sleep-while-atomic-in-mtk_w.patch @@ -0,0 +1,103 @@ +From: Lorenzo Bianconi +Date: Thu, 1 Dec 2022 16:26:53 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: fix sleep while atomic in + mtk_wed_wo_queue_refill + +In order to fix the following sleep while atomic bug always alloc pages +with GFP_ATOMIC in mtk_wed_wo_queue_refill since page_frag_alloc runs in +spin_lock critical section. + +[ 9.049719] Hardware name: MediaTek MT7986a RFB (DT) +[ 9.054665] Call trace: +[ 9.057096] dump_backtrace+0x0/0x154 +[ 9.060751] show_stack+0x14/0x1c +[ 9.064052] dump_stack_lvl+0x64/0x7c +[ 9.067702] dump_stack+0x14/0x2c +[ 9.071001] ___might_sleep+0xec/0x120 +[ 9.074736] __might_sleep+0x4c/0x9c +[ 9.078296] __alloc_pages+0x184/0x2e4 +[ 9.082030] page_frag_alloc_align+0x98/0x1ac +[ 9.086369] mtk_wed_wo_queue_refill+0x134/0x234 +[ 9.090974] mtk_wed_wo_init+0x174/0x2c0 +[ 9.094881] mtk_wed_attach+0x7c8/0x7e0 +[ 9.098701] mt7915_mmio_wed_init+0x1f0/0x3a0 [mt7915e] +[ 9.103940] mt7915_pci_probe+0xec/0x3bc [mt7915e] +[ 9.108727] pci_device_probe+0xac/0x13c +[ 9.112638] really_probe.part.0+0x98/0x2f4 +[ 9.116807] __driver_probe_device+0x94/0x13c +[ 9.121147] driver_probe_device+0x40/0x114 +[ 9.125314] __driver_attach+0x7c/0x180 +[ 9.129133] bus_for_each_dev+0x5c/0x90 +[ 9.132953] driver_attach+0x20/0x2c +[ 9.136513] bus_add_driver+0x104/0x1fc +[ 9.140333] driver_register+0x74/0x120 +[ 9.144153] __pci_register_driver+0x40/0x50 +[ 9.148407] mt7915_init+0x5c/0x1000 [mt7915e] +[ 9.152848] do_one_initcall+0x40/0x25c +[ 9.156669] do_init_module+0x44/0x230 +[ 9.160403] load_module+0x1f30/0x2750 +[ 9.164135] __do_sys_init_module+0x150/0x200 +[ 9.168475] __arm64_sys_init_module+0x18/0x20 +[ 9.172901] invoke_syscall.constprop.0+0x4c/0xe0 +[ 9.177589] do_el0_svc+0x48/0xe0 +[ 9.180889] el0_svc+0x14/0x50 +[ 9.183929] el0t_64_sync_handler+0x9c/0x120 +[ 9.188183] el0t_64_sync+0x158/0x15c + +Fixes: 799684448e3e ("net: ethernet: mtk_wed: introduce wed wo support") +Signed-off-by: Lorenzo Bianconi +Reviewed-by: Pavan Chebbi +Link: https://lore.kernel.org/r/67ca94bdd3d9eaeb86e52b3050fbca0bcf7bb02f.1669908312.git.lorenzo@kernel.org +Signed-off-by: Jakub Kicinski +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c +@@ -133,17 +133,18 @@ mtk_wed_wo_dequeue(struct mtk_wed_wo *wo + + static int + mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, +- gfp_t gfp, bool rx) ++ bool rx) + { + enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + int n_buf = 0; + + spin_lock_bh(&q->lock); + while (q->queued < q->n_desc) { +- void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp); + struct mtk_wed_wo_queue_entry *entry; + dma_addr_t addr; ++ void *buf; + ++ buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC); + if (!buf) + break; + +@@ -215,7 +216,7 @@ mtk_wed_wo_rx_run_queue(struct mtk_wed_w + mtk_wed_mcu_rx_unsolicited_event(wo, skb); + } + +- if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) { ++ if (mtk_wed_wo_queue_refill(wo, q, true)) { + u32 index = (q->head - 1) % q->n_desc; + + mtk_wed_wo_queue_kick(wo, q, index); +@@ -432,7 +433,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_ + if (ret) + goto error; + +- mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false); ++ mtk_wed_wo_queue_refill(wo, &wo->q_tx, false); + mtk_wed_wo_queue_reset(wo, &wo->q_tx); + + regs.desc_base = MTK_WED_WO_CCIF_DUMMY5; +@@ -446,7 +447,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_ + if (ret) + goto error; + +- mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true); ++ mtk_wed_wo_queue_refill(wo, &wo->q_rx, true); + mtk_wed_wo_queue_reset(wo, &wo->q_rx); + + /* rx queue irqmask */ diff --git a/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch b/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch new file mode 100644 index 0000000000..fa6f56dbe7 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-16-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-rx-qu.patch @@ -0,0 +1,52 @@ +From: Lorenzo Bianconi +Date: Tue, 10 Jan 2023 10:31:26 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for rx queue + +Queue spinlock is currently held in mtk_wed_wo_queue_rx_clean and +mtk_wed_wo_queue_refill routines for MTK Wireless Ethernet Dispatcher +MCU rx queue. mtk_wed_wo_queue_refill() is running during initialization +and in rx tasklet while mtk_wed_wo_queue_rx_clean() is running in +mtk_wed_wo_hw_deinit() during hw de-init phase after rx tasklet has been +disabled. Since mtk_wed_wo_queue_rx_clean and mtk_wed_wo_queue_refill +routines can't run concurrently get rid of spinlock for mcu rx queue. + +Reviewed-by: Alexander Duyck +Signed-off-by: Lorenzo Bianconi +Link: https://lore.kernel.org/r/36ec3b729542ea60898471d890796f745479ba32.1673342990.git.lorenzo@kernel.org +Signed-off-by: Jakub Kicinski +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c +@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w + enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + int n_buf = 0; + +- spin_lock_bh(&q->lock); + while (q->queued < q->n_desc) { + struct mtk_wed_wo_queue_entry *entry; + dma_addr_t addr; +@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w + q->queued++; + n_buf++; + } +- spin_unlock_bh(&q->lock); + + return n_buf; + } +@@ -316,7 +314,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed + { + struct page *page; + +- spin_lock_bh(&q->lock); + for (;;) { + void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); + +@@ -325,7 +322,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed + + skb_free_frag(buf); + } +- spin_unlock_bh(&q->lock); + + if (!q->cache.va) + return; diff --git a/target/linux/generic/backport-5.15/729-17-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-tx-qu.patch b/target/linux/generic/backport-5.15/729-17-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-tx-qu.patch new file mode 100644 index 0000000000..9b1e4c3250 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-17-v6.3-net-ethernet-mtk_wed-get-rid-of-queue-lock-for-tx-qu.patch @@ -0,0 +1,75 @@ +From: Lorenzo Bianconi +Date: Thu, 12 Jan 2023 10:21:29 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for tx queue + +Similar to MTK Wireless Ethernet Dispatcher (WED) MCU rx queue, +we do not need to protect WED MCU tx queue with a spin lock since +the tx queue is accessed in the two following routines: +- mtk_wed_wo_queue_tx_skb(): + it is run at initialization and during mt7915 normal operation. + Moreover MCU messages are serialized through MCU mutex. +- mtk_wed_wo_queue_tx_clean(): + it runs just at mt7915 driver module unload when no more messages + are sent to the MCU. + +Remove tx queue spinlock. + +Signed-off-by: Lorenzo Bianconi +Link: https://lore.kernel.org/r/7bd0337b2a13ab1a63673b7c03fd35206b3b284e.1673515140.git.lorenzo@kernel.org +Signed-off-by: Jakub Kicinski +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c +@@ -258,7 +258,6 @@ mtk_wed_wo_queue_alloc(struct mtk_wed_wo + int n_desc, int buf_size, int index, + struct mtk_wed_wo_queue_regs *regs) + { +- spin_lock_init(&q->lock); + q->regs = *regs; + q->n_desc = n_desc; + q->buf_size = buf_size; +@@ -290,7 +289,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed + struct page *page; + int i; + +- spin_lock_bh(&q->lock); + for (i = 0; i < q->n_desc; i++) { + struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; + +@@ -299,7 +297,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed + skb_free_frag(entry->buf); + entry->buf = NULL; + } +- spin_unlock_bh(&q->lock); + + if (!q->cache.va) + return; +@@ -347,8 +344,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_w + int ret = 0, index; + u32 ctrl; + +- spin_lock_bh(&q->lock); +- + q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); + index = (q->head + 1) % q->n_desc; + if (q->tail == index) { +@@ -379,8 +374,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_w + mtk_wed_wo_queue_kick(wo, q, q->head); + mtk_wed_wo_kickout(wo); + out: +- spin_unlock_bh(&q->lock); +- + dev_kfree_skb(skb); + + return ret; +--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h +@@ -211,7 +211,6 @@ struct mtk_wed_wo_queue { + struct mtk_wed_wo_queue_regs regs; + + struct page_frag_cache cache; +- spinlock_t lock; + + struct mtk_wed_wo_queue_desc *desc; + dma_addr_t desc_dma; diff --git a/target/linux/generic/backport-5.15/729-18-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_reset-util.patch 
b/target/linux/generic/backport-5.15/729-18-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_reset-util.patch new file mode 100644 index 0000000000..f5e9dddd42 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-18-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_reset-util.patch @@ -0,0 +1,70 @@ +From: Lorenzo Bianconi +Date: Sat, 14 Jan 2023 18:01:28 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_hw_reset utility + routine + +This is a preliminary patch to add Wireless Ethernet Dispatcher reset +support. + +Reviewed-by: Leon Romanovsky +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3199,6 +3199,27 @@ static void mtk_set_mcr_max_rx(struct mt + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); + } + ++static void mtk_hw_reset(struct mtk_eth *eth) ++{ ++ u32 val; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { ++ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); ++ val = RSTCTRL_PPE0_V2; ++ } else { ++ val = RSTCTRL_PPE0; ++ } ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) ++ val |= RSTCTRL_PPE1; ++ ++ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ++ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, ++ 0x3ffffff); ++} ++ + static int mtk_hw_init(struct mtk_eth *eth) + { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | +@@ -3238,22 +3259,9 @@ static int mtk_hw_init(struct mtk_eth *e + return 0; + } + +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { +- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); +- val = RSTCTRL_PPE0_V2; +- } else { +- val = RSTCTRL_PPE0; +- } +- +- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) +- val |= RSTCTRL_PPE1; +- +- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); ++ mtk_hw_reset(eth); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { +- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, +- 0x3ffffff); +- + /* Set FE to PDMAv2 if necessary */ + val = mtk_r32(eth, MTK_FE_GLO_MISC); + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); diff --git a/target/linux/generic/backport-5.15/729-19-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_warm_reset.patch b/target/linux/generic/backport-5.15/729-19-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_warm_reset.patch new file mode 100644 index 0000000000..f2c9a7e53d --- /dev/null +++ b/target/linux/generic/backport-5.15/729-19-v6.3-net-ethernet-mtk_eth_soc-introduce-mtk_hw_warm_reset.patch @@ -0,0 +1,107 @@ +From: Lorenzo Bianconi +Date: Sat, 14 Jan 2023 18:01:29 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce mtk_hw_warm_reset + support + +Introduce mtk_hw_warm_reset utility routine. This is a preliminary patch +to align reset procedure to vendor sdk and avoid to power down the chip +during hw reset. 
+ +Reviewed-by: Leon Romanovsky +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3220,7 +3220,54 @@ static void mtk_hw_reset(struct mtk_eth + 0x3ffffff); + } + +-static int mtk_hw_init(struct mtk_eth *eth) ++static u32 mtk_hw_reset_read(struct mtk_eth *eth) ++{ ++ u32 val; ++ ++ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); ++ return val; ++} ++ ++static void mtk_hw_warm_reset(struct mtk_eth *eth) ++{ ++ u32 rst_mask, val; ++ ++ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, ++ RSTCTRL_FE); ++ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, ++ val & RSTCTRL_FE, 1, 1000)) { ++ dev_err(eth->dev, "warm reset failed\n"); ++ mtk_hw_reset(eth); ++ return; ++ } ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) ++ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2; ++ else ++ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) ++ rst_mask |= RSTCTRL_PPE1; ++ ++ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); ++ ++ udelay(1); ++ val = mtk_hw_reset_read(eth); ++ if (!(val & rst_mask)) ++ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", ++ val, rst_mask); ++ ++ rst_mask |= RSTCTRL_FE; ++ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); ++ ++ udelay(1); ++ val = mtk_hw_reset_read(eth); ++ if (val & rst_mask) ++ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", ++ val, rst_mask); ++} ++ ++static int mtk_hw_init(struct mtk_eth *eth, bool reset) + { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | + ETHSYS_DMA_AG_MAP_PPE; +@@ -3259,7 +3306,12 @@ static int mtk_hw_init(struct mtk_eth *e + return 0; + } + +- mtk_hw_reset(eth); ++ msleep(100); ++ ++ if (reset) ++ mtk_hw_warm_reset(eth); ++ else ++ mtk_hw_reset(eth); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* Set FE to PDMAv2 if necessary */ + val = mtk_r32(eth, MTK_FE_GLO_MISC); + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); +@@ -3464,7 +3516,7 @@ static void mtk_pending_work(struct work + if (eth->dev->pins) + pinctrl_select_state(eth->dev->pins->p, + eth->dev->pins->default_state); +- mtk_hw_init(eth); ++ mtk_hw_init(eth, true); + + /* restart DMA and enable IRQs */ + for (i = 0; i < MTK_MAC_COUNT; i++) { +@@ -4056,7 +4108,7 @@ static int mtk_probe(struct platform_dev + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + INIT_WORK(&eth->pending_work, mtk_pending_work); + +- err = mtk_hw_init(eth); ++ err = mtk_hw_init(eth, false); + if (err) + goto err_wed_exit; + diff --git a/target/linux/generic/backport-5.15/729-20-v6.3-net-ethernet-mtk_eth_soc-align-reset-procedure-to-ve.patch b/target/linux/generic/backport-5.15/729-20-v6.3-net-ethernet-mtk_eth_soc-align-reset-procedure-to-ve.patch new file mode 100644 index 0000000000..02e34bb7cd --- /dev/null +++ b/target/linux/generic/backport-5.15/729-20-v6.3-net-ethernet-mtk_eth_soc-align-reset-procedure-to-ve.patch @@ -0,0 +1,262 @@ +From: Lorenzo Bianconi +Date: Sat, 14 Jan 2023 18:01:30 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: align reset procedure to vendor + sdk + +Avoid to power-down the ethernet chip during hw reset and align reset +procedure to vendor sdk. 
+ +Reviewed-by: Leon Romanovsky +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -2785,14 +2785,29 @@ static void mtk_dma_free(struct mtk_eth + kfree(eth->scratch_head); + } + ++static bool mtk_hw_reset_check(struct mtk_eth *eth) ++{ ++ u32 val = mtk_r32(eth, MTK_INT_STATUS2); ++ ++ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) || ++ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) || ++ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL); ++} ++ + static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue) + { + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + ++ if (test_bit(MTK_RESETTING, &eth->state)) ++ return; ++ ++ if (!mtk_hw_reset_check(eth)) ++ return; ++ + eth->netdev[mac->id]->stats.tx_errors++; +- netif_err(eth, tx_err, dev, +- "transmit timed out\n"); ++ netif_err(eth, tx_err, dev, "transmit timed out\n"); ++ + schedule_work(&eth->pending_work); + } + +@@ -3274,15 +3289,17 @@ static int mtk_hw_init(struct mtk_eth *e + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + int i, val, ret; + +- if (test_and_set_bit(MTK_HW_INIT, &eth->state)) ++ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state)) + return 0; + +- pm_runtime_enable(eth->dev); +- pm_runtime_get_sync(eth->dev); ++ if (!reset) { ++ pm_runtime_enable(eth->dev); ++ pm_runtime_get_sync(eth->dev); + +- ret = mtk_clk_enable(eth); +- if (ret) +- goto err_disable_pm; ++ ret = mtk_clk_enable(eth); ++ if (ret) ++ goto err_disable_pm; ++ } + + if (eth->ethsys) + regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, +@@ -3408,8 +3425,10 @@ static int mtk_hw_init(struct mtk_eth *e + return 0; + + err_disable_pm: +- pm_runtime_put_sync(eth->dev); +- pm_runtime_disable(eth->dev); ++ if (!reset) { ++ pm_runtime_put_sync(eth->dev); ++ pm_runtime_disable(eth->dev); ++ } + + return ret; + } +@@ -3488,30 +3507,53 @@ static int mtk_do_ioctl(struct net_devic + return -EOPNOTSUPP; + } + ++static void mtk_prepare_for_reset(struct mtk_eth *eth) ++{ ++ u32 val; ++ int i; ++ ++ /* disabe FE P3 and P4 */ ++ val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3; ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) ++ val |= MTK_FE_LINK_DOWN_P4; ++ mtk_w32(eth, val, MTK_FE_GLO_CFG); ++ ++ /* adjust PPE configurations to prepare for reset */ ++ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) ++ mtk_ppe_prepare_reset(eth->ppe[i]); ++ ++ /* disable NETSYS interrupts */ ++ mtk_w32(eth, 0, MTK_FE_INT_ENABLE); ++ ++ /* force link down GMAC */ ++ for (i = 0; i < 2; i++) { ++ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; ++ mtk_w32(eth, val, MTK_MAC_MCR(i)); ++ } ++} ++ + static void mtk_pending_work(struct work_struct *work) + { + struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); +- int err, i; + unsigned long restart = 0; ++ u32 val; ++ int i; + + rtnl_lock(); +- +- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); + set_bit(MTK_RESETTING, &eth->state); + ++ mtk_prepare_for_reset(eth); ++ + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { +- if (!eth->netdev[i]) ++ if (!eth->netdev[i] || !netif_running(eth->netdev[i])) + continue; ++ + mtk_stop(eth->netdev[i]); + __set_bit(i, &restart); + } +- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); + +- /* 
restart underlying hardware such as power, clock, pin mux +- * and the connected phy +- */ +- mtk_hw_deinit(eth); ++ usleep_range(15000, 16000); + + if (eth->dev->pins) + pinctrl_select_state(eth->dev->pins->p, + eth->dev->pins->default_state); +@@ -3522,15 +3564,19 @@ static void mtk_pending_work(struct work + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!test_bit(i, &restart)) + continue; +- err = mtk_open(eth->netdev[i]); +- if (err) { ++ ++ if (mtk_open(eth->netdev[i])) { + netif_alert(eth, ifup, eth->netdev[i], +- "Driver up/down cycle failed, closing device.\n"); ++ "Driver up/down cycle failed\n"); + dev_close(eth->netdev[i]); + } + } + +- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); ++ /* enabe FE P3 and P4 */ ++ val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3; ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) ++ val &= ~MTK_FE_LINK_DOWN_P4; ++ mtk_w32(eth, val, MTK_FE_GLO_CFG); + + clear_bit(MTK_RESETTING, &eth->state); + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -72,12 +72,24 @@ + #define MTK_HW_LRO_REPLACE_DELTA 1000 + #define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 + ++/* Frame Engine Global Configuration */ ++#define MTK_FE_GLO_CFG 0x00 ++#define MTK_FE_LINK_DOWN_P3 BIT(11) ++#define MTK_FE_LINK_DOWN_P4 BIT(12) ++ + /* Frame Engine Global Reset Register */ + #define MTK_RST_GL 0x04 + #define RST_GL_PSE BIT(0) + + /* Frame Engine Interrupt Status Register */ + #define MTK_INT_STATUS2 0x08 ++#define MTK_FE_INT_ENABLE 0x0c ++#define MTK_FE_INT_FQ_EMPTY BIT(8) ++#define MTK_FE_INT_TSO_FAIL BIT(12) ++#define MTK_FE_INT_TSO_ILLEGAL BIT(13) ++#define MTK_FE_INT_TSO_ALIGN BIT(14) ++#define MTK_FE_INT_RFIFO_OV BIT(18) ++#define MTK_FE_INT_RFIFO_UF BIT(19) + #define MTK_GDM1_AF BIT(28) + #define MTK_GDM2_AF BIT(29) + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -716,6 +716,33 @@ int mtk_foe_entry_idle_time(struct mtk_p + return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + } + ++int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) ++{ ++ if (!ppe) ++ return -EINVAL; ++ ++ /* disable KA */ ++ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); ++ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); ++ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0); ++ usleep_range(10000, 11000); ++ ++ /* set KA timer to maximum */ ++ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); ++ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff); ++ ++ /* set KA tick select */ ++ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL); ++ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); ++ usleep_range(10000, 11000); ++ ++ /* disable scan mode */ ++ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE); ++ usleep_range(10000, 11000); ++ ++ return mtk_ppe_wait_busy(ppe); ++} ++ + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, + int version, int index) + { +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -306,6 +306,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ + int version, int index); + void mtk_ppe_start(struct mtk_ppe *ppe); + int mtk_ppe_stop(struct mtk_ppe *ppe); ++int mtk_ppe_prepare_reset(struct mtk_ppe *ppe); + + void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); + +--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +@@ -58,6 +58,12 @@ + #define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) + #define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) + #define 
MTK_PPE_TB_CFG_INFO_SEL BIT(20) ++#define MTK_PPE_TB_TICK_SEL BIT(24) ++ ++#define MTK_PPE_BIND_LMT1 0x230 ++#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16) ++ ++#define MTK_PPE_KEEPALIVE 0x234 + + enum { + MTK_PPE_SCAN_MODE_DISABLED, diff --git a/target/linux/generic/backport-5.15/729-21-v6.3-net-ethernet-mtk_eth_soc-add-dma-checks-to-mtk_hw_re.patch b/target/linux/generic/backport-5.15/729-21-v6.3-net-ethernet-mtk_eth_soc-add-dma-checks-to-mtk_hw_re.patch new file mode 100644 index 0000000000..240e7897c6 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-21-v6.3-net-ethernet-mtk_eth_soc-add-dma-checks-to-mtk_hw_re.patch @@ -0,0 +1,249 @@ +From: Lorenzo Bianconi +Date: Sat, 14 Jan 2023 18:01:31 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: add dma checks to + mtk_hw_reset_check + +Introduce mtk_hw_check_dma_hang routine to monitor possible dma hangs. + +Reviewed-by: Leon Romanovsky +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -50,6 +50,7 @@ static const struct mtk_reg_map mtk_reg_ + .delay_irq = 0x0a0c, + .irq_status = 0x0a20, + .irq_mask = 0x0a28, ++ .adma_rx_dbg0 = 0x0a38, + .int_grp = 0x0a50, + }, + .qdma = { +@@ -79,6 +80,8 @@ static const struct mtk_reg_map mtk_reg_ + [0] = 0x2800, + [1] = 0x2c00, + }, ++ .pse_iq_sta = 0x0110, ++ .pse_oq_sta = 0x0118, + }; + + static const struct mtk_reg_map mt7628_reg_map = { +@@ -109,6 +112,7 @@ static const struct mtk_reg_map mt7986_r + .delay_irq = 0x620c, + .irq_status = 0x6220, + .irq_mask = 0x6228, ++ .adma_rx_dbg0 = 0x6238, + .int_grp = 0x6250, + }, + .qdma = { +@@ -138,6 +142,8 @@ static const struct mtk_reg_map mt7986_r + [0] = 0x4800, + [1] = 0x4c00, + }, ++ .pse_iq_sta = 0x0180, ++ .pse_oq_sta = 0x01a0, + }; + + /* strings used by ethtool */ +@@ -3282,6 +3288,102 @@ static void mtk_hw_warm_reset(struct mtk + val, rst_mask); + } + ++static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) ++{ ++ const struct mtk_reg_map *reg_map = eth->soc->reg_map; ++ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx; ++ bool oq_hang, cdm1_busy, adma_busy; ++ bool wtx_busy, cdm_full, oq_free; ++ u32 wdidx, val, gdm1_fc, gdm2_fc; ++ bool qfsm_hang, qfwd_hang; ++ bool ret = false; ++ ++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) ++ return false; ++ ++ /* WDMA sanity checks */ ++ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); ++ ++ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); ++ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val); ++ ++ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); ++ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val); ++ ++ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && ++ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && ++ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); ++ ++ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { ++ if (++eth->reset.wdma_hang_count > 2) { ++ eth->reset.wdma_hang_count = 0; ++ ret = true; ++ } ++ goto out; ++ } ++ ++ /* QDMA sanity checks */ ++ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); ++ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); ++ ++ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; ++ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; ++ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; ++ gmac2_tx = FIELD_GET(GENMASK(31, 
24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; ++ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); ++ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); ++ ++ if (qfsm_hang && qfwd_hang && ++ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) || ++ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) { ++ if (++eth->reset.qdma_hang_count > 2) { ++ eth->reset.qdma_hang_count = 0; ++ ret = true; ++ } ++ goto out; ++ } ++ ++ /* ADMA sanity checks */ ++ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); ++ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); ++ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && ++ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); ++ ++ if (oq_hang && cdm1_busy && adma_busy) { ++ if (++eth->reset.adma_hang_count > 2) { ++ eth->reset.adma_hang_count = 0; ++ ret = true; ++ } ++ goto out; ++ } ++ ++ eth->reset.wdma_hang_count = 0; ++ eth->reset.qdma_hang_count = 0; ++ eth->reset.adma_hang_count = 0; ++out: ++ eth->reset.wdidx = wdidx; ++ ++ return ret; ++} ++ ++static void mtk_hw_reset_monitor_work(struct work_struct *work) ++{ ++ struct delayed_work *del_work = to_delayed_work(work); ++ struct mtk_eth *eth = container_of(del_work, struct mtk_eth, ++ reset.monitor_work); ++ ++ if (test_bit(MTK_RESETTING, &eth->state)) ++ goto out; ++ ++ /* DMA stuck checks */ ++ if (mtk_hw_check_dma_hang(eth)) ++ schedule_work(&eth->pending_work); ++ ++out: ++ schedule_delayed_work(&eth->reset.monitor_work, ++ MTK_DMA_MONITOR_TIMEOUT); ++} ++ + static int mtk_hw_init(struct mtk_eth *eth, bool reset) + { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | +@@ -3614,6 +3716,7 @@ static int mtk_cleanup(struct mtk_eth *e + mtk_unreg_dev(eth); + mtk_free_dev(eth); + cancel_work_sync(&eth->pending_work); ++ cancel_delayed_work_sync(&eth->reset.monitor_work); + + return 0; + } +@@ -4041,6 +4144,7 @@ static int mtk_probe(struct platform_dev + + eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); ++ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work); + + eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); +@@ -4245,6 +4349,8 @@ static int mtk_probe(struct platform_dev + NAPI_POLL_WEIGHT); + + platform_set_drvdata(pdev, eth); ++ schedule_delayed_work(&eth->reset.monitor_work, ++ MTK_DMA_MONITOR_TIMEOUT); + + return 0; + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -256,6 +256,8 @@ + + #define MTK_RX_DONE_INT_V2 BIT(14) + ++#define MTK_CDM_TXFIFO_RDY BIT(7) ++ + /* QDMA Interrupt grouping registers */ + #define MTK_RLS_DONE_INT BIT(0) + +@@ -537,6 +539,17 @@ + #define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c) + #define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110) + ++#define MTK_FE_CDM1_FSM 0x220 ++#define MTK_FE_CDM2_FSM 0x224 ++#define MTK_FE_CDM3_FSM 0x238 ++#define MTK_FE_CDM4_FSM 0x298 ++#define MTK_FE_CDM5_FSM 0x318 ++#define MTK_FE_CDM6_FSM 0x328 ++#define MTK_FE_GDM1_FSM 0x228 ++#define MTK_FE_GDM2_FSM 0x22C ++ ++#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100)) ++ + struct mtk_rx_dma { + unsigned int rxd1; + unsigned int rxd2; +@@ -933,6 +946,7 @@ struct mtk_reg_map { + u32 delay_irq; /* delay interrupt */ + u32 irq_status; /* interrupt status */ + u32 irq_mask; /* interrupt mask */ ++ u32 adma_rx_dbg0; + u32 int_grp; + } pdma; + struct { +@@ -959,6 +973,8 @@ struct mtk_reg_map { + u32 gdma_to_ppe0; + u32 ppe_base; + u32 wdma_base[2]; ++ u32 pse_iq_sta; ++ u32 pse_oq_sta; + }; + + /* struct 
mtk_eth_data - This is the structure holding all differences +@@ -1001,6 +1017,8 @@ struct mtk_soc_data { + } txrx; + }; + ++#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000) ++ + /* currently no SoC has more than 2 macs */ + #define MTK_MAX_DEVS 2 + +@@ -1123,6 +1141,14 @@ struct mtk_eth { + struct rhashtable flow_table; + + struct bpf_prog __rcu *prog; ++ ++ struct { ++ struct delayed_work monitor_work; ++ u32 wdidx; ++ u8 wdma_hang_count; ++ u8 qdma_hang_count; ++ u8 adma_hang_count; ++ } reset; + }; + + /* struct mtk_mac - the structure that holds the info about the MACs of the diff --git a/target/linux/generic/backport-5.15/729-22-v6.3-net-ethernet-mtk_wed-add-reset-reset_complete-callba.patch b/target/linux/generic/backport-5.15/729-22-v6.3-net-ethernet-mtk_wed-add-reset-reset_complete-callba.patch new file mode 100644 index 0000000000..9f67e97e6f --- /dev/null +++ b/target/linux/generic/backport-5.15/729-22-v6.3-net-ethernet-mtk_wed-add-reset-reset_complete-callba.patch @@ -0,0 +1,124 @@ +From: Lorenzo Bianconi +Date: Sat, 14 Jan 2023 18:01:32 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add reset/reset_complete callbacks + +Introduce reset and reset_complete wlan callback to schedule WLAN driver +reset when ethernet/wed driver is resetting. + +Tested-by: Daniel Golle +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Signed-off-by: Paolo Abeni +--- + +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -3645,6 +3645,11 @@ static void mtk_pending_work(struct work + set_bit(MTK_RESETTING, &eth->state); + + mtk_prepare_for_reset(eth); ++ mtk_wed_fe_reset(); ++ /* Run again reset preliminary configuration in order to avoid any ++ * possible race during FE reset since it can run releasing RTNL lock. 
++ */ ++ mtk_prepare_for_reset(eth); + + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { +@@ -3682,6 +3687,8 @@ static void mtk_pending_work(struct work + + clear_bit(MTK_RESETTING, &eth->state); + ++ mtk_wed_fe_reset_complete(); ++ + rtnl_unlock(); + } + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c ++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -205,6 +205,48 @@ mtk_wed_wo_reset(struct mtk_wed_device * + iounmap(reg); + } + ++void mtk_wed_fe_reset(void) ++{ ++ int i; ++ ++ mutex_lock(&hw_lock); ++ ++ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { ++ struct mtk_wed_hw *hw = hw_list[i]; ++ struct mtk_wed_device *dev = hw->wed_dev; ++ int err; ++ ++ if (!dev || !dev->wlan.reset) ++ continue; ++ ++ /* reset callback blocks until WLAN reset is completed */ ++ err = dev->wlan.reset(dev); ++ if (err) ++ dev_err(dev->dev, "wlan reset failed: %d\n", err); ++ } ++ ++ mutex_unlock(&hw_lock); ++} ++ ++void mtk_wed_fe_reset_complete(void) ++{ ++ int i; ++ ++ mutex_lock(&hw_lock); ++ ++ for (i = 0; i < ARRAY_SIZE(hw_list); i++) { ++ struct mtk_wed_hw *hw = hw_list[i]; ++ struct mtk_wed_device *dev = hw->wed_dev; ++ ++ if (!dev || !dev->wlan.reset_complete) ++ continue; ++ ++ dev->wlan.reset_complete(dev); ++ } ++ ++ mutex_unlock(&hw_lock); ++} ++ + static struct mtk_wed_hw * + mtk_wed_assign(struct mtk_wed_device *dev) + { +--- a/drivers/net/ethernet/mediatek/mtk_wed.h ++++ b/drivers/net/ethernet/mediatek/mtk_wed.h +@@ -128,6 +128,8 @@ void mtk_wed_add_hw(struct device_node * + void mtk_wed_exit(void); + int mtk_wed_flow_add(int index); + void mtk_wed_flow_remove(int index); ++void mtk_wed_fe_reset(void); ++void mtk_wed_fe_reset_complete(void); + #else + static inline void + mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, +@@ -147,6 +149,13 @@ static inline void mtk_wed_flow_remove(i + { + } + ++static inline void mtk_wed_fe_reset(void) ++{ ++} ++ ++static inline void mtk_wed_fe_reset_complete(void) ++{ ++} + #endif + + #ifdef CONFIG_DEBUG_FS +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -151,6 +151,8 @@ struct mtk_wed_device { + void (*release_rx_buf)(struct mtk_wed_device *wed); + void (*update_wo_rx_stats)(struct mtk_wed_device *wed, + struct mtk_wed_wo_rx_stats *stats); ++ int (*reset)(struct mtk_wed_device *wed); ++ void (*reset_complete)(struct mtk_wed_device *wed); + } wlan; + #endif + }; diff --git a/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch b/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch new file mode 100644 index 0000000000..cf81acf491 --- /dev/null +++ b/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch @@ -0,0 +1,106 @@ +From: Lorenzo Bianconi +Date: Mon, 5 Dec 2022 12:34:42 +0100 +Subject: [PATCH] net: ethernet: mtk_wed: add reset to rx_ring_setup callback +This patch adds reset parameter to mtk_wed_rx_ring_setup signature +in order to align rx_ring_setup callback to tx_ring_setup one introduced +in 'commit 23dca7a90017 ("net: ethernet: mtk_wed: add reset to +tx_ring_setup callback")' + +Co-developed-by: Sujuan Chen +Signed-off-by: Sujuan Chen +Signed-off-by: Lorenzo Bianconi +Reviewed-by: Leon Romanovsky +Link: https://lore.kernel.org/r/29c6e7a5469e784406cf3e2920351d1207713d05.1670239984.git.lorenzo@kernel.org +Signed-off-by: Jakub Kicinski +--- + +--- a/drivers/net/ethernet/mediatek/mtk_wed.c 
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c +@@ -1259,7 +1259,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we + } + + static int +-mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size) ++mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, ++ bool reset) + { + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; + struct mtk_wed_ring *wdma; +@@ -1268,8 +1269,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we + return -EINVAL; + + wdma = &dev->tx_wdma[idx]; +- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, +- true)) ++ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, ++ desc_size, true)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, +@@ -1279,6 +1280,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); + ++ if (reset) ++ mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); ++ + if (!idx) { + wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); +@@ -1618,18 +1622,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed + } + + static int +-mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) ++mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, ++ bool reset) + { + struct mtk_wed_ring *ring = &dev->rx_ring[idx]; + + if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) + return -EINVAL; + +- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, +- sizeof(*ring->desc), false)) ++ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, ++ sizeof(*ring->desc), false)) + return -ENOMEM; + +- if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) ++ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, ++ reset)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_RX_DATA(idx); +--- a/include/linux/soc/mediatek/mtk_wed.h ++++ b/include/linux/soc/mediatek/mtk_wed.h +@@ -162,7 +162,7 @@ struct mtk_wed_ops { + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs, bool reset); + int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring, +- void __iomem *regs); ++ void __iomem *regs, bool reset); + int (*txfree_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); + int (*msg_update)(struct mtk_wed_device *dev, int cmd_id, +@@ -230,8 +230,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic + (_dev)->ops->irq_get(_dev, _mask) + #define mtk_wed_device_irq_set_mask(_dev, _mask) \ + (_dev)->ops->irq_set_mask(_dev, _mask) +-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \ +- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \ ++ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset) + #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \ + (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash) + #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \ +@@ -251,7 +251,7 @@ static inline bool mtk_wed_device_active + #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) + #define mtk_wed_device_irq_get(_dev, _mask) 0 + #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) +-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV + #define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0) + #define 
mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV + #define mtk_wed_device_stop(_dev) do {} while (0) diff --git a/target/linux/generic/hack-5.10/100-update-mtk_wed_h.patch b/target/linux/generic/hack-5.10/100-update-mtk_wed_h.patch index fe49100e4f..151f2e8054 100644 --- a/target/linux/generic/hack-5.10/100-update-mtk_wed_h.patch +++ b/target/linux/generic/hack-5.10/100-update-mtk_wed_h.patch @@ -77,11 +77,14 @@ struct mtk_wed_device { #ifdef CONFIG_NET_MEDIATEK_SOC_WED const struct mtk_wed_ops *ops; -@@ -28,30 +83,71 @@ struct mtk_wed_device { +@@ -28,30 +83,76 @@ struct mtk_wed_device { bool init_done, running; int wdma_idx; int irq; + u8 version; ++ ++ /* used by wlan driver */ ++ u32 rev_id; struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES]; @@ -148,15 +151,19 @@ + void (*release_rx_buf)(struct mtk_wed_device *wed); + void (*update_wo_rx_stats)(struct mtk_wed_device *wed, + struct mtk_wed_wo_rx_stats *stats); ++ int (*reset)(struct mtk_wed_device *wed); ++ void (*reset_complete)(struct mtk_wed_device *wed); } wlan; #endif }; -@@ -60,9 +156,15 @@ struct mtk_wed_ops { +@@ -59,10 +160,16 @@ struct mtk_wed_device { + struct mtk_wed_ops { int (*attach)(struct mtk_wed_device *dev); int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, - void __iomem *regs); +- void __iomem *regs); ++ void __iomem *regs, bool reset); + int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring, -+ void __iomem *regs); ++ void __iomem *regs, bool reset); int (*txfree_ring_setup)(struct mtk_wed_device *dev, void __iomem *regs); + int (*msg_update)(struct mtk_wed_device *dev, int cmd_id, @@ -167,7 +174,7 @@ void (*stop)(struct mtk_wed_device *dev); void (*start)(struct mtk_wed_device *dev, u32 irq_mask); -@@ -97,6 +199,16 @@ mtk_wed_device_attach(struct mtk_wed_dev +@@ -97,12 +204,22 @@ mtk_wed_device_attach(struct mtk_wed_dev return ret; } @@ -184,26 +191,45 @@ #ifdef CONFIG_NET_MEDIATEK_SOC_WED #define mtk_wed_device_active(_dev) !!(_dev)->ops #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) -@@ -113,6 +225,12 @@ mtk_wed_device_attach(struct mtk_wed_dev + #define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) +-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ +- (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \ ++ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset) + #define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ + (_dev)->ops->txfree_ring_setup(_dev, _regs) + #define mtk_wed_device_reg_read(_dev, _reg) \ +@@ -113,6 +230,14 @@ mtk_wed_device_attach(struct mtk_wed_dev (_dev)->ops->irq_get(_dev, _mask) #define mtk_wed_device_irq_set_mask(_dev, _mask) \ (_dev)->ops->irq_set_mask(_dev, _mask) -+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \ -+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs) ++#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \ ++ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset) +#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \ + (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash) +#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \ + (_dev)->ops->msg_update(_dev, _id, _msg, _len) ++#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev) ++#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev) #else static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) { -@@ -126,6 +244,9 @@ static inline bool mtk_wed_device_active +@@ -120,12 +245,17 @@ 
static inline bool mtk_wed_device_active + } + #define mtk_wed_device_detach(_dev) do {} while (0) + #define mtk_wed_device_start(_dev, _mask) do {} while (0) +-#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV ++#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV + #define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV + #define mtk_wed_device_reg_read(_dev, _reg) 0 #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) #define mtk_wed_device_irq_get(_dev, _mask) 0 #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) +#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV +#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0) +#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV ++#define mtk_wed_device_stop(_dev) do {} while (0) ++#define mtk_wed_device_dma_reset(_dev) do {} while (0) #endif #endif diff --git a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch index 3be68a9e35..b046c3eb5f 100644 --- a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch +++ b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -2802,8 +2802,8 @@ static irqreturn_t mtk_handle_irq_rx(int +@@ -2823,8 +2823,8 @@ static irqreturn_t mtk_handle_irq_rx(int eth->rx_events++; if (likely(napi_schedule_prep(&eth->rx_napi))) { -@@ -2815,8 +2815,8 @@ static irqreturn_t mtk_handle_irq_tx(int +@@ -2836,8 +2836,8 @@ static irqreturn_t mtk_handle_irq_tx(int eth->tx_events++; if (likely(napi_schedule_prep(&eth->tx_napi))) { -@@ -4120,6 +4120,8 @@ static int mtk_probe(struct platform_dev +@@ -4350,6 +4350,8 @@ static int mtk_probe(struct platform_dev * for NAPI to work */ init_dummy_netdev(&eth->dummy_dev); diff --git a/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch b/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch index c76a00eabd..26b88e4967 100644 --- a/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch +++ b/target/linux/generic/pending-5.15/731-net-ethernet-mediatek-ppe-add-support-for-flow-accou.patch @@ -53,7 +53,7 @@ v2: fix wrong variable name in
return value check spotted by Denis Kirjanov .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), -@@ -4250,6 +4253,7 @@ static const struct mtk_soc_data mt7629_ +@@ -4482,6 +4485,7 @@ static const struct mtk_soc_data mt7629_ .hw_features = MTK_HW_FEATURES, .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, @@ -80,7 +80,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), -@@ -4270,6 +4274,7 @@ static const struct mtk_soc_data mt7986_ +@@ -4502,6 +4506,7 @@ static const struct mtk_soc_data mt7986_ .offload_version = 2, .hash_offset = 4, .foe_entry_size = sizeof(struct mtk_foe_entry), @@ -90,7 +90,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov .rxd_size = sizeof(struct mtk_rx_dma_v2), --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -983,6 +983,7 @@ struct mtk_soc_data { +@@ -1007,6 +1007,7 @@ struct mtk_soc_data { u8 hash_offset; u16 foe_entry_size; netdev_features_t hw_features; @@ -171,8 +171,8 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov dma_wmb(); mtk_ppe_cache_clear(ppe); -@@ -716,14 +766,42 @@ int mtk_foe_entry_idle_time(struct mtk_p - return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); +@@ -743,14 +793,42 @@ int mtk_ppe_prepare_reset(struct mtk_ppe + return mtk_ppe_wait_busy(ppe); } +struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, @@ -215,7 +215,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); if (!ppe) -@@ -738,6 +816,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ +@@ -765,6 +843,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ ppe->eth = eth; ppe->dev = dev; ppe->version = version; @@ -223,7 +223,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * soc->foe_entry_size, -@@ -753,6 +832,25 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ +@@ -780,6 +859,25 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_ if (!ppe->foe_flow) return NULL; @@ -249,7 +249,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov mtk_ppe_debugfs_init(ppe, index); return ppe; -@@ -867,6 +965,16 @@ void mtk_ppe_start(struct mtk_ppe *ppe) +@@ -894,6 +992,16 @@ void mtk_ppe_start(struct mtk_ppe *ppe) ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777); ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f); } @@ -319,8 +319,8 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov + int version, int index, bool accounting); void mtk_ppe_start(struct mtk_ppe *ppe); int mtk_ppe_stop(struct mtk_ppe *ppe); - -@@ -354,5 +373,7 @@ int mtk_foe_entry_commit(struct mtk_ppe + int mtk_ppe_prepare_reset(struct mtk_ppe *ppe); +@@ -355,5 +374,7 @@ int mtk_foe_entry_commit(struct mtk_ppe void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index); @@ -386,7 +386,7 @@ v2: fix wrong variable name in return value check spotted by Denis Kirjanov --- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h -@@ -143,6 +143,20 @@ enum { +@@ -149,6 +149,20 @@ enum { #define MTK_PPE_MIB_TB_BASE 0x338 diff --git 
a/target/linux/generic/pending-5.15/732-02-net-ethernet-mtk_eth_soc-increase-tx-ring-side-for-Q.patch b/target/linux/generic/pending-5.15/732-02-net-ethernet-mtk_eth_soc-increase-tx-ring-side-for-Q.patch index c6526a39a8..85c29e77de 100644 --- a/target/linux/generic/pending-5.15/732-02-net-ethernet-mtk_eth_soc-increase-tx-ring-side-for-Q.patch +++ b/target/linux/generic/pending-5.15/732-02-net-ethernet-mtk_eth_soc-increase-tx-ring-side-for-Q.patch @@ -12,7 +12,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -894,7 +894,7 @@ static int mtk_init_fq_dma(struct mtk_et +@@ -900,7 +900,7 @@ static int mtk_init_fq_dma(struct mtk_et { const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; @@ -21,7 +21,7 @@ Signed-off-by: Felix Fietkau dma_addr_t dma_addr; int i; -@@ -2148,19 +2148,25 @@ static int mtk_tx_alloc(struct mtk_eth * +@@ -2154,19 +2154,25 @@ static int mtk_tx_alloc(struct mtk_eth * struct mtk_tx_ring *ring = ð->tx_ring; int i, sz = soc->txrx.txd_size; struct mtk_tx_dma_v2 *txd; @@ -51,7 +51,7 @@ Signed-off-by: Felix Fietkau u32 next_ptr = ring->phys + next * sz; txd = ring->dma + i * sz; -@@ -2180,22 +2186,22 @@ static int mtk_tx_alloc(struct mtk_eth * +@@ -2186,22 +2192,22 @@ static int mtk_tx_alloc(struct mtk_eth * * descriptors in ring->dma_pdma. */ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { @@ -79,7 +79,7 @@ Signed-off-by: Felix Fietkau ring->thresh = MAX_SKB_FRAGS; /* make sure that all changes to the dma ring are flushed before we -@@ -2207,14 +2213,14 @@ static int mtk_tx_alloc(struct mtk_eth * +@@ -2213,14 +2219,14 @@ static int mtk_tx_alloc(struct mtk_eth * mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); mtk_w32(eth, @@ -96,7 +96,7 @@ Signed-off-by: Felix Fietkau mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); } -@@ -2232,7 +2238,7 @@ static void mtk_tx_clean(struct mtk_eth +@@ -2238,7 +2244,7 @@ static void mtk_tx_clean(struct mtk_eth int i; if (ring->buf) { @@ -105,7 +105,7 @@ Signed-off-by: Felix Fietkau mtk_tx_unmap(eth, &ring->buf[i], false); kfree(ring->buf); ring->buf = NULL; -@@ -2240,14 +2246,14 @@ static void mtk_tx_clean(struct mtk_eth +@@ -2246,14 +2252,14 @@ static void mtk_tx_clean(struct mtk_eth if (ring->dma) { dma_free_coherent(eth->dma_dev, @@ -122,7 +122,7 @@ Signed-off-by: Felix Fietkau ring->dma_pdma, ring->phys_pdma); ring->dma_pdma = NULL; } -@@ -2767,7 +2773,7 @@ static void mtk_dma_free(struct mtk_eth +@@ -2773,7 +2779,7 @@ static void mtk_dma_free(struct mtk_eth netdev_reset_queue(eth->netdev[i]); if (eth->scratch_ring) { dma_free_coherent(eth->dma_dev, diff --git a/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-avoid-port_mg-assignment-on.patch b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-avoid-port_mg-assignment-on.patch index 2e9594c872..8c425bc8c4 100644 --- a/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-avoid-port_mg-assignment-on.patch +++ b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-avoid-port_mg-assignment-on.patch @@ -12,7 +12,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -4199,7 +4199,7 @@ static const struct mtk_soc_data mt7621_ +@@ -4431,7 +4431,7 @@ static const struct mtk_soc_data mt7621_ .hw_features = MTK_HW_FEATURES, .required_clks = 
MT7621_CLKS_BITMAP, .required_pctl = false, @@ -21,7 +21,7 @@ Signed-off-by: Felix Fietkau .hash_offset = 2, .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { -@@ -4239,7 +4239,7 @@ static const struct mtk_soc_data mt7623_ +@@ -4471,7 +4471,7 @@ static const struct mtk_soc_data mt7623_ .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, diff --git a/target/linux/generic/pending-5.15/732-04-net-ethernet-mtk_eth_soc-implement-multi-queue-suppo.patch b/target/linux/generic/pending-5.15/732-04-net-ethernet-mtk_eth_soc-implement-multi-queue-suppo.patch index 0e817cd272..8a801f77e8 100644 --- a/target/linux/generic/pending-5.15/732-04-net-ethernet-mtk_eth_soc-implement-multi-queue-suppo.patch +++ b/target/linux/generic/pending-5.15/732-04-net-ethernet-mtk_eth_soc-implement-multi-queue-suppo.patch @@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -54,6 +54,7 @@ static const struct mtk_reg_map mtk_reg_ +@@ -55,6 +55,7 @@ static const struct mtk_reg_map mtk_reg_ }, .qdma = { .qtx_cfg = 0x1800, @@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau .rx_ptr = 0x1900, .rx_cnt_cfg = 0x1904, .qcrx_ptr = 0x1908, -@@ -61,6 +62,7 @@ static const struct mtk_reg_map mtk_reg_ +@@ -62,6 +63,7 @@ static const struct mtk_reg_map mtk_reg_ .rst_idx = 0x1a08, .delay_irq = 0x1a0c, .fc_th = 0x1a10, @@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau .int_grp = 0x1a20, .hred = 0x1a44, .ctx_ptr = 0x1b00, -@@ -113,6 +115,7 @@ static const struct mtk_reg_map mt7986_r +@@ -117,6 +119,7 @@ static const struct mtk_reg_map mt7986_r }, .qdma = { .qtx_cfg = 0x4400, @@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau .rx_ptr = 0x4500, .rx_cnt_cfg = 0x4504, .qcrx_ptr = 0x4508, -@@ -130,6 +133,7 @@ static const struct mtk_reg_map mt7986_r +@@ -134,6 +137,7 @@ static const struct mtk_reg_map mt7986_r .fq_tail = 0x4724, .fq_count = 0x4728, .fq_blen = 0x472c, @@ -54,7 +54,7 @@ Signed-off-by: Felix Fietkau }, .gdm1_cnt = 0x1c00, .gdma_to_ppe0 = 0x3333, -@@ -570,6 +574,75 @@ static void mtk_mac_link_down(struct phy +@@ -576,6 +580,75 @@ static void mtk_mac_link_down(struct phy mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } @@ -130,7 +130,7 @@ Signed-off-by: Felix Fietkau static void mtk_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, -@@ -595,6 +668,8 @@ static void mtk_mac_link_up(struct phyli +@@ -601,6 +674,8 @@ static void mtk_mac_link_up(struct phyli break; } @@ -139,7 +139,7 @@ Signed-off-by: Felix Fietkau /* Configure duplex */ if (duplex == DUPLEX_FULL) mcr |= MAC_MCR_FORCE_DPX; -@@ -1053,7 +1128,8 @@ static void mtk_tx_set_dma_desc_v1(struc +@@ -1059,7 +1134,8 @@ static void mtk_tx_set_dma_desc_v1(struc WRITE_ONCE(desc->txd1, info->addr); @@ -149,7 +149,7 @@ Signed-off-by: Felix Fietkau if (info->last) data |= TX_DMA_LS0; WRITE_ONCE(desc->txd3, data); -@@ -1087,9 +1163,6 @@ static void mtk_tx_set_dma_desc_v2(struc +@@ -1093,9 +1169,6 @@ static void mtk_tx_set_dma_desc_v2(struc data |= TX_DMA_LS0; WRITE_ONCE(desc->txd3, data); @@ -159,7 +159,7 @@ Signed-off-by: Felix Fietkau data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); WRITE_ONCE(desc->txd4, data); -@@ -1133,11 +1206,12 @@ static int mtk_tx_map(struct sk_buff *sk +@@ -1139,11 +1212,12 @@ static int mtk_tx_map(struct sk_buff *sk .gso = gso, .csum = skb->ip_summed == CHECKSUM_PARTIAL, .vlan = 
skb_vlan_tag_present(skb), @@ -173,7 +173,7 @@ Signed-off-by: Felix Fietkau struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; const struct mtk_soc_data *soc = eth->soc; -@@ -1145,8 +1219,10 @@ static int mtk_tx_map(struct sk_buff *sk +@@ -1151,8 +1225,10 @@ static int mtk_tx_map(struct sk_buff *sk struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; int i, n_desc = 1; @@ -184,7 +184,7 @@ Signed-off-by: Felix Fietkau itxd = ring->next_free; itxd_pdma = qdma_to_pdma(ring, itxd); if (itxd == ring->last_free) -@@ -1195,7 +1271,7 @@ static int mtk_tx_map(struct sk_buff *sk +@@ -1201,7 +1277,7 @@ static int mtk_tx_map(struct sk_buff *sk memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); txd_info.size = min_t(unsigned int, frag_size, soc->txrx.dma_max_len); @@ -193,7 +193,7 @@ Signed-off-by: Felix Fietkau txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && !(frag_size - txd_info.size); txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, -@@ -1234,7 +1310,7 @@ static int mtk_tx_map(struct sk_buff *sk +@@ -1240,7 +1316,7 @@ static int mtk_tx_map(struct sk_buff *sk txd_pdma->txd2 |= TX_DMA_LS1; } @@ -202,7 +202,7 @@ Signed-off-by: Felix Fietkau skb_tx_timestamp(skb); ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); -@@ -1246,8 +1322,7 @@ static int mtk_tx_map(struct sk_buff *sk +@@ -1252,8 +1328,7 @@ static int mtk_tx_map(struct sk_buff *sk wmb(); if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { @@ -212,7 +212,7 @@ Signed-off-by: Felix Fietkau mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); } else { int next_idx; -@@ -1316,7 +1391,7 @@ static void mtk_wake_queue(struct mtk_et +@@ -1322,7 +1397,7 @@ static void mtk_wake_queue(struct mtk_et for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i]) continue; @@ -221,7 +221,7 @@ Signed-off-by: Felix Fietkau } } -@@ -1340,7 +1415,7 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1346,7 +1421,7 @@ static netdev_tx_t mtk_start_xmit(struct tx_num = mtk_cal_txd_req(eth, skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { @@ -230,7 +230,7 @@ Signed-off-by: Felix Fietkau netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); spin_unlock(ð->page_lock); -@@ -1366,7 +1441,7 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1372,7 +1447,7 @@ static netdev_tx_t mtk_start_xmit(struct goto drop; if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) @@ -239,7 +239,7 @@ Signed-off-by: Felix Fietkau spin_unlock(ð->page_lock); -@@ -1533,10 +1608,12 @@ static int mtk_xdp_submit_frame(struct m +@@ -1539,10 +1614,12 @@ static int mtk_xdp_submit_frame(struct m struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; @@ -252,7 +252,7 @@ Signed-off-by: Felix Fietkau }; int err, index = 0, n_desc = 1, nr_frags; struct mtk_tx_dma *htxd, *txd, *txd_pdma; -@@ -1587,6 +1664,7 @@ static int mtk_xdp_submit_frame(struct m +@@ -1593,6 +1670,7 @@ static int mtk_xdp_submit_frame(struct m memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); txd_info.size = skb_frag_size(&sinfo->frags[index]); txd_info.last = index + 1 == nr_frags; @@ -260,7 +260,7 @@ Signed-off-by: Felix Fietkau data = skb_frag_address(&sinfo->frags[index]); index++; -@@ -1938,8 +2016,46 @@ rx_done: +@@ -1944,8 +2022,46 @@ rx_done: return done; } @@ -308,7 +308,7 @@ Signed-off-by: Felix Fietkau { const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_tx_ring *ring = ð->tx_ring; -@@ -1969,12 +2085,9 @@ static int 
mtk_poll_tx_qdma(struct mtk_e +@@ -1975,12 +2091,9 @@ static int mtk_poll_tx_qdma(struct mtk_e break; if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { @@ -323,7 +323,7 @@ Signed-off-by: Felix Fietkau budget--; } mtk_tx_unmap(eth, tx_buf, true); -@@ -1992,7 +2105,7 @@ static int mtk_poll_tx_qdma(struct mtk_e +@@ -1998,7 +2111,7 @@ static int mtk_poll_tx_qdma(struct mtk_e } static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, @@ -332,7 +332,7 @@ Signed-off-by: Felix Fietkau { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_buf *tx_buf; -@@ -2008,12 +2121,8 @@ static int mtk_poll_tx_pdma(struct mtk_e +@@ -2014,12 +2127,8 @@ static int mtk_poll_tx_pdma(struct mtk_e break; if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { @@ -347,7 +347,7 @@ Signed-off-by: Felix Fietkau budget--; } mtk_tx_unmap(eth, tx_buf, true); -@@ -2034,26 +2143,15 @@ static int mtk_poll_tx(struct mtk_eth *e +@@ -2040,26 +2149,15 @@ static int mtk_poll_tx(struct mtk_eth *e { struct mtk_tx_ring *ring = ð->tx_ring; struct dim_sample dim_sample = {}; @@ -379,7 +379,7 @@ Signed-off-by: Felix Fietkau dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, &dim_sample); -@@ -2063,7 +2161,7 @@ static int mtk_poll_tx(struct mtk_eth *e +@@ -2069,7 +2167,7 @@ static int mtk_poll_tx(struct mtk_eth *e (atomic_read(&ring->free_count) > ring->thresh)) mtk_wake_queue(eth); @@ -388,7 +388,7 @@ Signed-off-by: Felix Fietkau } static void mtk_handle_status_irq(struct mtk_eth *eth) -@@ -2149,6 +2247,7 @@ static int mtk_tx_alloc(struct mtk_eth * +@@ -2155,6 +2253,7 @@ static int mtk_tx_alloc(struct mtk_eth * int i, sz = soc->txrx.txd_size; struct mtk_tx_dma_v2 *txd; int ring_size; @@ -396,7 +396,7 @@ Signed-off-by: Felix Fietkau if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) ring_size = MTK_QDMA_RING_SIZE; -@@ -2216,8 +2315,25 @@ static int mtk_tx_alloc(struct mtk_eth * +@@ -2222,8 +2321,25 @@ static int mtk_tx_alloc(struct mtk_eth * ring->phys + ((ring_size - 1) * sz), soc->reg_map->qdma.crx_ptr); mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); @@ -424,7 +424,7 @@ Signed-off-by: Felix Fietkau } else { mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); -@@ -2882,7 +2998,7 @@ static int mtk_start_dma(struct mtk_eth +@@ -2903,7 +3019,7 @@ static int mtk_start_dma(struct mtk_eth if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) val |= MTK_MUTLI_CNT | MTK_RESV_BUF | MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | @@ -433,7 +433,7 @@ Signed-off-by: Felix Fietkau else val |= MTK_RX_BT_32DWORDS; mtk_w32(eth, val, reg_map->qdma.glo_cfg); -@@ -2928,6 +3044,45 @@ static void mtk_gdm_config(struct mtk_et +@@ -2949,6 +3065,45 @@ static void mtk_gdm_config(struct mtk_et mtk_w32(eth, 0, MTK_RST_GL); } @@ -479,7 +479,7 @@ Signed-off-by: Felix Fietkau static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); -@@ -2972,7 +3127,8 @@ static int mtk_open(struct net_device *d +@@ -2993,7 +3148,8 @@ static int mtk_open(struct net_device *d refcount_inc(ð->dma_refcnt); phylink_start(mac->phylink); @@ -489,7 +489,7 @@ Signed-off-by: Felix Fietkau return 0; } -@@ -3488,8 +3644,12 @@ static int mtk_unreg_dev(struct mtk_eth +@@ -3716,8 +3872,12 @@ static int mtk_unreg_dev(struct mtk_eth int i; for (i = 0; i < MTK_MAC_COUNT; i++) { @@ -502,7 +502,7 @@ Signed-off-by: Felix Fietkau unregister_netdev(eth->netdev[i]); } -@@ -3705,6 +3865,23 @@ static int mtk_set_rxnfc(struct net_devi +@@ -3934,6 +4094,23 @@ static int mtk_set_rxnfc(struct net_devi return ret; } @@ -526,7 +526,7 @@ 
Signed-off-by: Felix Fietkau static const struct ethtool_ops mtk_ethtool_ops = { .get_link_ksettings = mtk_get_link_ksettings, .set_link_ksettings = mtk_set_link_ksettings, -@@ -3740,6 +3917,7 @@ static const struct net_device_ops mtk_n +@@ -3969,6 +4146,7 @@ static const struct net_device_ops mtk_n .ndo_setup_tc = mtk_eth_setup_tc, .ndo_bpf = mtk_xdp, .ndo_xdp_xmit = mtk_xdp_xmit, @@ -534,7 +534,7 @@ Signed-off-by: Felix Fietkau }; static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) -@@ -3749,6 +3927,7 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -3978,6 +4156,7 @@ static int mtk_add_mac(struct mtk_eth *e struct phylink *phylink; struct mtk_mac *mac; int id, err; @@ -542,7 +542,7 @@ Signed-off-by: Felix Fietkau if (!_id) { dev_err(eth->dev, "missing mac id\n"); -@@ -3766,7 +3945,10 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -3995,7 +4174,10 @@ static int mtk_add_mac(struct mtk_eth *e return -EINVAL; } @@ -554,7 +554,7 @@ Signed-off-by: Felix Fietkau if (!eth->netdev[id]) { dev_err(eth->dev, "alloc_etherdev failed\n"); return -ENOMEM; -@@ -3863,6 +4045,11 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4092,6 +4274,11 @@ static int mtk_add_mac(struct mtk_eth *e else eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; @@ -576,7 +576,7 @@ Signed-off-by: Felix Fietkau #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 #define MTK_MAX_RX_LENGTH_2K 2048 -@@ -203,8 +204,26 @@ +@@ -215,8 +216,26 @@ #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) /* QDMA TX Queue Configuration Registers */ @@ -603,7 +603,7 @@ Signed-off-by: Felix Fietkau /* QDMA Global Configuration Register */ #define MTK_RX_2B_OFFSET BIT(31) #define MTK_RX_BT_32DWORDS (3 << 11) -@@ -223,6 +242,7 @@ +@@ -235,6 +254,7 @@ #define MTK_WCOMP_EN BIT(24) #define MTK_RESV_BUF (0x40 << 16) #define MTK_MUTLI_CNT (0x4 << 12) @@ -611,7 +611,7 @@ Signed-off-by: Felix Fietkau /* QDMA Flow Control Register */ #define FC_THRES_DROP_MODE BIT(20) -@@ -251,8 +271,6 @@ +@@ -265,8 +285,6 @@ #define MTK_STAT_OFFSET 0x40 /* QDMA TX NUM */ @@ -620,7 +620,7 @@ Signed-off-by: Felix Fietkau #define QID_BITS_V2(x) (((x) & 0x3f) << 16) #define MTK_QDMA_GMAC2_QID 8 -@@ -282,6 +300,7 @@ +@@ -296,6 +314,7 @@ #define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) #define TX_DMA_SWC BIT(14) @@ -628,7 +628,7 @@ Signed-off-by: Felix Fietkau /* PDMA on MT7628 */ #define TX_DMA_DONE BIT(31) -@@ -930,6 +949,7 @@ struct mtk_reg_map { +@@ -952,6 +971,7 @@ struct mtk_reg_map { } pdma; struct { u32 qtx_cfg; /* tx queue configuration */ @@ -636,7 +636,7 @@ Signed-off-by: Felix Fietkau u32 rx_ptr; /* rx base pointer */ u32 rx_cnt_cfg; /* rx max count configuration */ u32 qcrx_ptr; /* rx cpu pointer */ -@@ -947,6 +967,7 @@ struct mtk_reg_map { +@@ -969,6 +989,7 @@ struct mtk_reg_map { u32 fq_tail; /* fq tail pointer */ u32 fq_count; /* fq free page count */ u32 fq_blen; /* fq free page buffer length */ @@ -644,7 +644,7 @@ Signed-off-by: Felix Fietkau } qdma; u32 gdm1_cnt; u32 gdma_to_ppe0; -@@ -1139,6 +1160,7 @@ struct mtk_mac { +@@ -1173,6 +1194,7 @@ struct mtk_mac { __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; int hwlro_ip_cnt; unsigned int syscfg0; diff --git a/target/linux/generic/pending-5.15/732-06-net-ethernet-mediatek-ppe-assign-per-port-queues-for.patch b/target/linux/generic/pending-5.15/732-06-net-ethernet-mediatek-ppe-assign-per-port-queues-for.patch index 05161c3479..8a7cb9f91e 100644 --- 
a/target/linux/generic/pending-5.15/732-06-net-ethernet-mediatek-ppe-assign-per-port-queues-for.patch +++ b/target/linux/generic/pending-5.15/732-06-net-ethernet-mediatek-ppe-assign-per-port-queues-for.patch @@ -47,7 +47,7 @@ Signed-off-by: Felix Fietkau #define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9) #define MTK_FOE_IB2_MULTICAST_V2 BIT(13) #define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19) -@@ -369,6 +371,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e +@@ -370,6 +372,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e int sid); int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, int wdma_idx, int txq, int bss, int wcid); diff --git a/target/linux/generic/pending-5.15/732-07-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch b/target/linux/generic/pending-5.15/732-07-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch index 8c711ba802..d7e7492aa2 100644 --- a/target/linux/generic/pending-5.15/732-07-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch +++ b/target/linux/generic/pending-5.15/732-07-net-ethernet-mtk_eth_soc-compile-out-netsys-v2-code-.patch @@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -895,7 +895,13 @@ enum mkt_eth_capabilities { +@@ -916,7 +916,13 @@ enum mkt_eth_capabilities { #define MTK_MUX_GMAC12_TO_GEPHY_SGMII \ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX) diff --git a/target/linux/generic/pending-5.15/732-09-net-ethernet-mtk_eth_soc-fix-VLAN-rx-hardware-accele.patch b/target/linux/generic/pending-5.15/732-09-net-ethernet-mtk_eth_soc-fix-VLAN-rx-hardware-accele.patch index 933112c17a..a130358101 100644 --- a/target/linux/generic/pending-5.15/732-09-net-ethernet-mtk_eth_soc-fix-VLAN-rx-hardware-accele.patch +++ b/target/linux/generic/pending-5.15/732-09-net-ethernet-mtk_eth_soc-fix-VLAN-rx-hardware-accele.patch @@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau #include "mtk_eth_soc.h" #include "mtk_wed.h" -@@ -1967,16 +1968,22 @@ static int mtk_poll_rx(struct napi_struc +@@ -1973,16 +1974,22 @@ static int mtk_poll_rx(struct napi_struc htons(RX_DMA_VPID(trxd.rxd4)), RX_DMA_VID(trxd.rxd4)); } else if (trxd.rxd2 & RX_DMA_VTAG) { @@ -52,7 +52,7 @@ Signed-off-by: Felix Fietkau } skb_record_rx_queue(skb, 0); -@@ -2793,15 +2800,30 @@ static netdev_features_t mtk_fix_feature +@@ -2799,15 +2806,30 @@ static netdev_features_t mtk_fix_feature static int mtk_set_features(struct net_device *dev, netdev_features_t features) { @@ -88,7 +88,7 @@ Signed-off-by: Felix Fietkau } /* wait for DMA to finish whatever it is doing before we start using it again */ -@@ -3083,11 +3105,45 @@ found: +@@ -3104,11 +3126,45 @@ found: return NOTIFY_DONE; } @@ -135,7 +135,7 @@ Signed-off-by: Felix Fietkau err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { -@@ -3419,6 +3475,10 @@ static int mtk_hw_init(struct mtk_eth *e +@@ -3631,6 +3687,10 @@ static int mtk_hw_init(struct mtk_eth *e */ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); @@ -146,7 +146,7 @@ Signed-off-by: Felix Fietkau /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); -@@ -3636,6 +3696,12 @@ static int mtk_free_dev(struct mtk_eth * +@@ -3864,6 +3924,12 @@ static int mtk_free_dev(struct mtk_eth * free_netdev(eth->netdev[i]); } @@ -171,7 +171,7 @@ Signed-off-by: Felix Fietkau #define MTK_QDMA_NUM_QUEUES 16 #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 -@@ -93,6 +96,9 @@ +@@ -105,6 +108,9 @@ #define 
MTK_CDMQ_IG_CTRL 0x1400 #define MTK_CDMQ_STAG_EN BIT(0) @@ -181,7 +181,7 @@ Signed-off-by: Felix Fietkau /* CDMP Ingress Control Register */ #define MTK_CDMP_IG_CTRL 0x400 #define MTK_CDMP_STAG_EN BIT(0) -@@ -1140,6 +1146,8 @@ struct mtk_eth { +@@ -1166,6 +1172,8 @@ struct mtk_eth { int ip_align; diff --git a/target/linux/generic/pending-5.15/732-10-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch b/target/linux/generic/pending-5.15/732-10-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch index f19128d809..777e6f0ed1 100644 --- a/target/linux/generic/pending-5.15/732-10-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch +++ b/target/linux/generic/pending-5.15/732-10-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch @@ -16,7 +16,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1396,12 +1396,28 @@ static void mtk_wake_queue(struct mtk_et +@@ -1402,12 +1402,28 @@ static void mtk_wake_queue(struct mtk_et } } @@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau bool gso = false; int tx_num; -@@ -1423,6 +1439,18 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1429,6 +1445,18 @@ static netdev_tx_t mtk_start_xmit(struct return NETDEV_TX_BUSY; } @@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau /* TSO: fill MSS info in tcp checksum field */ if (skb_is_gso(skb)) { if (skb_cow_head(skb, 0)) { -@@ -1438,8 +1466,14 @@ static netdev_tx_t mtk_start_xmit(struct +@@ -1444,8 +1472,14 @@ static netdev_tx_t mtk_start_xmit(struct } } @@ -83,7 +83,7 @@ Signed-off-by: Felix Fietkau netif_tx_stop_all_queues(dev); --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -246,7 +246,7 @@ +@@ -258,7 +258,7 @@ #define MTK_CHK_DDONE_EN BIT(28) #define MTK_DMAD_WR_WDONE BIT(26) #define MTK_WCOMP_EN BIT(24) diff --git a/target/linux/generic/pending-5.15/732-12-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch b/target/linux/generic/pending-5.15/732-12-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch index 416dfb6cf7..6c81d880f9 100644 --- a/target/linux/generic/pending-5.15/732-12-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch +++ b/target/linux/generic/pending-5.15/732-12-net-ethernet-mtk_eth_soc-drop-packets-to-WDMA-if-the.patch @@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -3533,9 +3533,12 @@ static int mtk_hw_init(struct mtk_eth *e +@@ -3745,9 +3745,12 @@ static int mtk_hw_init(struct mtk_eth *e mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { @@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -127,6 +127,7 @@ +@@ -139,6 +139,7 @@ #define PSE_FQFC_CFG1 0x100 #define PSE_FQFC_CFG2 0x104 #define PSE_DROP_CFG 0x108 diff --git a/target/linux/generic/pending-5.15/732-14-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch b/target/linux/generic/pending-5.15/732-14-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch index 0d575b84a0..128501f38c 100644 --- a/target/linux/generic/pending-5.15/732-14-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch +++ b/target/linux/generic/pending-5.15/732-14-net-ethernet-mtk_eth_soc-drop-generic-vlan-rx-offloa.patch @@ -17,7 +17,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1995,29 +1995,16 @@ static int mtk_poll_rx(struct napi_struc +@@ -2001,29 +2001,16 @@ static int mtk_poll_rx(struct napi_struc if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) mtk_ppe_check_skb(eth->ppe[0], skb, hash); @@ -50,7 +50,7 @@ Signed-off-by: Felix Fietkau } skb_record_rx_queue(skb, 0); -@@ -2834,29 +2821,11 @@ static netdev_features_t mtk_fix_feature +@@ -2840,29 +2827,11 @@ static netdev_features_t mtk_fix_feature static int mtk_set_features(struct net_device *dev, netdev_features_t features) { @@ -80,7 +80,7 @@ Signed-off-by: Felix Fietkau return 0; } -@@ -3155,30 +3124,6 @@ static int mtk_open(struct net_device *d +@@ -3176,30 +3145,6 @@ static int mtk_open(struct net_device *d struct mtk_eth *eth = mac->hw; int i, err; @@ -111,7 +111,7 @@ Signed-off-by: Felix Fietkau err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, -@@ -3219,6 +3164,35 @@ static int mtk_open(struct net_device *d +@@ -3240,6 +3185,35 @@ static int mtk_open(struct net_device *d phylink_start(mac->phylink); netif_tx_start_all_queues(dev); @@ -147,7 +147,7 @@ Signed-off-by: Felix Fietkau return 0; } -@@ -3512,10 +3486,9 @@ static int mtk_hw_init(struct mtk_eth *e +@@ -3724,10 +3698,9 @@ static int mtk_hw_init(struct mtk_eth *e if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { val = mtk_r32(eth, MTK_CDMP_IG_CTRL); mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); @@ -160,7 +160,7 @@ Signed-off-by: Felix Fietkau /* set interrupt delays based on current Net DIM sample */ mtk_dim_rx(ð->rx_dim.work); -@@ -4136,7 +4109,7 @@ static int mtk_add_mac(struct mtk_eth *e +@@ -4365,7 +4338,7 @@ static int mtk_add_mac(struct mtk_eth *e eth->netdev[id]->hw_features |= NETIF_F_LRO; eth->netdev[id]->vlan_features = eth->soc->hw_features & diff --git a/target/linux/generic/pending-5.15/733-01-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch b/target/linux/generic/pending-5.15/733-01-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch deleted file mode 100644 index c48613929d..0000000000 --- a/target/linux/generic/pending-5.15/733-01-net-ethernet-mtk_wed-introduce-wed-mcu-support.patch +++ /dev/null @@ -1,591 +0,0 @@ -From: Sujuan Chen -Date: Sat, 5 Nov 2022 23:36:18 +0100 -Subject: [PATCH] net: ethernet: mtk_wed: introduce wed mcu support - -Introduce WED mcu support used to configure WED WO chip. -This is a preliminary patch in order to add RX Wireless -Ethernet Dispatch available on MT7986 SoC. - -Tested-by: Daniel Golle -Co-developed-by: Lorenzo Bianconi -Signed-off-by: Lorenzo Bianconi -Signed-off-by: Sujuan Chen -Signed-off-by: David S. Miller ---- - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_mcu.c - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.h - ---- a/drivers/net/ethernet/mediatek/Makefile -+++ b/drivers/net/ethernet/mediatek/Makefile -@@ -5,7 +5,7 @@ - - obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o - mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o --mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o -+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o - ifdef CONFIG_DEBUG_FS - mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o - endif ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c -@@ -0,0 +1,359 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2022 MediaTek Inc. 
-+ * -+ * Author: Lorenzo Bianconi -+ * Sujuan Chen -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "mtk_wed_regs.h" -+#include "mtk_wed_wo.h" -+#include "mtk_wed.h" -+ -+static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg) -+{ -+ return readl(wo->boot.addr + reg); -+} -+ -+static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) -+{ -+ writel(val, wo->boot.addr + reg); -+} -+ -+static struct sk_buff * -+mtk_wed_mcu_msg_alloc(const void *data, int data_len) -+{ -+ int length = sizeof(struct mtk_wed_mcu_hdr) + data_len; -+ struct sk_buff *skb; -+ -+ skb = alloc_skb(length, GFP_KERNEL); -+ if (!skb) -+ return NULL; -+ -+ memset(skb->head, 0, length); -+ skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr)); -+ if (data && data_len) -+ skb_put_data(skb, data, data_len); -+ -+ return skb; -+} -+ -+static struct sk_buff * -+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires) -+{ -+ if (!time_is_after_jiffies(expires)) -+ return NULL; -+ -+ wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q), -+ expires - jiffies); -+ return skb_dequeue(&wo->mcu.res_q); -+} -+ -+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb) -+{ -+ skb_queue_tail(&wo->mcu.res_q, skb); -+ wake_up(&wo->mcu.wait); -+} -+ -+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, -+ struct sk_buff *skb) -+{ -+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; -+ -+ switch (hdr->cmd) { -+ case MTK_WED_WO_EVT_LOG_DUMP: { -+ const char *msg = (const char *)(skb->data + sizeof(*hdr)); -+ -+ dev_notice(wo->hw->dev, "%s\n", msg); -+ break; -+ } -+ case MTK_WED_WO_EVT_PROFILING: { -+ struct mtk_wed_wo_log_info *info; -+ u32 count = (skb->len - sizeof(*hdr)) / sizeof(*info); -+ int i; -+ -+ info = (struct mtk_wed_wo_log_info *)(skb->data + sizeof(*hdr)); -+ for (i = 0 ; i < count ; i++) -+ dev_notice(wo->hw->dev, -+ "SN:%u latency: total=%u, rro:%u, mod:%u\n", -+ le32_to_cpu(info[i].sn), -+ le32_to_cpu(info[i].total), -+ le32_to_cpu(info[i].rro), -+ le32_to_cpu(info[i].mod)); -+ break; -+ } -+ case MTK_WED_WO_EVT_RXCNT_INFO: -+ break; -+ default: -+ break; -+ } -+ -+ dev_kfree_skb(skb); -+} -+ -+static int -+mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb, -+ int id, int cmd, u16 *wait_seq, bool wait_resp) -+{ -+ struct mtk_wed_mcu_hdr *hdr; -+ -+ /* TODO: make it dynamic based on cmd */ -+ wo->mcu.timeout = 20 * HZ; -+ -+ hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr)); -+ hdr->cmd = cmd; -+ hdr->length = cpu_to_le16(skb->len); -+ -+ if (wait_resp && wait_seq) { -+ u16 seq = ++wo->mcu.seq; -+ -+ if (!seq) -+ seq = ++wo->mcu.seq; -+ *wait_seq = seq; -+ -+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP); -+ hdr->seq = cpu_to_le16(seq); -+ } -+ if (id == MTK_WED_MODULE_ID_WO) -+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO); -+ -+ dev_kfree_skb(skb); -+ return 0; -+} -+ -+static int -+mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb, -+ int cmd, int seq) -+{ -+ struct mtk_wed_mcu_hdr *hdr; -+ -+ if (!skb) { -+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n", -+ cmd, seq); -+ return -ETIMEDOUT; -+ } -+ -+ hdr = (struct mtk_wed_mcu_hdr *)skb->data; -+ if (le16_to_cpu(hdr->seq) != seq) -+ return -EAGAIN; -+ -+ skb_pull(skb, sizeof(*hdr)); -+ switch (cmd) { -+ case MTK_WED_WO_CMD_RXCNT_INFO: -+ default: -+ break; -+ } -+ -+ return 0; -+} -+ -+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, -+ const void *data, int len, bool wait_resp) -+{ -+ 
unsigned long expires; -+ struct sk_buff *skb; -+ u16 seq; -+ int ret; -+ -+ skb = mtk_wed_mcu_msg_alloc(data, len); -+ if (!skb) -+ return -ENOMEM; -+ -+ mutex_lock(&wo->mcu.mutex); -+ -+ ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp); -+ if (ret || !wait_resp) -+ goto unlock; -+ -+ expires = jiffies + wo->mcu.timeout; -+ do { -+ skb = mtk_wed_mcu_get_response(wo, expires); -+ ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq); -+ dev_kfree_skb(skb); -+ } while (ret == -EAGAIN); -+ -+unlock: -+ mutex_unlock(&wo->mcu.mutex); -+ -+ return ret; -+} -+ -+static int -+mtk_wed_get_memory_region(struct mtk_wed_wo *wo, -+ struct mtk_wed_wo_memory_region *region) -+{ -+ struct reserved_mem *rmem; -+ struct device_node *np; -+ int index; -+ -+ index = of_property_match_string(wo->hw->node, "memory-region-names", -+ region->name); -+ if (index < 0) -+ return index; -+ -+ np = of_parse_phandle(wo->hw->node, "memory-region", index); -+ if (!np) -+ return -ENODEV; -+ -+ rmem = of_reserved_mem_lookup(np); -+ of_node_put(np); -+ -+ if (!rmem) -+ return -ENODEV; -+ -+ region->phy_addr = rmem->base; -+ region->size = rmem->size; -+ region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size); -+ -+ return !region->addr ? -EINVAL : 0; -+} -+ -+static int -+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, -+ struct mtk_wed_wo_memory_region *region) -+{ -+ const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data; -+ const struct mtk_wed_fw_trailer *trailer; -+ const struct mtk_wed_fw_region *fw_region; -+ -+ trailer_ptr = fw->data + fw->size - sizeof(*trailer); -+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr; -+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region); -+ first_region_ptr = region_ptr; -+ -+ while (region_ptr < trailer_ptr) { -+ u32 length; -+ -+ fw_region = (const struct mtk_wed_fw_region *)region_ptr; -+ length = le32_to_cpu(fw_region->len); -+ -+ if (region->phy_addr != le32_to_cpu(fw_region->addr)) -+ goto next; -+ -+ if (region->size < length) -+ goto next; -+ -+ if (first_region_ptr < ptr + length) -+ goto next; -+ -+ if (region->shared && region->consumed) -+ return 0; -+ -+ if (!region->shared || !region->consumed) { -+ memcpy_toio(region->addr, ptr, length); -+ region->consumed = true; -+ return 0; -+ } -+next: -+ region_ptr += sizeof(*fw_region); -+ ptr += length; -+ } -+ -+ return -EINVAL; -+} -+ -+static int -+mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) -+{ -+ static struct mtk_wed_wo_memory_region mem_region[] = { -+ [MTK_WED_WO_REGION_EMI] = { -+ .name = "wo-emi", -+ }, -+ [MTK_WED_WO_REGION_ILM] = { -+ .name = "wo-ilm", -+ }, -+ [MTK_WED_WO_REGION_DATA] = { -+ .name = "wo-data", -+ .shared = true, -+ }, -+ }; -+ const struct mtk_wed_fw_trailer *trailer; -+ const struct firmware *fw; -+ const char *fw_name; -+ u32 val, boot_cr; -+ int ret, i; -+ -+ /* load firmware region metadata */ -+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { -+ ret = mtk_wed_get_memory_region(wo, &mem_region[i]); -+ if (ret) -+ return ret; -+ } -+ -+ wo->boot.name = "wo-boot"; -+ ret = mtk_wed_get_memory_region(wo, &wo->boot); -+ if (ret) -+ return ret; -+ -+ /* set dummy cr */ -+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL, -+ wo->hw->index + 1); -+ -+ /* load firmware */ -+ fw_name = wo->hw->index ? 
MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0; -+ ret = request_firmware(&fw, fw_name, wo->hw->dev); -+ if (ret) -+ return ret; -+ -+ trailer = (void *)(fw->data + fw->size - -+ sizeof(struct mtk_wed_fw_trailer)); -+ dev_info(wo->hw->dev, -+ "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n", -+ trailer->fw_ver, trailer->build_date); -+ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n", -+ trailer->chip_id, trailer->num_region); -+ -+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) { -+ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]); -+ if (ret) -+ goto out; -+ } -+ -+ /* set the start address */ -+ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR -+ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; -+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); -+ /* wo firmware reset */ -+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); -+ -+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); -+ val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK -+ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; -+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); -+out: -+ release_firmware(fw); -+ -+ return ret; -+} -+ -+static u32 -+mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo) -+{ -+ return wed_r32(wo->hw->wed_dev, -+ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL); -+} -+ -+int mtk_wed_mcu_init(struct mtk_wed_wo *wo) -+{ -+ u32 val; -+ int ret; -+ -+ skb_queue_head_init(&wo->mcu.res_q); -+ init_waitqueue_head(&wo->mcu.wait); -+ mutex_init(&wo->mcu.mutex); -+ -+ ret = mtk_wed_mcu_load_firmware(wo); -+ if (ret) -+ return ret; -+ -+ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val, -+ 100, MTK_FW_DL_TIMEOUT); -+} -+ -+MODULE_FIRMWARE(MT7986_FIRMWARE_WO0); -+MODULE_FIRMWARE(MT7986_FIRMWARE_WO1); ---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -152,6 +152,7 @@ struct mtk_wdma_desc { - - #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) - -+#define MTK_WED_SCR0 0x3c0 - #define MTK_WED_WPDMA_INT_TRIGGER 0x504 - #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) - #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h -@@ -0,0 +1,150 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* Copyright (C) 2022 Lorenzo Bianconi */ -+ -+#ifndef __MTK_WED_WO_H -+#define __MTK_WED_WO_H -+ -+#include -+#include -+ -+struct mtk_wed_hw; -+ -+struct mtk_wed_mcu_hdr { -+ /* DW0 */ -+ u8 version; -+ u8 cmd; -+ __le16 length; -+ -+ /* DW1 */ -+ __le16 seq; -+ __le16 flag; -+ -+ /* DW2 */ -+ __le32 status; -+ -+ /* DW3 */ -+ u8 rsv[20]; -+}; -+ -+struct mtk_wed_wo_log_info { -+ __le32 sn; -+ __le32 total; -+ __le32 rro; -+ __le32 mod; -+}; -+ -+enum mtk_wed_wo_event { -+ MTK_WED_WO_EVT_LOG_DUMP = 0x1, -+ MTK_WED_WO_EVT_PROFILING = 0x2, -+ MTK_WED_WO_EVT_RXCNT_INFO = 0x3, -+}; -+ -+#define MTK_WED_MODULE_ID_WO 1 -+#define MTK_FW_DL_TIMEOUT 4000000 /* us */ -+#define MTK_WOCPU_TIMEOUT 2000000 /* us */ -+ -+enum { -+ MTK_WED_WARP_CMD_FLAG_RSP = BIT(0), -+ MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1), -+ MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2), -+}; -+ -+enum { -+ MTK_WED_WO_REGION_EMI, -+ MTK_WED_WO_REGION_ILM, -+ MTK_WED_WO_REGION_DATA, -+ MTK_WED_WO_REGION_BOOT, -+ __MTK_WED_WO_REGION_MAX, -+}; -+ -+enum mtk_wed_dummy_cr_idx { -+ MTK_WED_DUMMY_CR_FWDL, -+ MTK_WED_DUMMY_CR_WO_STATUS, -+}; -+ -+#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin" -+#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin" -+ -+#define 
MTK_WO_MCU_CFG_LS_BASE 0 -+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000) -+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004) -+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c) -+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010) -+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014) -+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018) -+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c) -+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050) -+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060) -+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064) -+ -+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5) -+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0) -+ -+struct mtk_wed_wo_memory_region { -+ const char *name; -+ void __iomem *addr; -+ phys_addr_t phy_addr; -+ u32 size; -+ bool shared:1; -+ bool consumed:1; -+}; -+ -+struct mtk_wed_fw_region { -+ __le32 decomp_crc; -+ __le32 decomp_len; -+ __le32 decomp_blk_sz; -+ u8 rsv0[4]; -+ __le32 addr; -+ __le32 len; -+ u8 feature_set; -+ u8 rsv1[15]; -+} __packed; -+ -+struct mtk_wed_fw_trailer { -+ u8 chip_id; -+ u8 eco_code; -+ u8 num_region; -+ u8 format_ver; -+ u8 format_flag; -+ u8 rsv[2]; -+ char fw_ver[10]; -+ char build_date[15]; -+ u32 crc; -+}; -+ -+struct mtk_wed_wo { -+ struct mtk_wed_hw *hw; -+ struct mtk_wed_wo_memory_region boot; -+ -+ struct { -+ struct mutex mutex; -+ int timeout; -+ u16 seq; -+ -+ struct sk_buff_head res_q; -+ wait_queue_head_t wait; -+ } mcu; -+}; -+ -+static inline int -+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb) -+{ -+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; -+ -+ if (hdr->version) -+ return -EINVAL; -+ -+ if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length)) -+ return -EINVAL; -+ -+ return 0; -+} -+ -+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb); -+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, -+ struct sk_buff *skb); -+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, -+ const void *data, int len, bool wait_resp); -+int mtk_wed_mcu_init(struct mtk_wed_wo *wo); -+ -+#endif /* __MTK_WED_WO_H */ ---- a/include/linux/soc/mediatek/mtk_wed.h -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -11,6 +11,35 @@ - struct mtk_wed_hw; - struct mtk_wdma_desc; - -+enum mtk_wed_wo_cmd { -+ MTK_WED_WO_CMD_WED_CFG, -+ MTK_WED_WO_CMD_WED_RX_STAT, -+ MTK_WED_WO_CMD_RRO_SER, -+ MTK_WED_WO_CMD_DBG_INFO, -+ MTK_WED_WO_CMD_DEV_INFO, -+ MTK_WED_WO_CMD_BSS_INFO, -+ MTK_WED_WO_CMD_STA_REC, -+ MTK_WED_WO_CMD_DEV_INFO_DUMP, -+ MTK_WED_WO_CMD_BSS_INFO_DUMP, -+ MTK_WED_WO_CMD_STA_REC_DUMP, -+ MTK_WED_WO_CMD_BA_INFO_DUMP, -+ MTK_WED_WO_CMD_FBCMD_Q_DUMP, -+ MTK_WED_WO_CMD_FW_LOG_CTRL, -+ MTK_WED_WO_CMD_LOG_FLUSH, -+ MTK_WED_WO_CMD_CHANGE_STATE, -+ MTK_WED_WO_CMD_CPU_STATS_ENABLE, -+ MTK_WED_WO_CMD_CPU_STATS_DUMP, -+ MTK_WED_WO_CMD_EXCEPTION_INIT, -+ MTK_WED_WO_CMD_PROF_CTRL, -+ MTK_WED_WO_CMD_STA_BA_DUMP, -+ MTK_WED_WO_CMD_BA_CTRL_DUMP, -+ MTK_WED_WO_CMD_RXCNT_CTRL, -+ MTK_WED_WO_CMD_RXCNT_INFO, -+ MTK_WED_WO_CMD_SET_CAP, -+ MTK_WED_WO_CMD_CCIF_RING_DUMP, -+ MTK_WED_WO_CMD_WED_END -+}; -+ - enum mtk_wed_bus_tye { - MTK_WED_BUS_PCIE, - MTK_WED_BUS_AXI, diff --git a/target/linux/generic/pending-5.15/733-02-net-ethernet-mtk_wed-introduce-wed-wo-support.patch 
b/target/linux/generic/pending-5.15/733-02-net-ethernet-mtk_wed-introduce-wed-wo-support.patch deleted file mode 100644 index dbd7e30fbb..0000000000 --- a/target/linux/generic/pending-5.15/733-02-net-ethernet-mtk_wed-introduce-wed-wo-support.patch +++ /dev/null @@ -1,737 +0,0 @@ -From: Lorenzo Bianconi -Date: Sat, 5 Nov 2022 23:36:19 +0100 -Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support - -Introduce WO chip support to mtk wed driver. MTK WED WO is used to -implement RX Wireless Ethernet Dispatch and offload traffic received by -wlan nic to the wired interface. - -Tested-by: Daniel Golle -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. Miller ---- - create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c - ---- a/drivers/net/ethernet/mediatek/Makefile -+++ b/drivers/net/ethernet/mediatek/Makefile -@@ -5,7 +5,7 @@ - - obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o - mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o --mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o -+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o - ifdef CONFIG_DEBUG_FS - mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o - endif ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -16,6 +16,7 @@ - #include "mtk_wed_regs.h" - #include "mtk_wed.h" - #include "mtk_ppe.h" -+#include "mtk_wed_wo.h" - - #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) - -@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de - - mtk_wed_free_buffer(dev); - mtk_wed_free_tx_rings(dev); -+ if (hw->version != 1) -+ mtk_wed_wo_deinit(hw); - - if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { - struct device_node *wlan_node; -@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de - } - - mtk_wed_hw_init_early(dev); -- if (hw->hifsys) -+ if (hw->version == 1) - regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, - BIT(hw->index), 0); -+ else -+ ret = mtk_wed_wo_init(hw); - - out: - mutex_unlock(&hw_lock); ---- a/drivers/net/ethernet/mediatek/mtk_wed.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed.h -@@ -10,6 +10,7 @@ - #include - - struct mtk_eth; -+struct mtk_wed_wo; - - struct mtk_wed_hw { - struct device_node *node; -@@ -22,6 +23,7 @@ struct mtk_wed_hw { - struct regmap *mirror; - struct dentry *debugfs_dir; - struct mtk_wed_device *wed_dev; -+ struct mtk_wed_wo *wed_wo; - u32 debugfs_reg; - u32 num_flows; - u8 version; ---- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c -@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_ - if (id == MTK_WED_MODULE_ID_WO) - hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO); - -- dev_kfree_skb(skb); -- return 0; -+ return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb); - } - - static int ---- /dev/null -+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c -@@ -0,0 +1,508 @@ -+// SPDX-License-Identifier: GPL-2.0-only -+/* Copyright (C) 2022 MediaTek Inc. 
-+ * -+ * Author: Lorenzo Bianconi -+ * Sujuan Chen -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "mtk_wed.h" -+#include "mtk_wed_regs.h" -+#include "mtk_wed_wo.h" -+ -+static u32 -+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg) -+{ -+ u32 val; -+ -+ if (regmap_read(wo->mmio.regs, reg, &val)) -+ val = ~0; -+ -+ return val; -+} -+ -+static void -+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) -+{ -+ regmap_write(wo->mmio.regs, reg, val); -+} -+ -+static u32 -+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo) -+{ -+ u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM); -+ -+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK; -+} -+ -+static void -+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask) -+{ -+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask); -+} -+ -+static void -+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask) -+{ -+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask); -+} -+ -+static void -+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&wo->mmio.lock, flags); -+ wo->mmio.irq_mask &= ~mask; -+ wo->mmio.irq_mask |= val; -+ if (set) -+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask); -+ spin_unlock_irqrestore(&wo->mmio.lock, flags); -+} -+ -+static void -+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask) -+{ -+ mtk_wed_wo_set_isr_mask(wo, 0, mask, false); -+ tasklet_schedule(&wo->mmio.irq_tasklet); -+} -+ -+static void -+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask) -+{ -+ mtk_wed_wo_set_isr_mask(wo, mask, 0, true); -+} -+ -+static void -+mtk_wed_wo_kickout(struct mtk_wed_wo *wo) -+{ -+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM); -+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM); -+} -+ -+static void -+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, -+ u32 val) -+{ -+ wmb(); -+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val); -+} -+ -+static void * -+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len, -+ bool flush) -+{ -+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size); -+ int index = (q->tail + 1) % q->n_desc; -+ struct mtk_wed_wo_queue_entry *entry; -+ struct mtk_wed_wo_queue_desc *desc; -+ void *buf; -+ -+ if (!q->queued) -+ return NULL; -+ -+ if (flush) -+ q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE); -+ else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE))) -+ return NULL; -+ -+ q->tail = index; -+ q->queued--; -+ -+ desc = &q->desc[index]; -+ entry = &q->entry[index]; -+ buf = entry->buf; -+ if (len) -+ *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, -+ le32_to_cpu(READ_ONCE(desc->ctrl))); -+ if (buf) -+ dma_unmap_single(wo->hw->dev, entry->addr, buf_len, -+ DMA_FROM_DEVICE); -+ entry->buf = NULL; -+ -+ return buf; -+} -+ -+static int -+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, -+ gfp_t gfp, bool rx) -+{ -+ enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; -+ int n_buf = 0; -+ -+ spin_lock_bh(&q->lock); -+ while (q->queued < q->n_desc) { -+ void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp); -+ struct mtk_wed_wo_queue_entry *entry; -+ dma_addr_t addr; -+ -+ if (!buf) -+ break; -+ -+ addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir); -+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) { -+ skb_free_frag(buf); -+ break; -+ } -+ -+ q->head = (q->head + 1) % q->n_desc; -+ entry = &q->entry[q->head]; -+ entry->addr = addr; -+ entry->len = q->buf_size; -+ q->entry[q->head].buf = buf; -+ -+ if (rx) { -+ struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head]; -+ u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 | -+ FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, -+ entry->len); -+ -+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr)); -+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); -+ } -+ q->queued++; -+ n_buf++; -+ } -+ spin_unlock_bh(&q->lock); -+ -+ return n_buf; -+} -+ -+static void -+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo) -+{ -+ mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK); -+ mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK); -+} -+ -+static void -+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) -+{ -+ for (;;) { -+ struct mtk_wed_mcu_hdr *hdr; -+ struct sk_buff *skb; -+ void *data; -+ u32 len; -+ -+ data = mtk_wed_wo_dequeue(wo, q, &len, false); -+ if (!data) -+ break; -+ -+ skb = build_skb(data, q->buf_size); -+ if (!skb) { -+ skb_free_frag(data); -+ continue; -+ } -+ -+ __skb_put(skb, len); -+ if (mtk_wed_mcu_check_msg(wo, skb)) { -+ dev_kfree_skb(skb); -+ continue; -+ } -+ -+ hdr = (struct mtk_wed_mcu_hdr *)skb->data; -+ if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP)) -+ mtk_wed_mcu_rx_event(wo, skb); -+ else -+ mtk_wed_mcu_rx_unsolicited_event(wo, skb); -+ } -+ -+ if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) { -+ u32 index = (q->head - 1) % q->n_desc; -+ -+ mtk_wed_wo_queue_kick(wo, q, index); -+ } -+} -+ -+static irqreturn_t -+mtk_wed_wo_irq_handler(int irq, void *data) -+{ -+ struct mtk_wed_wo *wo = data; -+ -+ mtk_wed_wo_set_isr(wo, 0); -+ tasklet_schedule(&wo->mmio.irq_tasklet); -+ -+ return IRQ_HANDLED; -+} -+ -+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t) -+{ -+ struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet); -+ u32 intr, mask; -+ -+ /* disable interrupts */ -+ mtk_wed_wo_set_isr(wo, 0); -+ -+ intr = mtk_wed_wo_get_isr(wo); -+ intr &= wo->mmio.irq_mask; -+ mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK); -+ mtk_wed_wo_irq_disable(wo, mask); -+ -+ if (intr & MTK_WED_WO_RXCH_INT_MASK) { -+ mtk_wed_wo_rx_run_queue(wo, &wo->q_rx); -+ mtk_wed_wo_rx_complete(wo); -+ } -+} -+ -+/* mtk wed wo hw queues */ -+ -+static int -+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, -+ int n_desc, int buf_size, int index, -+ struct mtk_wed_wo_queue_regs *regs) -+{ -+ spin_lock_init(&q->lock); -+ q->regs = *regs; -+ q->n_desc = n_desc; -+ q->buf_size = buf_size; -+ -+ q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc), -+ &q->desc_dma, GFP_KERNEL); -+ if (!q->desc) -+ return -ENOMEM; -+ -+ q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry), -+ GFP_KERNEL); -+ if (!q->entry) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static void -+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) -+{ -+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); -+ dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc, -+ q->desc_dma); -+} -+ -+static void 
-+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) -+{ -+ struct page *page; -+ int i; -+ -+ spin_lock_bh(&q->lock); -+ for (i = 0; i < q->n_desc; i++) { -+ struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; -+ -+ dma_unmap_single(wo->hw->dev, entry->addr, entry->len, -+ DMA_TO_DEVICE); -+ skb_free_frag(entry->buf); -+ entry->buf = NULL; -+ } -+ spin_unlock_bh(&q->lock); -+ -+ if (!q->cache.va) -+ return; -+ -+ page = virt_to_page(q->cache.va); -+ __page_frag_cache_drain(page, q->cache.pagecnt_bias); -+ memset(&q->cache, 0, sizeof(q->cache)); -+} -+ -+static void -+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) -+{ -+ struct page *page; -+ -+ spin_lock_bh(&q->lock); -+ for (;;) { -+ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); -+ -+ if (!buf) -+ break; -+ -+ skb_free_frag(buf); -+ } -+ spin_unlock_bh(&q->lock); -+ -+ if (!q->cache.va) -+ return; -+ -+ page = virt_to_page(q->cache.va); -+ __page_frag_cache_drain(page, q->cache.pagecnt_bias); -+ memset(&q->cache, 0, sizeof(q->cache)); -+} -+ -+static void -+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) -+{ -+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); -+ mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma); -+ mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc); -+} -+ -+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, -+ struct sk_buff *skb) -+{ -+ struct mtk_wed_wo_queue_entry *entry; -+ struct mtk_wed_wo_queue_desc *desc; -+ int ret = 0, index; -+ u32 ctrl; -+ -+ spin_lock_bh(&q->lock); -+ -+ q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); -+ index = (q->head + 1) % q->n_desc; -+ if (q->tail == index) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ entry = &q->entry[index]; -+ if (skb->len > entry->len) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ -+ desc = &q->desc[index]; -+ q->head = index; -+ -+ dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len, -+ DMA_TO_DEVICE); -+ memcpy(entry->buf, skb->data, skb->len); -+ dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len, -+ DMA_TO_DEVICE); -+ -+ ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) | -+ MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE; -+ WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr)); -+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); -+ -+ mtk_wed_wo_queue_kick(wo, q, q->head); -+ mtk_wed_wo_kickout(wo); -+out: -+ spin_unlock_bh(&q->lock); -+ -+ dev_kfree_skb(skb); -+ -+ return ret; -+} -+ -+static int -+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo) -+{ -+ return 0; -+} -+ -+static int -+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo) -+{ -+ struct mtk_wed_wo_queue_regs regs; -+ struct device_node *np; -+ int ret; -+ -+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0); -+ if (!np) -+ return -ENODEV; -+ -+ wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL); -+ if (IS_ERR_OR_NULL(wo->mmio.regs)) -+ return PTR_ERR(wo->mmio.regs); -+ -+ wo->mmio.irq = irq_of_parse_and_map(np, 0); -+ wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK; -+ spin_lock_init(&wo->mmio.lock); -+ tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet); -+ -+ ret = devm_request_irq(wo->hw->dev, wo->mmio.irq, -+ mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH, -+ KBUILD_MODNAME, wo); -+ if (ret) -+ goto error; -+ -+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY1; -+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY2; -+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4; -+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3; -+ -+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE, -+ 
MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM, -+ ®s); -+ if (ret) -+ goto error; -+ -+ mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false); -+ mtk_wed_wo_queue_reset(wo, &wo->q_tx); -+ -+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY5; -+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY6; -+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8; -+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7; -+ -+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE, -+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM, -+ ®s); -+ if (ret) -+ goto error; -+ -+ mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true); -+ mtk_wed_wo_queue_reset(wo, &wo->q_rx); -+ -+ /* rx queue irqmask */ -+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask); -+ -+ return 0; -+ -+error: -+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo); -+ -+ return ret; -+} -+ -+static void -+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo) -+{ -+ /* disable interrupts */ -+ mtk_wed_wo_set_isr(wo, 0); -+ -+ tasklet_disable(&wo->mmio.irq_tasklet); -+ -+ disable_irq(wo->mmio.irq); -+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo); -+ -+ mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx); -+ mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx); -+ mtk_wed_wo_queue_free(wo, &wo->q_tx); -+ mtk_wed_wo_queue_free(wo, &wo->q_rx); -+} -+ -+int mtk_wed_wo_init(struct mtk_wed_hw *hw) -+{ -+ struct mtk_wed_wo *wo; -+ int ret; -+ -+ wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL); -+ if (!wo) -+ return -ENOMEM; -+ -+ hw->wed_wo = wo; -+ wo->hw = hw; -+ -+ ret = mtk_wed_wo_hardware_init(wo); -+ if (ret) -+ return ret; -+ -+ ret = mtk_wed_mcu_init(wo); -+ if (ret) -+ return ret; -+ -+ return mtk_wed_wo_exception_init(wo); -+} -+ -+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw) -+{ -+ struct mtk_wed_wo *wo = hw->wed_wo; -+ -+ mtk_wed_wo_hw_deinit(wo); -+} ---- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h -@@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx { - #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5) - #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0) - -+#define MTK_WED_WO_RING_SIZE 256 -+#define MTK_WED_WO_CMD_LEN 1504 -+ -+#define MTK_WED_WO_TXCH_NUM 0 -+#define MTK_WED_WO_RXCH_NUM 1 -+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7 -+ -+#define MTK_WED_WO_TXCH_INT_MASK BIT(0) -+#define MTK_WED_WO_RXCH_INT_MASK BIT(1) -+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7) -+#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \ -+ MTK_WED_WO_EXCEPTION_INT_MASK) -+ -+#define MTK_WED_WO_CCIF_BUSY 0x004 -+#define MTK_WED_WO_CCIF_START 0x008 -+#define MTK_WED_WO_CCIF_TCHNUM 0x00c -+#define MTK_WED_WO_CCIF_RCHNUM 0x010 -+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0) -+ -+#define MTK_WED_WO_CCIF_ACK 0x014 -+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018 -+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c -+#define MTK_WED_WO_CCIF_DUMMY1 0x020 -+#define MTK_WED_WO_CCIF_DUMMY2 0x024 -+#define MTK_WED_WO_CCIF_DUMMY3 0x028 -+#define MTK_WED_WO_CCIF_DUMMY4 0x02c -+#define MTK_WED_WO_CCIF_SHADOW1 0x030 -+#define MTK_WED_WO_CCIF_SHADOW2 0x034 -+#define MTK_WED_WO_CCIF_SHADOW3 0x038 -+#define MTK_WED_WO_CCIF_SHADOW4 0x03c -+#define MTK_WED_WO_CCIF_DUMMY5 0x050 -+#define MTK_WED_WO_CCIF_DUMMY6 0x054 -+#define MTK_WED_WO_CCIF_DUMMY7 0x058 -+#define MTK_WED_WO_CCIF_DUMMY8 0x05c -+#define MTK_WED_WO_CCIF_SHADOW5 0x060 -+#define MTK_WED_WO_CCIF_SHADOW6 0x064 -+#define MTK_WED_WO_CCIF_SHADOW7 0x068 -+#define MTK_WED_WO_CCIF_SHADOW8 0x06c -+ -+#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0) -+#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14) -+#define MTK_WED_WO_CTL_BURST BIT(15) -+#define 
MTK_WED_WO_CTL_SD_LEN0_SHIFT 16 -+#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16) -+#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30) -+#define MTK_WED_WO_CTL_DMA_DONE BIT(31) -+#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0) -+ - struct mtk_wed_wo_memory_region { - const char *name; - void __iomem *addr; -@@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer { - u32 crc; - }; - -+struct mtk_wed_wo_queue_regs { -+ u32 desc_base; -+ u32 ring_size; -+ u32 cpu_idx; -+ u32 dma_idx; -+}; -+ -+struct mtk_wed_wo_queue_desc { -+ __le32 buf0; -+ __le32 ctrl; -+ __le32 buf1; -+ __le32 info; -+ __le32 reserved[4]; -+} __packed __aligned(32); -+ -+struct mtk_wed_wo_queue_entry { -+ dma_addr_t addr; -+ void *buf; -+ u32 len; -+}; -+ -+struct mtk_wed_wo_queue { -+ struct mtk_wed_wo_queue_regs regs; -+ -+ struct page_frag_cache cache; -+ spinlock_t lock; -+ -+ struct mtk_wed_wo_queue_desc *desc; -+ dma_addr_t desc_dma; -+ -+ struct mtk_wed_wo_queue_entry *entry; -+ -+ u16 head; -+ u16 tail; -+ int n_desc; -+ int queued; -+ int buf_size; -+ -+}; -+ - struct mtk_wed_wo { - struct mtk_wed_hw *hw; - struct mtk_wed_wo_memory_region boot; - -+ struct mtk_wed_wo_queue q_tx; -+ struct mtk_wed_wo_queue q_rx; -+ - struct { - struct mutex mutex; - int timeout; -@@ -124,6 +215,15 @@ struct mtk_wed_wo { - struct sk_buff_head res_q; - wait_queue_head_t wait; - } mcu; -+ -+ struct { -+ struct regmap *regs; -+ -+ spinlock_t lock; -+ struct tasklet_struct irq_tasklet; -+ int irq; -+ u32 irq_mask; -+ } mmio; - }; - - static inline int -@@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st - int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, - const void *data, int len, bool wait_resp); - int mtk_wed_mcu_init(struct mtk_wed_wo *wo); -+int mtk_wed_wo_init(struct mtk_wed_hw *hw); -+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw); -+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q, -+ struct sk_buff *skb); - - #endif /* __MTK_WED_WO_H */ diff --git a/target/linux/generic/pending-5.15/733-03-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch b/target/linux/generic/pending-5.15/733-03-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch deleted file mode 100644 index ffd6bc3589..0000000000 --- a/target/linux/generic/pending-5.15/733-03-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch +++ /dev/null @@ -1,79 +0,0 @@ -From: Lorenzo Bianconi -Date: Sat, 5 Nov 2022 23:36:20 +0100 -Subject: [PATCH] net: ethernet: mtk_wed: rename tx_wdma array in rx_wdma - -Rename tx_wdma queue array in rx_wdma since this is rx side of wdma soc. -Moreover rename mtk_wed_wdma_ring_setup routine in -mtk_wed_wdma_rx_ring_setup() - -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -253,8 +253,8 @@ mtk_wed_free_tx_rings(struct mtk_wed_dev - - for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) - mtk_wed_free_ring(dev, &dev->tx_ring[i]); -- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -- mtk_wed_free_ring(dev, &dev->tx_wdma[i]); -+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) -+ mtk_wed_free_ring(dev, &dev->rx_wdma[i]); - } - - static void -@@ -695,10 +695,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device - } - - static int --mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) -+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) - { - u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; -- struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; -+ struct mtk_wed_ring *wdma = &dev->rx_wdma[idx]; - - if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) - return -ENOMEM; -@@ -812,9 +812,9 @@ mtk_wed_start(struct mtk_wed_device *dev - { - int i; - -- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) -- if (!dev->tx_wdma[i].desc) -- mtk_wed_wdma_ring_setup(dev, i, 16); -+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) -+ if (!dev->rx_wdma[i].desc) -+ mtk_wed_wdma_rx_ring_setup(dev, i, 16); - - mtk_wed_hw_init(dev); - mtk_wed_configure_irq(dev, irq_mask); -@@ -923,7 +923,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev - sizeof(*ring->desc))) - return -ENOMEM; - -- if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) -+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) - return -ENOMEM; - - ring->reg_base = MTK_WED_RING_TX(idx); ---- a/include/linux/soc/mediatek/mtk_wed.h -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -7,6 +7,7 @@ - #include - - #define MTK_WED_TX_QUEUES 2 -+#define MTK_WED_RX_QUEUES 2 - - struct mtk_wed_hw; - struct mtk_wdma_desc; -@@ -66,7 +67,7 @@ struct mtk_wed_device { - - struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; - struct mtk_wed_ring txfree_ring; -- struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; -+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; - - struct { - int size; diff --git a/target/linux/generic/pending-5.15/733-04-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch b/target/linux/generic/pending-5.15/733-04-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch deleted file mode 100644 index 4c34d0cb33..0000000000 --- a/target/linux/generic/pending-5.15/733-04-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch +++ /dev/null @@ -1,1521 +0,0 @@ -From: Lorenzo Bianconi -Date: Sat, 5 Nov 2022 23:36:21 +0100 -Subject: [PATCH] net: ethernet: mtk_wed: add configure wed wo support - -Enable RX Wireless Ethernet Dispatch available on MT7986 Soc. - -Tested-by: Daniel Golle -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -23,6 +24,7 @@ - #define MTK_WED_PKT_SIZE 1900 - #define MTK_WED_BUF_SIZE 2048 - #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) -+#define MTK_WED_RX_RING_SIZE 1536 - - #define MTK_WED_TX_RING_SIZE 2048 - #define MTK_WED_WDMA_RING_SIZE 1024 -@@ -31,6 +33,10 @@ - #define MTK_WED_PER_GROUP_PKT 128 - - #define MTK_WED_FBUF_SIZE 128 -+#define MTK_WED_MIOD_CNT 16 -+#define MTK_WED_FB_CMD_CNT 1024 -+#define MTK_WED_RRO_QUE_CNT 8192 -+#define MTK_WED_MIOD_ENTRY_CNT 128 - - static struct mtk_wed_hw *hw_list[2]; - static DEFINE_MUTEX(hw_lock); -@@ -65,12 +71,76 @@ wdma_set(struct mtk_wed_device *dev, u32 - wdma_m32(dev, reg, 0, mask); - } - -+static void -+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) -+{ -+ wdma_m32(dev, reg, mask, 0); -+} -+ -+static u32 -+wifi_r32(struct mtk_wed_device *dev, u32 reg) -+{ -+ return readl(dev->wlan.base + reg); -+} -+ -+static void -+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) -+{ -+ writel(val, dev->wlan.base + reg); -+} -+ - static u32 - mtk_wed_read_reset(struct mtk_wed_device *dev) - { - return wed_r32(dev, MTK_WED_RESET); - } - -+static u32 -+mtk_wdma_read_reset(struct mtk_wed_device *dev) -+{ -+ return wdma_r32(dev, MTK_WDMA_GLO_CFG); -+} -+ -+static void -+mtk_wdma_rx_reset(struct mtk_wed_device *dev) -+{ -+ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; -+ int i; -+ -+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); -+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, -+ !(status & mask), 0, 1000)) -+ dev_err(dev->hw->dev, "rx reset failed\n"); -+ -+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { -+ if (dev->rx_wdma[i].desc) -+ continue; -+ -+ wdma_w32(dev, -+ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); -+ } -+} -+ -+static void -+mtk_wdma_tx_reset(struct mtk_wed_device *dev) -+{ -+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; -+ int i; -+ -+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); -+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, -+ !(status & mask), 0, 1000)) -+ dev_err(dev->hw->dev, "tx reset failed\n"); -+ -+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) { -+ if (dev->tx_wdma[i].desc) -+ continue; -+ -+ wdma_w32(dev, -+ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); -+ } -+} -+ - static void - mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) - { -@@ -82,6 +152,54 @@ mtk_wed_reset(struct mtk_wed_device *dev - WARN_ON_ONCE(1); - } - -+static u32 -+mtk_wed_wo_read_status(struct mtk_wed_device *dev) -+{ -+ return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); -+} -+ -+static void -+mtk_wed_wo_reset(struct mtk_wed_device *dev) -+{ -+ struct mtk_wed_wo *wo = dev->hw->wed_wo; -+ u8 state = MTK_WED_WO_STATE_DISABLE; -+ void __iomem *reg; -+ u32 val; -+ -+ mtk_wdma_tx_reset(dev); -+ mtk_wed_reset(dev, MTK_WED_RESET_WED); -+ -+ mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, -+ MTK_WED_WO_CMD_CHANGE_STATE, &state, -+ sizeof(state), false); -+ -+ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, -+ val == MTK_WED_WOIF_DISABLE_DONE, -+ 100, MTK_WOCPU_TIMEOUT)) -+ dev_err(dev->hw->dev, "failed to disable wed-wo\n"); -+ -+ reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); -+ -+ val = readl(reg); -+ switch (dev->hw->index) { -+ case 0: -+ val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; -+ writel(val, reg); -+ val &= 
~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; -+ writel(val, reg); -+ break; -+ case 1: -+ val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; -+ writel(val, reg); -+ val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; -+ writel(val, reg); -+ break; -+ default: -+ break; -+ } -+ iounmap(reg); -+} -+ - static struct mtk_wed_hw * - mtk_wed_assign(struct mtk_wed_device *dev) - { -@@ -116,7 +234,7 @@ out: - } - - static int --mtk_wed_buffer_alloc(struct mtk_wed_device *dev) -+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) - { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; -@@ -133,16 +251,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi - if (!page_list) - return -ENOMEM; - -- dev->buf_ring.size = ring_size; -- dev->buf_ring.pages = page_list; -+ dev->tx_buf_ring.size = ring_size; -+ dev->tx_buf_ring.pages = page_list; - - desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), - &desc_phys, GFP_KERNEL); - if (!desc) - return -ENOMEM; - -- dev->buf_ring.desc = desc; -- dev->buf_ring.desc_phys = desc_phys; -+ dev->tx_buf_ring.desc = desc; -+ dev->tx_buf_ring.desc_phys = desc_phys; - - for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { - dma_addr_t page_phys, buf_phys; -@@ -203,10 +321,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi - } - - static void --mtk_wed_free_buffer(struct mtk_wed_device *dev) -+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) - { -- struct mtk_wdma_desc *desc = dev->buf_ring.desc; -- void **page_list = dev->buf_ring.pages; -+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; -+ void **page_list = dev->tx_buf_ring.pages; - int page_idx; - int i; - -@@ -216,7 +334,8 @@ mtk_wed_free_buffer(struct mtk_wed_devic - if (!desc) - goto free_pagelist; - -- for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { -+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; -+ i += MTK_WED_BUF_PER_PAGE) { - void *page = page_list[page_idx++]; - dma_addr_t buf_addr; - -@@ -229,13 +348,59 @@ mtk_wed_free_buffer(struct mtk_wed_devic - __free_page(page); - } - -- dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), -- desc, dev->buf_ring.desc_phys); -+ dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), -+ desc, dev->tx_buf_ring.desc_phys); - - free_pagelist: - kfree(page_list); - } - -+static int -+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) -+{ -+ struct mtk_rxbm_desc *desc; -+ dma_addr_t desc_phys; -+ -+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf; -+ desc = dma_alloc_coherent(dev->hw->dev, -+ dev->wlan.rx_nbuf * sizeof(*desc), -+ &desc_phys, GFP_KERNEL); -+ if (!desc) -+ return -ENOMEM; -+ -+ dev->rx_buf_ring.desc = desc; -+ dev->rx_buf_ring.desc_phys = desc_phys; -+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); -+ -+ return 0; -+} -+ -+static void -+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) -+{ -+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; -+ -+ if (!desc) -+ return; -+ -+ dev->wlan.release_rx_buf(dev); -+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), -+ desc, dev->rx_buf_ring.desc_phys); -+} -+ -+static void -+mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) -+{ -+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, -+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); -+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); -+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | -+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); -+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, -+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); -+ 
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); -+} -+ - static void - mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) - { -@@ -247,6 +412,13 @@ mtk_wed_free_ring(struct mtk_wed_device - } - - static void -+mtk_wed_free_rx_rings(struct mtk_wed_device *dev) -+{ -+ mtk_wed_free_rx_buffer(dev); -+ mtk_wed_free_ring(dev, &dev->rro.ring); -+} -+ -+static void - mtk_wed_free_tx_rings(struct mtk_wed_device *dev) - { - int i; -@@ -291,6 +463,38 @@ mtk_wed_set_512_support(struct mtk_wed_d - } - } - -+#define MTK_WFMDA_RX_DMA_EN BIT(2) -+static void -+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) -+{ -+ u32 val; -+ int i; -+ -+ if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) -+ return; /* queue is not configured by mt76 */ -+ -+ for (i = 0; i < 3; i++) { -+ u32 cur_idx; -+ -+ cur_idx = wed_r32(dev, -+ MTK_WED_WPDMA_RING_RX_DATA(idx) + -+ MTK_WED_RING_OFS_CPU_IDX); -+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1) -+ break; -+ -+ usleep_range(100000, 200000); -+ } -+ -+ if (i == 3) { -+ dev_err(dev->hw->dev, "rx dma enable failed\n"); -+ return; -+ } -+ -+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | -+ MTK_WFMDA_RX_DMA_EN; -+ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); -+} -+ - static void - mtk_wed_dma_disable(struct mtk_wed_device *dev) - { -@@ -304,20 +508,25 @@ mtk_wed_dma_disable(struct mtk_wed_devic - MTK_WED_GLO_CFG_TX_DMA_EN | - MTK_WED_GLO_CFG_RX_DMA_EN); - -- wdma_m32(dev, MTK_WDMA_GLO_CFG, -+ wdma_clr(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_TX_DMA_EN | - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | -- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); -+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); - - if (dev->hw->version == 1) { - regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); -- wdma_m32(dev, MTK_WDMA_GLO_CFG, -- MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); -+ wdma_clr(dev, MTK_WDMA_GLO_CFG, -+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - } else { - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); - -+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, -+ MTK_WED_WPDMA_RX_D_RX_DRV_EN); -+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, -+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); -+ - mtk_wed_set_512_support(dev, false); - } - } -@@ -338,6 +547,13 @@ mtk_wed_stop(struct mtk_wed_device *dev) - wdma_w32(dev, MTK_WDMA_INT_MASK, 0); - wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); - wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); -+ -+ if (dev->hw->version == 1) -+ return; -+ -+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); -+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); -+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); - } - - static void -@@ -353,11 +569,21 @@ mtk_wed_detach(struct mtk_wed_device *de - wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); - - mtk_wed_reset(dev, MTK_WED_RESET_WED); -+ if (mtk_wed_get_rx_capa(dev)) { -+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); -+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); -+ } - -- mtk_wed_free_buffer(dev); -+ mtk_wed_free_tx_buffer(dev); - mtk_wed_free_tx_rings(dev); -- if (hw->version != 1) -+ -+ if (mtk_wed_get_rx_capa(dev)) { -+ mtk_wed_wo_reset(dev); -+ mtk_wed_free_rx_rings(dev); - mtk_wed_wo_deinit(hw); -+ mtk_wdma_rx_reset(dev); -+ } - - if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { - struct device_node *wlan_node; -@@ -441,10 +667,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device - } else { - mtk_wed_bus_init(dev); - -- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); -- 
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); -- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); -- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); -+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); -+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); -+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); - } - } - -@@ -494,6 +722,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev - } - } - -+static int -+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, -+ int size) -+{ -+ ring->desc = dma_alloc_coherent(dev->hw->dev, -+ size * sizeof(*ring->desc), -+ &ring->desc_phys, GFP_KERNEL); -+ if (!ring->desc) -+ return -ENOMEM; -+ -+ ring->desc_size = sizeof(*ring->desc); -+ ring->size = size; -+ memset(ring->desc, 0, size); -+ -+ return 0; -+} -+ -+#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) -+static int -+mtk_wed_rro_alloc(struct mtk_wed_device *dev) -+{ -+ struct reserved_mem *rmem; -+ struct device_node *np; -+ int index; -+ -+ index = of_property_match_string(dev->hw->node, "memory-region-names", -+ "wo-dlm"); -+ if (index < 0) -+ return index; -+ -+ np = of_parse_phandle(dev->hw->node, "memory-region", index); -+ if (!np) -+ return -ENODEV; -+ -+ rmem = of_reserved_mem_lookup(np); -+ of_node_put(np); -+ -+ if (!rmem) -+ return -ENODEV; -+ -+ dev->rro.miod_phys = rmem->base; -+ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; -+ -+ return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, -+ MTK_WED_RRO_QUE_CNT); -+} -+ -+static int -+mtk_wed_rro_cfg(struct mtk_wed_device *dev) -+{ -+ struct mtk_wed_wo *wo = dev->hw->wed_wo; -+ struct { -+ struct { -+ __le32 base; -+ __le32 cnt; -+ __le32 unit; -+ } ring[2]; -+ __le32 wed; -+ u8 version; -+ } req = { -+ .ring[0] = { -+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), -+ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), -+ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), -+ }, -+ .ring[1] = { -+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + -+ MTK_WED_MIOD_COUNT), -+ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), -+ .unit = cpu_to_le32(4), -+ }, -+ }; -+ -+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, -+ MTK_WED_WO_CMD_WED_CFG, -+ &req, sizeof(req), true); -+} -+ -+static void -+mtk_wed_rro_hw_init(struct mtk_wed_device *dev) -+{ -+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, -+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | -+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | -+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, -+ MTK_WED_MIOD_ENTRY_CNT >> 2)); -+ -+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); -+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, -+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); -+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); -+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, -+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); -+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); -+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); -+ -+ wed_set(dev, MTK_WED_RROQM_RST_IDX, -+ MTK_WED_RROQM_RST_IDX_MIOD | -+ MTK_WED_RROQM_RST_IDX_FDBK); -+ -+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); -+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); -+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); -+} -+ -+static void -+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) -+{ 
-+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); -+ -+ for (;;) { -+ usleep_range(100, 200); -+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) -+ break; -+ } -+ -+ /* configure RX_ROUTE_QM */ -+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); -+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); -+ wed_set(dev, MTK_WED_RTQM_GLO_CFG, -+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); -+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); -+ /* enable RX_ROUTE_QM */ -+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); -+} -+ - static void - mtk_wed_hw_init(struct mtk_wed_device *dev) - { -@@ -505,11 +859,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d - wed_w32(dev, MTK_WED_TX_BM_CTRL, - MTK_WED_TX_BM_CTRL_PAUSE | - FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, -- dev->buf_ring.size / 128) | -+ dev->tx_buf_ring.size / 128) | - FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, - MTK_WED_TX_RING_SIZE / 256)); - -- wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); -+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); - - wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); - -@@ -536,9 +890,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d - wed_w32(dev, MTK_WED_TX_TKID_CTRL, - MTK_WED_TX_TKID_CTRL_PAUSE | - FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, -- dev->buf_ring.size / 128) | -+ dev->tx_buf_ring.size / 128) | - FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, -- dev->buf_ring.size / 128)); -+ dev->tx_buf_ring.size / 128)); - wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, - FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | - MTK_WED_TX_TKID_DYN_THR_HI); -@@ -546,18 +900,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d - - mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - -- if (dev->hw->version == 1) -+ if (dev->hw->version == 1) { - wed_set(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WED_TX_BM_EN | - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); -- else -+ } else { - wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); -+ /* rx hw init */ -+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, -+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX | -+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX); -+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); -+ -+ mtk_wed_rx_buffer_hw_init(dev); -+ mtk_wed_rro_hw_init(dev); -+ mtk_wed_route_qm_hw_init(dev); -+ } - - wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); - } - - static void --mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) -+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) - { - void *head = (void *)ring->desc; - int i; -@@ -567,7 +931,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring * - - desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); - desc->buf0 = 0; -- desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); -+ if (tx) -+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); -+ else -+ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); - desc->buf1 = 0; - desc->info = 0; - } -@@ -623,7 +990,8 @@ mtk_wed_reset_dma(struct mtk_wed_device - if (!dev->tx_ring[i].desc) - continue; - -- mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE); -+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, -+ true); - } - - if (mtk_wed_poll_busy(dev)) -@@ -641,6 +1009,9 @@ mtk_wed_reset_dma(struct mtk_wed_device - wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); - wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); - -+ if (mtk_wed_get_rx_capa(dev)) -+ mtk_wdma_rx_reset(dev); -+ - if (busy) { - mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); - mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); -@@ -675,12 
+1046,11 @@ mtk_wed_reset_dma(struct mtk_wed_device - MTK_WED_WPDMA_RESET_IDX_RX); - wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); - } -- - } - - static int - mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, -- int size, u32 desc_size) -+ int size, u32 desc_size, bool tx) - { - ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, - &ring->desc_phys, GFP_KERNEL); -@@ -689,7 +1059,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device - - ring->desc_size = desc_size; - ring->size = size; -- mtk_wed_ring_reset(ring, size); -+ mtk_wed_ring_reset(ring, size, tx); - - return 0; - } -@@ -698,9 +1068,14 @@ static int - mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) - { - u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; -- struct mtk_wed_ring *wdma = &dev->rx_wdma[idx]; -+ struct mtk_wed_ring *wdma; - -- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) -+ if (idx >= ARRAY_SIZE(dev->rx_wdma)) -+ return -EINVAL; -+ -+ wdma = &dev->rx_wdma[idx]; -+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, -+ true)) - return -ENOMEM; - - wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, -@@ -717,6 +1092,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we - return 0; - } - -+static int -+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size) -+{ -+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; -+ struct mtk_wed_ring *wdma; -+ -+ if (idx >= ARRAY_SIZE(dev->tx_wdma)) -+ return -EINVAL; -+ -+ wdma = &dev->tx_wdma[idx]; -+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, -+ true)) -+ return -ENOMEM; -+ -+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, -+ wdma->desc_phys); -+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, -+ size); -+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); -+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); -+ -+ if (!idx) { -+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, -+ wdma->desc_phys); -+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, -+ size); -+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, -+ 0); -+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, -+ 0); -+ } -+ -+ return 0; -+} -+ -+static void -+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, -+ u32 reason, u32 hash) -+{ -+ struct mtk_eth *eth = dev->hw->eth; -+ struct ethhdr *eh; -+ -+ if (!skb) -+ return; -+ -+ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) -+ return; -+ -+ skb_set_mac_header(skb, 0); -+ eh = eth_hdr(skb); -+ skb->protocol = eh->h_proto; -+ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); -+} -+ - static void - mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) - { -@@ -739,6 +1168,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev - - wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); - } else { -+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, -+ GENMASK(1, 0)); - /* initail tx interrupt trigger */ - wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, - MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | -@@ -757,6 +1188,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev - FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, - dev->wlan.txfree_tbit)); - -+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, -+ MTK_WED_WPDMA_INT_CTRL_RX0_EN | -+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR | -+ MTK_WED_WPDMA_INT_CTRL_RX1_EN | -+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, -+ 
dev->wlan.rx_tbit[0]) | -+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, -+ dev->wlan.rx_tbit[1])); -+ - wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); - wed_set(dev, MTK_WED_WDMA_INT_CTRL, - FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, -@@ -794,9 +1235,15 @@ mtk_wed_dma_enable(struct mtk_wed_device - wdma_set(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); - } else { -+ int i; -+ - wed_set(dev, MTK_WED_WPDMA_CTRL, - MTK_WED_WPDMA_CTRL_SDL1_FIXED); - -+ wed_set(dev, MTK_WED_WDMA_GLO_CFG, -+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | -+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); -+ - wed_set(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | - MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); -@@ -804,6 +1251,15 @@ mtk_wed_dma_enable(struct mtk_wed_device - wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, - MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | - MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); -+ -+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, -+ MTK_WED_WPDMA_RX_D_RX_DRV_EN | -+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | -+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, -+ 0x2)); -+ -+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) -+ mtk_wed_check_wfdma_rx_fill(dev, i); - } - } - -@@ -829,7 +1285,19 @@ mtk_wed_start(struct mtk_wed_device *dev - val |= BIT(0) | (BIT(1) * !!dev->hw->index); - regmap_write(dev->hw->mirror, dev->hw->index * 4, val); - } else { -- mtk_wed_set_512_support(dev, true); -+ /* driver set mid ready and only once */ -+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, -+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); -+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, -+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); -+ -+ wed_r32(dev, MTK_WED_EXT_INT_MASK1); -+ wed_r32(dev, MTK_WED_EXT_INT_MASK2); -+ -+ if (mtk_wed_rro_cfg(dev)) -+ return; -+ -+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512); - } - - mtk_wed_dma_enable(dev); -@@ -863,7 +1331,7 @@ mtk_wed_attach(struct mtk_wed_device *de - if (!hw) { - module_put(THIS_MODULE); - ret = -ENODEV; -- goto out; -+ goto unlock; - } - - device = dev->wlan.bus_type == MTK_WED_BUS_PCIE -@@ -876,15 +1344,24 @@ mtk_wed_attach(struct mtk_wed_device *de - dev->dev = hw->dev; - dev->irq = hw->irq; - dev->wdma_idx = hw->index; -+ dev->version = hw->version; - - if (hw->eth->dma_dev == hw->eth->dev && - of_dma_is_coherent(hw->eth->dev->of_node)) - mtk_eth_set_dma_device(hw->eth, hw->dev); - -- ret = mtk_wed_buffer_alloc(dev); -- if (ret) { -- mtk_wed_detach(dev); -+ ret = mtk_wed_tx_buffer_alloc(dev); -+ if (ret) - goto out; -+ -+ if (mtk_wed_get_rx_capa(dev)) { -+ ret = mtk_wed_rx_buffer_alloc(dev); -+ if (ret) -+ goto out; -+ -+ ret = mtk_wed_rro_alloc(dev); -+ if (ret) -+ goto out; - } - - mtk_wed_hw_init_early(dev); -@@ -893,8 +1370,10 @@ mtk_wed_attach(struct mtk_wed_device *de - BIT(hw->index), 0); - else - ret = mtk_wed_wo_init(hw); -- - out: -+ if (ret) -+ mtk_wed_detach(dev); -+unlock: - mutex_unlock(&hw_lock); - - return ret; -@@ -917,10 +1396,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev - * WDMA RX. 
- */ - -- BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); -+ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) -+ return -EINVAL; - - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, -- sizeof(*ring->desc))) -+ sizeof(*ring->desc), true)) - return -ENOMEM; - - if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) -@@ -967,6 +1447,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed - return 0; - } - -+static int -+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) -+{ -+ struct mtk_wed_ring *ring = &dev->rx_ring[idx]; -+ -+ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) -+ return -EINVAL; -+ -+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, -+ sizeof(*ring->desc), false)) -+ return -ENOMEM; -+ -+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) -+ return -ENOMEM; -+ -+ ring->reg_base = MTK_WED_RING_RX_DATA(idx); -+ ring->wpdma = regs; -+ ring->flags |= MTK_WED_RING_CONFIGURED; -+ -+ /* WPDMA -> WED */ -+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); -+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); -+ -+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, -+ ring->desc_phys); -+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, -+ MTK_WED_RX_RING_SIZE); -+ -+ return 0; -+} -+ - static u32 - mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) - { -@@ -1063,7 +1574,9 @@ void mtk_wed_add_hw(struct device_node * - static const struct mtk_wed_ops wed_ops = { - .attach = mtk_wed_attach, - .tx_ring_setup = mtk_wed_tx_ring_setup, -+ .rx_ring_setup = mtk_wed_rx_ring_setup, - .txfree_ring_setup = mtk_wed_txfree_ring_setup, -+ .msg_update = mtk_wed_mcu_msg_update, - .start = mtk_wed_start, - .stop = mtk_wed_stop, - .reset_dma = mtk_wed_reset_dma, -@@ -1072,6 +1585,7 @@ void mtk_wed_add_hw(struct device_node * - .irq_get = mtk_wed_irq_get, - .irq_set_mask = mtk_wed_irq_set_mask, - .detach = mtk_wed_detach, -+ .ppe_check = mtk_wed_ppe_check, - }; - struct device_node *eth_np = eth->dev->of_node; - struct platform_device *pdev; ---- a/drivers/net/ethernet/mediatek/mtk_wed.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed.h -@@ -87,6 +87,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, - } - - static inline u32 -+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg) -+{ -+ if (!dev->rx_ring[ring].wpdma) -+ return 0; -+ -+ return readl(dev->rx_ring[ring].wpdma + reg); -+} -+ -+static inline void -+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) -+{ -+ if (!dev->rx_ring[ring].wpdma) -+ return; -+ -+ writel(val, dev->rx_ring[ring].wpdma + reg); -+} -+ -+static inline u32 - wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) - { - if (!dev->txfree_ring.wpdma) -@@ -128,6 +146,7 @@ static inline int mtk_wed_flow_add(int i - static inline void mtk_wed_flow_remove(int index) - { - } -+ - #endif - - #ifdef CONFIG_DEBUG_FS ---- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - #include "mtk_wed_regs.h" - #include "mtk_wed_wo.h" -@@ -60,24 +61,37 @@ void mtk_wed_mcu_rx_event(struct mtk_wed - wake_up(&wo->mcu.wait); - } - -+static void -+mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) -+{ -+ u32 count = get_unaligned_le32(skb->data); -+ struct mtk_wed_wo_rx_stats *stats; -+ int i; -+ -+ if (count * sizeof(*stats) > skb->len - sizeof(u32)) -+ return; -+ -+ stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32)); -+ 
for (i = 0 ; i < count ; i++) -+ wed->wlan.update_wo_rx_stats(wed, &stats[i]); -+} -+ - void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, - struct sk_buff *skb) - { - struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; - -- switch (hdr->cmd) { -- case MTK_WED_WO_EVT_LOG_DUMP: { -- const char *msg = (const char *)(skb->data + sizeof(*hdr)); -+ skb_pull(skb, sizeof(*hdr)); - -- dev_notice(wo->hw->dev, "%s\n", msg); -+ switch (hdr->cmd) { -+ case MTK_WED_WO_EVT_LOG_DUMP: -+ dev_notice(wo->hw->dev, "%s\n", skb->data); - break; -- } - case MTK_WED_WO_EVT_PROFILING: { -- struct mtk_wed_wo_log_info *info; -- u32 count = (skb->len - sizeof(*hdr)) / sizeof(*info); -+ struct mtk_wed_wo_log_info *info = (void *)skb->data; -+ u32 count = skb->len / sizeof(*info); - int i; - -- info = (struct mtk_wed_wo_log_info *)(skb->data + sizeof(*hdr)); - for (i = 0 ; i < count ; i++) - dev_notice(wo->hw->dev, - "SN:%u latency: total=%u, rro:%u, mod:%u\n", -@@ -88,6 +102,7 @@ void mtk_wed_mcu_rx_unsolicited_event(st - break; - } - case MTK_WED_WO_EVT_RXCNT_INFO: -+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); - break; - default: - break; -@@ -144,6 +159,8 @@ mtk_wed_mcu_parse_response(struct mtk_we - skb_pull(skb, sizeof(*hdr)); - switch (cmd) { - case MTK_WED_WO_CMD_RXCNT_INFO: -+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); -+ break; - default: - break; - } -@@ -182,6 +199,18 @@ unlock: - return ret; - } - -+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, -+ int len) -+{ -+ struct mtk_wed_wo *wo = dev->hw->wed_wo; -+ -+ if (dev->hw->version == 1) -+ return 0; -+ -+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len, -+ true); -+} -+ - static int - mtk_wed_get_memory_region(struct mtk_wed_wo *wo, - struct mtk_wed_wo_memory_region *region) ---- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h -@@ -4,6 +4,7 @@ - #ifndef __MTK_WED_REGS_H - #define __MTK_WED_REGS_H - -+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8) - #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) - #define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0) - #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) -@@ -28,6 +29,8 @@ struct mtk_wdma_desc { - #define MTK_WED_RESET_WED_TX_DMA BIT(12) - #define MTK_WED_RESET_WDMA_RX_DRV BIT(17) - #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) -+#define MTK_WED_RESET_RX_RRO_QM BIT(20) -+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21) - #define MTK_WED_RESET_WED BIT(31) - - #define MTK_WED_CTRL 0x00c -@@ -39,8 +42,12 @@ struct mtk_wdma_desc { - #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) - #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) - #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) --#define MTK_WED_CTRL_RESERVE_EN BIT(12) --#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) -+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12) -+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13) -+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14) -+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) -+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) -+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) - #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) - #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) - #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) -@@ -62,6 +69,9 @@ struct mtk_wdma_desc { - #define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22) - #define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23) - #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) -+#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25) -+#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26) 
-+#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27) - #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ - MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ - MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ -@@ -71,6 +81,8 @@ struct mtk_wdma_desc { - MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR) - - #define MTK_WED_EXT_INT_MASK 0x028 -+#define MTK_WED_EXT_INT_MASK1 0x02c -+#define MTK_WED_EXT_INT_MASK2 0x030 - - #define MTK_WED_STATUS 0x060 - #define MTK_WED_STATUS_TX GENMASK(15, 8) -@@ -151,6 +163,7 @@ struct mtk_wdma_desc { - #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) - - #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) -+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) - - #define MTK_WED_SCR0 0x3c0 - #define MTK_WED_WPDMA_INT_TRIGGER 0x504 -@@ -213,6 +226,12 @@ struct mtk_wdma_desc { - #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) - - #define MTK_WED_WPDMA_INT_CTRL_RX 0x534 -+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0) -+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1) -+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2) -+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8) -+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9) -+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10) - - #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 - #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) -@@ -242,11 +261,34 @@ struct mtk_wdma_desc { - - #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) - #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) -+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10) -+ -+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c -+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0) -+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7) -+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24) -+ -+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 -+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) -+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) -+ -+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c -+#define MTK_WED_WPDMA_RX_RING 0x770 -+ -+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) -+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) -+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c -+ -+#define MTK_WED_WDMA_RING_TX 0x800 -+ -+#define MTK_WED_WDMA_TX_MIB 0x810 -+ - #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) - #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) - - #define MTK_WED_WDMA_GLO_CFG 0xa04 - #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) -+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) - #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) - #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) - #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) -@@ -291,6 +333,20 @@ struct mtk_wdma_desc { - #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) - #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) - -+#define MTK_WED_RX_BM_RX_DMAD 0xd80 -+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0) -+ -+#define MTK_WED_RX_BM_BASE 0xd84 -+#define MTK_WED_RX_BM_INIT_PTR 0xd88 -+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0) -+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16) -+ -+#define MTK_WED_RX_PTR 0xd8c -+ -+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4 -+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16) -+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0) -+ - #define MTK_WED_RING_OFS_BASE 0x00 - #define MTK_WED_RING_OFS_COUNT 0x04 - #define MTK_WED_RING_OFS_CPU_IDX 0x08 -@@ -301,7 +357,9 @@ struct mtk_wdma_desc { - - #define MTK_WDMA_GLO_CFG 0x204 - #define MTK_WDMA_GLO_CFG_TX_DMA_EN 
BIT(0) -+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1) - #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2) -+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3) - #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26) - #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27) - #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28) -@@ -330,4 +388,70 @@ struct mtk_wdma_desc { - /* DMA channel mapping */ - #define HIFSYS_DMA_AG_MAP 0x008 - -+#define MTK_WED_RTQM_GLO_CFG 0xb00 -+#define MTK_WED_RTQM_BUSY BIT(1) -+#define MTK_WED_RTQM_Q_RST BIT(2) -+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) -+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) -+ -+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) -+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) -+#define MTK_WED_RTQM_Q2N_MIB 0xb80 -+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4) -+ -+#define MTK_WED_RTQM_Q2B_MIB 0xb8c -+#define MTK_WED_RTQM_PFDBK_MIB 0xb90 -+ -+#define MTK_WED_RROQM_GLO_CFG 0xc04 -+#define MTK_WED_RROQM_RST_IDX 0xc08 -+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) -+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4) -+ -+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40 -+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44 -+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0) -+ -+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48 -+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c -+ -+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50 -+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54 -+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0) -+ -+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58 -+ -+#define MTK_WED_RROQ_BASE_L 0xc80 -+#define MTK_WED_RROQ_BASE_H 0xc84 -+ -+#define MTK_WED_RROQM_MIOD_CFG 0xc8c -+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0) -+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8) -+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16) -+ -+#define MTK_WED_RROQM_MID_MIB 0xcc0 -+#define MTK_WED_RROQM_MOD_MIB 0xcc4 -+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8 -+#define MTK_WED_RROQM_FDBK_MIB 0xcd0 -+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4 -+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0 -+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4 -+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8 -+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec -+ -+#define MTK_WED_RX_BM_RX_DMAD 0xd80 -+#define MTK_WED_RX_BM_BASE 0xd84 -+#define MTK_WED_RX_BM_INIT_PTR 0xd88 -+#define MTK_WED_RX_BM_PTR 0xd8c -+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16) -+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0) -+ -+#define MTK_WED_RX_BM_BLEN 0xd90 -+#define MTK_WED_RX_BM_STS 0xd94 -+#define MTK_WED_RX_BM_INTF2 0xd98 -+#define MTK_WED_RX_BM_INTF 0xd9c -+#define MTK_WED_RX_BM_ERR_STS 0xda8 -+ -+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 -+#define MTK_WED_PCIE_INT_MASK 0x0 -+ - #endif ---- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h -+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h -@@ -49,6 +49,10 @@ enum { - MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2), - }; - -+#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050 -+#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20 -+#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1 -+ - enum { - MTK_WED_WO_REGION_EMI, - MTK_WED_WO_REGION_ILM, -@@ -57,6 +61,28 @@ enum { - __MTK_WED_WO_REGION_MAX, - }; - -+enum mtk_wed_wo_state { -+ MTK_WED_WO_STATE_UNDEFINED, -+ MTK_WED_WO_STATE_INIT, -+ MTK_WED_WO_STATE_ENABLE, -+ MTK_WED_WO_STATE_DISABLE, -+ MTK_WED_WO_STATE_HALT, -+ MTK_WED_WO_STATE_GATING, -+ MTK_WED_WO_STATE_SER_RESET, -+ MTK_WED_WO_STATE_WF_RESET, -+}; -+ -+enum mtk_wed_wo_done_state { -+ MTK_WED_WOIF_UNDEFINED, -+ MTK_WED_WOIF_DISABLE_DONE, -+ MTK_WED_WOIF_TRIGGER_ENABLE, -+ MTK_WED_WOIF_ENABLE_DONE, -+ MTK_WED_WOIF_TRIGGER_GATING, -+ 
MTK_WED_WOIF_GATING_DONE, -+ MTK_WED_WOIF_TRIGGER_HALT, -+ MTK_WED_WOIF_HALT_DONE, -+}; -+ - enum mtk_wed_dummy_cr_idx { - MTK_WED_DUMMY_CR_FWDL, - MTK_WED_DUMMY_CR_WO_STATUS, -@@ -245,6 +271,8 @@ void mtk_wed_mcu_rx_unsolicited_event(st - struct sk_buff *skb); - int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, - const void *data, int len, bool wait_resp); -+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, -+ int len); - int mtk_wed_mcu_init(struct mtk_wed_wo *wo); - int mtk_wed_wo_init(struct mtk_wed_hw *hw); - void mtk_wed_wo_deinit(struct mtk_wed_hw *hw); ---- a/include/linux/soc/mediatek/mtk_wed.h -+++ b/include/linux/soc/mediatek/mtk_wed.h -@@ -5,10 +5,13 @@ - #include - #include - #include -+#include - - #define MTK_WED_TX_QUEUES 2 - #define MTK_WED_RX_QUEUES 2 - -+#define WED_WO_STA_REC 0x6 -+ - struct mtk_wed_hw; - struct mtk_wdma_desc; - -@@ -41,21 +44,37 @@ enum mtk_wed_wo_cmd { - MTK_WED_WO_CMD_WED_END - }; - -+struct mtk_rxbm_desc { -+ __le32 buf0; -+ __le32 token; -+} __packed __aligned(4); -+ - enum mtk_wed_bus_tye { - MTK_WED_BUS_PCIE, - MTK_WED_BUS_AXI, - }; - -+#define MTK_WED_RING_CONFIGURED BIT(0) - struct mtk_wed_ring { - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; - u32 desc_size; - int size; -+ u32 flags; - - u32 reg_base; - void __iomem *wpdma; - }; - -+struct mtk_wed_wo_rx_stats { -+ __le16 wlan_idx; -+ __le16 tid; -+ __le32 rx_pkt_cnt; -+ __le32 rx_byte_cnt; -+ __le32 rx_err_cnt; -+ __le32 rx_drop_cnt; -+}; -+ - struct mtk_wed_device { - #ifdef CONFIG_NET_MEDIATEK_SOC_WED - const struct mtk_wed_ops *ops; -@@ -64,9 +83,12 @@ struct mtk_wed_device { - bool init_done, running; - int wdma_idx; - int irq; -+ u8 version; - - struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; -+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES]; - struct mtk_wed_ring txfree_ring; -+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; - struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES]; - - struct { -@@ -74,7 +96,20 @@ struct mtk_wed_device { - void **pages; - struct mtk_wdma_desc *desc; - dma_addr_t desc_phys; -- } buf_ring; -+ } tx_buf_ring; -+ -+ struct { -+ int size; -+ struct page_frag_cache rx_page; -+ struct mtk_rxbm_desc *desc; -+ dma_addr_t desc_phys; -+ } rx_buf_ring; -+ -+ struct { -+ struct mtk_wed_ring ring; -+ dma_addr_t miod_phys; -+ dma_addr_t fdbk_phys; -+ } rro; - - /* filled by driver: */ - struct { -@@ -83,22 +118,36 @@ struct mtk_wed_device { - struct pci_dev *pci_dev; - }; - enum mtk_wed_bus_tye bus_type; -+ void __iomem *base; -+ u32 phy_base; - - u32 wpdma_phys; - u32 wpdma_int; - u32 wpdma_mask; - u32 wpdma_tx; - u32 wpdma_txfree; -+ u32 wpdma_rx_glo; -+ u32 wpdma_rx; -+ -+ bool wcid_512; - - u16 token_start; - unsigned int nbuf; -+ unsigned int rx_nbuf; -+ unsigned int rx_npkt; -+ unsigned int rx_size; - - u8 tx_tbit[MTK_WED_TX_QUEUES]; -+ u8 rx_tbit[MTK_WED_RX_QUEUES]; - u8 txfree_tbit; - - u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); - int (*offload_enable)(struct mtk_wed_device *wed); - void (*offload_disable)(struct mtk_wed_device *wed); -+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size); -+ void (*release_rx_buf)(struct mtk_wed_device *wed); -+ void (*update_wo_rx_stats)(struct mtk_wed_device *wed, -+ struct mtk_wed_wo_rx_stats *stats); - } wlan; - #endif - }; -@@ -107,9 +156,15 @@ struct mtk_wed_ops { - int (*attach)(struct mtk_wed_device *dev); - int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, - void __iomem *regs); -+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring, -+ void 
__iomem *regs); - int (*txfree_ring_setup)(struct mtk_wed_device *dev, - void __iomem *regs); -+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id, -+ void *data, int len); - void (*detach)(struct mtk_wed_device *dev); -+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb, -+ u32 reason, u32 hash); - - void (*stop)(struct mtk_wed_device *dev); - void (*start)(struct mtk_wed_device *dev, u32 irq_mask); -@@ -144,6 +199,16 @@ mtk_wed_device_attach(struct mtk_wed_dev - return ret; - } - -+static inline bool -+mtk_wed_get_rx_capa(struct mtk_wed_device *dev) -+{ -+#ifdef CONFIG_NET_MEDIATEK_SOC_WED -+ return dev->version != 1; -+#else -+ return false; -+#endif -+} -+ - #ifdef CONFIG_NET_MEDIATEK_SOC_WED - #define mtk_wed_device_active(_dev) !!(_dev)->ops - #define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) -@@ -160,6 +225,12 @@ mtk_wed_device_attach(struct mtk_wed_dev - (_dev)->ops->irq_get(_dev, _mask) - #define mtk_wed_device_irq_set_mask(_dev, _mask) \ - (_dev)->ops->irq_set_mask(_dev, _mask) -+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \ -+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs) -+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \ -+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash) -+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \ -+ (_dev)->ops->msg_update(_dev, _id, _msg, _len) - #else - static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) - { -@@ -173,6 +244,9 @@ static inline bool mtk_wed_device_active - #define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) - #define mtk_wed_device_irq_get(_dev, _mask) 0 - #define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) -+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV -+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0) -+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV - #endif - - #endif diff --git a/target/linux/generic/pending-5.15/733-05-net-ethernet-mtk_wed-add-rx-mib-counters.patch b/target/linux/generic/pending-5.15/733-05-net-ethernet-mtk_wed-add-rx-mib-counters.patch deleted file mode 100644 index bb1066dece..0000000000 --- a/target/linux/generic/pending-5.15/733-05-net-ethernet-mtk_wed-add-rx-mib-counters.patch +++ /dev/null @@ -1,149 +0,0 @@ -From: Lorenzo Bianconi -Date: Sat, 5 Nov 2022 23:36:22 +0100 -Subject: [PATCH] net: ethernet: mtk_wed: add rx mib counters - -Introduce WED RX MIB counters support available on MT7986a SoC. - -Tested-by: Daniel Golle -Co-developed-by: Sujuan Chen -Signed-off-by: Sujuan Chen -Signed-off-by: Lorenzo Bianconi -Signed-off-by: David S. 
Miller ---- - ---- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c -@@ -2,6 +2,7 @@ - /* Copyright (C) 2021 Felix Fietkau */ - - #include -+#include - #include "mtk_wed.h" - #include "mtk_wed_regs.h" - -@@ -18,6 +19,8 @@ enum { - DUMP_TYPE_WDMA, - DUMP_TYPE_WPDMA_TX, - DUMP_TYPE_WPDMA_TXFREE, -+ DUMP_TYPE_WPDMA_RX, -+ DUMP_TYPE_WED_RRO, - }; - - #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } -@@ -36,6 +39,9 @@ enum { - - #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) - #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) -+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n) -+#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO) -+#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO) - - static void - print_reg_val(struct seq_file *s, const char *name, u32 val) -@@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct - cur > regs ? "\n" : "", - cur->name); - continue; -+ case DUMP_TYPE_WED_RRO: - case DUMP_TYPE_WED: - val = wed_r32(dev, cur->offset); - break; -@@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct - case DUMP_TYPE_WPDMA_TXFREE: - val = wpdma_txfree_r32(dev, cur->offset); - break; -+ case DUMP_TYPE_WPDMA_RX: -+ val = wpdma_rx_r32(dev, cur->base, cur->offset); -+ break; - } - print_reg_val(s, cur->name, val); - } -@@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void - } - DEFINE_SHOW_ATTRIBUTE(wed_txinfo); - -+static int -+wed_rxinfo_show(struct seq_file *s, void *data) -+{ -+ static const struct reg_dump regs[] = { -+ DUMP_STR("WPDMA RX"), -+ DUMP_WPDMA_RX_RING(0), -+ DUMP_WPDMA_RX_RING(1), -+ -+ DUMP_STR("WPDMA RX"), -+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)), -+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)), -+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)), -+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)), -+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)), -+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)), -+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB), -+ -+ DUMP_STR("WED RX"), -+ DUMP_WED_RING(WED_RING_RX_DATA(0)), -+ DUMP_WED_RING(WED_RING_RX_DATA(1)), -+ -+ DUMP_STR("WED RRO"), -+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), -+ DUMP_WED(WED_RROQM_MID_MIB), -+ DUMP_WED(WED_RROQM_MOD_MIB), -+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB), -+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0), -+ DUMP_WED(WED_RROQM_FDBK_IND_MIB), -+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB), -+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB), -+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), -+ -+ DUMP_STR("WED Route QM"), -+ DUMP_WED(WED_RTQM_R2H_MIB(0)), -+ DUMP_WED(WED_RTQM_R2Q_MIB(0)), -+ DUMP_WED(WED_RTQM_Q2H_MIB(0)), -+ DUMP_WED(WED_RTQM_R2H_MIB(1)), -+ DUMP_WED(WED_RTQM_R2Q_MIB(1)), -+ DUMP_WED(WED_RTQM_Q2H_MIB(1)), -+ DUMP_WED(WED_RTQM_Q2N_MIB), -+ DUMP_WED(WED_RTQM_Q2B_MIB), -+ DUMP_WED(WED_RTQM_PFDBK_MIB), -+ -+ DUMP_STR("WED WDMA TX"), -+ DUMP_WED(WED_WDMA_TX_MIB), -+ DUMP_WED_RING(WED_WDMA_RING_TX), -+ -+ DUMP_STR("WDMA TX"), -+ DUMP_WDMA(WDMA_GLO_CFG), -+ DUMP_WDMA_RING(WDMA_RING_TX(0)), -+ DUMP_WDMA_RING(WDMA_RING_TX(1)), -+ -+ DUMP_STR("WED RX BM"), -+ DUMP_WED(WED_RX_BM_BASE), -+ DUMP_WED(WED_RX_BM_RX_DMAD), -+ DUMP_WED(WED_RX_BM_PTR), -+ DUMP_WED(WED_RX_BM_TKID_MIB), -+ DUMP_WED(WED_RX_BM_BLEN), -+ DUMP_WED(WED_RX_BM_STS), -+ DUMP_WED(WED_RX_BM_INTF2), -+ DUMP_WED(WED_RX_BM_INTF), -+ DUMP_WED(WED_RX_BM_ERR_STS), -+ }; -+ struct mtk_wed_hw *hw = s->private; -+ struct mtk_wed_device *dev = hw->wed_dev; -+ -+ if (!dev) -+ return 0; -+ 
-+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); -+ -+ return 0; -+} -+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); - - static int - mtk_wed_reg_set(void *data, u64 val) -@@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w - debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); - debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); - debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); -+ if (hw->version != 1) -+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, -+ &wed_rxinfo_fops); - } diff --git a/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch b/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch index 39d6d0c879..7bd4df5fff 100644 --- a/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch +++ b/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch @@ -12,7 +12,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -1992,9 +1992,6 @@ static int mtk_poll_rx(struct napi_struc +@@ -1998,9 +1998,6 @@ static int mtk_poll_rx(struct napi_struc skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); @@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau /* When using VLAN untagging in combination with DSA, the * hardware treats the MTK special tag as a VLAN and untags it. */ -@@ -2007,6 +2004,9 @@ static int mtk_poll_rx(struct napi_struc +@@ -2013,6 +2010,9 @@ static int mtk_poll_rx(struct napi_struc skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); } diff --git a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch index 82cd4088c9..757750832c 100644 --- a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch +++ b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch @@ -20,7 +20,7 @@ Signed-off-by: David S. Miller --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c -@@ -224,13 +224,35 @@ static int _mtk_mdio_write(struct mtk_et +@@ -230,13 +230,35 @@ static int _mtk_mdio_write(struct mtk_et if (ret < 0) return ret; @@ -63,7 +63,7 @@ Signed-off-by: David S. Miller ret = mtk_mdio_busy_wait(eth); if (ret < 0) -@@ -247,12 +269,33 @@ static int _mtk_mdio_read(struct mtk_eth +@@ -253,12 +275,33 @@ static int _mtk_mdio_read(struct mtk_eth if (ret < 0) return ret; @@ -103,7 +103,7 @@ Signed-off-by: David S. Miller ret = mtk_mdio_busy_wait(eth); if (ret < 0) -@@ -720,6 +763,7 @@ static int mtk_mdio_init(struct mtk_eth +@@ -726,6 +769,7 @@ static int mtk_mdio_init(struct mtk_eth eth->mii_bus->name = "mdio"; eth->mii_bus->read = mtk_mdio_read; eth->mii_bus->write = mtk_mdio_write; @@ -113,7 +113,7 @@ Signed-off-by: David S. Miller --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h -@@ -355,9 +355,12 @@ +@@ -369,9 +369,12 @@ #define PHY_IAC_ADDR_MASK GENMASK(24, 20) #define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x)) #define PHY_IAC_CMD_MASK GENMASK(19, 18)