This is an automatically generated commit which aids in following kernel patch history,
as git would otherwise see the move and copy as a rename, thus defeating the purpose.
See: https://lists.openwrt.org/pipermail/openwrt-devel/2023-October/041673.html
for the original discussion.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
--- /dev/null
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_APQ_GCC_8084 is not set
+# CONFIG_APQ_MMCC_8084 is not set
+CONFIG_ARCH_32BIT_OFF_T=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_IPQ40XX=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+# CONFIG_ARCH_MDM9615 is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+# CONFIG_ARCH_MSM8909 is not set
+# CONFIG_ARCH_MSM8916 is not set
+# CONFIG_ARCH_MSM8960 is not set
+# CONFIG_ARCH_MSM8974 is not set
+# CONFIG_ARCH_MSM8X60 is not set
+CONFIG_ARCH_MULTIPLATFORM=y
+CONFIG_ARCH_MULTI_V6_V7=y
+CONFIG_ARCH_MULTI_V7=y
+CONFIG_ARCH_NR_GPIO=0
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARM=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+# CONFIG_ARM_ATAG_DTB_COMPAT is not set
+CONFIG_ARM_CPUIDLE=y
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_GIC=y
+CONFIG_ARM_HAS_GROUP_RELOCS=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_ARM_PATCH_IDIV=y
+CONFIG_ARM_PATCH_PHYS_VIRT=y
+# CONFIG_ARM_QCOM_CPUFREQ_HW is not set
+# CONFIG_ARM_QCOM_CPUFREQ_NVMEM is not set
+# CONFIG_ARM_QCOM_SPM_CPUIDLE is not set
+# CONFIG_ARM_SMMU is not set
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_UNWIND=y
+CONFIG_ARM_VIRT_EXT=y
+CONFIG_AT803X_PHY=y
+CONFIG_AUTO_ZRELADDR=y
+CONFIG_BCH=y
+CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BOUNCE=y
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_CC_NO_ARRAY_BOUNDS=y
+CONFIG_CLKSRC_QCOM=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_CMDLINE_PARTITION=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_CPUFREQ_DT_PLATDEV=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_PM=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_SPECTRE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CPU_THUMB_CAPABLE=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_V7=y
+CONFIG_CRC16=y
+# CONFIG_CRC32_SARWATE is not set
+CONFIG_CRC32_SLICEBY8=y
+CONFIG_CRC8=y
+CONFIG_CRYPTO_AES_ARM=y
+CONFIG_CRYPTO_AES_ARM_BS=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y
+CONFIG_CRYPTO_BLAKE2S_ARM=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_DEV_QCE=y
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_AEAD is not set
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL is not set
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set
+CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER=y
+CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y
+CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN=512
+CONFIG_CRYPTO_DEV_QCOM_RNG=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_LIB_DES=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA256_ARM=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SIMD=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CURRENT_POINTER_IN_TPIDRURO=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
+CONFIG_DEBUG_MISC=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_OF=y
+CONFIG_DMA_OPS=y
+CONFIG_DMA_SHARED_BUFFER=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DTC=y
+CONFIG_DT_IDLE_STATES=y
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_EXTCON=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_VDSO_32=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_74X164=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_WATCHDOG=y
+CONFIG_GPIO_WATCHDOG_ARCH_INITCALL=y
+CONFIG_GRO_CELLS=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAVE_SMP=y
+CONFIG_HIGHMEM=y
+# CONFIG_HIGHPTE is not set
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_OPTEE=y
+CONFIG_HZ_FIXED=0
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+# CONFIG_I2C_QCOM_CCI is not set
+CONFIG_I2C_QUP=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_IOMMU_DEBUGFS is not set
+# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
+# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
+CONFIG_IOMMU_SUPPORT=y
+# CONFIG_IPQ_APSS_PLL is not set
+CONFIG_IPQ_GCC_4019=y
+# CONFIG_IPQ_GCC_6018 is not set
+# CONFIG_IPQ_GCC_806X is not set
+# CONFIG_IPQ_GCC_8074 is not set
+# CONFIG_IPQ_LCC_806X is not set
+CONFIG_IRQCHIP=y
+CONFIG_IRQSTACKS=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_WORK=y
+CONFIG_KMAP_LOCAL=y
+CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY=y
+# CONFIG_KPSS_XCC is not set
+# CONFIG_KRAITCC is not set
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_LEDS_LP5523=y
+CONFIG_LEDS_LP5562=y
+CONFIG_LEDS_LP55XX_COMMON=y
+CONFIG_LEDS_TLC591XX=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MDIO_GPIO=y
+CONFIG_MDIO_IPQ4019=y
+# CONFIG_MDM_GCC_9615 is not set
+# CONFIG_MDM_LCC_9615 is not set
+CONFIG_MEMFD_CREATE=y
+# CONFIG_MFD_HI6421_SPMI is not set
+# CONFIG_MFD_QCOM_RPM is not set
+# CONFIG_MFD_SPMI_PMIC is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGHT_HAVE_CACHE_L2X0=y
+CONFIG_MIGRATION=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_CQHCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_MSM=y
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MODULES_USE_ELF_REL=y
+# CONFIG_MSM_GCC_8660 is not set
+# CONFIG_MSM_GCC_8909 is not set
+# CONFIG_MSM_GCC_8916 is not set
+# CONFIG_MSM_GCC_8939 is not set
+# CONFIG_MSM_GCC_8960 is not set
+# CONFIG_MSM_GCC_8974 is not set
+# CONFIG_MSM_GCC_8976 is not set
+# CONFIG_MSM_GCC_8994 is not set
+# CONFIG_MSM_GCC_8996 is not set
+# CONFIG_MSM_GCC_8998 is not set
+# CONFIG_MSM_GPUCC_8998 is not set
+# CONFIG_MSM_LCC_8960 is not set
+# CONFIG_MSM_MMCC_8960 is not set
+# CONFIG_MSM_MMCC_8974 is not set
+# CONFIG_MSM_MMCC_8996 is not set
+# CONFIG_MSM_MMCC_8998 is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_NAND_QCOM=y
+# CONFIG_MTD_QCOMSMEM_PARTS is not set
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_SPLIT_WRGG_FW=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEON=y
+CONFIG_NET_DEVLINK=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_QCA8K_IPQ4019=y
+CONFIG_NET_DSA_TAG_OOB=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NLS=y
+CONFIG_NO_HZ=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_NVMEM=y
+CONFIG_NVMEM_QCOM_QFPROM=y
+# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set
+# CONFIG_NVMEM_SPMI_SDAM is not set
+CONFIG_NVMEM_SYSFS=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_OLD_SIGACTION=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_OPTEE=y
+CONFIG_PADATA=y
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_DW_HOST=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCI_DISABLE_COMMON_QUIRKS=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_IRQ_DOMAIN=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_PHYLIB=y
+CONFIG_PHYLINK=y
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_EDP is not set
+CONFIG_PHY_QCOM_IPQ4019_USB=y
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_QCOM_IPQ806X_USB is not set
+# CONFIG_PHY_QCOM_PCIE2 is not set
+# CONFIG_PHY_QCOM_QMP is not set
+# CONFIG_PHY_QCOM_QUSB2 is not set
+# CONFIG_PHY_QCOM_USB_HS_28NM is not set
+# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
+# CONFIG_PHY_QCOM_USB_SS is not set
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_APQ8064 is not set
+# CONFIG_PINCTRL_APQ8084 is not set
+CONFIG_PINCTRL_IPQ4019=y
+# CONFIG_PINCTRL_IPQ8064 is not set
+# CONFIG_PINCTRL_MDM9615 is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_MSM8226 is not set
+# CONFIG_PINCTRL_MSM8660 is not set
+# CONFIG_PINCTRL_MSM8909 is not set
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_MSM8960 is not set
+# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+# CONFIG_PINCTRL_SDX65 is not set
+CONFIG_PM_OPP=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PPS=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QCA807X_PHY=y
+# CONFIG_QCM_DISPCC_2290 is not set
+# CONFIG_QCM_GCC_2290 is not set
+CONFIG_QCOM_A53PLL=y
+# CONFIG_QCOM_ADM is not set
+CONFIG_QCOM_BAM_DMA=y
+# CONFIG_QCOM_COMMAND_DB is not set
+# CONFIG_QCOM_CPR is not set
+# CONFIG_QCOM_EBI2 is not set
+# CONFIG_QCOM_GENI_SE is not set
+# CONFIG_QCOM_GSBI is not set
+# CONFIG_QCOM_HFPLL is not set
+# CONFIG_QCOM_ICC_BWMON is not set
+# CONFIG_QCOM_IOMMU is not set
+CONFIG_QCOM_IPQ4019_ESS_EDMA=y
+# CONFIG_QCOM_LLCC is not set
+# CONFIG_QCOM_OCMEM is not set
+# CONFIG_QCOM_PDC is not set
+# CONFIG_QCOM_RMTFS_MEM is not set
+# CONFIG_QCOM_RPMH is not set
+CONFIG_QCOM_SCM=y
+# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
+CONFIG_QCOM_SMEM=y
+# CONFIG_QCOM_SMSM is not set
+# CONFIG_QCOM_SOCINFO is not set
+# CONFIG_QCOM_SPM is not set
+# CONFIG_QCOM_STATS is not set
+CONFIG_QCOM_TCSR=y
+# CONFIG_QCOM_TSENS is not set
+CONFIG_QCOM_WDT=y
+# CONFIG_QCS_GCC_404 is not set
+# CONFIG_QCS_Q6SSTOP_404 is not set
+# CONFIG_QCS_TURING_404 is not set
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_QCOM_LABIBB is not set
+# CONFIG_REGULATOR_QCOM_SPMI is not set
+# CONFIG_REGULATOR_QCOM_USB_VBUS is not set
+CONFIG_REGULATOR_VCTRL=y
+CONFIG_REGULATOR_VQMMC_IPQ4019=y
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_QCOM_AOSS is not set
+# CONFIG_RESET_QCOM_PDC is not set
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_OPTEE is not set
+CONFIG_RTC_I2C_AND_SPI=y
+CONFIG_RTC_MC146818_LIB=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+# CONFIG_SC_CAMCC_7280 is not set
+# CONFIG_SC_DISPCC_7180 is not set
+# CONFIG_SC_GCC_7180 is not set
+# CONFIG_SC_GCC_8280XP is not set
+# CONFIG_SC_GPUCC_7180 is not set
+# CONFIG_SC_LPASSCC_7280 is not set
+# CONFIG_SC_LPASS_CORECC_7180 is not set
+# CONFIG_SC_LPASS_CORECC_7280 is not set
+# CONFIG_SC_MSS_7180 is not set
+# CONFIG_SC_VIDEOCC_7180 is not set
+# CONFIG_SDM_CAMCC_845 is not set
+# CONFIG_SDM_DISPCC_845 is not set
+# CONFIG_SDM_GCC_660 is not set
+# CONFIG_SDM_GCC_845 is not set
+# CONFIG_SDM_GPUCC_845 is not set
+# CONFIG_SDM_LPASSCC_845 is not set
+# CONFIG_SDM_VIDEOCC_845 is not set
+# CONFIG_SDX_GCC_65 is not set
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SKB_EXTENSIONS=y
+CONFIG_SMP=y
+CONFIG_SMP_ON_UP=y
+# CONFIG_SM_CAMCC_8450 is not set
+# CONFIG_SM_GCC_8150 is not set
+# CONFIG_SM_GCC_8250 is not set
+# CONFIG_SM_GCC_8450 is not set
+# CONFIG_SM_GPUCC_6350 is not set
+# CONFIG_SM_GPUCC_8150 is not set
+# CONFIG_SM_GPUCC_8250 is not set
+# CONFIG_SM_GPUCC_8350 is not set
+# CONFIG_SM_VIDEOCC_8150 is not set
+# CONFIG_SM_VIDEOCC_8250 is not set
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_QUP=y
+CONFIG_SPMI=y
+# CONFIG_SPMI_HISI3670 is not set
+CONFIG_SPMI_MSM_PMIC_ARB=y
+# CONFIG_SPMI_PMIC_CLKDIV is not set
+CONFIG_SRCU=y
+CONFIG_SWPHY=y
+CONFIG_SWP_EMULATE=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_TEE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_OF=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
+CONFIG_UNWINDER_ARM=y
+CONFIG_USB=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USE_OF=y
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_BCJ=y
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
--- /dev/null
+From be59072c6eeb7535bf9a339fb9d5a8bfae17ac22 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 14 Aug 2023 12:40:23 +0200
+Subject: [PATCH] dt-bindings: clock: qcom: ipq4019: add missing networking
+ resets
+
+Add bindings for the missing networking resets found in IPQ4019 GCC.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230814104119.96858-1-robert.marko@sartura.hr
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ include/dt-bindings/clock/qcom,gcc-ipq4019.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
++++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+@@ -165,5 +165,11 @@
+ #define GCC_QDSS_BCR 69
+ #define GCC_MPM_BCR 70
+ #define GCC_SPDM_BCR 71
++#define ESS_MAC1_ARES 72
++#define ESS_MAC2_ARES 73
++#define ESS_MAC3_ARES 74
++#define ESS_MAC4_ARES 75
++#define ESS_MAC5_ARES 76
++#define ESS_PSGMII_ARES 77
+
+ #endif
--- /dev/null
+From 20014461691efc9e274c3870357152db7f091820 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 14 Aug 2023 12:40:24 +0200
+Subject: [PATCH] clk: qcom: gcc-ipq4019: add missing networking resets
+
+IPQ4019 has more networking-related resets that will be required for future
+wired networking support, so let's add them.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Link: https://lore.kernel.org/r/20230814104119.96858-2-robert.marko@sartura.hr
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/gcc-ipq4019.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/clk/qcom/gcc-ipq4019.c
++++ b/drivers/clk/qcom/gcc-ipq4019.c
+@@ -1707,6 +1707,12 @@ static const struct qcom_reset_map gcc_i
+ [GCC_TCSR_BCR] = {0x22000, 0},
+ [GCC_MPM_BCR] = {0x24000, 0},
+ [GCC_SPDM_BCR] = {0x25000, 0},
++ [ESS_MAC1_ARES] = {0x1200C, 0},
++ [ESS_MAC2_ARES] = {0x1200C, 1},
++ [ESS_MAC3_ARES] = {0x1200C, 2},
++ [ESS_MAC4_ARES] = {0x1200C, 3},
++ [ESS_MAC5_ARES] = {0x1200C, 4},
++ [ESS_PSGMII_ARES] = {0x1200C, 5},
+ };
+
+ static const struct regmap_config gcc_ipq4019_regmap_config = {
--- /dev/null
+From ff4aa3bc98258a240b9bbab53fd8d2fb8184c485 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Wed, 16 Aug 2023 18:45:39 +0200
+Subject: [PATCH] firmware: qcom_scm: disable SDI if required
+
+IPQ5018 has SDI (Secure Debug Image) enabled by TZ by default, which means
+that a WDT assertion or even just trying to reboot will hang the board in
+debug mode; only pulling the power and repowering will help.
+Some IPQ4019 boards like Google WiFi have it enabled as well.
+
+Luckily, SDI can be disabled via an SCM call.
+
+So, let's use a boolean DT property to identify boards that have SDI
+enabled by default and use the SCM call to disable SDI during SCM probe.
+It is important to disable it as soon as possible, as a WDT assertion may
+happen at any time and would then leave the board in debug mode; disabling
+it only during SCM removal is therefore not enough.
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+Reviewed-by: Guru Das Srinagesh <quic_gurus@quicinc.com>
+Link: https://lore.kernel.org/r/20230816164641.3371878-2-robimarko@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/firmware/qcom_scm.c | 30 ++++++++++++++++++++++++++++++
+ drivers/firmware/qcom_scm.h | 1 +
+ 2 files changed, 31 insertions(+)
+
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -407,6 +407,29 @@ int qcom_scm_set_remote_state(u32 state,
+ }
+ EXPORT_SYMBOL(qcom_scm_set_remote_state);
+
++static int qcom_scm_disable_sdi(void)
++{
++ int ret;
++ struct qcom_scm_desc desc = {
++ .svc = QCOM_SCM_SVC_BOOT,
++ .cmd = QCOM_SCM_BOOT_SDI_CONFIG,
++ .args[0] = 1, /* Disable watchdog debug */
++ .args[1] = 0, /* Disable SDI */
++ .arginfo = QCOM_SCM_ARGS(2),
++ .owner = ARM_SMCCC_OWNER_SIP,
++ };
++ struct qcom_scm_res res;
++
++ ret = qcom_scm_clk_enable();
++ if (ret)
++ return ret;
++ ret = qcom_scm_call(__scm->dev, &desc, &res);
++
++ qcom_scm_clk_disable();
++
++ return ret ? : res.result[0];
++}
++
+ static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+ {
+ struct qcom_scm_desc desc = {
+@@ -1411,6 +1434,13 @@ static int qcom_scm_probe(struct platfor
+
+ __get_convention();
+
++
++ /*
++ * Disable SDI if indicated by DT that it is enabled by default.
++ */
++ if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
++ qcom_scm_disable_sdi();
++
+ /*
+ * If requested enable "download mode", from this point on warmboot
+ * will cause the boot stages to enter download mode, unless
+--- a/drivers/firmware/qcom_scm.h
++++ b/drivers/firmware/qcom_scm.h
+@@ -77,6 +77,7 @@ extern int scm_legacy_call(struct device
+ #define QCOM_SCM_SVC_BOOT 0x01
+ #define QCOM_SCM_BOOT_SET_ADDR 0x01
+ #define QCOM_SCM_BOOT_TERMINATE_PC 0x02
++#define QCOM_SCM_BOOT_SDI_CONFIG 0x09
+ #define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
+ #define QCOM_SCM_BOOT_SET_ADDR_MC 0x11
+ #define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
--- /dev/null
+From ea9fba16d972becc84cd2a82d25030975dc609a5 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Sat, 30 Sep 2023 13:09:27 +0200
+Subject: [PATCH] ARM: dts: qcom: ipq4019: add label to SCM
+
+Some IPQ4019 boards require SDI to be disabled by adding a property to the
+SCM node, so let's make that easy by adding a label to it.
+
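+For example (illustrative only, not part of this patch), a board DTS that
+needs SDI disabled can then reference the label and set the property
+introduced by the previous patch:
+
+    &scm {
+        qcom,sdi-enabled;
+    };
+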
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -155,7 +155,7 @@
+ };
+
+ firmware {
+- scm {
++ scm: scm {
+ compatible = "qcom,scm-ipq4019", "qcom,scm";
+ };
+ };
--- /dev/null
+From f2b87dc1028b710ec8ce25808b9d21f92b376184 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey@googlemail.com>
+Date: Sun, 11 Mar 2018 14:41:31 +0100
+Subject: [PATCH 2/2] clk: fix apss cpu overclocking
+
+There's an interaction issue between the clk changes
+"clk: qcom: ipq4019: Add the apss cpu pll divider clock node" and
+"clk: qcom: ipq4019: remove fixed clocks and add pll clocks"
+and the cpufreq-dt driver.
+
+cpufreq-dt is now spamming the kernel log with the following:
+
+[ 1099.190658] cpu cpu0: dev_pm_opp_set_rate: failed to find current OPP
+for freq 761142857 (-34)
+
+This only happens on certain devices like the Compex WPJ428
+and AVM FritzBox!4040. However, other devices like the Asus
+RT-AC58U and Meraki MR33 work just fine.
+
+The issue stems from the fact that all higher CPU clocks
+are achieved by switching the clock parent to P_DDRPLLAPSS
+(ddrpllapss), which is set by Qualcomm's proprietary bootcode
+as part of the DDR calibration.
+
+For example, the FB4040 uses 256 MiB Nanya NT5CC128M16IP clocked
+at around 533 MHz (ddrpllsdcc = 190285714 Hz),
+
+whereas the 128 MiB Nanya NT5CC64M16GP-DI in the ASUS RT-AC58U is
+clocked at a slightly higher 537 MHz (ddrpllsdcc = 192000000 Hz).
+
+This patch attempts to fix the issue by modifying
+clk_cpu_div_round_rate(), clk_cpu_div_set_rate(), clk_cpu_div_recalc_rate()
+to use a new qcom_find_freq_close() function, which returns the closest
+matching frequency, instead of the next higher. This way, the SoC in
+the FB4040 (with its max clock speed of 710.4 MHz) will no longer
+try to overclock to 761 MHz.
+
+Fixes: d83dcacea18 ("clk: qcom: ipq4019: Add the apss cpu pll divider clock node")
+Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: John Crispin <john@phrozen.org>
+---
+ drivers/clk/qcom/gcc-ipq4019.c | 34 +++++++++++++++++++++++++++++++---
+ 1 file changed, 31 insertions(+), 3 deletions(-)
+
+--- a/drivers/clk/qcom/gcc-ipq4019.c
++++ b/drivers/clk/qcom/gcc-ipq4019.c
+@@ -1243,6 +1243,29 @@ static const struct clk_fepll_vco gcc_fe
+ .reg = 0x2f020,
+ };
+
++
++const struct freq_tbl *qcom_find_freq_close(const struct freq_tbl *f,
++ unsigned long rate)
++{
++ const struct freq_tbl *last = NULL;
++
++ for ( ; f->freq; f++) {
++ if (rate == f->freq)
++ return f;
++
++ if (f->freq > rate) {
++ if (!last ||
++ (f->freq - rate) < (rate - last->freq))
++ return f;
++ else
++ return last;
++ }
++ last = f;
++ }
++
++ return last;
++}
++
+ /*
+ * Round rate function for APSS CPU PLL Clock divider.
+ * It looks up the frequency table and returns the next higher frequency
+@@ -1255,7 +1278,7 @@ static long clk_cpu_div_round_rate(struc
+ struct clk_hw *p_hw;
+ const struct freq_tbl *f;
+
+- f = qcom_find_freq(pll->freq_tbl, rate);
++ f = qcom_find_freq_close(pll->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+@@ -1277,7 +1300,7 @@ static int clk_cpu_div_set_rate(struct c
+ const struct freq_tbl *f;
+ u32 mask;
+
+- f = qcom_find_freq(pll->freq_tbl, rate);
++ f = qcom_find_freq_close(pll->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+@@ -1304,6 +1327,7 @@ static unsigned long
+ clk_cpu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+ {
++ const struct freq_tbl *f;
+ struct clk_fepll *pll = to_clk_fepll(hw);
+ u32 cdiv, pre_div;
+ u64 rate;
+@@ -1324,7 +1348,11 @@ clk_cpu_div_recalc_rate(struct clk_hw *h
+ rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2;
+ do_div(rate, pre_div);
+
+- return rate;
++ f = qcom_find_freq_close(pll->freq_tbl, rate);
++ if (!f)
++ return rate;
++
++ return f->freq;
+ };
+
+ static const struct clk_ops clk_regmap_cpu_div_ops = {
--- /dev/null
+From 0843a61d6913bdac8889eb048ed89f7903059787 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Fri, 30 Oct 2020 13:36:31 +0100
+Subject: [PATCH] arm: compressed: add appended DTB section
+
+This adds an appended_dtb section to the ARM decompressor
+linker script.
+
+This allows using the existing ARM zImage appended DTB support for
+appending a DTB to the raw ELF kernel.
+
+Its size is set to 1MB max to match the zImage appended DTB size limit.
+
+To use it to pass the DTB to the kernel, objcopy is used:
+
+objcopy --set-section-flags=.appended_dtb=alloc,contents \
+ --update-section=.appended_dtb=<target>.dtb vmlinux
+
+This is based on the following patch:
+https://github.com/openwrt/openwrt/commit/c063e27e02a9dcac0e7f5877fb154e58fa3e1a69
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ arch/arm/boot/compressed/vmlinux.lds.S | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/boot/compressed/vmlinux.lds.S
++++ b/arch/arm/boot/compressed/vmlinux.lds.S
+@@ -103,6 +103,13 @@ SECTIONS
+
+ _edata = .;
+
++ .appended_dtb : {
++ /* leave space for appended DTB */
++ . += 0x100000;
++ }
++
++ _edata_dtb = .;
++
+ /*
+ * The image_end section appears after any additional loadable sections
+ * that the linker may decide to insert in the binary image. Having
+@@ -140,4 +147,4 @@ SECTIONS
+
+ ARM_ASSERTS
+ }
+-ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
++ASSERT(_edata_real == _edata_dtb, "error: zImage file size is incorrect");
--- /dev/null
+From 11d6a6128a5a07c429941afc202b6e62a19771be Mon Sep 17 00:00:00 2001
+From: John Thomson <git@johnthomson.fastmail.com.au>
+Date: Fri, 23 Oct 2020 19:42:36 +1000
+Subject: [PATCH 2/2] arm: compressed: set ipq40xx watchdog to allow boot
+
+For IPQ40XX systems where the SoC watchdog is activated before Linux,
+the watchdog timeout may be too short for Linux to finish decompressing,
+booting, and starting watchdog management.
+If the watchdog is enabled, set its timeout to 30 seconds.
+The functionality and offsets were copied from:
+drivers/watchdog/qcom-wdt.c qcom_wdt_set_timeout & qcom_wdt_start
+The watchdog memory address was taken from:
+arch/arm/boot/dts/qcom-ipq4019.dtsi
+
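+With the 32768 Hz (1 << 15) watchdog clock this means the 30 second
+timeout below is programmed as 30 << 15 = 983040 ticks into both the
+bark and the bite registers.
+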
+This was required on Mikrotik IPQ40XX consumer hardware using Mikrotik's
+RouterBoot bootloader.
+
+Signed-off-by: John Thomson <git@johnthomson.fastmail.com.au>
+---
+ arch/arm/boot/compressed/head.S | 35 +++++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -620,6 +620,41 @@ not_relocated: mov r0, #0
+ bic r4, r4, #1
+ blne cache_on
+
++/* Set the Qualcom IPQ40xx watchdog timeout to 30 seconds
++ * if it is enabled, so that there is time for kernel
++ * to decompress, boot, and take over the watchdog.
++ * data and functionality from drivers/watchdog/qcom-wdt.c
++ * address from arch/arm/boot/dts/qcom-ipq4019.dtsi
++ */
++#ifdef CONFIG_ARCH_IPQ40XX
++watchdog_set:
++ /* offsets:
++ * 0x04 reset (=1 resets countdown)
++ * 0x08 enable (=0 disables)
++ * 0x0c status (=1 when SoC was reset by watchdog)
++ * 0x10 bark (=timeout warning in ticks)
++ * 0x14 bite (=timeout reset in ticks)
++ * clock rate is 1<<15 hertz
++ */
++ .equ watchdog, 0x0b017000 @Store watchdog base address
++ movw r0, #:lower16:watchdog
++ movt r0, #:upper16:watchdog
++ ldr r1, [r0, #0x08] @Get enabled?
++ cmp r1, #1 @If not enabled, do not change
++ bne watchdog_finished
++ mov r1, #0
++ str r1, [r0, #0x08] @Disable the watchdog
++ mov r1, #1
++ str r1, [r0, #0x04] @Pet the watchdog
++ mov r1, #30 @30 seconds timeout
++ lsl r1, r1, #15 @converted to ticks
++ str r1, [r0, #0x10] @Set the bark timeout
++ str r1, [r0, #0x14] @Set the bite timeout
++ mov r1, #1
++ str r1, [r0, #0x08] @Enable the watchdog
++watchdog_finished:
++#endif /* CONFIG_ARCH_IPQ40XX */
++
+ /*
+ * The C runtime environment should now be setup sufficiently.
+ * Set up some pointers, and start decompressing.
--- /dev/null
+From f63ea127643a605da97090ce585fdd7c2d17fa42 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 14 Dec 2020 13:35:35 +0100
+Subject: [PATCH] mmc: sdhci-msm: use sdhci_set_clock
+
+When using sdhci_msm_set_clock, clock setting will fail, so let's
+use the generic sdhci_set_clock instead.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ drivers/mmc/host/sdhci-msm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -2451,7 +2451,7 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_mat
+
+ static const struct sdhci_ops sdhci_msm_ops = {
+ .reset = sdhci_msm_reset,
+- .set_clock = sdhci_msm_set_clock,
++ .set_clock = sdhci_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
--- /dev/null
+From 28edd829133766eb3cefaf2e49d3ee701968061b Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Tue, 9 May 2023 01:57:17 +0200
+Subject: [PATCH] mmc: sdhci-msm: comment unused sdhci_msm_set_clock
+
+Comment out the now unused sdhci_msm_set_clock and __sdhci_msm_set_clock,
+as due to a current problem we are forced to use sdhci_set_clock.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ drivers/mmc/host/sdhci-msm.c | 86 ++++++++++++++++++------------------
+ 1 file changed, 43 insertions(+), 43 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1751,49 +1751,49 @@ static unsigned int sdhci_msm_get_min_cl
+ return SDHCI_MSM_MIN_CLOCK;
+ }
+
+-/*
+- * __sdhci_msm_set_clock - sdhci_msm clock control.
+- *
+- * Description:
+- * MSM controller does not use internal divider and
+- * instead directly control the GCC clock as per
+- * HW recommendation.
+- **/
+-static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+-{
+- u16 clk;
+-
+- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+-
+- if (clock == 0)
+- return;
+-
+- /*
+- * MSM controller do not use clock divider.
+- * Thus read SDHCI_CLOCK_CONTROL and only enable
+- * clock with no divider value programmed.
+- */
+- clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+- sdhci_enable_clk(host, clk);
+-}
+-
+-/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
+-static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+-{
+- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+- struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+-
+- if (!clock) {
+- host->mmc->actual_clock = msm_host->clk_rate = 0;
+- goto out;
+- }
+-
+- sdhci_msm_hc_select_mode(host);
+-
+- msm_set_clock_rate_for_bus_mode(host, clock);
+-out:
+- __sdhci_msm_set_clock(host, clock);
+-}
++// /*
++// * __sdhci_msm_set_clock - sdhci_msm clock control.
++// *
++// * Description:
++// * MSM controller does not use internal divider and
++// * instead directly control the GCC clock as per
++// * HW recommendation.
++// **/
++// static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
++// {
++// u16 clk;
++
++// sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++
++// if (clock == 0)
++// return;
++
++// /*
++// * MSM controller do not use clock divider.
++// * Thus read SDHCI_CLOCK_CONTROL and only enable
++// * clock with no divider value programmed.
++// */
++// clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++// sdhci_enable_clk(host, clk);
++// }
++
++// /* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
++// static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
++// {
++// struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++// struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++
++// if (!clock) {
++// host->mmc->actual_clock = msm_host->clk_rate = 0;
++// goto out;
++// }
++
++// sdhci_msm_hc_select_mode(host);
++
++// msm_set_clock_rate_for_bus_mode(host, clock);
++// out:
++// __sdhci_msm_set_clock(host, clock);
++// }
+
+ /*****************************************************************************\
+ * *
--- /dev/null
+From aaa675f07e781e248fcf169ce9a917b48bc2cc9b Mon Sep 17 00:00:00 2001
+From: Brian Norris <computersforpeace@gmail.com>
+Date: Fri, 28 Jul 2023 12:06:23 +0200
+Subject: [PATCH 3/3] firmware: qcom: scm: fix SCM cold boot address
+
+This effectively reverts upstream Linux commit 13e77747800e ("firmware:
+qcom: scm: Use atomic SCM for cold boot"), because Google WiFi boot
+firmwares don't support the atomic variant.
+
+This fixes SMP support for Google WiFi.
+
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+---
+ drivers/firmware/qcom_scm-legacy.c | 62 +++++++++++++++++++++++++-----
+ drivers/firmware/qcom_scm.c | 11 ++++++
+ 2 files changed, 63 insertions(+), 10 deletions(-)
+
+--- a/drivers/firmware/qcom_scm-legacy.c
++++ b/drivers/firmware/qcom_scm-legacy.c
+@@ -13,6 +13,9 @@
+ #include <linux/arm-smccc.h>
+ #include <linux/dma-mapping.h>
+
++#include <asm/cacheflush.h>
++#include <asm/outercache.h>
++
+ #include "qcom_scm.h"
+
+ static DEFINE_MUTEX(qcom_scm_lock);
+@@ -117,6 +120,25 @@ static void __scm_legacy_do(const struct
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+ }
+
++static void qcom_scm_inv_range(unsigned long start, unsigned long end)
++{
++ u32 cacheline_size, ctr;
++
++ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
++ cacheline_size = 4 << ((ctr >> 16) & 0xf);
++
++ start = round_down(start, cacheline_size);
++ end = round_up(end, cacheline_size);
++ outer_inv_range(start, end);
++ while (start < end) {
++ asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
++ : "memory");
++ start += cacheline_size;
++ }
++ dsb();
++ isb();
++}
++
+ /**
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
+ * finish processing.
+@@ -163,10 +185,16 @@ int scm_legacy_call(struct device *dev,
+
+ rsp = scm_legacy_command_to_response(cmd);
+
+- cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+- if (dma_mapping_error(dev, cmd_phys)) {
+- kfree(cmd);
+- return -ENOMEM;
++ if (dev) {
++ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, cmd_phys)) {
++ kfree(cmd);
++ return -ENOMEM;
++ }
++ } else {
++ cmd_phys = virt_to_phys(cmd);
++ __cpuc_flush_dcache_area(cmd, alloc_len);
++ outer_flush_range(cmd_phys, cmd_phys + alloc_len);
+ }
+
+ smc.args[0] = 1;
+@@ -182,13 +210,26 @@ int scm_legacy_call(struct device *dev,
+ goto out;
+
+ do {
+- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+- sizeof(*rsp), DMA_FROM_DEVICE);
++ if (dev) {
++ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) +
++ cmd_len, sizeof(*rsp),
++ DMA_FROM_DEVICE);
++ } else {
++ unsigned long start = (uintptr_t)cmd + sizeof(*cmd) +
++ cmd_len;
++ qcom_scm_inv_range(start, start + sizeof(*rsp));
++ }
+ } while (!rsp->is_complete);
+
+- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+- le32_to_cpu(rsp->buf_offset),
+- resp_len, DMA_FROM_DEVICE);
++ if (dev) {
++ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
++ le32_to_cpu(rsp->buf_offset),
++ resp_len, DMA_FROM_DEVICE);
++ } else {
++ unsigned long start = (uintptr_t)cmd + sizeof(*cmd) + cmd_len +
++ le32_to_cpu(rsp->buf_offset);
++ qcom_scm_inv_range(start, start + resp_len);
++ }
+
+ if (res) {
+ res_buf = scm_legacy_get_response_buffer(rsp);
+@@ -196,7 +237,8 @@ int scm_legacy_call(struct device *dev,
+ res->result[i] = le32_to_cpu(res_buf[i]);
+ }
+ out:
+- dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
++ if (dev)
++ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
+ return ret;
+ }
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -312,6 +312,17 @@ static int qcom_scm_set_boot_addr(void *
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
+
++ /*
++ * Factory firmware doesn't support the atomic variant. Non-atomic SCMs
++ * require ugly DMA invalidation support that was dropped upstream a
++ * while ago. For more info, see:
++ *
++ * [RFC] qcom_scm: IPQ4019 firmware does not support atomic API?
++ * https://lore.kernel.org/linux-arm-msm/20200913201608.GA3162100@bDebian/
++ */
++ if (of_machine_is_compatible("google,wifi"))
++ return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
++
+ return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+ }
+
--- /dev/null
+From 35ca7e3e6ccd120d694a3425f37fc6374ad2e11e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Andreas=20B=C3=B6hler?= <dev@aboehler.at>
+Date: Wed, 20 Apr 2022 12:08:38 +0200
+Subject: [PATCH] mtd: rawnand: add support for Toshiba TC58NVG0S3HTA00
+ NAND flash
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The Toshiba TC58NVG0S3HTA00 is detected with 64 bytes of OOB while the flash
+has 128 bytes of OOB. This adds a static NAND ID entry to correct this.
+
+Tested on FRITZ!Box 7530 flashed with OpenWrt.
+
+Signed-off-by: Andreas Böhler <dev@aboehler.at>
+(changed id_len to 8, added comment about possible counterfeits)
+---
+--- a/drivers/mtd/nand/raw/nand_ids.c
++++ b/drivers/mtd/nand/raw/nand_ids.c
+@@ -29,6 +29,9 @@ struct nand_flash_dev nand_flash_ids[] =
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
++ {"TC58NVG0S3HTA00 1G 3.3V 8-bit", /* possibly counterfeit chip - see commit */
++ { .id = {0x98, 0xf1, 0x80, 0x15} }, /* should be more bytes */
++ SZ_2K, SZ_128, SZ_128K, 0, 8, 128, NAND_ECC_INFO(8, SZ_512), },
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
--- /dev/null
+From 76e25c1f46456416ba5358be8a0677f1ab8196b6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:48 +0100
+Subject: [PATCH] net: ipqess: introduce the Qualcomm IPQESS driver
+
+The Qualcomm IPQESS controller is a simple 1G Ethernet controller found
+on the IPQ4019 chip. This controller has some particularities, in that the
+IPQ4019 platform that includes it also has an internal switch, based on
+the QCA8K IP.
+
+It is connected to that switch through an internal link and doesn't
+directly expose any external interface, hence it only supports
+PHY_INTERFACE_MODE_INTERNAL for now.
+
+It has 16 RX and TX queues, with a very basic RSS fanout configured at
+init time.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ MAINTAINERS | 7 +
+ drivers/net/ethernet/qualcomm/Kconfig | 11 +
+ drivers/net/ethernet/qualcomm/Makefile | 2 +
+ drivers/net/ethernet/qualcomm/ipqess/Makefile | 8 +
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 1246 +++++++++++++++++
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.h | 518 +++++++
+ .../ethernet/qualcomm/ipqess/ipqess_ethtool.c | 164 +++
+ 7 files changed, 1956 insertions(+)
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/Makefile
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -17075,6 +17075,13 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/qualcomm/emac/
+
++QUALCOMM IPQESS ETHERNET DRIVER
++M: Maxime Chevallier <maxime.chevallier@bootlin.com>
++L: netdev@vger.kernel.org
++S: Maintained
++F: Documentation/devicetree/bindings/net/qcom,ipq4019-ess-edma.yaml
++F: drivers/net/ethernet/qualcomm/ipqess/
++
+ QUALCOMM ETHQOS ETHERNET DRIVER
+ M: Vinod Koul <vkoul@kernel.org>
+ R: Bhupesh Sharma <bhupesh.sharma@linaro.org>
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -60,6 +60,17 @@ config QCOM_EMAC
+ low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+ Precision Clock Synchronization Protocol.
+
++config QCOM_IPQ4019_ESS_EDMA
++ tristate "Qualcomm Atheros IPQ4019 ESS EDMA support"
++ depends on (OF && ARCH_QCOM) || COMPILE_TEST
++ select PHYLINK
++ help
++ This driver supports the Qualcomm Atheros IPQ40xx built-in
++ ESS EDMA ethernet controller.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ipqess.
++
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+
+ endif # NET_VENDOR_QUALCOMM
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -11,4 +11,6 @@ qcauart-objs := qca_uart.o
+
+ obj-y += emac/
+
++obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipqess/
++
+ obj-$(CONFIG_RMNET) += rmnet/
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/Makefile
+@@ -0,0 +1,8 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# Makefile for the IPQ ESS driver
++#
++
++obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipq_ess.o
++
++ipq_ess-objs := ipqess.o ipqess_ethtool.o
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -0,0 +1,1246 @@
++// SPDX-License-Identifier: GPL-2.0 OR ISC
++/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
++ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#include <linux/bitfield.h>
++#include <linux/clk.h>
++#include <linux/if_vlan.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/phylink.h>
++#include <linux/platform_device.h>
++#include <linux/reset.h>
++#include <linux/skbuff.h>
++#include <linux/vmalloc.h>
++#include <net/checksum.h>
++#include <net/ip6_checksum.h>
++
++#include "ipqess.h"
++
++#define IPQESS_RRD_SIZE 16
++#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
++#define IPQESS_TX_DMA_BUF_LEN 0x3fff
++
++static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
++{
++ writel(val, ess->hw_addr + reg);
++}
++
++static u32 ipqess_r32(struct ipqess *ess, u16 reg)
++{
++ return readl(ess->hw_addr + reg);
++}
++
++static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
++{
++ u32 _val = ipqess_r32(ess, reg);
++
++ _val &= ~mask;
++ _val |= val;
++
++ ipqess_w32(ess, reg, _val);
++}
++
++void ipqess_update_hw_stats(struct ipqess *ess)
++{
++ u32 *p;
++ u32 stat;
++ int i;
++
++ lockdep_assert_held(&ess->stats_lock);
++
++ p = (u32 *)&ess->ipqess_stats;
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
++ *p += stat;
++ p++;
++ }
++}
++
++static int ipqess_tx_ring_alloc(struct ipqess *ess)
++{
++ struct device *dev = &ess->pdev->dev;
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
++ size_t size;
++ u32 idx;
++
++ tx_ring->ess = ess;
++ tx_ring->ring_id = i;
++ tx_ring->idx = i * 4;
++ tx_ring->count = IPQESS_TX_RING_SIZE;
++ tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
++
++ size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
++ tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
++ if (!tx_ring->buf)
++ return -ENOMEM;
++
++ size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
++ tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
++ GFP_KERNEL);
++ if (!tx_ring->hw_desc)
++ return -ENOMEM;
++
++ ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
++ (u32)tx_ring->dma);
++
++ idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
++ idx &= 0xffff;
++ tx_ring->head = idx;
++ tx_ring->tail = idx;
++
++ ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
++ idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
++ ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
++ }
++
++ return 0;
++}
++
++static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
++{
++ int len = 0;
++
++ if (buf->flags & IPQESS_DESC_SINGLE)
++ dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
++ else if (buf->flags & IPQESS_DESC_PAGE)
++ dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
++
++ if (buf->flags & IPQESS_DESC_LAST) {
++ len = buf->skb->len;
++ dev_kfree_skb_any(buf->skb);
++ }
++
++ buf->flags = 0;
++
++ return len;
++}
++
++static void ipqess_tx_ring_free(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ if (ess->tx_ring[i].hw_desc)
++ continue;
++
++ for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
++ struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
++
++ ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
++ }
++
++ ess->tx_ring[i].buf = NULL;
++ }
++}
++
++static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
++ struct ipqess_rx_ring *rx_ring)
++{
++ memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
++
++ buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
++ IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
++ dev_kfree_skb_any(buf->skb);
++ buf->skb = NULL;
++ return -EFAULT;
++ }
++
++ buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
++ rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
++ rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
++
++ ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
++ (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
++ IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
++
++ return 0;
++}
++
++/* locking is handled by the caller */
++static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
++{
++ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
++
++ buf->skb = napi_alloc_skb(&rx_ring->napi_rx, IPQESS_RX_HEAD_BUFF_SIZE);
++ if (!buf->skb)
++ return -ENOMEM;
++
++ return ipqess_rx_buf_prepare(buf, rx_ring);
++}
++
++static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
++{
++ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
++
++ buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
++ IPQESS_RX_HEAD_BUFF_SIZE);
++
++ if (!buf->skb)
++ return -ENOMEM;
++
++ return ipqess_rx_buf_prepare(buf, rx_ring);
++}
++
++static void ipqess_refill_work(struct work_struct *work)
++{
++ struct ipqess_rx_ring_refill *rx_refill = container_of(work,
++ struct ipqess_rx_ring_refill, refill_work);
++ struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
++ int refill = 0;
++
++ /* don't let this loop by accident. */
++ while (atomic_dec_and_test(&rx_ring->refill_count)) {
++ napi_disable(&rx_ring->napi_rx);
++ if (ipqess_rx_buf_alloc(rx_ring)) {
++ refill++;
++ dev_dbg(rx_ring->ppdev,
++ "Not all buffers were reallocated");
++ }
++ napi_enable(&rx_ring->napi_rx);
++ }
++
++ if (atomic_add_return(refill, &rx_ring->refill_count))
++ schedule_work(&rx_refill->refill_work);
++}
++
++static int ipqess_rx_ring_alloc(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ ess->rx_ring[i].ess = ess;
++ ess->rx_ring[i].ppdev = &ess->pdev->dev;
++ ess->rx_ring[i].ring_id = i;
++ ess->rx_ring[i].idx = i * 2;
++
++ ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
++ sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
++ GFP_KERNEL);
++
++ if (!ess->rx_ring[i].buf)
++ return -ENOMEM;
++
++ ess->rx_ring[i].hw_desc =
++ dmam_alloc_coherent(&ess->pdev->dev,
++ sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
++ &ess->rx_ring[i].dma, GFP_KERNEL);
++
++ if (!ess->rx_ring[i].hw_desc)
++ return -ENOMEM;
++
++ for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
++ if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
++ return -ENOMEM;
++
++ ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
++ INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
++
++ ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
++ (u32)(ess->rx_ring[i].dma));
++ }
++
++ ipqess_w32(ess, IPQESS_REG_RX_DESC0,
++ (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
++ (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
++
++ return 0;
++}
++
++static void ipqess_rx_ring_free(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ cancel_work_sync(&ess->rx_refill[i].refill_work);
++ atomic_set(&ess->rx_ring[i].refill_count, 0);
++
++ for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
++ dma_unmap_single(&ess->pdev->dev,
++ ess->rx_ring[i].buf[j].dma,
++ ess->rx_ring[i].buf[j].length,
++ DMA_FROM_DEVICE);
++ dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
++ }
++ }
++}
++
++static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ spin_lock(&ess->stats_lock);
++ ipqess_update_hw_stats(ess);
++ spin_unlock(&ess->stats_lock);
++
++ return &ess->stats;
++}
++
++static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
++{
++ u32 length = 0, num_desc, tail, rx_ring_tail;
++ int done = 0;
++
++ rx_ring_tail = rx_ring->tail;
++
++ tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
++ tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
++ tail &= IPQESS_RFD_CONS_IDX_MASK;
++
++ while (done < budget) {
++ struct ipqess_rx_desc *rd;
++ struct sk_buff *skb;
++
++ if (rx_ring_tail == tail)
++ break;
++
++ dma_unmap_single(rx_ring->ppdev,
++ rx_ring->buf[rx_ring_tail].dma,
++ rx_ring->buf[rx_ring_tail].length,
++ DMA_FROM_DEVICE);
++
++ skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
++ rd = (struct ipqess_rx_desc *)skb->data;
++ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
++
++ /* Check if RRD is valid */
++ if (!(rd->rrd7 & cpu_to_le16(IPQESS_RRD_DESC_VALID))) {
++ num_desc = 1;
++ dev_kfree_skb_any(skb);
++ goto skip;
++ }
++
++ num_desc = le16_to_cpu(rd->rrd1) & IPQESS_RRD_NUM_RFD_MASK;
++ length = le16_to_cpu(rd->rrd6) & IPQESS_RRD_PKT_SIZE_MASK;
++
++ skb_reserve(skb, IPQESS_RRD_SIZE);
++ if (num_desc > 1) {
++ struct sk_buff *skb_prev = NULL;
++ int size_remaining;
++ int i;
++
++ skb->data_len = 0;
++ skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
++ skb->len = length;
++ skb->truesize = length;
++ size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
++
++ for (i = 1; i < num_desc; i++) {
++ struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
++
++ dma_unmap_single(rx_ring->ppdev,
++ rx_ring->buf[rx_ring_tail].dma,
++ rx_ring->buf[rx_ring_tail].length,
++ DMA_FROM_DEVICE);
++
++ skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
++ if (skb_prev)
++ skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
++ else
++ skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
++ skb_prev = rx_ring->buf[rx_ring_tail].skb;
++ rx_ring->buf[rx_ring_tail].skb->next = NULL;
++
++ skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
++ size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
++
++ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
++ }
++
++ } else {
++ skb_put(skb, length);
++ }
++
++ skb->dev = rx_ring->ess->netdev;
++ skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
++ skb_record_rx_queue(skb, rx_ring->ring_id);
++
++ if (rd->rrd6 & cpu_to_le16(IPQESS_RRD_CSUM_FAIL_MASK))
++ skb_checksum_none_assert(skb);
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ if (rd->rrd7 & cpu_to_le16(IPQESS_RRD_CVLAN))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
++ le16_to_cpu(rd->rrd4));
++ else if (rd->rrd1 & cpu_to_le16(IPQESS_RRD_SVLAN))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
++ le16_to_cpu(rd->rrd4));
++
++ napi_gro_receive(&rx_ring->napi_rx, skb);
++
++ rx_ring->ess->stats.rx_packets++;
++ rx_ring->ess->stats.rx_bytes += length;
++
++ done++;
++skip:
++
++ num_desc += atomic_xchg(&rx_ring->refill_count, 0);
++ while (num_desc) {
++ if (ipqess_rx_buf_alloc_napi(rx_ring)) {
++ num_desc = atomic_add_return(num_desc,
++ &rx_ring->refill_count);
++ if (num_desc >= DIV_ROUND_UP(IPQESS_RX_RING_SIZE * 4, 7))
++ schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
++ break;
++ }
++ num_desc--;
++ }
++ }
++
++ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
++ rx_ring_tail);
++ rx_ring->tail = rx_ring_tail;
++
++ return done;
++}
++
++static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
++{
++ int total = 0, ret;
++ int done = 0;
++ u32 tail;
++
++ tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
++ tail &= IPQESS_TPD_CONS_IDX_MASK;
++
++ do {
++ ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
++ &tx_ring->buf[tx_ring->tail]);
++ tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++
++ total += ret;
++ } while ((++done < budget) && (tx_ring->tail != tail));
++
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
++ tx_ring->tail);
++
++ if (netif_tx_queue_stopped(tx_ring->nq)) {
++ netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
++ tx_ring->idx);
++ netif_tx_wake_queue(tx_ring->nq);
++ }
++
++ netdev_tx_completed_queue(tx_ring->nq, done, total);
++
++ return done;
++}
++
++static int ipqess_tx_napi(struct napi_struct *napi, int budget)
++{
++ struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
++ napi_tx);
++ int work_done = 0;
++ u32 tx_status;
++
++ tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
++ tx_status &= BIT(tx_ring->idx);
++
++ work_done = ipqess_tx_complete(tx_ring, budget);
++
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
++
++ if (likely(work_done < budget)) {
++ if (napi_complete_done(napi, work_done))
++ ipqess_w32(tx_ring->ess,
++ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
++ }
++
++ return work_done;
++}
++
++static int ipqess_rx_napi(struct napi_struct *napi, int budget)
++{
++ struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
++ napi_rx);
++ struct ipqess *ess = rx_ring->ess;
++ u32 rx_mask = BIT(rx_ring->idx);
++ int remaining_budget = budget;
++ int rx_done;
++ u32 status;
++
++ do {
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
++ rx_done = ipqess_rx_poll(rx_ring, remaining_budget);
++ remaining_budget -= rx_done;
++
++ status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
++ } while (remaining_budget > 0 && (status & rx_mask));
++
++ if (remaining_budget <= 0)
++ return budget;
++
++ if (napi_complete_done(napi, budget - remaining_budget))
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
++
++ return budget - remaining_budget;
++}
++
++static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
++{
++ struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
++
++ if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
++ __napi_schedule(&tx_ring->napi_tx);
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
++ 0x0);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
++{
++ struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;
++
++ if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
++ __napi_schedule(&rx_ring->napi_rx);
++ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
++ 0x0);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static void ipqess_irq_enable(struct ipqess *ess)
++{
++ int i;
++
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
++ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
++ }
++}
++
++static void ipqess_irq_disable(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
++ }
++}
++
++static int __init ipqess_init(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct device_node *of_node = ess->pdev->dev.of_node;
++ int ret;
++
++ ret = of_get_ethdev_address(of_node, netdev);
++ if (ret)
++ eth_hw_addr_random(netdev);
++
++ return phylink_of_phy_connect(ess->phylink, of_node, 0);
++}
++
++static void ipqess_uninit(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ phylink_disconnect_phy(ess->phylink);
++}
++
++static int ipqess_open(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ int i, err;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int qid;
++
++ qid = ess->tx_ring[i].idx;
++ err = devm_request_irq(&netdev->dev, ess->tx_irq[qid],
++ ipqess_interrupt_tx, 0,
++ ess->tx_irq_names[qid],
++ &ess->tx_ring[i]);
++ if (err)
++ return err;
++
++ qid = ess->rx_ring[i].idx;
++ err = devm_request_irq(&netdev->dev, ess->rx_irq[qid],
++ ipqess_interrupt_rx, 0,
++ ess->rx_irq_names[qid],
++ &ess->rx_ring[i]);
++ if (err)
++ return err;
++
++ napi_enable(&ess->tx_ring[i].napi_tx);
++ napi_enable(&ess->rx_ring[i].napi_rx);
++ }
++
++ ipqess_irq_enable(ess);
++ phylink_start(ess->phylink);
++ netif_tx_start_all_queues(netdev);
++
++ return 0;
++}
++
++static int ipqess_stop(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ int i;
++
++ netif_tx_stop_all_queues(netdev);
++ phylink_stop(ess->phylink);
++ ipqess_irq_disable(ess);
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ napi_disable(&ess->tx_ring[i].napi_tx);
++ napi_disable(&ess->rx_ring[i].napi_rx);
++ }
++
++ return 0;
++}
++
++static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_mii_ioctl(ess->phylink, ifr, cmd);
++}
++
++static u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
++{
++ u16 count = 0;
++
++ if (tx_ring->tail <= tx_ring->head)
++ count = IPQESS_TX_RING_SIZE;
++
++ count += tx_ring->tail - tx_ring->head - 1;
++
++ return count;
++}
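++
++/* Worked example for ipqess_tx_desc_available(): with IPQESS_TX_RING_SIZE =
++ * 128, head = 5 and tail = 2 the helper reports 128 + 2 - 5 - 1 = 124 free
++ * descriptors, while head = 2 and tail = 5 gives 5 - 2 - 1 = 2. One descriptor
++ * is always kept unused so that head == tail unambiguously means the ring is
++ * empty.
++ */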
++
++static int ipqess_cal_txd_req(struct sk_buff *skb)
++{
++ int tpds;
++
++ /* one TPD for the header, and one for each fragment */
++ tpds = 1 + skb_shinfo(skb)->nr_frags;
++ if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
++ /* for LSOv2 one extra TPD is needed */
++ tpds++;
++ }
++
++ return tpds;
++}
++
++static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
++ struct ipqess_tx_desc *desc)
++{
++ return &tx_ring->buf[desc - tx_ring->hw_desc];
++}
++
++static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
++{
++ struct ipqess_tx_desc *desc;
++
++ desc = &tx_ring->hw_desc[tx_ring->head];
++ tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
++
++ return desc;
++}
++
++static void ipqess_rollback_tx(struct ipqess *eth,
++ struct ipqess_tx_desc *first_desc, int ring_id)
++{
++ struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
++ struct ipqess_tx_desc *desc = NULL;
++ struct ipqess_buf *buf;
++ u16 start_index, index;
++
++ start_index = first_desc - tx_ring->hw_desc;
++
++ index = start_index;
++ while (index != tx_ring->head) {
++ desc = &tx_ring->hw_desc[index];
++ buf = &tx_ring->buf[index];
++ ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
++ memset(desc, 0, sizeof(*desc));
++ if (++index == tx_ring->count)
++ index = 0;
++ }
++ tx_ring->head = start_index;
++}
++
++static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring,
++ struct sk_buff *skb)
++{
++ struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
++ u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
++ struct platform_device *pdev = tx_ring->ess->pdev;
++ struct ipqess_buf *buf = NULL;
++ u16 len;
++ int i;
++
++ if (skb_is_gso(skb)) {
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
++ lso_word1 |= IPQESS_TPD_IPV4_EN;
++ ip_hdr(skb)->check = 0;
++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
++ ip_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
++ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
++ lso_word1 |= IPQESS_TPD_LSO_V2_EN;
++ ipv6_hdr(skb)->payload_len = 0;
++ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++ &ipv6_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
++ }
++
++ lso_word1 |= IPQESS_TPD_LSO_EN |
++ ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) <<
++ IPQESS_TPD_MSS_SHIFT) |
++ (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
++ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
++ u8 css, cso;
++
++ cso = skb_checksum_start_offset(skb);
++ css = cso + skb->csum_offset;
++
++ word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
++ word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
++ word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
++ }
++
++ if (skb_vlan_tag_present(skb)) {
++ switch (skb->vlan_proto) {
++ case htons(ETH_P_8021Q):
++ word3 |= BIT(IPQESS_TX_INS_CVLAN);
++ word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
++ break;
++ case htons(ETH_P_8021AD):
++ word1 |= BIT(IPQESS_TX_INS_SVLAN);
++ svlan_tag = skb_vlan_tag_get(skb);
++ break;
++ default:
++ dev_err(&pdev->dev, "no ctag or stag present\n");
++ goto vlan_tag_error;
++ }
++ }
++
++ if (eth_type_vlan(skb->protocol))
++ word1 |= IPQESS_TPD_VLAN_TAGGED;
++
++ if (skb->protocol == htons(ETH_P_PPP_SES))
++ word1 |= IPQESS_TPD_PPPOE_EN;
++
++ len = skb_headlen(skb);
++
++ first_desc = ipqess_tx_desc_next(tx_ring);
++ desc = first_desc;
++ if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
++ desc->addr = cpu_to_le32(skb->len);
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word3 = cpu_to_le32(word3);
++ desc = ipqess_tx_desc_next(tx_ring);
++ }
++
++ buf = ipqess_get_tx_buffer(tx_ring, desc);
++ buf->length = len;
++ buf->dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&pdev->dev, buf->dma))
++ goto dma_error;
++
++ desc->addr = cpu_to_le32(buf->dma);
++ desc->len = cpu_to_le16(len);
++
++ buf->flags |= IPQESS_DESC_SINGLE;
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word3 = cpu_to_le32(word3);
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ len = skb_frag_size(frag);
++ desc = ipqess_tx_desc_next(tx_ring);
++ buf = ipqess_get_tx_buffer(tx_ring, desc);
++ buf->length = len;
++ buf->flags |= IPQESS_DESC_PAGE;
++ buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
++ DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&pdev->dev, buf->dma))
++ goto dma_error;
++
++ desc->addr = cpu_to_le32(buf->dma);
++ desc->len = cpu_to_le16(len);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->word3 = cpu_to_le32(word3);
++ }
++ desc->word1 |= cpu_to_le32(1 << IPQESS_TPD_EOP_SHIFT);
++ buf->skb = skb;
++ buf->flags |= IPQESS_DESC_LAST;
++
++ return 0;
++
++dma_error:
++ ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
++ dev_err(&pdev->dev, "TX DMA map failed\n");
++
++vlan_tag_error:
++ return -ENOMEM;
++}
++
++static void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
++{
++ /* Ensure that all TPDs have been written completely */
++ dma_wmb();
++
++ /* update software producer index */
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
++ tx_ring->head);
++}
++
++static netdev_tx_t ipqess_xmit(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct ipqess_tx_ring *tx_ring;
++ int avail;
++ int tx_num;
++ int ret;
++
++ tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
++ tx_num = ipqess_cal_txd_req(skb);
++ avail = ipqess_tx_desc_available(tx_ring);
++ if (avail < tx_num) {
++ netdev_dbg(netdev,
++ "stopping tx queue %d, avail=%d req=%d im=%x\n",
++ tx_ring->idx, avail, tx_num,
++ ipqess_r32(tx_ring->ess,
++ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
++ netif_tx_stop_queue(tx_ring->nq);
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
++ ipqess_kick_tx(tx_ring);
++ return NETDEV_TX_BUSY;
++ }
++
++ ret = ipqess_tx_map_and_fill(tx_ring, skb);
++ if (ret) {
++ dev_kfree_skb_any(skb);
++ ess->stats.tx_errors++;
++ goto err_out;
++ }
++
++ ess->stats.tx_packets++;
++ ess->stats.tx_bytes += skb->len;
++ netdev_tx_sent_queue(tx_ring->nq, skb->len);
++
++ if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
++ ipqess_kick_tx(tx_ring);
++
++err_out:
++ return NETDEV_TX_OK;
++}
++
++static int ipqess_set_mac_address(struct net_device *netdev, void *p)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ const char *macaddr = netdev->dev_addr;
++ int ret = eth_mac_addr(netdev, p);
++
++ if (ret)
++ return ret;
++
++ ipqess_w32(ess, IPQESS_REG_MAC_CTRL1, (macaddr[0] << 8) | macaddr[1]);
++ ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
++ (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) |
++ macaddr[5]);
++
++ return 0;
++}
++
++static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
++
++ netdev_warn(netdev, "TX timeout on queue %d\n", tr->idx);
++}
++
++static const struct net_device_ops ipqess_axi_netdev_ops = {
++ .ndo_init = ipqess_init,
++ .ndo_uninit = ipqess_uninit,
++ .ndo_open = ipqess_open,
++ .ndo_stop = ipqess_stop,
++ .ndo_do_ioctl = ipqess_do_ioctl,
++ .ndo_start_xmit = ipqess_xmit,
++ .ndo_get_stats = ipqess_get_stats,
++ .ndo_set_mac_address = ipqess_set_mac_address,
++ .ndo_tx_timeout = ipqess_tx_timeout,
++};
++
++static void ipqess_hw_stop(struct ipqess *ess)
++{
++ int i;
++
++ /* disable all RX queue IRQs */
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
++
++ /* disable all TX queue IRQs */
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
++
++ /* disable all other IRQs */
++ ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
++ ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
++
++ /* clear the IRQ status registers */
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
++ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
++ ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
++ ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
++ ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
++
++ /* disable RX and TX queues */
++ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
++ ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
++}
++
++static int ipqess_hw_init(struct ipqess *ess)
++{
++ int i, err;
++ u32 tmp;
++
++ ipqess_hw_stop(ess);
++
++ ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
++ IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
++ IPQESS_REG_INTR_CTRL);
++
++ /* enable IRQ delay slot */
++ ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
++ (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
++ (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
++
++ /* Set Customer and Service VLAN TPIDs */
++ ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
++ (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
++ (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
++
++ /* Configure the TX Queue bursting */
++ ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
++ (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
++ (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
++ IPQESS_TXQ_CTRL_TPD_BURST_EN);
++
++ /* Set RSS type */
++ ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
++ IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
++ IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
++ IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
++
++ /* Set RFD ring burst and threshold */
++ ipqess_w32(ess, IPQESS_REG_RX_DESC1,
++ (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
++ (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
++ (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
++
++ /* Set Rx FIFO
++ * - threshold at which to start DMAing data to the host
++ */
++ ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
++ IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
++
++ err = ipqess_rx_ring_alloc(ess);
++ if (err)
++ return err;
++
++ err = ipqess_tx_ring_alloc(ess);
++ if (err)
++ goto err_rx_ring_free;
++
++ /* Load all of the ring base addresses above into the DMA engine */
++ ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT), IPQESS_REG_TX_SRAM_PART);
++
++ /* Disable TX FIFO low watermark and high watermark */
++ ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
++
++ /* Configure RSS indirection table.
++ * 128 hash values will be configured in the following
++ * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
++ * and so on
++ */
++ for (i = 0; i < IPQESS_NUM_IDT; i++)
++ ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
++
++ /* Configure load balance mapping table.
++ * 4 table entries will be configured according to the
++ * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
++ * respectively.
++ */
++ ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
++
++ /* Configure Virtual queue for Tx rings */
++ ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
++ ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
++
++ /* Configure Max AXI Burst write size to 128 bytes */
++ ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
++ IPQESS_AXIW_MAXWRSIZE_VALUE);
++
++ /* Enable TX queues */
++ ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
++
++ /* Enable RX queues */
++ tmp = 0;
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
++ tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
++
++ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
++
++ return 0;
++
++err_rx_ring_free:
++
++ ipqess_rx_ring_free(ess);
++ return err;
++}
++
++static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static void ipqess_mac_link_down(struct phylink_config *config,
++ unsigned int mode,
++ phy_interface_t interface)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static void ipqess_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy, unsigned int mode,
++ phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static struct phylink_mac_ops ipqess_phylink_mac_ops = {
++ .validate = phylink_generic_validate,
++ .mac_config = ipqess_mac_config,
++ .mac_link_up = ipqess_mac_link_up,
++ .mac_link_down = ipqess_mac_link_down,
++};
++
++static void ipqess_reset(struct ipqess *ess)
++{
++ reset_control_assert(ess->ess_rst);
++
++ mdelay(10);
++
++ reset_control_deassert(ess->ess_rst);
++
++ /* Wait for all inner tables to be flushed and reinitialized.
++ * This takes between 5 and 10 ms.
++ */
++
++ mdelay(10);
++}
++
++static int ipqess_axi_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct net_device *netdev;
++ phy_interface_t phy_mode;
++ struct ipqess *ess;
++ int i, err = 0;
++
++ netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(*ess),
++ IPQESS_NETDEV_QUEUES,
++ IPQESS_NETDEV_QUEUES);
++ if (!netdev)
++ return -ENOMEM;
++
++ ess = netdev_priv(netdev);
++ ess->netdev = netdev;
++ ess->pdev = pdev;
++ spin_lock_init(&ess->stats_lock);
++ SET_NETDEV_DEV(netdev, &pdev->dev);
++ platform_set_drvdata(pdev, netdev);
++
++ ess->hw_addr = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
++ if (IS_ERR(ess->hw_addr))
++ return PTR_ERR(ess->hw_addr);
++
++ err = of_get_phy_mode(np, &phy_mode);
++ if (err) {
++ dev_err(&pdev->dev, "incorrect phy-mode\n");
++ return err;
++ }
++
++ ess->ess_clk = devm_clk_get(&pdev->dev, NULL);
++ if (!IS_ERR(ess->ess_clk))
++ clk_prepare_enable(ess->ess_clk);
++
++ ess->ess_rst = devm_reset_control_get(&pdev->dev, NULL);
++ if (IS_ERR(ess->ess_rst))
++ goto err_clk;
++
++ ipqess_reset(ess);
++
++ ess->phylink_config.dev = &netdev->dev;
++ ess->phylink_config.type = PHYLINK_NETDEV;
++ ess->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
++ MAC_100 | MAC_1000FD;
++
++ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
++ ess->phylink_config.supported_interfaces);
++
++ ess->phylink = phylink_create(&ess->phylink_config,
++ of_fwnode_handle(np), phy_mode,
++ &ipqess_phylink_mac_ops);
++ if (IS_ERR(ess->phylink)) {
++ err = PTR_ERR(ess->phylink);
++ goto err_clk;
++ }
++
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ ess->tx_irq[i] = platform_get_irq(pdev, i);
++ scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
++ "%s:txq%d", pdev->name, i);
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
++ scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
++ "%s:rxq%d", pdev->name, i);
++ }
++
++ netdev->netdev_ops = &ipqess_axi_netdev_ops;
++ netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_RX |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_SG;
++ /* feature change is not supported yet */
++ netdev->hw_features = 0;
++ netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
++ NETIF_F_TSO |
++ NETIF_F_GRO;
++ netdev->watchdog_timeo = 5 * HZ;
++ netdev->base_addr = (u32)ess->hw_addr;
++ netdev->max_mtu = 9000;
++ netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
++
++ ipqess_set_ethtool_ops(netdev);
++
++ err = ipqess_hw_init(ess);
++ if (err)
++ goto err_phylink;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ netif_napi_add_tx(netdev, &ess->tx_ring[i].napi_tx, ipqess_tx_napi);
++ netif_napi_add(netdev, &ess->rx_ring[i].napi_rx, ipqess_rx_napi);
++ }
++
++ err = register_netdev(netdev);
++ if (err)
++ goto err_hw_stop;
++
++ return 0;
++
++err_hw_stop:
++ ipqess_hw_stop(ess);
++
++ ipqess_tx_ring_free(ess);
++ ipqess_rx_ring_free(ess);
++err_phylink:
++ phylink_destroy(ess->phylink);
++
++err_clk:
++ clk_disable_unprepare(ess->ess_clk);
++
++ return err;
++}
++
++static int ipqess_axi_remove(struct platform_device *pdev)
++{
++ const struct net_device *netdev = platform_get_drvdata(pdev);
++ struct ipqess *ess = netdev_priv(netdev);
++
++ unregister_netdev(ess->netdev);
++ ipqess_hw_stop(ess);
++
++ ipqess_tx_ring_free(ess);
++ ipqess_rx_ring_free(ess);
++
++ phylink_destroy(ess->phylink);
++ clk_disable_unprepare(ess->ess_clk);
++
++ return 0;
++}
++
++static const struct of_device_id ipqess_of_mtable[] = {
++ {.compatible = "qcom,ipq4019-ess-edma" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
++
++static struct platform_driver ipqess_axi_driver = {
++ .driver = {
++ .name = "ipqess-edma",
++ .of_match_table = ipqess_of_mtable,
++ },
++ .probe = ipqess_axi_probe,
++ .remove = ipqess_axi_remove,
++};
++
++module_platform_driver(ipqess_axi_driver);
++
++MODULE_AUTHOR("Qualcomm Atheros Inc");
++MODULE_AUTHOR("John Crispin <john@phrozen.org>");
++MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
++MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
++MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+@@ -0,0 +1,518 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR ISC) */
++/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
++ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#ifndef _IPQESS_H_
++#define _IPQESS_H_
++
++#define IPQESS_NETDEV_QUEUES 4
++
++#define IPQESS_TPD_EOP_SHIFT 31
++
++#define IPQESS_PORT_ID_SHIFT 12
++#define IPQESS_PORT_ID_MASK 0x7
++
++/* tpd word 3 bit 18-28 */
++#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
++
++#define IPQESS_TPD_FROM_CPU_SHIFT 25
++
++#define IPQESS_RX_RING_SIZE 128
++#define IPQESS_RX_HEAD_BUFF_SIZE 1540
++#define IPQESS_TX_RING_SIZE 128
++#define IPQESS_MAX_RX_QUEUE 8
++#define IPQESS_MAX_TX_QUEUE 16
++
++/* Configurations */
++#define IPQESS_INTR_CLEAR_TYPE 0
++#define IPQESS_INTR_SW_IDX_W_TYPE 0
++#define IPQESS_FIFO_THRESH_TYPE 0
++#define IPQESS_RSS_TYPE 0
++#define IPQESS_RX_IMT 0x0020
++#define IPQESS_TX_IMT 0x0050
++#define IPQESS_TPD_BURST 5
++#define IPQESS_TXF_BURST 0x100
++#define IPQESS_RFD_BURST 8
++#define IPQESS_RFD_THR 16
++#define IPQESS_RFD_LTHR 0
++
++/* Flags used in transmit direction */
++#define IPQESS_DESC_LAST 0x1
++#define IPQESS_DESC_SINGLE 0x2
++#define IPQESS_DESC_PAGE 0x4
++
++struct ipqess_statistics {
++ u32 tx_q0_pkt;
++ u32 tx_q1_pkt;
++ u32 tx_q2_pkt;
++ u32 tx_q3_pkt;
++ u32 tx_q4_pkt;
++ u32 tx_q5_pkt;
++ u32 tx_q6_pkt;
++ u32 tx_q7_pkt;
++ u32 tx_q8_pkt;
++ u32 tx_q9_pkt;
++ u32 tx_q10_pkt;
++ u32 tx_q11_pkt;
++ u32 tx_q12_pkt;
++ u32 tx_q13_pkt;
++ u32 tx_q14_pkt;
++ u32 tx_q15_pkt;
++ u32 tx_q0_byte;
++ u32 tx_q1_byte;
++ u32 tx_q2_byte;
++ u32 tx_q3_byte;
++ u32 tx_q4_byte;
++ u32 tx_q5_byte;
++ u32 tx_q6_byte;
++ u32 tx_q7_byte;
++ u32 tx_q8_byte;
++ u32 tx_q9_byte;
++ u32 tx_q10_byte;
++ u32 tx_q11_byte;
++ u32 tx_q12_byte;
++ u32 tx_q13_byte;
++ u32 tx_q14_byte;
++ u32 tx_q15_byte;
++ u32 rx_q0_pkt;
++ u32 rx_q1_pkt;
++ u32 rx_q2_pkt;
++ u32 rx_q3_pkt;
++ u32 rx_q4_pkt;
++ u32 rx_q5_pkt;
++ u32 rx_q6_pkt;
++ u32 rx_q7_pkt;
++ u32 rx_q0_byte;
++ u32 rx_q1_byte;
++ u32 rx_q2_byte;
++ u32 rx_q3_byte;
++ u32 rx_q4_byte;
++ u32 rx_q5_byte;
++ u32 rx_q6_byte;
++ u32 rx_q7_byte;
++ u32 tx_desc_error;
++};
++
++struct ipqess_tx_desc {
++ __le16 len;
++ __le16 svlan_tag;
++ __le32 word1;
++ __le32 addr;
++ __le32 word3;
++} __aligned(16) __packed;
++
++struct ipqess_rx_desc {
++ __le16 rrd0;
++ __le16 rrd1;
++ __le16 rrd2;
++ __le16 rrd3;
++ __le16 rrd4;
++ __le16 rrd5;
++ __le16 rrd6;
++ __le16 rrd7;
++} __aligned(16) __packed;
++
++struct ipqess_buf {
++ struct sk_buff *skb;
++ dma_addr_t dma;
++ u32 flags;
++ u16 length;
++};
++
++struct ipqess_tx_ring {
++ struct napi_struct napi_tx;
++ u32 idx;
++ int ring_id;
++ struct ipqess *ess;
++ struct netdev_queue *nq;
++ struct ipqess_tx_desc *hw_desc;
++ struct ipqess_buf *buf;
++ dma_addr_t dma;
++ u16 count;
++ u16 head;
++ u16 tail;
++};
++
++struct ipqess_rx_ring {
++ struct napi_struct napi_rx;
++ u32 idx;
++ int ring_id;
++ struct ipqess *ess;
++ struct device *ppdev;
++ struct ipqess_rx_desc **hw_desc;
++ struct ipqess_buf *buf;
++ dma_addr_t dma;
++ u16 head;
++ u16 tail;
++ atomic_t refill_count;
++};
++
++struct ipqess_rx_ring_refill {
++ struct ipqess_rx_ring *rx_ring;
++ struct work_struct refill_work;
++};
++
++#define IPQESS_IRQ_NAME_LEN 32
++
++struct ipqess {
++ struct net_device *netdev;
++ void __iomem *hw_addr;
++
++ struct clk *ess_clk;
++ struct reset_control *ess_rst;
++
++ struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
++
++ struct platform_device *pdev;
++ struct phylink *phylink;
++ struct phylink_config phylink_config;
++ struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
++
++ struct ipqess_statistics ipqess_stats;
++
++ /* Protects stats */
++ spinlock_t stats_lock;
++ struct net_device_stats stats;
++
++ struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
++ u32 tx_irq[IPQESS_MAX_TX_QUEUE];
++ char tx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
++ u32 rx_irq[IPQESS_MAX_RX_QUEUE];
++ char rx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
++};
++
++void ipqess_set_ethtool_ops(struct net_device *netdev);
++void ipqess_update_hw_stats(struct ipqess *ess);
++
++/* register definition */
++#define IPQESS_REG_MAS_CTRL 0x0
++#define IPQESS_REG_TIMEOUT_CTRL 0x004
++#define IPQESS_REG_DBG0 0x008
++#define IPQESS_REG_DBG1 0x00C
++#define IPQESS_REG_SW_CTRL0 0x100
++#define IPQESS_REG_SW_CTRL1 0x104
++
++/* Interrupt Status Register */
++#define IPQESS_REG_RX_ISR 0x200
++#define IPQESS_REG_TX_ISR 0x208
++#define IPQESS_REG_MISC_ISR 0x210
++#define IPQESS_REG_WOL_ISR 0x218
++
++#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << (x))
++
++#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
++#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
++#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
++#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
++#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
++
++#define IPQESS_WOL_ISR 0x00000001
++
++/* Interrupt Mask Register */
++#define IPQESS_REG_MISC_IMR 0x214
++#define IPQESS_REG_WOL_IMR 0x218
++
++#define IPQESS_RX_IMR_NORMAL_MASK 0x1
++#define IPQESS_TX_IMR_NORMAL_MASK 0x1
++#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
++#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
++
++/* Edma receive consumer index */
++#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
++
++/* Edma transmit consumer index */
++#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
++
++/* IRQ Moderator Initial Timer Register */
++#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
++#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
++#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
++#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
++
++/* Interrupt Control Register */
++#define IPQESS_REG_INTR_CTRL 0x284
++#define IPQESS_INTR_CLR_TYP_SHIFT 0
++#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
++#define IPQESS_INTR_CLEAR_TYPE_W1 0
++#define IPQESS_INTR_CLEAR_TYPE_R 1
++
++/* RX Interrupt Mask Register */
++#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
++
++/* TX Interrupt mask register */
++#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
++
++/* Load Ptr Register
++ * Software sets this bit after the initialization of the head and tail
++ */
++#define IPQESS_REG_TX_SRAM_PART 0x400
++#define IPQESS_LOAD_PTR_SHIFT 16
++
++/* TXQ Control Register */
++#define IPQESS_REG_TXQ_CTRL 0x404
++#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
++#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
++#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
++#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
++#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
++#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
++#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
++#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
++#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
++#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
++
++#define IPQESS_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
++#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
++#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
++#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
++#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
++
++/* WRR Control Register */
++#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
++#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
++#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
++#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
++
++/* Weighted round robin (WRR); takes the queue as input and computes
++ * the starting bit position at which the weight for that particular
++ * queue is written
++ */
++#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
++
++/* Tx Descriptor Control Register */
++#define IPQESS_REG_TPD_RING_SIZE 0x41C
++#define IPQESS_TPD_RING_SIZE_SHIFT 0
++#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
++
++/* Transmit descriptor base address */
++#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
++
++/* TPD Index Register */
++#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
++
++#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
++#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
++#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
++#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
++#define IPQESS_TPD_PROD_IDX_SHIFT 0
++#define IPQESS_TPD_CONS_IDX_SHIFT 16
++
++/* TX Virtual Queue Mapping Control Register */
++#define IPQESS_REG_VQ_CTRL0 0x4A0
++#define IPQESS_REG_VQ_CTRL1 0x4A4
++
++/* Virtual QID shift; takes the queue as input and computes the
++ * Virtual QID position in the virtual QID control register
++ */
++#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
++
++/* Virtual Queue Default Value */
++#define IPQESS_VQ_REG_VALUE 0x240240
++
++/* Tx side Port Interface Control Register */
++#define IPQESS_REG_PORT_CTRL 0x4A8
++#define IPQESS_PAD_EN_SHIFT 15
++
++/* Tx side VLAN Configuration Register */
++#define IPQESS_REG_VLAN_CFG 0x4AC
++
++#define IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT 0
++#define IPQESS_VLAN_CFG_SVLAN_TPID_MASK 0xffff
++#define IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT 16
++#define IPQESS_VLAN_CFG_CVLAN_TPID_MASK 0xffff
++
++#define IPQESS_TX_CVLAN 16
++#define IPQESS_TX_INS_CVLAN 17
++#define IPQESS_TX_CVLAN_TAG_SHIFT 0
++
++#define IPQESS_TX_SVLAN 14
++#define IPQESS_TX_INS_SVLAN 15
++#define IPQESS_TX_SVLAN_TAG_SHIFT 16
++
++/* Tx Queue Packet Statistic Register */
++#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
++
++#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
++
++/* Tx Queue Byte Statistic Register */
++#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
++
++/* Load Balance Based Ring Offset Register */
++#define IPQESS_REG_LB_RING 0x800
++#define IPQESS_LB_RING_ENTRY_MASK 0xff
++#define IPQESS_LB_RING_ID_MASK 0x7
++#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
++#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
++#define IPQESS_LB_RING_ID_OFFSET 0
++#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
++#define IPQESS_LB_REG_VALUE 0x6040200
++
++/* Load Balance Priority Mapping Register */
++#define IPQESS_REG_LB_PRI_START 0x804
++#define IPQESS_REG_LB_PRI_END 0x810
++#define IPQESS_LB_PRI_REG_INC 4
++#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
++#define IPQESS_LB_PRI_ENTRY_MASK 0xf
++
++/* RSS Priority Mapping Register */
++#define IPQESS_REG_RSS_PRI 0x820
++#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
++#define IPQESS_RSS_RING_ID_MASK 0x7
++#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
++
++/* RSS Indirection Register */
++#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
++#define IPQESS_NUM_IDT 16
++#define IPQESS_RSS_IDT_VALUE 0x64206420
++
++/* Default RSS Ring Register */
++#define IPQESS_REG_DEF_RSS 0x890
++#define IPQESS_DEF_RSS_MASK 0x7
++
++/* RSS Hash Function Type Register */
++#define IPQESS_REG_RSS_TYPE 0x894
++#define IPQESS_RSS_TYPE_NONE 0x01
++#define IPQESS_RSS_TYPE_IPV4TCP 0x02
++#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
++#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
++#define IPQESS_RSS_TYPE_IPV6UDP 0x10
++#define IPQESS_RSS_TYPE_IPV4 0x20
++#define IPQESS_RSS_TYPE_IPV6 0x40
++#define IPQESS_RSS_HASH_MODE_MASK 0x7f
++
++#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
++
++#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
++
++#define IPQESS_HASH_TYPE_START 0
++#define IPQESS_HASH_TYPE_END 5
++#define IPQESS_HASH_TYPE_SHIFT 12
++
++#define IPQESS_RFS_FLOW_ENTRIES 1024
++#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
++#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
++
++/* RFD Base Address Register */
++#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
++
++/* RFD Index Register */
++#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) /* x = queue id */
++
++#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
++#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
++#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
++#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
++#define IPQESS_RFD_PROD_IDX_SHIFT 0
++#define IPQESS_RFD_CONS_IDX_SHIFT 16
++
++/* Rx Descriptor Control Register */
++#define IPQESS_REG_RX_DESC0 0xA10
++#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
++#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
++#define IPQESS_RFD_RING_SIZE_SHIFT 0
++#define IPQESS_RX_BUF_SIZE_SHIFT 16
++
++#define IPQESS_REG_RX_DESC1 0xA14
++#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
++#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
++#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
++#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
++#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
++#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
++
++/* RXQ Control Register */
++#define IPQESS_REG_RXQ_CTRL 0xA18
++#define IPQESS_FIFO_THRESH_TYPE_SHIF 0
++#define IPQESS_FIFO_THRESH_128_BYTE 0x0
++#define IPQESS_FIFO_THRESH_64_BYTE 0x1
++#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
++#define IPQESS_RXQ_CTRL_EN_MASK GENMASK(15, 8)
++#define IPQESS_RXQ_CTRL_EN(__qid) BIT(8 + (__qid))
++
++/* AXI Burst Size Config */
++#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
++#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
++
++/* Rx Statistics Register */
++#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
++#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
++
++/* WoL Pattern Length Register */
++#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
++#define IPQESS_WOL_PT_LEN_MASK 0xFF
++#define IPQESS_WOL_PT0_LEN_SHIFT 0
++#define IPQESS_WOL_PT1_LEN_SHIFT 8
++#define IPQESS_WOL_PT2_LEN_SHIFT 16
++#define IPQESS_WOL_PT3_LEN_SHIFT 24
++
++#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
++#define IPQESS_WOL_PT4_LEN_SHIFT 0
++#define IPQESS_WOL_PT5_LEN_SHIFT 8
++#define IPQESS_WOL_PT6_LEN_SHIFT 16
++
++/* WoL Control Register */
++#define IPQESS_REG_WOL_CTRL 0xC08
++#define IPQESS_WOL_WK_EN 0x00000001
++#define IPQESS_WOL_MG_EN 0x00000002
++#define IPQESS_WOL_PT0_EN 0x00000004
++#define IPQESS_WOL_PT1_EN 0x00000008
++#define IPQESS_WOL_PT2_EN 0x00000010
++#define IPQESS_WOL_PT3_EN 0x00000020
++#define IPQESS_WOL_PT4_EN 0x00000040
++#define IPQESS_WOL_PT5_EN 0x00000080
++#define IPQESS_WOL_PT6_EN 0x00000100
++
++/* MAC Control Register */
++#define IPQESS_REG_MAC_CTRL0 0xC20
++#define IPQESS_REG_MAC_CTRL1 0xC24
++
++/* WoL Pattern Register */
++#define IPQESS_REG_WOL_PATTERN_START 0x5000
++#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
++
++/* TX descriptor fields */
++#define IPQESS_TPD_HDR_SHIFT 0
++#define IPQESS_TPD_PPPOE_EN 0x00000100
++#define IPQESS_TPD_IP_CSUM_EN 0x00000200
++#define IPQESS_TPD_TCP_CSUM_EN 0x00000400
++#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
++#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
++#define IPQESS_TPD_LSO_EN 0x00001000
++#define IPQESS_TPD_LSO_V2_EN 0x00002000
++/* The VLAN_TAGGED bit is not used in the publicly available
++ * drivers. The definition has been stolen from the Atheros
++ * 'alx' driver (drivers/net/ethernet/atheros/alx/hw.h). It
++ * seems that it has the same meaning in regard to the EDMA
++ * hardware.
++ */
++#define IPQESS_TPD_VLAN_TAGGED 0x00004000
++#define IPQESS_TPD_IPV4_EN 0x00010000
++#define IPQESS_TPD_MSS_MASK 0x1FFF
++#define IPQESS_TPD_MSS_SHIFT 18
++#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
++
++/* RRD descriptor fields */
++#define IPQESS_RRD_NUM_RFD_MASK 0x000F
++#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
++#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
++#define IPQESS_RRD_SVLAN 0x8000
++#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF
++
++#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
++#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
++#define IPQESS_RRD_CVLAN 0x0001
++#define IPQESS_RRD_DESC_VALID 0x8000
++
++#define IPQESS_RRD_PRIORITY_SHIFT 4
++#define IPQESS_RRD_PRIORITY_MASK 0x7
++#define IPQESS_RRD_PORT_TYPE_SHIFT 7
++#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
++
++#define IPQESS_RRD_PORT_ID_MASK 0x7000
++
++#endif
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
+@@ -0,0 +1,164 @@
++// SPDX-License-Identifier: GPL-2.0 OR ISC
++/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <linux/string.h>
++#include <linux/phylink.h>
++
++#include "ipqess.h"
++
++struct ipqess_ethtool_stats {
++ u8 string[ETH_GSTRING_LEN];
++ u32 offset;
++};
++
++#define IPQESS_STAT(m) offsetof(struct ipqess_statistics, m)
++#define DRVINFO_LEN 32
++
++static const struct ipqess_ethtool_stats ipqess_stats[] = {
++ {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
++ {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
++ {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
++ {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
++ {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
++ {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
++ {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
++ {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
++ {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
++ {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
++ {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
++ {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
++ {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
++ {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
++ {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
++ {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
++ {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
++ {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
++ {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
++ {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
++ {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
++ {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
++ {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
++ {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
++ {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
++ {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
++ {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
++ {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
++ {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
++ {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
++ {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
++ {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
++ {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
++ {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
++ {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
++ {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
++ {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
++ {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
++ {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
++ {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
++ {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
++ {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
++ {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
++ {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
++ {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
++ {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
++ {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
++ {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
++ {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
++};
++
++static int ipqess_get_strset_count(struct net_device *netdev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(ipqess_stats);
++ default:
++ netdev_dbg(netdev, "%s: Unsupported string set", __func__);
++ return -EOPNOTSUPP;
++ }
++}
++
++static void ipqess_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
++{
++ u8 *p = data;
++ u32 i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
++ ethtool_puts(&p, ipqess_stats[i].string);
++ break;
++ }
++}
++
++static void ipqess_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ uint64_t *data)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ u32 *essstats = (u32 *)&ess->ipqess_stats;
++ int i;
++
++ spin_lock(&ess->stats_lock);
++
++ ipqess_update_hw_stats(ess);
++
++ for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
++ data[i] = *(u32 *)(essstats + (ipqess_stats[i].offset / sizeof(u32)));
++
++ spin_unlock(&ess->stats_lock);
++}
++
++static void ipqess_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ strscpy(info->driver, "qca_ipqess", DRVINFO_LEN);
++ strscpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
++}
++
++static int ipqess_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_ethtool_ksettings_get(ess->phylink, cmd);
++}
++
++static int ipqess_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_ethtool_ksettings_set(ess->phylink, cmd);
++}
++
++static void ipqess_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring,
++ struct kernel_ethtool_ringparam *kernel_ering,
++ struct netlink_ext_ack *extack)
++{
++ ring->tx_max_pending = IPQESS_TX_RING_SIZE;
++ ring->rx_max_pending = IPQESS_RX_RING_SIZE;
++}
++
++static const struct ethtool_ops ipqesstool_ops = {
++ .get_drvinfo = &ipqess_get_drvinfo,
++ .get_link = &ethtool_op_get_link,
++ .get_link_ksettings = &ipqess_get_link_ksettings,
++ .set_link_ksettings = &ipqess_set_link_ksettings,
++ .get_strings = &ipqess_get_strings,
++ .get_sset_count = &ipqess_get_strset_count,
++ .get_ethtool_stats = &ipqess_get_ethtool_stats,
++ .get_ringparam = ipqess_get_ringparam,
++};
++
++void ipqess_set_ethtool_ops(struct net_device *netdev)
++{
++ netdev->ethtool_ops = &ipqesstool_ops;
++}
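
For reference, ipqess_update_hw_stats() used above is only declared in ipqess.h;
its body lives in ipqess.c and is not part of this excerpt. Below is a minimal
sketch of what such a helper could look like, assuming the per-queue counters
can simply be accumulated from the IPQESS_REG_TX_STAT_* / IPQESS_REG_RX_STAT_*
registers and reusing the array-style view of struct ipqess_statistics that
ipqess_get_ethtool_stats() already relies on; the function name and these
assumptions are illustrative only, not taken from the driver itself.

static void ipqess_update_hw_stats_sketch(struct ipqess *ess)
{
        struct ipqess_statistics *s = &ess->ipqess_stats;
        u32 *tx_pkt = &s->tx_q0_pkt;
        u32 *tx_byte = &s->tx_q0_byte;
        u32 *rx_pkt = &s->rx_q0_pkt;
        u32 *rx_byte = &s->rx_q0_byte;
        int i;

        /* Accumulate the per-queue hardware counters into the software copy.
         * Whether the counters are clear-on-read is an assumption here; the
         * real helper may need to compute deltas instead.
         */
        for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
                tx_pkt[i] += ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i)) &
                             IPQESS_TX_STAT_PKT_MASK;
                tx_byte[i] += ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
        }

        for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
                rx_pkt[i] += ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
                rx_byte[i] += ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
        }
}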
--- /dev/null
+From a32e16b3c2fc1954ad6e09737439f60e5890278e Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:49 +0100
+Subject: [PATCH] net: dsa: add out-of-band tagging protocol
+
+This tagging protocol is designed for the situation where the link
+between the MAC and the Switch is such that the Destination
+Port, which is usually embedded in some part of the Ethernet Header, is
+sent out-of-band, and isn't present at all in the Ethernet frame.
+
+This can happen when the MAC and Switch are tightly integrated on an
+SoC, as is the case with the Qualcomm IPQ4019 for example, where the DSA
+tag is inserted directly into the DMA descriptors. In that case,
+the MAC driver is responsible for sending the tag to the switch using
+the out-of-band medium. To do so, the MAC driver needs to know the
+destination port for that skb.
+
+Add a new tagging protocol based on SKB extensions to convey the
+information about the destination port to the MAC driver.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ Documentation/networking/dsa/dsa.rst | 13 +++++++-
+ MAINTAINERS | 1 +
+ include/linux/dsa/oob.h | 16 +++++++++
+ include/linux/skbuff.h | 3 ++
+ include/net/dsa.h | 2 ++
+ net/core/skbuff.c | 10 ++++++
+ net/dsa/Kconfig | 9 +++++
+ net/dsa/Makefile | 1 +
+ net/dsa/tag_oob.c | 49 ++++++++++++++++++++++++++++
+ 9 files changed, 103 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/dsa/oob.h
+ create mode 100644 net/dsa/tag_oob.c
+
+--- a/Documentation/networking/dsa/dsa.rst
++++ b/Documentation/networking/dsa/dsa.rst
+@@ -66,7 +66,8 @@ Switch tagging protocols
+ ------------------------
+
+ DSA supports many vendor-specific tagging protocols, one software-defined
+-tagging protocol, and a tag-less mode as well (``DSA_TAG_PROTO_NONE``).
++tagging protocol, a tag-less mode (``DSA_TAG_PROTO_NONE``), as well as an
++out-of-band tagging protocol (``DSA_TAG_PROTO_OOB``).
+
+ The exact format of the tag protocol is vendor specific, but in general, they
+ all contain something which:
+@@ -217,6 +218,16 @@ receive all frames regardless of the val
+ setting the ``promisc_on_master`` property of the ``struct dsa_device_ops``.
+ Note that this assumes a DSA-unaware master driver, which is the norm.
+
++Some SoCs have a tight integration between the conduit network interface and the
++embedded switch, such that the DSA tag isn't transmitted in the packet data,
++but through another medium, using so-called out-of-band tagging. In that case,
++the host MAC driver is in charge of transmitting the tag to the switch.
++An example is the IPQ4019 SoC, which transmits the tag between the ipqess
++ethernet controller and the qca8k switch using the DMA descriptor. In that
++configuration, tag-chaining is permitted, but the OOB tag always belongs to the
++top-most switch in the tree. The tagger (``DSA_TAG_PROTO_OOB``) uses skb
++extensions to transmit the tag to and from the MAC driver.
++
+ Master network devices
+ ----------------------
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -17081,6 +17081,7 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: Documentation/devicetree/bindings/net/qcom,ipq4019-ess-edma.yaml
+ F: drivers/net/ethernet/qualcomm/ipqess/
++F: net/dsa/tag_oob.c
+
+ QUALCOMM ETHQOS ETHERNET DRIVER
+ M: Vinod Koul <vkoul@kernel.org>
+--- /dev/null
++++ b/include/linux/dsa/oob.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (C) 2022 Maxime Chevallier <maxime.chevallier@bootlin.com>
++ */
++
++#ifndef _NET_DSA_OOB_H
++#define _NET_DSA_OOB_H
++
++#include <linux/skbuff.h>
++
++struct dsa_oob_tag_info {
++ u16 port;
++};
++
++int dsa_oob_tag_push(struct sk_buff *skb, struct dsa_oob_tag_info *ti);
++int dsa_oob_tag_pop(struct sk_buff *skb, struct dsa_oob_tag_info *ti);
++#endif
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -4588,6 +4588,9 @@ enum skb_ext_id {
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ SKB_EXT_DSA_OOB,
++#endif
+ SKB_EXT_NUM, /* must be last */
+ };
+
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -55,6 +55,7 @@ struct phylink_link_state;
+ #define DSA_TAG_PROTO_RTL8_4T_VALUE 25
+ #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
+ #define DSA_TAG_PROTO_LAN937X_VALUE 27
++#define DSA_TAG_PROTO_OOB_VALUE 28
+
+ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
+@@ -85,6 +86,7 @@ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
+ DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
+ DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
++ DSA_TAG_PROTO_OOB = DSA_TAG_PROTO_OOB_VALUE,
+ };
+
+ struct dsa_switch;
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -62,8 +62,12 @@
+ #include <linux/mpls.h>
+ #include <linux/kcov.h>
+ #include <linux/if.h>
++#ifdef CONFIG_NET_DSA_TAG_OOB
++#include <linux/dsa/oob.h>
++#endif
+
+ #include <net/protocol.h>
++#include <net/dsa.h>
+ #include <net/dst.h>
+ #include <net/sock.h>
+ #include <net/checksum.h>
+@@ -4517,6 +4521,9 @@ static const u8 skb_ext_type_len[] = {
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ [SKB_EXT_DSA_OOB] = SKB_EXT_CHUNKSIZEOF(struct dsa_oob_tag_info),
++#endif
+ };
+
+ static __always_inline unsigned int skb_ext_total_length(void)
+@@ -4537,6 +4544,9 @@ static __always_inline unsigned int skb_
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ skb_ext_type_len[SKB_EXT_MCTP] +
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ skb_ext_type_len[SKB_EXT_DSA_OOB] +
++#endif
+ 0;
+ }
+
+--- a/net/dsa/Kconfig
++++ b/net/dsa/Kconfig
+@@ -113,6 +113,15 @@ config NET_DSA_TAG_OCELOT_8021Q
+ this mode, less TCAM resources (VCAP IS1, IS2, ES0) are available for
+ use with tc-flower.
+
++config NET_DSA_TAG_OOB
++ select SKB_EXTENSIONS
++ tristate "Tag driver for Out-of-band tagging drivers"
++ help
++ Say Y or M if you want to enable support for pairs of embedded
++ switches and host MAC drivers which perform demultiplexing and
++ packet steering to ports using out-of-band metadata processed
++ by the DSA master, rather than tags present in the packets.
++
+ config NET_DSA_TAG_QCA
+ tristate "Tag driver for Qualcomm Atheros QCA8K switches"
+ help
+--- a/net/dsa/Makefile
++++ b/net/dsa/Makefile
+@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag
+ obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+ obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
+ obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
++obj-$(CONFIG_NET_DSA_TAG_OOB) += tag_oob.o
+ obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+ obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
+--- /dev/null
++++ b/net/dsa/tag_oob.c
+@@ -0,0 +1,49 @@
++// SPDX-License-Identifier: GPL-2.0-only
++
++/* Copyright (c) 2022, Maxime Chevallier <maxime.chevallier@bootlin.com> */
++
++#include <linux/bitfield.h>
++#include <linux/dsa/oob.h>
++#include <linux/skbuff.h>
++
++#include "dsa_priv.h"
++
++static struct sk_buff *oob_tag_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct dsa_oob_tag_info *tag_info = skb_ext_add(skb, SKB_EXT_DSA_OOB);
++ struct dsa_port *dp = dsa_slave_to_port(dev);
++
++ tag_info->port = dp->index;
++
++ return skb;
++}
++
++static struct sk_buff *oob_tag_rcv(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct dsa_oob_tag_info *tag_info = skb_ext_find(skb, SKB_EXT_DSA_OOB);
++
++ if (!tag_info)
++ return NULL;
++
++ skb->dev = dsa_master_find_slave(dev, 0, tag_info->port);
++ if (!skb->dev)
++ return NULL;
++
++ return skb;
++}
++
++static const struct dsa_device_ops oob_tag_dsa_ops = {
++ .name = "oob",
++ .proto = DSA_TAG_PROTO_OOB,
++ .xmit = oob_tag_xmit,
++ .rcv = oob_tag_rcv,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DSA tag driver for out-of-band tagging");
++MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
++MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OOB);
++
++module_dsa_tag_driver(oob_tag_dsa_ops);
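
Note that dsa_oob_tag_push() and dsa_oob_tag_pop(), declared in
include/linux/dsa/oob.h above, have no definition in this excerpt; the ipqess
driver in the following patch calls skb_ext_add()/skb_ext_find() directly. A
minimal sketch of what these helpers could look like, assuming they are meant
to be thin wrappers around the skb extension API, is:

int dsa_oob_tag_push(struct sk_buff *skb, struct dsa_oob_tag_info *ti)
{
        struct dsa_oob_tag_info *tag_info;

        /* attach (or reuse) the OOB extension and store the port */
        tag_info = skb_ext_add(skb, SKB_EXT_DSA_OOB);
        if (!tag_info)
                return -ENOMEM;

        tag_info->port = ti->port;

        return 0;
}

int dsa_oob_tag_pop(struct sk_buff *skb, struct dsa_oob_tag_info *ti)
{
        struct dsa_oob_tag_info *tag_info;

        /* retrieve the port stored by the tagger, if any */
        tag_info = skb_ext_find(skb, SKB_EXT_DSA_OOB);
        if (!tag_info)
                return -EINVAL;

        ti->port = tag_info->port;

        return 0;
}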
--- /dev/null
+From 4975e2b3f1d37bba04f262784cef0d5b7e0a30a4 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:50 +0100
+Subject: [PATCH] net: ipqess: Add out-of-band DSA tagging support
+
+On the IPQ4019, there's a 5-port switch connected to the CPU through
+the IPQESS Ethernet controller. The way the DSA tag is sent out to that
+switch is through the DMA descriptor, due to how tightly it is
+integrated with the switch.
+
+We use the out-of-band tagging protocol by getting the source
+port from the descriptor, pushing it into the skb extensions, and having the
+tagger pull it to infer the destination netdev. The reverse process is
+done on the TX side, where the driver pulls the tag from the skb and
+builds the descriptor accordingly.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig | 1 +
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 64 ++++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.h | 4 ++
+ 3 files changed, 68 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -64,6 +64,7 @@ config QCOM_IPQ4019_ESS_EDMA
+ tristate "Qualcomm Atheros IPQ4019 ESS EDMA support"
+ depends on (OF && ARCH_QCOM) || COMPILE_TEST
+ select PHYLINK
++ select NET_DSA_TAG_OOB
+ help
+ This driver supports the Qualcomm Atheros IPQ40xx built-in
+ ESS EDMA ethernet controller.
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
++#include <linux/dsa/oob.h>
+ #include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -22,6 +23,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/vmalloc.h>
+ #include <net/checksum.h>
++#include <net/dsa.h>
+ #include <net/ip6_checksum.h>
+
+ #include "ipqess.h"
+@@ -327,6 +329,7 @@ static int ipqess_rx_poll(struct ipqess_
+ tail &= IPQESS_RFD_CONS_IDX_MASK;
+
+ while (done < budget) {
++ struct dsa_oob_tag_info *tag_info;
+ struct ipqess_rx_desc *rd;
+ struct sk_buff *skb;
+
+@@ -406,6 +409,12 @@ static int ipqess_rx_poll(struct ipqess_
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ le16_to_cpu(rd->rrd4));
+
++ if (likely(rx_ring->ess->dsa_ports)) {
++ tag_info = skb_ext_add(skb, SKB_EXT_DSA_OOB);
++ tag_info->port = FIELD_GET(IPQESS_RRD_PORT_ID_MASK,
++ le16_to_cpu(rd->rrd1));
++ }
++
+ napi_gro_receive(&rx_ring->napi_rx, skb);
+
+ rx_ring->ess->stats.rx_packets++;
+@@ -706,6 +715,23 @@ static void ipqess_rollback_tx(struct ip
+ tx_ring->head = start_index;
+ }
+
++static void ipqess_process_dsa_tag_sh(struct ipqess *ess, struct sk_buff *skb,
++ u32 *word3)
++{
++ struct dsa_oob_tag_info *tag_info;
++
++ if (unlikely(!ess->dsa_ports))
++ return;
++
++ tag_info = skb_ext_find(skb, SKB_EXT_DSA_OOB);
++ if (!tag_info)
++ return;
++
++ *word3 |= tag_info->port << IPQESS_TPD_PORT_BITMAP_SHIFT;
++ *word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);
++ *word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
++}
++
+ static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring,
+ struct sk_buff *skb)
+ {
+@@ -716,6 +742,8 @@ static int ipqess_tx_map_and_fill(struct
+ u16 len;
+ int i;
+
++ ipqess_process_dsa_tag_sh(tx_ring->ess, skb, &word3);
++
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ lso_word1 |= IPQESS_TPD_IPV4_EN;
+@@ -917,6 +945,33 @@ static const struct net_device_ops ipqes
+ .ndo_tx_timeout = ipqess_tx_timeout,
+ };
+
++static int ipqess_netdevice_event(struct notifier_block *nb,
++ unsigned long event, void *ptr)
++{
++ struct ipqess *ess = container_of(nb, struct ipqess, netdev_notifier);
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_changeupper_info *info;
++
++ if (dev != ess->netdev)
++ return NOTIFY_DONE;
++
++ switch (event) {
++ case NETDEV_CHANGEUPPER:
++ info = ptr;
++
++ if (!dsa_slave_dev_check(info->upper_dev))
++ return NOTIFY_DONE;
++
++ if (info->linking)
++ ess->dsa_ports++;
++ else
++ ess->dsa_ports--;
++
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
+ static void ipqess_hw_stop(struct ipqess *ess)
+ {
+ int i;
+@@ -1184,12 +1239,19 @@ static int ipqess_axi_probe(struct platf
+ netif_napi_add(netdev, &ess->rx_ring[i].napi_rx, ipqess_rx_napi);
+ }
+
+- err = register_netdev(netdev);
++ ess->netdev_notifier.notifier_call = ipqess_netdevice_event;
++ err = register_netdevice_notifier(&ess->netdev_notifier);
+ if (err)
+ goto err_hw_stop;
+
++ err = register_netdev(netdev);
++ if (err)
++ goto err_notifier_unregister;
++
+ return 0;
+
++err_notifier_unregister:
++ unregister_netdevice_notifier(&ess->netdev_notifier);
+ err_hw_stop:
+ ipqess_hw_stop(ess);
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+@@ -171,6 +171,10 @@ struct ipqess {
+ struct platform_device *pdev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
++
++ struct notifier_block netdev_notifier;
++ int dsa_ports;
++
+ struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
+
+ struct ipqess_statistics ipqess_stats;
--- /dev/null
+From 5f15f7f170c76220dfd36cb9037d7848d1fc4aaf Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Tue, 15 Aug 2023 14:30:50 +0200
+Subject: [PATCH] net: qualcomm: ipqess: release IRQ-s on network device stop
+
+Currently, the IPQESS driver obtains the IRQ-s during ndo_open, but they
+are never freed as they are device managed.
+
+However, it is not enough for them to be released when the device is removed,
+as the same network device can be stopped and started multiple times, which
+on the second start would cause the IRQ request to fail with -EBUSY, as they
+have already been requested before and are not of the shared type:
+[ 34.480769] ipqess-edma c080000.ethernet eth0: Link is Down
+[ 34.488070] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.488131] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.494527] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.502892] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.508137] qca8k-ipq4019 c000000.switch lan1: failed to open master eth0
+[ 34.518966] br-lan: port 1(lan1) entered blocking state
+[ 34.525165] br-lan: port 1(lan1) entered disabled state
+[ 34.530633] device lan1 entered promiscuous mode
+[ 34.548598] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.548660] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.553111] qca8k-ipq4019 c000000.switch lan2: failed to open master eth0
+[ 34.563841] br-lan: port 2(lan2) entered blocking state
+[ 34.570083] br-lan: port 2(lan2) entered disabled state
+[ 34.575530] device lan2 entered promiscuous mode
+[ 34.587067] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.587132] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.591579] qca8k-ipq4019 c000000.switch lan3: failed to open master eth0
+[ 34.602451] br-lan: port 3(lan3) entered blocking state
+[ 34.608496] br-lan: port 3(lan3) entered disabled state
+[ 34.614084] device lan3 entered promiscuous mode
+[ 34.626405] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.626468] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.630871] qca8k-ipq4019 c000000.switch lan4: failed to open master eth0
+[ 34.641689] br-lan: port 4(lan4) entered blocking state
+[ 34.647834] br-lan: port 4(lan4) entered disabled state
+[ 34.653455] device lan4 entered promiscuous mode
+[ 34.667282] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.667364] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.671830] qca8k-ipq4019 c000000.switch wan: failed to open master eth0
+
+So, let's free the IRQs in ndo_stop, after stopping NAPI and disabling the HW IRQs.
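+
+As a minimal sketch of the pattern this patch applies (illustration
+only, mirroring the hunk below; all identifiers come from the driver):
+
+    for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+            napi_disable(&ess->tx_ring[i].napi_tx);
+            napi_disable(&ess->rx_ring[i].napi_rx);
+
+            /* Release the devm-requested per-queue IRQs so a later
+             * ndo_open can request them again without -EBUSY.
+             */
+            devm_free_irq(&netdev->dev, ess->tx_irq[ess->tx_ring[i].idx],
+                          &ess->tx_ring[i]);
+            devm_free_irq(&netdev->dev, ess->rx_irq[ess->rx_ring[i].idx],
+                          &ess->rx_ring[i]);
+    }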
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -636,9 +636,22 @@ static int ipqess_stop(struct net_device
+ netif_tx_stop_all_queues(netdev);
+ phylink_stop(ess->phylink);
+ ipqess_irq_disable(ess);
++
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int qid;
++
+ napi_disable(&ess->tx_ring[i].napi_tx);
+ napi_disable(&ess->rx_ring[i].napi_rx);
++
++ qid = ess->tx_ring[i].idx;
++ devm_free_irq(&netdev->dev,
++ ess->tx_irq[qid],
++ &ess->tx_ring[i]);
++
++ qid = ess->rx_ring[i].idx;
++ devm_free_irq(&netdev->dev,
++ ess->rx_irq[qid],
++ &ess->rx_ring[i]);
+ }
+
+ return 0;
--- /dev/null
+From 9fa4a57a65e270e4d579cace4de5c438f46c7d12 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Tue, 15 Aug 2023 14:38:44 +0200
+Subject: [PATCH] net: qualcomm: ipqess: enable threaded NAPI by default
+
+Threaded NAPI provides a nice performance boost, so let's enable it by
+default.
+
+We do, however, need to move the __napi_schedule() call to after the HW
+IRQ has been cleared in order to avoid concurrency issues.
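+
+As a hedged illustration of the resulting ordering in the TX interrupt
+handler (mirroring the hunk below; register and helper names are taken
+from the driver):
+
+    static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
+    {
+            struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
+
+            if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
+                    /* Mask the queue IRQ first, then schedule NAPI:
+                     * with threaded NAPI the poll thread may run right
+                     * away, so masking afterwards could race with the
+                     * poller re-enabling the interrupt.
+                     */
+                    ipqess_w32(tx_ring->ess,
+                               IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x0);
+                    __napi_schedule(&tx_ring->napi_tx);
+            }
+
+            return IRQ_HANDLED;
+    }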
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -530,9 +530,9 @@ static irqreturn_t ipqess_interrupt_tx(i
+ struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
+
+ if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
+- __napi_schedule(&tx_ring->napi_tx);
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
+ 0x0);
++ __napi_schedule(&tx_ring->napi_tx);
+ }
+
+ return IRQ_HANDLED;
+@@ -543,9 +543,9 @@ static irqreturn_t ipqess_interrupt_rx(i
+ struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;
+
+ if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
+- __napi_schedule(&rx_ring->napi_rx);
+ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
+ 0x0);
++ __napi_schedule(&rx_ring->napi_rx);
+ }
+
+ return IRQ_HANDLED;
+@@ -1261,6 +1261,8 @@ static int ipqess_axi_probe(struct platf
+ if (err)
+ goto err_notifier_unregister;
+
++ dev_set_threaded(netdev, true);
++
+ return 0;
+
+ err_notifier_unregister:
--- /dev/null
+From 5b71dbb867680887d47954ce1cc145cb747cbce6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:51 +0100
+Subject: [PATCH] ARM: dts: qcom: ipq4019: Add description for the IPQESS
+ Ethernet controller
+
+The Qualcomm IPQ4019 includes an internal 5-port switch, which is
+connected to the CPU through the internal IPQESS Ethernet controller.
+
+Add support for this internal interface, which is connected to a
+modified version of the QCA8K Ethernet switch.
+
+This Ethernet controller only supports a specific internal interface
+mode for the connection to the switch.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 48 +++++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -594,6 +594,54 @@
+ status = "disabled";
+ };
+
++ gmac: ethernet@c080000 {
++ compatible = "qcom,ipq4019-ess-edma";
++ reg = <0xc080000 0x8000>;
++ resets = <&gcc ESS_RESET>;
++ reset-names = "ess";
++ clocks = <&gcc GCC_ESS_CLK>;
++ clock-names = "ess";
++ interrupts = <GIC_SPI 65 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 69 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 70 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 71 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 72 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 76 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 242 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 243 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 254 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 255 IRQ_TYPE_EDGE_RISING>;
++ phy-mode = "internal";
++ status = "disabled";
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ };
++ };
++
+ mdio: mdio@90000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
--- /dev/null
+From a38126870488398932e017dd9d76174b4aadbbbb Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Sat, 10 Sep 2022 15:46:09 +0200
+Subject: [PATCH] net: dsa: qca8k: add IPQ4019 built-in switch support
+
+Qualcomm IPQ40xx SoCs have a variant of the QCA8337N switch built in.
+
+It shares most of its logic with the external counterpart, but it is
+modified for the SoC.
+Namely, it doesn't have a second CPU port (Port 6), so it has 6 ports
+instead of 7.
+It also has no built-in PHYs but instead requires external PSGMII-based
+companion PHYs (QCA8072 and QCA8075), which first need to be calibrated
+before they can be used.
+PSGMII uses an SoC built-in PHY to connect to those companion PHYs,
+which unfortunately requires some magic values, as the datasheet
+documents neither the bits being set nor the register at all.
+
+Since the switch is built in, it is MMIO-based like other peripherals
+and doesn't have its own MDIO bus, instead depending on the one
+provided by the SoC.
+
+The CPU connection is at Port 0 and uses some kind of internal link
+rather than traditional RGMII/SGMII.
+
+It also doesn't use in-band tagging like the other qca8k switches, so
+an out-of-band tagger is used.
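+
+As a rough, condensed sketch of the calibration flow implemented below
+(illustration only, paraphrasing psgmii_vco_calibrate_and_test() from
+this patch):
+
+    for (a = 0; a <= QCA8K_PSGMII_CALB_NUM; a++) {
+            ret = psgmii_vco_calibrate(priv);
+            if (ret)
+                    return ret;
+            /* Run the per-port loopback self-test serially first,
+             * then in parallel; calibration is done once both pass.
+             */
+            if (!qca8k_do_dsa_sw_ports_self_test(priv, 0) &&
+                !qca8k_do_dsa_sw_ports_self_test(priv, 1))
+                    return 0;
+    }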
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ drivers/net/dsa/qca/Kconfig | 8 +
+ drivers/net/dsa/qca/Makefile | 1 +
+ drivers/net/dsa/qca/qca8k-common.c | 6 +-
+ drivers/net/dsa/qca/qca8k-ipq4019.c | 948 ++++++++++++++++++++++++++++
+ drivers/net/dsa/qca/qca8k.h | 56 ++
+ 5 files changed, 1016 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/net/dsa/qca/qca8k-ipq4019.c
+
+--- a/drivers/net/dsa/qca/Kconfig
++++ b/drivers/net/dsa/qca/Kconfig
+@@ -23,3 +23,11 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
+ help
+ This enabled support for LEDs present on the Qualcomm Atheros
+ QCA8K Ethernet switch chips.
++
++config NET_DSA_QCA8K_IPQ4019
++ tristate "Qualcomm Atheros IPQ4019 Ethernet switch support"
++ select NET_DSA_TAG_OOB
++ select REGMAP_MMIO
++ help
++ This enables support for the switch built into Qualcomm Atheros
++ IPQ4019 SoCs.
+--- a/drivers/net/dsa/qca/Makefile
++++ b/drivers/net/dsa/qca/Makefile
+@@ -5,3 +5,4 @@ qca8k-y += qca8k-common.o qca8k-8xxx.
+ ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+ qca8k-y += qca8k-leds.o
+ endif
++obj-$(CONFIG_NET_DSA_QCA8K_IPQ4019) += qca8k-ipq4019.o qca8k-common.o
+--- a/drivers/net/dsa/qca/qca8k-common.c
++++ b/drivers/net/dsa/qca/qca8k-common.c
+@@ -412,7 +412,7 @@ static int qca8k_vlan_del(struct qca8k_p
+
+ /* Check if we're the last member to be removed */
+ del = true;
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < priv->ds->num_ports; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+@@ -653,7 +653,7 @@ int qca8k_port_bridge_join(struct dsa_sw
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+@@ -685,7 +685,7 @@ void qca8k_port_bridge_leave(struct dsa_
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+--- /dev/null
++++ b/drivers/net/dsa/qca/qca8k-ipq4019.c
+@@ -0,0 +1,948 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
++ * Copyright (C) 2011-2012, 2020-2021 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2016 John Crispin <john@phrozen.org>
++ * Copyright (c) 2022 Robert Marko <robert.marko@sartura.hr>
++ */
++
++#include <linux/module.h>
++#include <linux/phy.h>
++#include <linux/netdevice.h>
++#include <linux/bitfield.h>
++#include <linux/regmap.h>
++#include <net/dsa.h>
++#include <linux/of_net.h>
++#include <linux/of_mdio.h>
++#include <linux/of_platform.h>
++#include <linux/mdio.h>
++#include <linux/phylink.h>
++
++#include "qca8k.h"
++
++static struct regmap_config qca8k_ipq4019_regmap_config = {
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = 0x16ac, /* end MIB - Port6 range */
++ .rd_table = &qca8k_readable_table,
++};
++
++static struct regmap_config qca8k_ipq4019_psgmii_phy_regmap_config = {
++ .name = "psgmii-phy",
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = 0x7fc,
++};
++
++static enum dsa_tag_protocol
++qca8k_ipq4019_get_tag_protocol(struct dsa_switch *ds, int port,
++ enum dsa_tag_protocol mp)
++{
++ return DSA_TAG_PROTO_OOB;
++}
++
++static struct phylink_pcs *
++qca8k_ipq4019_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
++ phy_interface_t interface)
++{
++ struct qca8k_priv *priv = ds->priv;
++ struct phylink_pcs *pcs = NULL;
++
++ switch (interface) {
++ case PHY_INTERFACE_MODE_PSGMII:
++ switch (port) {
++ case 0:
++ pcs = &priv->pcs_port_0.pcs;
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++
++ return pcs;
++}
++
++static int qca8k_ipq4019_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
++ phy_interface_t interface,
++ const unsigned long *advertising,
++ bool permit_pause_to_mac)
++{
++ return 0;
++}
++
++static void qca8k_ipq4019_pcs_an_restart(struct phylink_pcs *pcs)
++{
++}
++
++static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
++{
++ return container_of(pcs, struct qca8k_pcs, pcs);
++}
++
++static void qca8k_ipq4019_pcs_get_state(struct phylink_pcs *pcs,
++ struct phylink_link_state *state)
++{
++ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
++ int port = pcs_to_qca8k_pcs(pcs)->port;
++ u32 reg;
++ int ret;
++
++ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
++ if (ret < 0) {
++ state->link = false;
++ return;
++ }
++
++ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
++ state->an_complete = state->link;
++ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
++ DUPLEX_HALF;
++
++ switch (reg & QCA8K_PORT_STATUS_SPEED) {
++ case QCA8K_PORT_STATUS_SPEED_10:
++ state->speed = SPEED_10;
++ break;
++ case QCA8K_PORT_STATUS_SPEED_100:
++ state->speed = SPEED_100;
++ break;
++ case QCA8K_PORT_STATUS_SPEED_1000:
++ state->speed = SPEED_1000;
++ break;
++ default:
++ state->speed = SPEED_UNKNOWN;
++ break;
++ }
++
++ if (reg & QCA8K_PORT_STATUS_RXFLOW)
++ state->pause |= MLO_PAUSE_RX;
++ if (reg & QCA8K_PORT_STATUS_TXFLOW)
++ state->pause |= MLO_PAUSE_TX;
++}
++
++static const struct phylink_pcs_ops qca8k_pcs_ops = {
++ .pcs_get_state = qca8k_ipq4019_pcs_get_state,
++ .pcs_config = qca8k_ipq4019_pcs_config,
++ .pcs_an_restart = qca8k_ipq4019_pcs_an_restart,
++};
++
++static void qca8k_ipq4019_setup_pcs(struct qca8k_priv *priv,
++ struct qca8k_pcs *qpcs,
++ int port)
++{
++ qpcs->pcs.ops = &qca8k_pcs_ops;
++
++ /* We don't have interrupts for link changes, so we need to poll */
++ qpcs->pcs.poll = true;
++ qpcs->priv = priv;
++ qpcs->port = port;
++}
++
++static void qca8k_ipq4019_phylink_get_caps(struct dsa_switch *ds, int port,
++ struct phylink_config *config)
++{
++ switch (port) {
++ case 0: /* CPU port */
++ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
++ config->supported_interfaces);
++ break;
++
++ case 1:
++ case 2:
++ case 3:
++ __set_bit(PHY_INTERFACE_MODE_PSGMII,
++ config->supported_interfaces);
++ break;
++ case 4:
++ case 5:
++ phy_interface_set_rgmii(config->supported_interfaces);
++ __set_bit(PHY_INTERFACE_MODE_PSGMII,
++ config->supported_interfaces);
++ break;
++ }
++
++ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100 | MAC_1000FD;
++
++ config->legacy_pre_march2020 = false;
++}
++
++static void
++qca8k_phylink_ipq4019_mac_link_down(struct dsa_switch *ds, int port,
++ unsigned int mode,
++ phy_interface_t interface)
++{
++ struct qca8k_priv *priv = ds->priv;
++
++ qca8k_port_set_status(priv, port, 0);
++}
++
++static void
++qca8k_phylink_ipq4019_mac_link_up(struct dsa_switch *ds, int port,
++ unsigned int mode, phy_interface_t interface,
++ struct phy_device *phydev, int speed,
++ int duplex, bool tx_pause, bool rx_pause)
++{
++ struct qca8k_priv *priv = ds->priv;
++ u32 reg;
++
++ if (phylink_autoneg_inband(mode)) {
++ reg = QCA8K_PORT_STATUS_LINK_AUTO;
++ } else {
++ switch (speed) {
++ case SPEED_10:
++ reg = QCA8K_PORT_STATUS_SPEED_10;
++ break;
++ case SPEED_100:
++ reg = QCA8K_PORT_STATUS_SPEED_100;
++ break;
++ case SPEED_1000:
++ reg = QCA8K_PORT_STATUS_SPEED_1000;
++ break;
++ default:
++ reg = QCA8K_PORT_STATUS_LINK_AUTO;
++ break;
++ }
++
++ if (duplex == DUPLEX_FULL)
++ reg |= QCA8K_PORT_STATUS_DUPLEX;
++
++ if (rx_pause || dsa_is_cpu_port(ds, port))
++ reg |= QCA8K_PORT_STATUS_RXFLOW;
++
++ if (tx_pause || dsa_is_cpu_port(ds, port))
++ reg |= QCA8K_PORT_STATUS_TXFLOW;
++ }
++
++ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
++
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
++}
++
++static int psgmii_vco_calibrate(struct qca8k_priv *priv)
++{
++ int val, ret;
++
++ if (!priv->psgmii_ethphy) {
++ dev_err(priv->dev, "PSGMII eth PHY missing, calibration failed!\n");
++ return -ENODEV;
++ }
++
++ /* Fix PSGMII RX 20bit */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
++ /* Reset PHY PSGMII */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x1b);
++ /* Release PHY PSGMII reset */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
++
++ /* Poll for VCO PLL calibration finish - Malibu(QCA8075) */
++ ret = phy_read_mmd_poll_timeout(priv->psgmii_ethphy,
++ MDIO_MMD_PMAPMD,
++ 0x28, val,
++ (val & BIT(0)),
++ 10000, 1000000,
++ false);
++ if (ret) {
++ dev_err(priv->dev, "QCA807x PSGMII VCO calibration PLL not ready\n");
++ return ret;
++ }
++ mdelay(50);
++
++ /* Freeze PSGMII RX CDR */
++ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x2230);
++
++ /* Start PSGMIIPHY VCO PLL calibration */
++ ret = regmap_set_bits(priv->psgmii,
++ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1,
++ PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART);
++
++ /* Poll for PSGMIIPHY PLL calibration finish - Dakota(IPQ40xx) */
++ ret = regmap_read_poll_timeout(priv->psgmii,
++ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2,
++ val, val & PSGMIIPHY_REG_PLL_VCO_CALIB_READY,
++ 10000, 1000000);
++ if (ret) {
++ dev_err(priv->dev, "IPQ PSGMIIPHY VCO calibration PLL not ready\n");
++ return ret;
++ }
++ mdelay(50);
++
++ /* Release PSGMII RX CDR */
++ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x3230);
++ /* Release PSGMII RX 20bit */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5f);
++ mdelay(200);
++
++ return ret;
++}
++
++static void
++qca8k_switch_port_loopback_on_off(struct qca8k_priv *priv, int port, int on)
++{
++ u32 val = QCA8K_PORT_LOOKUP_LOOPBACK_EN;
++
++ if (on == 0)
++ val = 0;
++
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LOOPBACK_EN, val);
++}
++
++static int
++qca8k_wait_for_phy_link_state(struct phy_device *phy, int need_status)
++{
++ int a;
++ u16 status;
++
++ for (a = 0; a < 100; a++) {
++ status = phy_read(phy, MII_QCA8075_SSTATUS);
++ status &= QCA8075_PHY_SPEC_STATUS_LINK;
++ status = !!status;
++ if (status == need_status)
++ return 0;
++ mdelay(8);
++ }
++
++ return -1;
++}
++
++static void
++qca8k_phy_loopback_on_off(struct qca8k_priv *priv, struct phy_device *phy,
++ int sw_port, int on)
++{
++ if (on) {
++ phy_write(phy, MII_BMCR, BMCR_ANENABLE | BMCR_RESET);
++ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
++ qca8k_wait_for_phy_link_state(phy, 0);
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
++ phy_write(phy, MII_BMCR,
++ BMCR_SPEED1000 |
++ BMCR_FULLDPLX |
++ BMCR_LOOPBACK);
++ qca8k_wait_for_phy_link_state(phy, 1);
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port),
++ QCA8K_PORT_STATUS_SPEED_1000 |
++ QCA8K_PORT_STATUS_TXMAC |
++ QCA8K_PORT_STATUS_RXMAC |
++ QCA8K_PORT_STATUS_DUPLEX);
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
++ QCA8K_PORT_LOOKUP_STATE_FORWARD,
++ QCA8K_PORT_LOOKUP_STATE_FORWARD);
++ } else { /* off */
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
++ QCA8K_PORT_LOOKUP_STATE_DISABLED,
++ QCA8K_PORT_LOOKUP_STATE_DISABLED);
++ phy_write(phy, MII_BMCR, BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_RESET);
++ /* turn off the power of the PHYs so that unused
++ * ports do not raise links */
++ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
++ }
++}
++
++static void
++qca8k_phy_pkt_gen_prep(struct qca8k_priv *priv, struct phy_device *phy,
++ int pkts_num, int on)
++{
++ if (on) {
++ /* enable CRC checker and packets counters */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT,
++ QCA8075_MMD7_CNT_FRAME_CHK_EN | QCA8075_MMD7_CNT_SELFCLR);
++ qca8k_wait_for_phy_link_state(phy, 1);
++ /* packet number */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, pkts_num);
++ /* pkt size - 1504 bytes + 20 bytes */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_SIZE, 1504);
++ } else { /* off */
++ /* packet number */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, 0);
++ /* disable CRC checker and packet counter */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
++ /* disable traffic gen */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL, 0);
++ }
++}
++
++static void
++qca8k_wait_for_phy_pkt_gen_fin(struct qca8k_priv *priv, struct phy_device *phy)
++{
++ int val;
++ /* wait for all traffic end: 4096(pkt num)*1524(size)*8ns(125MHz)=49938us */
++ phy_read_mmd_poll_timeout(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
++ val, !(val & QCA8075_MMD7_PKT_GEN_INPROGR),
++ 50000, 1000000, true);
++}
++
++static void
++qca8k_start_phy_pkt_gen(struct phy_device *phy)
++{
++ /* start traffic gen */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
++ QCA8075_MMD7_PKT_GEN_START | QCA8075_MMD7_PKT_GEN_INPROGR);
++}
++
++static int
++qca8k_start_all_phys_pkt_gens(struct qca8k_priv *priv)
++{
++ struct phy_device *phy;
++ phy = phy_device_create(priv->bus, QCA8075_MDIO_BRDCST_PHY_ADDR,
++ 0, 0, NULL);
++ if (!phy) {
++ dev_err(priv->dev, "unable to create mdio broadcast PHY(0x%x)\n",
++ QCA8075_MDIO_BRDCST_PHY_ADDR);
++ return -ENODEV;
++ }
++
++ qca8k_start_phy_pkt_gen(phy);
++
++ phy_device_free(phy);
++ return 0;
++}
++
++static int
++qca8k_get_phy_pkt_gen_test_result(struct phy_device *phy, int pkts_num)
++{
++ u32 tx_ok, tx_error;
++ u32 rx_ok, rx_error;
++ u32 tx_ok_high16;
++ u32 rx_ok_high16;
++ u32 tx_all_ok, rx_all_ok;
++
++ /* check counters */
++ tx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_LO);
++ tx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_HI);
++ tx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_ERR_CNT);
++ rx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_LO);
++ rx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_HI);
++ rx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_ERR_CNT);
++ tx_all_ok = tx_ok + (tx_ok_high16 << 16);
++ rx_all_ok = rx_ok + (rx_ok_high16 << 16);
++
++ if (tx_all_ok < pkts_num)
++ return -1;
++ if (rx_all_ok < pkts_num)
++ return -2;
++ if (tx_error)
++ return -3;
++ if (rx_error)
++ return -4;
++ return 0; /* test is ok */
++}
++
++static
++void qca8k_phy_broadcast_write_on_off(struct qca8k_priv *priv,
++ struct phy_device *phy, int on)
++{
++ u32 val;
++
++ val = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE);
++
++ if (on == 0)
++ val &= ~QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
++ else
++ val |= QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
++
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE, val);
++}
++
++static int
++qca8k_test_dsa_port_for_errors(struct qca8k_priv *priv, struct phy_device *phy,
++ int port, int test_phase)
++{
++ int res = 0;
++ const int test_pkts_num = QCA8075_PKT_GEN_PKTS_COUNT;
++
++ if (test_phase == 1) { /* start test preps */
++ qca8k_phy_loopback_on_off(priv, phy, port, 1);
++ qca8k_switch_port_loopback_on_off(priv, port, 1);
++ qca8k_phy_broadcast_write_on_off(priv, phy, 1);
++ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 1);
++ } else if (test_phase == 2) {
++ /* wait for test results, collect it and cleanup */
++ qca8k_wait_for_phy_pkt_gen_fin(priv, phy);
++ res = qca8k_get_phy_pkt_gen_test_result(phy, test_pkts_num);
++ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 0);
++ qca8k_phy_broadcast_write_on_off(priv, phy, 0);
++ qca8k_switch_port_loopback_on_off(priv, port, 0);
++ qca8k_phy_loopback_on_off(priv, phy, port, 0);
++ }
++
++ return res;
++}
++
++static int
++qca8k_do_dsa_sw_ports_self_test(struct qca8k_priv *priv, int parallel_test)
++{
++ struct device_node *dn = priv->dev->of_node;
++ struct device_node *ports, *port;
++ struct device_node *phy_dn;
++ struct phy_device *phy;
++ int reg, err = 0, test_phase;
++ u32 tests_result = 0;
++
++ ports = of_get_child_by_name(dn, "ports");
++ if (!ports) {
++ dev_err(priv->dev, "no ports child node found\n");
++ return -EINVAL;
++ }
++
++ for (test_phase = 1; test_phase <= 2; test_phase++) {
++ if (parallel_test && test_phase == 2) {
++ err = qca8k_start_all_phys_pkt_gens(priv);
++ if (err)
++ goto error;
++ }
++ for_each_available_child_of_node(ports, port) {
++ err = of_property_read_u32(port, "reg", &reg);
++ if (err)
++ goto error;
++ if (reg >= QCA8K_NUM_PORTS) {
++ err = -EINVAL;
++ goto error;
++ }
++ phy_dn = of_parse_phandle(port, "phy-handle", 0);
++ if (phy_dn) {
++ phy = of_phy_find_device(phy_dn);
++ of_node_put(phy_dn);
++ if (phy) {
++ int result;
++ result = qca8k_test_dsa_port_for_errors(priv,
++ phy, reg, test_phase);
++ if (!parallel_test && test_phase == 1)
++ qca8k_start_phy_pkt_gen(phy);
++ put_device(&phy->mdio.dev);
++ if (test_phase == 2) {
++ tests_result <<= 1;
++ if (result)
++ tests_result |= 1;
++ }
++ }
++ }
++ }
++ }
++
++end:
++ of_node_put(ports);
++ qca8k_fdb_flush(priv);
++ return tests_result;
++error:
++ tests_result |= 0xf000;
++ goto end;
++}
++
++static int
++psgmii_vco_calibrate_and_test(struct dsa_switch *ds)
++{
++ int ret, a, test_result;
++ struct qca8k_priv *priv = ds->priv;
++
++ for (a = 0; a <= QCA8K_PSGMII_CALB_NUM; a++) {
++ ret = psgmii_vco_calibrate(priv);
++ if (ret)
++ return ret;
++ /* first we run serial test */
++ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 0);
++ /* and if it is ok then we run the test in parallel */
++ if (!test_result)
++ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 1);
++ if (!test_result) {
++ if (a > 0) {
++ dev_warn(priv->dev, "PSGMII work was stabilized after %d "
++ "calibration retries !\n", a);
++ }
++ return 0;
++ } else {
++ schedule();
++ if (a > 0 && a % 10 == 0) {
++ dev_err(priv->dev, "PSGMII work is unstable !!! "
++ "Let's try to wait a bit ... %d\n", a);
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(msecs_to_jiffies(a * 100));
++ }
++ }
++ }
++
++ panic("PSGMII work is unstable !!! "
++ "Repeated recalibration attempts did not help(0x%x) !\n",
++ test_result);
++
++ return -EFAULT;
++}
++
++static int
++ipq4019_psgmii_configure(struct dsa_switch *ds)
++{
++ struct qca8k_priv *priv = ds->priv;
++ int ret;
++
++ if (!priv->psgmii_calibrated) {
++ dev_info(ds->dev, "PSGMII calibration!\n");
++ ret = psgmii_vco_calibrate_and_test(ds);
++
++ ret = regmap_clear_bits(priv->psgmii, PSGMIIPHY_MODE_CONTROL,
++ PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M);
++ ret = regmap_write(priv->psgmii, PSGMIIPHY_TX_CONTROL,
++ PSGMIIPHY_TX_CONTROL_MAGIC_VALUE);
++
++ priv->psgmii_calibrated = true;
++
++ return ret;
++ }
++
++ return 0;
++}
++
++static void
++qca8k_phylink_ipq4019_mac_config(struct dsa_switch *ds, int port,
++ unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ struct qca8k_priv *priv = ds->priv;
++
++ switch (port) {
++ case 0:
++ /* CPU port, no configuration needed */
++ return;
++ case 1:
++ case 2:
++ case 3:
++ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
++ if (ipq4019_psgmii_configure(ds))
++ dev_err(ds->dev, "PSGMII configuration failed!\n");
++ return;
++ case 4:
++ case 5:
++ if (state->interface == PHY_INTERFACE_MODE_RGMII ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
++ regmap_set_bits(priv->regmap,
++ QCA8K_IPQ4019_REG_RGMII_CTRL,
++ QCA8K_IPQ4019_RGMII_CTRL_CLK);
++ }
++
++ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
++ if (ipq4019_psgmii_configure(ds))
++ dev_err(ds->dev, "PSGMII configuration failed!\n");
++ return;
++ default:
++ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
++ return;
++ }
++}
++
++static int
++qca8k_ipq4019_setup_port(struct dsa_switch *ds, int port)
++{
++ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
++ int ret;
++
++ /* CPU port gets connected to all user ports of the switch */
++ if (dsa_is_cpu_port(ds, port)) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
++ if (ret)
++ return ret;
++
++ /* Disable CPU ARP Auto-learning by default */
++ ret = regmap_clear_bits(priv->regmap,
++ QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LEARN);
++ if (ret)
++ return ret;
++ }
++
++ /* Individual user ports get connected to CPU port only */
++ if (dsa_is_user_port(ds, port)) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_MEMBER,
++ BIT(QCA8K_IPQ4019_CPU_PORT));
++ if (ret)
++ return ret;
++
++ /* Enable ARP Auto-learning by default */
++ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LEARN);
++ if (ret)
++ return ret;
++
++ /* For port based vlans to work we need to set the
++ * default egress vid
++ */
++ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
++ QCA8K_EGREES_VLAN_PORT_MASK(port),
++ QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
++ if (ret)
++ return ret;
++
++ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
++ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
++ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int
++qca8k_ipq4019_setup(struct dsa_switch *ds)
++{
++ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
++ int ret, i;
++
++ /* Make sure that port 0 is the cpu port */
++ if (!dsa_is_cpu_port(ds, QCA8K_IPQ4019_CPU_PORT)) {
++ dev_err(priv->dev, "port %d is not the CPU port",
++ QCA8K_IPQ4019_CPU_PORT);
++ return -EINVAL;
++ }
++
++ qca8k_ipq4019_setup_pcs(priv, &priv->pcs_port_0, 0);
++
++ /* Enable CPU Port */
++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
++ if (ret) {
++ dev_err(priv->dev, "failed enabling CPU port");
++ return ret;
++ }
++
++ /* Enable MIB counters */
++ ret = qca8k_mib_init(priv);
++ if (ret)
++ dev_warn(priv->dev, "MIB init failed");
++
++ /* Disable forwarding by default on all ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
++ QCA8K_PORT_LOOKUP_MEMBER, 0);
++ if (ret)
++ return ret;
++ }
++
++ /* Enable QCA header mode on the CPU port */
++ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_IPQ4019_CPU_PORT),
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
++ if (ret) {
++ dev_err(priv->dev, "failed enabling QCA header mode");
++ return ret;
++ }
++
++ /* Disable MAC by default on all ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ if (dsa_is_user_port(ds, i))
++ qca8k_port_set_status(priv, i, 0);
++ }
++
++ /* Forward all unknown frames to CPU port for Linux processing */
++ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)));
++ if (ret)
++ return ret;
++
++ /* Setup connection between CPU port & user ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ ret = qca8k_ipq4019_setup_port(ds, i);
++ if (ret)
++ return ret;
++ }
++
++ /* Setup our port MTUs to match power on defaults */
++ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
++ if (ret)
++ dev_warn(priv->dev, "failed setting MTU settings");
++
++ /* Flush the FDB table */
++ qca8k_fdb_flush(priv);
++
++ /* Set min and max ageing values supported */
++ ds->ageing_time_min = 7000;
++ ds->ageing_time_max = 458745000;
++
++ /* Set max number of LAGs supported */
++ ds->num_lag_ids = QCA8K_NUM_LAGS;
++
++ /* CPU port HW learning doesn't work correctly, so let DSA handle it */
++ ds->assisted_learning_on_cpu_port = true;
++
++ return 0;
++}
++
++static const struct dsa_switch_ops qca8k_ipq4019_switch_ops = {
++ .get_tag_protocol = qca8k_ipq4019_get_tag_protocol,
++ .setup = qca8k_ipq4019_setup,
++ .get_strings = qca8k_get_strings,
++ .get_ethtool_stats = qca8k_get_ethtool_stats,
++ .get_sset_count = qca8k_get_sset_count,
++ .set_ageing_time = qca8k_set_ageing_time,
++ .get_mac_eee = qca8k_get_mac_eee,
++ .set_mac_eee = qca8k_set_mac_eee,
++ .port_enable = qca8k_port_enable,
++ .port_disable = qca8k_port_disable,
++ .port_change_mtu = qca8k_port_change_mtu,
++ .port_max_mtu = qca8k_port_max_mtu,
++ .port_stp_state_set = qca8k_port_stp_state_set,
++ .port_bridge_join = qca8k_port_bridge_join,
++ .port_bridge_leave = qca8k_port_bridge_leave,
++ .port_fast_age = qca8k_port_fast_age,
++ .port_fdb_add = qca8k_port_fdb_add,
++ .port_fdb_del = qca8k_port_fdb_del,
++ .port_fdb_dump = qca8k_port_fdb_dump,
++ .port_mdb_add = qca8k_port_mdb_add,
++ .port_mdb_del = qca8k_port_mdb_del,
++ .port_mirror_add = qca8k_port_mirror_add,
++ .port_mirror_del = qca8k_port_mirror_del,
++ .port_vlan_filtering = qca8k_port_vlan_filtering,
++ .port_vlan_add = qca8k_port_vlan_add,
++ .port_vlan_del = qca8k_port_vlan_del,
++ .phylink_mac_select_pcs = qca8k_ipq4019_phylink_mac_select_pcs,
++ .phylink_get_caps = qca8k_ipq4019_phylink_get_caps,
++ .phylink_mac_config = qca8k_phylink_ipq4019_mac_config,
++ .phylink_mac_link_down = qca8k_phylink_ipq4019_mac_link_down,
++ .phylink_mac_link_up = qca8k_phylink_ipq4019_mac_link_up,
++ .port_lag_join = qca8k_port_lag_join,
++ .port_lag_leave = qca8k_port_lag_leave,
++};
++
++static const struct qca8k_match_data ipq4019 = {
++ .id = QCA8K_ID_IPQ4019,
++ .mib_count = QCA8K_QCA833X_MIB_COUNT,
++};
++
++static int
++qca8k_ipq4019_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct qca8k_priv *priv;
++ void __iomem *base, *psgmii;
++ struct device_node *np = dev->of_node, *mdio_np, *psgmii_ethphy_np;
++ int ret;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ priv->dev = dev;
++ priv->info = &ipq4019;
++
++ /* Start by setting up the register mapping */
++ base = devm_platform_ioremap_resource_byname(pdev, "base");
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ priv->regmap = devm_regmap_init_mmio(dev, base,
++ &qca8k_ipq4019_regmap_config);
++ if (IS_ERR(priv->regmap)) {
++ ret = PTR_ERR(priv->regmap);
++ dev_err(dev, "base regmap initialization failed, %d\n", ret);
++ return ret;
++ }
++
++ psgmii = devm_platform_ioremap_resource_byname(pdev, "psgmii_phy");
++ if (IS_ERR(psgmii))
++ return PTR_ERR(psgmii);
++
++ priv->psgmii = devm_regmap_init_mmio(dev, psgmii,
++ &qca8k_ipq4019_psgmii_phy_regmap_config);
++ if (IS_ERR(priv->psgmii)) {
++ ret = PTR_ERR(priv->psgmii);
++ dev_err(dev, "PSGMII regmap initialization failed, %d\n", ret);
++ return ret;
++ }
++
++ mdio_np = of_parse_phandle(np, "mdio", 0);
++ if (!mdio_np) {
++ dev_err(dev, "unable to get MDIO bus phandle\n");
++ of_node_put(mdio_np);
++ return -EINVAL;
++ }
++
++ priv->bus = of_mdio_find_bus(mdio_np);
++ of_node_put(mdio_np);
++ if (!priv->bus) {
++ dev_err(dev, "unable to find MDIO bus\n");
++ return -EPROBE_DEFER;
++ }
++
++ psgmii_ethphy_np = of_parse_phandle(np, "psgmii-ethphy", 0);
++ if (!psgmii_ethphy_np) {
++ dev_dbg(dev, "unable to get PSGMII eth PHY phandle\n");
++ of_node_put(psgmii_ethphy_np);
++ }
++
++ if (psgmii_ethphy_np) {
++ priv->psgmii_ethphy = of_phy_find_device(psgmii_ethphy_np);
++ of_node_put(psgmii_ethphy_np);
++ if (!priv->psgmii_ethphy) {
++ dev_err(dev, "unable to get PSGMII eth PHY\n");
++ return -ENODEV;
++ }
++ }
++
++ /* Check the detected switch id */
++ ret = qca8k_read_switch_id(priv);
++ if (ret)
++ return ret;
++
++ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
++ if (!priv->ds)
++ return -ENOMEM;
++
++ priv->ds->dev = dev;
++ priv->ds->num_ports = QCA8K_IPQ4019_NUM_PORTS;
++ priv->ds->priv = priv;
++ priv->ds->ops = &qca8k_ipq4019_switch_ops;
++ mutex_init(&priv->reg_mutex);
++ platform_set_drvdata(pdev, priv);
++
++ return dsa_register_switch(priv->ds);
++}
++
++static int
++qca8k_ipq4019_remove(struct platform_device *pdev)
++{
++ struct qca8k_priv *priv = dev_get_drvdata(&pdev->dev);
++ int i;
++
++ if (!priv)
++ return 0;
++
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++)
++ qca8k_port_set_status(priv, i, 0);
++
++ dsa_unregister_switch(priv->ds);
++
++ platform_set_drvdata(pdev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id qca8k_ipq4019_of_match[] = {
++ { .compatible = "qca,ipq4019-qca8337n", },
++ { /* sentinel */ },
++};
++
++static struct platform_driver qca8k_ipq4019_driver = {
++ .probe = qca8k_ipq4019_probe,
++ .remove = qca8k_ipq4019_remove,
++ .driver = {
++ .name = "qca8k-ipq4019",
++ .of_match_table = qca8k_ipq4019_of_match,
++ },
++};
++
++module_platform_driver(qca8k_ipq4019_driver);
++
++MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
++MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>, Robert Marko <robert.marko@sartura.hr>");
++MODULE_DESCRIPTION("Qualcomm IPQ4019 built-in switch driver");
++MODULE_LICENSE("GPL");
+--- a/drivers/net/dsa/qca/qca8k.h
++++ b/drivers/net/dsa/qca/qca8k.h
+@@ -19,7 +19,10 @@
+ #define QCA8K_ETHERNET_TIMEOUT 5
+
+ #define QCA8K_NUM_PORTS 7
++#define QCA8K_IPQ4019_NUM_PORTS 6
+ #define QCA8K_NUM_CPU_PORTS 2
++#define QCA8K_IPQ4019_NUM_CPU_PORTS 1
++#define QCA8K_IPQ4019_CPU_PORT 0
+ #define QCA8K_MAX_MTU 9000
+ #define QCA8K_NUM_LAGS 4
+ #define QCA8K_NUM_PORTS_FOR_LAG 4
+@@ -28,6 +31,7 @@
+ #define QCA8K_ID_QCA8327 0x12
+ #define PHY_ID_QCA8337 0x004dd036
+ #define QCA8K_ID_QCA8337 0x13
++#define QCA8K_ID_IPQ4019 0x14
+
+ #define QCA8K_QCA832X_MIB_COUNT 39
+ #define QCA8K_QCA833X_MIB_COUNT 41
+@@ -265,6 +269,7 @@
+ #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+ #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+ #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
++#define QCA8K_PORT_LOOKUP_LOOPBACK_EN BIT(21)
+ #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+ #define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+@@ -341,6 +346,53 @@
+ #define MII_ATH_MMD_ADDR 0x0d
+ #define MII_ATH_MMD_DATA 0x0e
+
++/* IPQ4019 PSGMII PHY registers */
++#define QCA8K_IPQ4019_REG_RGMII_CTRL 0x004
++#define QCA8K_IPQ4019_RGMII_CTRL_RGMII_RXC GENMASK(1, 0)
++#define QCA8K_IPQ4019_RGMII_CTRL_RGMII_TXC GENMASK(9, 8)
++/* Some kind of CLK selection
++ * 0: gcc_ess_dly2ns
++ * 1: gcc_ess_clk
++ */
++#define QCA8K_IPQ4019_RGMII_CTRL_CLK BIT(10)
++#define QCA8K_IPQ4019_RGMII_CTRL_DELAY_RMII0 GENMASK(17, 16)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII0_REF_CLK BIT(18)
++#define QCA8K_IPQ4019_RGMII_CTRL_DELAY_RMII1 GENMASK(20, 19)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII1_REF_CLK BIT(21)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII0_MASTER_EN BIT(24)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII1_MASTER_EN BIT(25)
++
++#define PSGMIIPHY_MODE_CONTROL 0x1b4
++#define PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M BIT(0)
++#define PSGMIIPHY_TX_CONTROL 0x288
++#define PSGMIIPHY_TX_CONTROL_MAGIC_VALUE 0x8380
++#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1 0x9c
++#define PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART BIT(14)
++#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2 0xa0
++#define PSGMIIPHY_REG_PLL_VCO_CALIB_READY BIT(0)
++
++#define QCA8K_PSGMII_CALB_NUM 100
++#define MII_QCA8075_SSTATUS 0x11
++#define QCA8075_PHY_SPEC_STATUS_LINK BIT(10)
++#define QCA8075_MMD7_CRC_AND_PKTS_COUNT 0x8029
++#define QCA8075_MMD7_PKT_GEN_PKT_NUMB 0x8021
++#define QCA8075_MMD7_PKT_GEN_PKT_SIZE 0x8062
++#define QCA8075_MMD7_PKT_GEN_CTRL 0x8020
++#define QCA8075_MMD7_CNT_SELFCLR BIT(1)
++#define QCA8075_MMD7_CNT_FRAME_CHK_EN BIT(0)
++#define QCA8075_MMD7_PKT_GEN_START BIT(13)
++#define QCA8075_MMD7_PKT_GEN_INPROGR BIT(15)
++#define QCA8075_MMD7_IG_FRAME_RECV_CNT_HI 0x802a
++#define QCA8075_MMD7_IG_FRAME_RECV_CNT_LO 0x802b
++#define QCA8075_MMD7_IG_FRAME_ERR_CNT 0x802c
++#define QCA8075_MMD7_EG_FRAME_RECV_CNT_HI 0x802d
++#define QCA8075_MMD7_EG_FRAME_RECV_CNT_LO 0x802e
++#define QCA8075_MMD7_EG_FRAME_ERR_CNT 0x802f
++#define QCA8075_MMD7_MDIO_BRDCST_WRITE 0x8028
++#define QCA8075_MMD7_MDIO_BRDCST_WRITE_EN BIT(15)
++#define QCA8075_MDIO_BRDCST_PHY_ADDR 0x1f
++#define QCA8075_PKT_GEN_PKTS_COUNT 4096
++
+ enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+@@ -466,6 +518,10 @@ struct qca8k_priv {
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
+ struct qca8k_led ports_led[QCA8K_LED_COUNT];
++ /* IPQ4019 specific */
++ struct regmap *psgmii;
++ struct phy_device *psgmii_ethphy;
++ bool psgmii_calibrated;
+ };
+
+ struct qca8k_mib_desc {
--- /dev/null
+From 19c507c3fe4a6fc60317dcae2c55de452aecb7d5 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 1 Nov 2021 18:15:04 +0100
+Subject: [PATCH] arm: dts: ipq4019: add switch node
+
+Since the built-in IPQ40xx switch now has a driver, add the required node
+for it to work.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 76 +++++++++++++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -594,6 +594,82 @@
+ status = "disabled";
+ };
+
++ switch: switch@c000000 {
++ compatible = "qca,ipq4019-qca8337n";
++ reg = <0xc000000 0x80000>, <0x98000 0x800>;
++ reg-names = "base", "psgmii_phy";
++ resets = <&gcc ESS_PSGMII_ARES>;
++ reset-names = "psgmii_rst";
++ mdio = <&mdio>;
++ psgmii-ethphy = <&psgmiiphy>;
++
++ status = "disabled";
++
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ port@0 { /* MAC0 */
++ reg = <0>;
++ label = "cpu";
++ ethernet = <&gmac>;
++ phy-mode = "internal";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ asym-pause;
++ };
++ };
++
++ swport1: port@1 { /* MAC1 */
++ reg = <1>;
++ label = "lan1";
++ phy-handle = <&ethphy0>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport2: port@2 { /* MAC2 */
++ reg = <2>;
++ label = "lan2";
++ phy-handle = <&ethphy1>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport3: port@3 { /* MAC3 */
++ reg = <3>;
++ label = "lan3";
++ phy-handle = <&ethphy2>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport4: port@4 { /* MAC4 */
++ reg = <4>;
++ label = "lan4";
++ phy-handle = <&ethphy3>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport5: port@5 { /* MAC5 */
++ reg = <5>;
++ label = "wan";
++ phy-handle = <&ethphy4>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++ };
++ };
++
+ gmac: ethernet@c080000 {
+ compatible = "qcom,ipq4019-ess-edma";
+ reg = <0xc080000 0x8000>;
--- /dev/null
+From 5ac078c8fe18f3e8318547b8ed0ed782730c5039 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Sat, 10 Feb 2024 22:28:27 +0100
+Subject: [PATCH] ARM: dts: qcom: ipq4019: add QCA8075 PHY Package nodes
+
+Add QCA8075 PHY package nodes. The PHY nodes that were previously
+defined never worked and never had a driver to correctly set up these
+PHYs. Now that we have a proper driver, add the PHY package node and
+set the default TX driver strength of 300 mW, following the IPQ4019
+SoC specification.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 35 +++++++++++++++---
+ 1 file changed, 22 insertions(+), 13 deletions(-)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -725,24 +725,33 @@
+ reg = <0x90000 0x64>;
+ status = "disabled";
+
+- ethphy0: ethernet-phy@0 {
++ ethernet-phy-package@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "qcom,qca8075-package";
+ reg = <0>;
+- };
+-
+- ethphy1: ethernet-phy@1 {
+- reg = <1>;
+- };
+
+- ethphy2: ethernet-phy@2 {
+- reg = <2>;
+- };
+-
+- ethphy3: ethernet-phy@3 {
+- reg = <3>;
+- };
++ qcom,tx-drive-strength-milliwatt = <300>;
+
+- ethphy4: ethernet-phy@4 {
+- reg = <4>;
++ ethphy0: ethernet-phy@0 {
++ reg = <0>;
++ };
++
++ ethphy1: ethernet-phy@1 {
++ reg = <1>;
++ };
++
++ ethphy2: ethernet-phy@2 {
++ reg = <2>;
++ };
++
++ ethphy3: ethernet-phy@3 {
++ reg = <3>;
++ };
++
++ ethphy4: ethernet-phy@4 {
++ reg = <4>;
++ };
+ };
+ };
+
--- /dev/null
+From 79b38b9f85da868ca59b66715c20aa55104b640b Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Fri, 2 Oct 2020 10:43:26 +0200
+Subject: [PATCH] arm: dts: ipq4019: QCA807x properties
+
+This adds the necessary DT properties for the QCA807x PHYs to the
+IPQ4019 DTSI.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -752,6 +752,10 @@
+ ethphy4: ethernet-phy@4 {
+ reg = <4>;
+ };
++
++ psgmiiphy: psgmii-phy@5 {
++ reg = <5>;
++ };
+ };
+ };
+
--- /dev/null
+From d0055b03d9c8d48ad2b971821989b09ba95c39f8 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Sun, 17 Sep 2023 20:18:31 +0200
+Subject: [PATCH] net: qualcomm: ipqess: fix TX timeout errors
+
+Currently, the logic to handle NAPI TX completion is flawed and, in the
+long run under load, causes TX timeout errors, with the queue no longer
+able to handle any new packets.
+
+There are 2 main causes of this:
+- wrongly incrementing the packet done value
+- handling the tx_ring tail twice
+
+ipqess_tx_unmap_and_free may return 2 kinds of values:
+- 0: we are handling the first or a middle descriptor of the packet
+- packet len: we are at the last descriptor of the packet
+
+The done value was wrongly incremented also for the first and
+intermediate descriptors of a packet, causing panics and TX timeouts by
+communicating to the kernel an inconsistent count of handled packets
+that did not match the expected one.
+
+The tx_ring tail was handled twice per ipqess_tx_complete run, resulting
+again in the done value being incremented wrongly, and also in problems
+with idx handling, as descriptors for some packets were actually
+skipped.
+
+Rework the loop logic to fix these 2 problems and also add some comments
+to make the ipqess_tx_unmap_and_free return value easier to understand.
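+
+For clarity, the reworked completion loop has roughly this shape
+(illustration only, mirroring the hunk below):
+
+    while ((tx_ring->tail != tail) && (done < budget)) {
+            ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
+                                           &tx_ring->buf[tx_ring->tail]);
+            /* ret == 0: first or middle descriptor of a packet;
+             * ret  > 0: last descriptor, ret is the packet length.
+             * Only a fully completed packet counts towards the budget.
+             */
+            if (ret) {
+                    total += ret;
+                    done++;
+            }
+
+            tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
+    }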
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -453,13 +453,22 @@ static int ipqess_tx_complete(struct ipq
+ tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
+ tail &= IPQESS_TPD_CONS_IDX_MASK;
+
+- do {
++ while ((tx_ring->tail != tail) && (done < budget)) {
+ ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
+ &tx_ring->buf[tx_ring->tail]);
+- tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++ /* ipqess_tx_unmap_and_free may return 2 kinds of values:
++ * - 0: we are handling the first or a middle descriptor of the packet
++ * - packet len: we are at the last descriptor of the packet
++ * Increment the total bytes handled and the packet done count only if
++ * we are handling the last descriptor of the packet.
++ */
++ if (ret) {
++ total += ret;
++ done++;
++ }
+
+- total += ret;
+- } while ((++done < budget) && (tx_ring->tail != tail));
++ tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++ }
+
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
+ tx_ring->tail);
--- /dev/null
+From: Christian Lamparter <chunkeey@googlemail.com>
+Subject: SoC: add qualcomm syscon
+--- a/drivers/soc/qcom/Kconfig
++++ b/drivers/soc/qcom/Kconfig
+@@ -248,4 +248,11 @@ config QCOM_ICC_BWMON
+ the fixed bandwidth votes from cpufreq (CPU nodes) thus achieve high
+ memory throughput even with lower CPU frequencies.
+
++config QCOM_TCSR
++ tristate "QCOM Top Control and Status Registers"
++ depends on ARCH_QCOM
++ help
++ Say y here to enable TCSR support. The TCSR provides control
++ functions for various peripherals.
++
+ endmenu
+--- a/drivers/soc/qcom/Makefile
++++ b/drivers/soc/qcom/Makefile
+@@ -29,3 +29,4 @@ obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+ obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
+ obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
+ obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
++obj-$(CONFIG_QCOM_TCSR) += qcom_tcsr.o
+--- /dev/null
++++ b/drivers/soc/qcom/qcom_tcsr.c
+@@ -0,0 +1,98 @@
++/*
++ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++
++#define TCSR_USB_PORT_SEL 0xb0
++#define TCSR_USB_HSPHY_CONFIG 0xC
++
++#define TCSR_ESS_INTERFACE_SEL_OFFSET 0x0
++#define TCSR_ESS_INTERFACE_SEL_MASK 0xf
++
++#define TCSR_WIFI0_GLB_CFG_OFFSET 0x0
++#define TCSR_WIFI1_GLB_CFG_OFFSET 0x4
++#define TCSR_PNOC_SNOC_MEMTYPE_M0_M2 0x4
++
++static int tcsr_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ const struct device_node *node = pdev->dev.of_node;
++ void __iomem *base;
++ u32 val;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ if (!of_property_read_u32(node, "qcom,usb-ctrl-select", &val)) {
++ dev_info(&pdev->dev, "setting usb port select = %d\n", val);
++ writel(val, base + TCSR_USB_PORT_SEL);
++ }
++
++ if (!of_property_read_u32(node, "qcom,usb-hsphy-mode-select", &val)) {
++ dev_info(&pdev->dev, "setting usb hs phy mode select = %x\n", val);
++ writel(val, base + TCSR_USB_HSPHY_CONFIG);
++ }
++
++ if (!of_property_read_u32(node, "qcom,ess-interface-select", &val)) {
++ u32 tmp = 0;
++ dev_info(&pdev->dev, "setting ess interface select = %x\n", val);
++ tmp = readl(base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++ tmp = tmp & (~TCSR_ESS_INTERFACE_SEL_MASK);
++ tmp = tmp | (val&TCSR_ESS_INTERFACE_SEL_MASK);
++ writel(tmp, base + TCSR_ESS_INTERFACE_SEL_OFFSET);
++ }
++
++ if (!of_property_read_u32(node, "qcom,wifi_glb_cfg", &val)) {
++ dev_info(&pdev->dev, "setting wifi_glb_cfg = %x\n", val);
++ writel(val, base + TCSR_WIFI0_GLB_CFG_OFFSET);
++ writel(val, base + TCSR_WIFI1_GLB_CFG_OFFSET);
++ }
++
++ if (!of_property_read_u32(node, "qcom,wifi_noc_memtype_m0_m2", &val)) {
++ dev_info(&pdev->dev,
++ "setting wifi_noc_memtype_m0_m2 = %x\n", val);
++ writel(val, base + TCSR_PNOC_SNOC_MEMTYPE_M0_M2);
++ }
++
++ return 0;
++}
++
++static const struct of_device_id tcsr_dt_match[] = {
++ { .compatible = "qcom,tcsr", },
++ { },
++};
++
++MODULE_DEVICE_TABLE(of, tcsr_dt_match);
++
++static struct platform_driver tcsr_driver = {
++ .driver = {
++ .name = "tcsr",
++ .owner = THIS_MODULE,
++ .of_match_table = tcsr_dt_match,
++ },
++ .probe = tcsr_probe,
++};
++
++module_platform_driver(tcsr_driver);
++
++MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
++MODULE_DESCRIPTION("QCOM TCSR driver");
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/include/dt-bindings/soc/qcom,tcsr.h
+@@ -0,0 +1,48 @@
++/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 and
++ * only version 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ */
++#ifndef __DT_BINDINGS_QCOM_TCSR_H
++#define __DT_BINDINGS_QCOM_TCSR_H
++
++#define TCSR_USB_SELECT_USB3_P0 0x1
++#define TCSR_USB_SELECT_USB3_P1 0x2
++#define TCSR_USB_SELECT_USB3_DUAL 0x3
++
++/* IPQ40xx HS PHY Mode Select */
++#define TCSR_USB_HSPHY_HOST_MODE 0x00E700E7
++#define TCSR_USB_HSPHY_DEVICE_MODE 0x00C700E7
++
++/* IPQ40xx ess interface mode select */
++#define TCSR_ESS_PSGMII 0
++#define TCSR_ESS_PSGMII_RGMII5 1
++#define TCSR_ESS_PSGMII_RMII0 2
++#define TCSR_ESS_PSGMII_RMII1 4
++#define TCSR_ESS_PSGMII_RMII0_RMII1 6
++#define TCSR_ESS_PSGMII_RGMII4 9
++
++/*
++ * IPQ40xx WiFi Global Config
++ * Bit 30:AXID_EN
++ * Enable AXI master bus Axid translating to confirm all txn submitted by order
++ * Bit 24: Use locally generated socslv_wxi_bvalid
++ * 1: use locally generate socslv_wxi_bvalid for performance.
++ * 0: use SNOC socslv_wxi_bvalid.
++ */
++#define TCSR_WIFI_GLB_CFG 0x41000000
++
++/* IPQ40xx MEM_TYPE_SEL_M0_M2 Select Bit 26:24 - 2 NORMAL */
++#define TCSR_WIFI_NOC_MEMTYPE_M0_M2 0x02222222
++
++/* TCSR A/B REG */
++#define IPQ806X_TCSR_REG_A_ADM_CRCI_MUX_SEL 0
++#define IPQ806X_TCSR_REG_B_ADM_CRCI_MUX_SEL 1
++
++#endif
--- /dev/null
+From c668fd2c4d9ad4a510fd214a2da83bd9b67a2508 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Sun, 13 Aug 2023 18:13:08 +0200
+Subject: [PATCH] Revert "firmware: qcom_scm: Clear download bit during reboot"
+
+This reverts commit a3ea89b5978dbcd0fa55f675c5a1e04611093709.
+
+It is breaking reboot on IPQ4019 boards, so revert until a proper fix
+is found.
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/firmware/qcom_scm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -1466,7 +1466,8 @@ static int qcom_scm_probe(struct platfor
+ static void qcom_scm_shutdown(struct platform_device *pdev)
+ {
+ /* Clean shutdown, disable download mode to allow normal restart */
+- qcom_scm_set_download_mode(false);
++ if (download_mode)
++ qcom_scm_set_download_mode(false);
+ }
+
+ static const struct of_device_id qcom_scm_dt_match[] = {
--- /dev/null
+From: John Crispin <blogic@openwrt.org>
+Date: Fri, 3 Aug 2012 10:27:25 +0200
+Subject: [PATCH 04/36] MIPS: lantiq: add atm hack
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+--- a/include/uapi/linux/atm.h
++++ b/include/uapi/linux/atm.h
+@@ -131,8 +131,14 @@
+ #define ATM_ABR 4
+ #define ATM_ANYCLASS 5 /* compatible with everything */
+
++#define ATM_VBR_NRT ATM_VBR
++#define ATM_VBR_RT 6
++#define ATM_UBR_PLUS 7
++#define ATM_GFR 8
++
+ #define ATM_MAX_PCR -1 /* maximum available PCR */
+
++
+ struct atm_trafprm {
+ unsigned char traffic_class; /* traffic class (ATM_UBR, ...) */
+ int max_pcr; /* maximum PCR in cells per second */
+@@ -155,6 +161,9 @@ struct atm_trafprm {
+ unsigned int adtf :10; /* ACR Decrease Time Factor (10-bit) */
+ unsigned int cdf :3; /* Cutoff Decrease Factor (3-bit) */
+ unsigned int spare :9; /* spare bits */
++ int scr; /* sustained rate in cells per second */
++ int mbs; /* maximum burst size (MBS) in cells */
++ int cdv; /* Cell delay variation */
+ };
+
+ struct atm_qos {
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -141,7 +141,7 @@ static void *vcc_seq_next(struct seq_fil
+ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
+ {
+ static const char *const class_name[] = {
+- "off", "UBR", "CBR", "VBR", "ABR"};
++ "off","UBR","CBR","NTR-VBR","ABR","ANY","RT-VBR","UBR+","GFR"};
+ static const char *const aal_name[] = {
+ "---", "1", "2", "3/4", /* 0- 3 */
+ "???", "5", "???", "???", /* 4- 7 */
--- /dev/null
+From: Subhra Banerjee <subhrax.banerjee@intel.com>
+Date: Fri, 31 Aug 2018 12:01:19 +0530
+Subject: [PATCH] UGW_SW-29163: ATM oam support
+
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -2953,6 +2953,22 @@ char *ppp_dev_name(struct ppp_channel *c
+ return name;
+ }
+
++/*
++ * Return the PPP device interface pointer
++ */
++struct net_device *ppp_device(struct ppp_channel *chan)
++{
++ struct channel *pch = chan->ppp;
++ struct net_device *dev = NULL;
++
++ if (pch) {
++ read_lock_bh(&pch->upl);
++ if (pch->ppp && pch->ppp->dev)
++ dev = pch->ppp->dev;
++ read_unlock_bh(&pch->upl);
++ }
++ return dev;
++}
+
+ /*
+ * Disconnect a channel from the generic layer.
+@@ -3599,6 +3615,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
+ EXPORT_SYMBOL(ppp_channel_index);
+ EXPORT_SYMBOL(ppp_unit_number);
+ EXPORT_SYMBOL(ppp_dev_name);
++EXPORT_SYMBOL(ppp_device);
+ EXPORT_SYMBOL(ppp_input);
+ EXPORT_SYMBOL(ppp_input_error);
+ EXPORT_SYMBOL(ppp_output_wakeup);
+--- a/include/linux/ppp_channel.h
++++ b/include/linux/ppp_channel.h
+@@ -76,6 +76,9 @@ extern int ppp_unit_number(struct ppp_ch
+ /* Get the device name associated with a channel, or NULL if none */
+ extern char *ppp_dev_name(struct ppp_channel *);
+
++/* Get the device pointer associated with a channel, or NULL if none */
++extern struct net_device *ppp_device(struct ppp_channel *);
++
+ /*
+ * SMP locking notes:
+ * The channel code must ensure that when it calls ppp_unregister_channel,
+--- a/net/atm/Kconfig
++++ b/net/atm/Kconfig
+@@ -56,6 +56,12 @@ config ATM_MPOA
+ subnetwork boundaries. These shortcut connections bypass routers
+ enhancing overall network performance.
+
++config ATM_MPOA_INTEL_DSL_PHY_SUPPORT
++ bool "Intel DSL Phy MPOA support"
++ depends on ATM && INET && ATM_MPOA!=n
++ help
++ Add support for Intel DSL Phy ATM MPOA
++
+ config ATM_BR2684
+ tristate "RFC1483/2684 Bridged protocols"
+ depends on ATM && INET
+--- a/net/atm/br2684.c
++++ b/net/atm/br2684.c
+@@ -598,6 +598,11 @@ static int br2684_regvcc(struct atm_vcc
+ atmvcc->push = br2684_push;
+ atmvcc->pop = br2684_pop;
+ atmvcc->release_cb = br2684_release_cb;
++#if IS_ENABLED(CONFIG_ATM_MPOA_INTEL_DSL_PHY_SUPPORT)
++ if (atm_hook_mpoa_setup) /* IPoA or EoA w/o FCS */
++ atm_hook_mpoa_setup(atmvcc, brdev->payload == p_routed ? 3 : 0,
++ brvcc->encaps == BR2684_ENCAPS_LLC ? 1 : 0, net_dev);
++#endif
+ atmvcc->owner = THIS_MODULE;
+
+ /* initialize netdev carrier state */
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -137,6 +137,11 @@ static struct proto vcc_proto = {
+ .release_cb = vcc_release_cb,
+ };
+
++#if IS_ENABLED(CONFIG_ATM_MPOA_INTEL_DSL_PHY_SUPPORT)
++void (*atm_hook_mpoa_setup)(struct atm_vcc *, int, int, struct net_device *) = NULL;
++EXPORT_SYMBOL(atm_hook_mpoa_setup);
++#endif
++
+ int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern)
+ {
+ struct sock *sk;
+--- a/net/atm/common.h
++++ b/net/atm/common.h
+@@ -53,4 +53,6 @@ int svc_change_qos(struct atm_vcc *vcc,s
+
+ void atm_dev_release_vccs(struct atm_dev *dev);
+
++extern void (*atm_hook_mpoa_setup)(struct atm_vcc *, int, int, struct net_device *);
++
+ #endif
+--- a/net/atm/mpc.c
++++ b/net/atm/mpc.c
+@@ -31,6 +31,7 @@
+ /* Modular too */
+ #include <linux/module.h>
+
++#include "common.h"
+ #include "lec.h"
+ #include "mpc.h"
+ #include "resources.h"
+@@ -645,6 +646,10 @@ static int atm_mpoa_vcc_attach(struct at
+ vcc->proto_data = mpc->dev;
+ vcc->push = mpc_push;
+
++#if IS_ENABLED(CONFIG_ATM_MPOA_INTEL_DSL_PHY_SUPPORT)
++ if (atm_hook_mpoa_setup) /* IPoA, LLC */
++ atm_hook_mpoa_setup(vcc, 3, 1, mpc->dev);
++#endif
+ return 0;
+ }
+
+--- a/net/atm/pppoatm.c
++++ b/net/atm/pppoatm.c
+@@ -422,6 +422,12 @@ static int pppoatm_assign_vcc(struct atm
+ atmvcc->user_back = pvcc;
+ atmvcc->push = pppoatm_push;
+ atmvcc->pop = pppoatm_pop;
++#if IS_ENABLED(CONFIG_ATM_MPOA_INTEL_DSL_PHY_SUPPORT)
++ if (atm_hook_mpoa_setup) /* PPPoA */
++ atm_hook_mpoa_setup(atmvcc, 2,
++ pvcc->encaps == e_llc ? 1 : 0,
++ ppp_device(&pvcc->chan));
++#endif
+ atmvcc->release_cb = pppoatm_release_cb;
+ __module_get(THIS_MODULE);
+ atmvcc->owner = THIS_MODULE;