qualcommbe: ipq95xx: Add initial support for new target
author Christian Marangi <ansuelsmth@gmail.com>
Wed, 4 Dec 2024 00:53:47 +0000 (01:53 +0100)
committer Christian Marangi <ansuelsmth@gmail.com>
Wed, 4 Dec 2024 01:02:18 +0000 (02:02 +0100)
Add initial support for the new target, starting with ethernet support
based on the pending upstream patches for PCS UNIPHY, PPE and EDMA.

Only initramfs images are currently working, as support for the new
SPI/NAND implementation, USB, CPUFreq and other devices is still
unfinished and needs to be evaluated.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
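
As a rough guide for trying this out, a minimal build sketch from an
OpenWrt buildroot checkout follows. The CONFIG_TARGET_* symbols are
assumptions derived from the usual <board>/<subtarget> naming implied
by the Makefiles below, not values taken from this commit:

  # Select the new target and force an initramfs image (only initramfs
  # boots for now, per the note above):
  cat >> .config <<'EOF'
  CONFIG_TARGET_qualcommbe=y
  CONFIG_TARGET_qualcommbe_ipq95xx=y
  CONFIG_TARGET_ROOTFS_INITRAMFS=y
  EOF
  make defconfig
  make -j"$(nproc)"
  # Build artifacts land under bin/targets/qualcommbe/ipq95xx/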
90 files changed:
target/linux/qualcommbe/Makefile [new file with mode: 0644]
target/linux/qualcommbe/config-6.6 [new file with mode: 0644]
target/linux/qualcommbe/image/Makefile [new file with mode: 0644]
target/linux/qualcommbe/image/ipq95xx.mk [new file with mode: 0644]
target/linux/qualcommbe/ipq95xx/base-files/etc/board.d/02_network [new file with mode: 0644]
target/linux/qualcommbe/ipq95xx/base-files/lib/upgrade/platform.sh [new file with mode: 0644]
target/linux/qualcommbe/ipq95xx/config-default [new file with mode: 0644]
target/linux/qualcommbe/ipq95xx/target.mk [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/001-v6.8-arm64-dts-qcom-ipq9574-Add-common-RDP-dtsi-file.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/002-01-v6.11-dt-bindings-clock-add-qca8386-qca8084-clock-and-rese.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/002-02-v6.11-clk-qcom-add-clock-controller-driver-for-qca8386-qca.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/003-v6.11-arm64-dts-qcom-ipq9574-add-MDIO-bus.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/004-01-v6.10-clk-qcom-clk-rcg-introduce-support-for-multiple-conf.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/004-02-v6.10-clk-qcom-clk-rcg2-add-support-for-rcg2-freq-multi-op.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/005-v6.11-clk-qcom-branch-Add-clk_branch2_prepare_ops.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/006-v6.11-clk-qcom-common-commonize-qcom_cc_really_probe.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/007-v6.11-net-phy-introduce-core-support-for-phy-mode-10g-qxgm.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/008-v6.9-clk-Provide-managed-helper-to-get-and-enable-bulk-cl.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/009-v6.13-clk-Provide-devm_clk_bulk_get_all_enabled-helper.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/010-01-v6.11-dt-bindings-clock-Add-PCIe-pipe-related-clocks-for-I.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/010-02-v6.11-clk-qcom-gcc-ipq9574-Add-PCIe-pipe-clocks.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/011-v6.11-arm64-dts-qcom-ipq9574-drop-power-domain-cells-prope.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-01-v6.11-interconnect-icc-clk-Specify-master-slave-ids.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-02-v6.11-dt-bindings-interconnect-Add-Qualcomm-IPQ9574-suppor.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-03-v6.11-interconnect-icc-clk-Add-devm_icc_clk_register.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-04-v6.11-clk-qcom-common-Add-interconnect-clocks-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-05-v6.11-clk-qcom-ipq9574-Use-icc-clk-for-enabling-NoC-relate.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/012-06-v6.11-arm64-dts-qcom-ipq9574-Add-icc-provider-ability-to-g.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/100-02-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/100-03-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/100-04-mtd-nand-Add-qpic_common-API-file.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/100-05-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/100-06-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/101-arm64-dts-qcom-ipq9574-Add-SPI-nand-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/102-arm64-dts-qcom-ipq9574-Disable-eMMC-node.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-01-dt-bindings-net-Document-Qualcomm-QCA8084-PHY-packag.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-02-net-phy-qca808x-Add-QCA8084-ethernet-phy-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-03-net-phy-qca808x-Add-config_init-function-for-QCA8084.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-04-net-phy-qca808x-Add-link_change_notify-function-for-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-05-net-phy-qca808x-Add-register-access-support-routines.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-06-net-phy-qca808x-Add-QCA8084-probe-function.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-07-net-phy-qca808x-Add-package-clocks-and-resets-for-QC.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-08-net-phy-qca808x-Add-QCA8084-package-init-function.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-11-net-pcs-Add-driver-for-Qualcomm-IPQ-UNIPHY-PCS.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-12-net-pcs-Add-10GBASER-interface-mode-support-to-IPQ-U.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-13-net-pcs-Add-2500BASEX-interface-mode-support-to-IPQ-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-14-net-pcs-Add-1000BASEX-interface-mode-support-to-IPQ-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-15-net-pcs-Add-10G_QXGMII-interface-mode-support-to-IPQ.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-18-net-ethernet-qualcomm-Add-PPE-driver-for-IPQ9574-SoC.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-19-net-ethernet-qualcomm-Add-PPE-buffer-manager-configu.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-20-net-ethernet-qualcomm-Add-PPE-queue-management-confi.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-21-net-ethernet-qualcomm-Add-PPE-scheduler-config.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-22-net-ethernet-qualcomm-Initialize-PPE-queue-settings.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-23-net-ethernet-qualcomm-Add-PPE-service-code-config.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-24-net-ethernet-qualcomm-Add-PPE-port-control-config.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-25-net-ethernet-qualcomm-Add-PPE-RSS-hash-config.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-26-net-ethernet-qualcomm-Add-PPE-queue-map-function.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-27-net-ethernet-qualcomm-Add-PPE-L2-bridge-initializati.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-28-net-ethernet-qualcomm-Add-PPE-debugfs-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-32-net-ethernet-qualcomm-Add-phylink-support-for-PPE-MA.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-33-net-ethernet-qualcomm-Add-PPE-port-MAC-MIB-statistic.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-34-net-ethernet-qualcomm-Add-PPE-port-MAC-address-and-E.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-35-net-ethernet-qualcomm-Add-API-to-configure-PPE-port-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-38-net-ethernet-qualcomm-Add-EDMA-support-for-QCOM-IPQ9.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-39-net-ethernet-qualcomm-Add-netdevice-support-for-QCOM.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-40-net-ethernet-qualcomm-Add-Rx-Ethernet-DMA-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-41-net-ethernet-qualcomm-Add-Tx-Ethernet-DMA-support.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-42-net-ethernet-qualcomm-Add-miscellaneous-error-interr.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-43-net-ethernet-qualcomm-Add-ethtool-support-for-EDMA.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-44-net-ethernet-qualcomm-Add-module-parameters-for-driv.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/103-45-net-ethernet-qualcomm-Add-sysctl-for-RPS-bitmap.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/104-01-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/104-02-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/104-04-arm64-dts-qcom-Add-CMN-PLL-node-for-IPQ9574-SoC.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/104-05-arm64-dts-qcom-Update-IPQ9574-xo_board_clk-to-use-fi.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-01-clk-qcom-clk-alpha-pll-Add-NSS-HUAYRA-ALPHA-PLL-supp.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-02-dt-bindings-clock-gcc-ipq9574-Add-definition-for-GPL.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-03-clk-qcom-gcc-ipq9574-Add-support-for-gpll0_out_aux-c.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-04-dt-bindings-clock-Add-ipq9574-NSSCC-clock-and-reset-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-05-clk-qcom-Add-NSS-clock-Controller-driver-for-IPQ9574.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/105-06-arm64-dts-qcom-ipq9574-Add-nsscc-node.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/200-01-arm64-dts-qcom-ipq9574-Add-PCS-UNIPHY-device-tree-su.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/200-02-arm64-dts-qcom-Add-IPQ9574-MDIO-device-node.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/200-03-arm64-dts-qcom-Add-IPQ9574-PPE-base-device-node.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/200-04-arm64-dts-qcom-Add-EDMA-node-for-IPQ9574.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/200-05-arm64-dts-qcom-Add-IPQ9574-RDP433-port-node.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/300-dt-bindings-clock-Add-clock-ID-for-IPQ-PCS-UNIPHY.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/301-net-ethernet-qualcomm-Add-support-for-label-property.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/302-arm64-dts-qcom-Add-missing-clock-for-nsscc-from-pcs-.patch [new file with mode: 0644]
target/linux/qualcommbe/patches-6.6/900-arm64-dts-qcom-Add-label-to-EDMA-port-for-IPQ9574-RD.patch [new file with mode: 0644]

diff --git a/target/linux/qualcommbe/Makefile b/target/linux/qualcommbe/Makefile
new file mode 100644
index 0000000..54d0873
--- /dev/null
+++ b/target/linux/qualcommbe/Makefile
@@ -0,0 +1,20 @@
+include $(TOPDIR)/rules.mk
+
+ARCH:=aarch64
+BOARD:=qualcommbe
+BOARDNAME:=Qualcomm Atheros 802.11be WiSoC-s
+FEATURES:=squashfs ramdisk fpu nand rtc emmc
+KERNELNAME:=Image
+CPU_TYPE:=cortex-a53
+SUBTARGETS:=ipq95xx
+
+KERNEL_PATCHVER:=6.6
+
+include $(INCLUDE_DIR)/target.mk
+DEFAULT_PACKAGES += \
+       kmod-leds-gpio kmod-gpio-button-hotplug \
+       kmod-qca-nss-dp \
+       wpad-basic-mbedtls uboot-envtools \
+       e2fsprogs kmod-fs-ext4 losetup
+
+$(eval $(call BuildTarget))
diff --git a/target/linux/qualcommbe/config-6.6 b/target/linux/qualcommbe/config-6.6
new file mode 100644
index 0000000..ac50596
--- /dev/null
+++ b/target/linux/qualcommbe/config-6.6
@@ -0,0 +1,579 @@
+CONFIG_64BIT=y
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
+CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_FORCE_MAX_ORDER=10
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_ARM64=y
+CONFIG_ARM64_4K_PAGES=y
+CONFIG_ARM64_ERRATUM_1165522=y
+CONFIG_ARM64_ERRATUM_1286807=y
+CONFIG_ARM64_ERRATUM_2051678=y
+CONFIG_ARM64_ERRATUM_2054223=y
+CONFIG_ARM64_ERRATUM_2067961=y
+CONFIG_ARM64_ERRATUM_2077057=y
+CONFIG_ARM64_ERRATUM_2658417=y
+CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
+CONFIG_ARM64_PAGE_SHIFT=12
+CONFIG_ARM64_PA_BITS=48
+CONFIG_ARM64_PA_BITS_48=y
+CONFIG_ARM64_PTR_AUTH=y
+CONFIG_ARM64_PTR_AUTH_KERNEL=y
+CONFIG_ARM64_SME=y
+CONFIG_ARM64_SVE=y
+CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_VA_BITS_39=y
+CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y
+CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y
+CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y
+CONFIG_ARM_AMBA=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_PCI=y
+# CONFIG_ARM_MHU_V2 is not set
+CONFIG_ARM_PSCI_CPUIDLE=y
+CONFIG_ARM_PSCI_FW=y
+# CONFIG_ARM_QCOM_CPUFREQ_HW is not set
+CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y
+CONFIG_AT803X_PHY=y
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_PM=y
+CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
+CONFIG_CAVIUM_TX2_ERRATUM_219=y
+CONFIG_CC_HAVE_SHADOW_CALL_STACK=y
+CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_COREDUMP=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_CPUFREQ_DT_PLATDEV=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_THERMAL=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_PM=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CRC16=y
+CONFIG_CRC8=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCE_AEAD=y
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_AEAD is not set
+CONFIG_CRYPTO_DEV_QCE_ENABLE_ALL=y
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_SHA is not set
+# CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER is not set
+CONFIG_CRYPTO_DEV_QCE_SHA=y
+CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y
+CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN=512
+CONFIG_CRYPTO_DEV_QCOM_RNG=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_DES=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_DCACHE_WORD_ACCESS=y
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEV_COREDUMP=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
+CONFIG_DMA_DIRECT_REMAP=y
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_OF=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DTC=y
+CONFIG_DT_IDLE_STATES=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FRAME_POINTER=y
+CONFIG_FS_IOMAP=y
+CONFIG_FUJITSU_ERRATUM_010001=y
+CONFIG_FUNCTION_ALIGNMENT=4
+CONFIG_FUNCTION_ALIGNMENT_4B=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND=y
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IOREMAP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GLOB=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_CDEV=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+# CONFIG_I2C_QCOM_CCI is not set
+CONFIG_I2C_QUP=y
+CONFIG_IIO=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IPQ_APSS_6018=y
+CONFIG_IPQ_APSS_PLL=y
+# CONFIG_IPQ_GCC_4019 is not set
+# CONFIG_IPQ_GCC_5018 is not set
+# CONFIG_IPQ_GCC_5332 is not set
+# CONFIG_IPQ_GCC_6018 is not set
+# CONFIG_IPQ_GCC_8074 is not set
+# CONFIG_IPQ_GCC_9574 is not set
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_WORK=y
+# CONFIG_KPSS_XCC is not set
+CONFIG_LEDS_TLC591XX=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_MAILBOX=y
+# CONFIG_MAILBOX_TEST is not set
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MDIO_IPQ4019=y
+# CONFIG_MFD_QCOM_RPM is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGRATION=y
+# CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is not set
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_CQHCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_MSM=y
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_USE_ELF_RELA=y
+# CONFIG_MSM_GCC_8916 is not set
+# CONFIG_MSM_GCC_8917 is not set
+# CONFIG_MSM_GCC_8939 is not set
+# CONFIG_MSM_GCC_8976 is not set
+# CONFIG_MSM_GCC_8994 is not set
+# CONFIG_MSM_GCC_8996 is not set
+# CONFIG_MSM_GCC_8998 is not set
+# CONFIG_MSM_GPUCC_8998 is not set
+# CONFIG_MSM_MMCC_8996 is not set
+# CONFIG_MSM_MMCC_8998 is not set
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_NAND_QCOM=y
+CONFIG_MTD_QCOMSMEM_PARTS=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_XGRESS=y
+CONFIG_NLS=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_LAYOUTS=y
+CONFIG_NVMEM_QCOM_QFPROM=y
+# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set
+CONFIG_NVMEM_SYSFS=y
+CONFIG_NVMEM_U_BOOT_ENV=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_PADATA=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y
+CONFIG_PARTITION_PERCPU=y
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEASPM=y
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_DW_HOST=y
+CONFIG_PCIE_PME=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+# CONFIG_PHY_QCOM_APQ8064_SATA is not set
+# CONFIG_PHY_QCOM_EDP is not set
+# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set
+# CONFIG_PHY_QCOM_IPQ4019_USB is not set
+# CONFIG_PHY_QCOM_IPQ806X_SATA is not set
+# CONFIG_PHY_QCOM_IPQ806X_USB is not set
+# CONFIG_PHY_QCOM_M31_USB is not set
+# CONFIG_PHY_QCOM_PCIE2 is not set
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_QMP_COMBO=y
+CONFIG_PHY_QCOM_QMP_PCIE=y
+CONFIG_PHY_QCOM_QMP_PCIE_8996=y
+CONFIG_PHY_QCOM_QMP_UFS=y
+CONFIG_PHY_QCOM_QMP_USB=y
+# CONFIG_PHY_QCOM_QMP_USB_LEGACY is not set
+CONFIG_PHY_QCOM_QUSB2=y
+# CONFIG_PHY_QCOM_SGMII_ETH is not set
+# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set
+# CONFIG_PHY_QCOM_USB_HS_28NM is not set
+# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set
+# CONFIG_PHY_QCOM_USB_SS is not set
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_IPQ5018 is not set
+# CONFIG_PINCTRL_IPQ5332 is not set
+# CONFIG_PINCTRL_IPQ6018 is not set
+# CONFIG_PINCTRL_IPQ8074 is not set
+# CONFIG_PINCTRL_IPQ9574 is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_MSM8976 is not set
+# CONFIG_PINCTRL_MSM8994 is not set
+# CONFIG_PINCTRL_MSM8996 is not set
+# CONFIG_PINCTRL_MSM8998 is not set
+# CONFIG_PINCTRL_QCM2290 is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+# CONFIG_PINCTRL_QCS404 is not set
+# CONFIG_PINCTRL_QDU1000 is not set
+# CONFIG_PINCTRL_SA8775P is not set
+# CONFIG_PINCTRL_SC7180 is not set
+# CONFIG_PINCTRL_SC8280XP is not set
+# CONFIG_PINCTRL_SDM660 is not set
+# CONFIG_PINCTRL_SDM670 is not set
+# CONFIG_PINCTRL_SDM845 is not set
+# CONFIG_PINCTRL_SDX75 is not set
+# CONFIG_PINCTRL_SM6350 is not set
+# CONFIG_PINCTRL_SM6375 is not set
+# CONFIG_PINCTRL_SM7150 is not set
+# CONFIG_PINCTRL_SM8150 is not set
+# CONFIG_PINCTRL_SM8250 is not set
+# CONFIG_PINCTRL_SM8450 is not set
+# CONFIG_PINCTRL_SM8550 is not set
+CONFIG_PM=y
+CONFIG_PM_CLK=y
+CONFIG_PM_OPP=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_MSM is not set
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QCA807X_PHY=y
+CONFIG_QCA808X_PHY=y
+# CONFIG_QCM_DISPCC_2290 is not set
+# CONFIG_QCM_GCC_2290 is not set
+# CONFIG_QCOM_A53PLL is not set
+# CONFIG_QCOM_AOSS_QMP is not set
+CONFIG_QCOM_APCS_IPC=y
+# CONFIG_QCOM_APM is not set
+# CONFIG_QCOM_APR is not set
+CONFIG_QCOM_BAM_DMA=y
+# CONFIG_QCOM_CLK_APCC_MSM8996 is not set
+# CONFIG_QCOM_CLK_APCS_MSM8916 is not set
+# CONFIG_QCOM_COMMAND_DB is not set
+# CONFIG_QCOM_CPR is not set
+# CONFIG_QCOM_EBI2 is not set
+# CONFIG_QCOM_FASTRPC is not set
+# CONFIG_QCOM_GENI_SE is not set
+# CONFIG_QCOM_GSBI is not set
+# CONFIG_QCOM_HFPLL is not set
+# CONFIG_QCOM_ICC_BWMON is not set
+# CONFIG_QCOM_IPCC is not set
+# CONFIG_QCOM_LLCC is not set
+CONFIG_QCOM_MDT_LOADER=y
+# CONFIG_QCOM_MPM is not set
+CONFIG_QCOM_NET_PHYLIB=y
+# CONFIG_QCOM_OCMEM is not set
+# CONFIG_QCOM_PDC is not set
+CONFIG_QCOM_PIL_INFO=y
+# CONFIG_QCOM_Q6V5_ADSP is not set
+CONFIG_QCOM_Q6V5_COMMON=y
+# CONFIG_QCOM_Q6V5_MSS is not set
+# CONFIG_QCOM_Q6V5_PAS is not set
+CONFIG_QCOM_Q6V5_WCSS=y
+# CONFIG_QCOM_RAMP_CTRL is not set
+# CONFIG_QCOM_RMTFS_MEM is not set
+# CONFIG_QCOM_RPMH is not set
+# CONFIG_QCOM_RPM_MASTER_STATS is not set
+CONFIG_QCOM_RPROC_COMMON=y
+CONFIG_QCOM_SCM=y
+# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set
+# CONFIG_QCOM_SMD_RPM is not set
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMEM_STATE=y
+CONFIG_QCOM_SMP2P=y
+# CONFIG_QCOM_SMSM is not set
+CONFIG_QCOM_SOCINFO=y
+# CONFIG_QCOM_SPM is not set
+# CONFIG_QCOM_STATS is not set
+# CONFIG_QCOM_SYSMON is not set
+CONFIG_QCOM_TSENS=y
+# CONFIG_QCOM_WCNSS_CTRL is not set
+# CONFIG_QCOM_WCNSS_PIL is not set
+CONFIG_QCOM_WDT=y
+# CONFIG_QCS_GCC_404 is not set
+# CONFIG_QCS_Q6SSTOP_404 is not set
+# CONFIG_QCS_TURING_404 is not set
+# CONFIG_QDU_GCC_1000 is not set
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_CPR3 is not set
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_QCOM_REFGEN is not set
+# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set
+CONFIG_RELOCATABLE=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_QCOM_AOSS is not set
+# CONFIG_RESET_QCOM_PDC is not set
+CONFIG_RFS_ACCEL=y
+CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
+CONFIG_RPMSG=y
+CONFIG_RPMSG_CHAR=y
+# CONFIG_RPMSG_CTRL is not set
+# CONFIG_RPMSG_NS is not set
+CONFIG_RPMSG_QCOM_GLINK=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_SMD=y
+# CONFIG_RPMSG_TTY is not set
+CONFIG_RPS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_I2C_AND_SPI=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+# CONFIG_SA_GCC_8775P is not set
+# CONFIG_SA_GPUCC_8775P is not set
+# CONFIG_SCHED_CORE is not set
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_THERMAL_PRESSURE=y
+CONFIG_SCSI=y
+CONFIG_SCSI_COMMON=y
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_PROC_FS is not set
+# CONFIG_SC_CAMCC_7280 is not set
+# CONFIG_SC_DISPCC_7180 is not set
+# CONFIG_SC_DISPCC_8280XP is not set
+# CONFIG_SC_GCC_7180 is not set
+# CONFIG_SC_GCC_8280XP is not set
+# CONFIG_SC_GPUCC_7180 is not set
+# CONFIG_SC_LPASSCC_7280 is not set
+# CONFIG_SC_LPASSCC_8280XP is not set
+# CONFIG_SC_LPASS_CORECC_7180 is not set
+# CONFIG_SC_LPASS_CORECC_7280 is not set
+# CONFIG_SC_MSS_7180 is not set
+# CONFIG_SC_VIDEOCC_7180 is not set
+# CONFIG_SDM_CAMCC_845 is not set
+# CONFIG_SDM_DISPCC_845 is not set
+# CONFIG_SDM_GCC_660 is not set
+# CONFIG_SDM_GCC_845 is not set
+# CONFIG_SDM_GPUCC_845 is not set
+# CONFIG_SDM_LPASSCC_845 is not set
+# CONFIG_SDM_VIDEOCC_845 is not set
+# CONFIG_SDX_GCC_75 is not set
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SG_POOL=y
+CONFIG_SMP=y
+# CONFIG_SM_CAMCC_6350 is not set
+# CONFIG_SM_CAMCC_8450 is not set
+# CONFIG_SM_GCC_7150 is not set
+# CONFIG_SM_GCC_8150 is not set
+# CONFIG_SM_GCC_8250 is not set
+# CONFIG_SM_GCC_8450 is not set
+# CONFIG_SM_GCC_8550 is not set
+# CONFIG_SM_GPUCC_6115 is not set
+# CONFIG_SM_GPUCC_6125 is not set
+# CONFIG_SM_GPUCC_6350 is not set
+# CONFIG_SM_GPUCC_6375 is not set
+# CONFIG_SM_GPUCC_8150 is not set
+# CONFIG_SM_GPUCC_8250 is not set
+# CONFIG_SM_GPUCC_8350 is not set
+# CONFIG_SM_GPUCC_8450 is not set
+# CONFIG_SM_GPUCC_8550 is not set
+# CONFIG_SM_TCSRCC_8550 is not set
+# CONFIG_SM_VIDEOCC_8150 is not set
+# CONFIG_SM_VIDEOCC_8250 is not set
+# CONFIG_SM_VIDEOCC_8350 is not set
+# CONFIG_SM_VIDEOCC_8450 is not set
+# CONFIG_SM_VIDEOCC_8550 is not set
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SOC_BUS=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_QUP=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_OF=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_UCLAMP_TASK is not set
+CONFIG_UNMAP_KERNEL_AT_EL0=y
+CONFIG_USB=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_SUPPORT=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_ANCHOR=y
+# CONFIG_VIRTIO_BLK is not set
+# CONFIG_VIRTIO_NET is not set
+CONFIG_VMAP_STACK=y
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
diff --git a/target/linux/qualcommbe/image/Makefile b/target/linux/qualcommbe/image/Makefile
new file mode 100644
index 0000000..c12fc97
--- /dev/null
+++ b/target/linux/qualcommbe/image/Makefile
@@ -0,0 +1,41 @@
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/image.mk
+
+define Device/Default
+       PROFILES := Default
+       KERNEL_LOADADDR := 0x41000000
+       DEVICE_DTS = $$(SOC)-$(lastword $(subst _, ,$(1)))
+       DEVICE_DTS_CONFIG := config@1
+       DEVICE_DTS_DIR := $(DTS_DIR)/qcom
+       IMAGES := sysupgrade.bin
+       IMAGE/sysupgrade.bin = sysupgrade-tar | append-metadata
+       IMAGE/sysupgrade.bin/squashfs :=
+endef
+
+define Device/FitImage
+       KERNEL_SUFFIX := -uImage.itb
+       KERNEL = kernel-bin | libdeflate-gzip | fit gzip $$(KDIR)/image-$$(DEVICE_DTS).dtb
+       KERNEL_NAME := Image
+endef
+
+define Device/FitImageLzma
+       KERNEL_SUFFIX := -uImage.itb
+       KERNEL = kernel-bin | lzma | fit lzma $$(KDIR)/image-$$(DEVICE_DTS).dtb
+       KERNEL_NAME := Image
+endef
+
+define Device/EmmcImage
+       IMAGES += factory.bin
+       IMAGE/factory.bin := append-rootfs | pad-rootfs | pad-to 64k
+       IMAGE/sysupgrade.bin/squashfs := append-rootfs | pad-to 64k | sysupgrade-tar rootfs=$$$$@ | append-metadata
+endef
+
+define Device/UbiFit
+       KERNEL_IN_UBI := 1
+       IMAGES += factory.ubi
+       IMAGE/factory.ubi := append-ubi
+endef
+
+include $(SUBTARGET).mk
+
+$(eval $(call BuildImage))
diff --git a/target/linux/qualcommbe/image/ipq95xx.mk b/target/linux/qualcommbe/image/ipq95xx.mk
new file mode 100644
index 0000000..af5e9d9
--- /dev/null
+++ b/target/linux/qualcommbe/image/ipq95xx.mk
@@ -0,0 +1,14 @@
+define Device/qcom_rdp433
+       $(call Device/FitImageLzma)
+       DEVICE_VENDOR := Qualcomm Technologies, Inc.
+       DEVICE_MODEL := RDP433
+       DEVICE_VARIANT := AP-AL02-C4
+       BOARD_NAME := ap-al02.1-c4
+       DEVICE_DTS_CONFIG := config@rdp433
+       SOC := ipq9574
+       KERNEL_INSTALL := 1
+       KERNEL_SIZE := 6096k
+       IMAGE_SIZE := 25344k
+       IMAGE/sysupgrade.bin := append-kernel | pad-to 64k | append-rootfs | pad-rootfs | check-size | append-metadata
+endef
+TARGET_DEVICES += qcom_rdp433
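
For a quick post-build sanity check, the RDP433 artifacts defined above
should appear under the standard output path; the file names here are
illustrative, assembled from OpenWrt's <target>-<subtarget>-<device>
naming convention rather than taken from this commit:

  ls bin/targets/qualcommbe/ipq95xx/
  # e.g. openwrt-qualcommbe-ipq95xx-qcom_rdp433-initramfs-uImage.itb
  #      openwrt-qualcommbe-ipq95xx-qcom_rdp433-squashfs-sysupgrade.bin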
diff --git a/target/linux/qualcommbe/ipq95xx/base-files/etc/board.d/02_network b/target/linux/qualcommbe/ipq95xx/base-files/etc/board.d/02_network
new file mode 100644
index 0000000..3d08015
--- /dev/null
+++ b/target/linux/qualcommbe/ipq95xx/base-files/etc/board.d/02_network
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2015 The Linux Foundation. All rights reserved.
+# Copyright (c) 2011-2015 OpenWrt.org
+#
+
+. /lib/functions/uci-defaults.sh
+. /lib/functions/system.sh
+
+ipq95xx_setup_interfaces()
+{
+       local board="$1"
+
+       case "$board" in
+       qcom,ipq9574-ap-al02-c7)
+               ucidef_set_interfaces_lan_wan "lan1 lan2 lan3 lan4 lan5" "wan"
+               ;;
+       *)
+               echo "Unsupported hardware. Network interfaces not initialized"
+               ;;
+       esac
+}
+
+board_config_update
+board=$(board_name)
+ipq95xx_setup_interfaces $board
+board_config_flush
+
+exit 0
diff --git a/target/linux/qualcommbe/ipq95xx/base-files/lib/upgrade/platform.sh b/target/linux/qualcommbe/ipq95xx/base-files/lib/upgrade/platform.sh
new file mode 100644
index 0000000..30099a9
--- /dev/null
+++ b/target/linux/qualcommbe/ipq95xx/base-files/lib/upgrade/platform.sh
@@ -0,0 +1,12 @@
+PART_NAME=firmware
+
+RAMFS_COPY_BIN='fw_printenv fw_setenv head'
+RAMFS_COPY_DATA='/etc/fw_env.config /var/lock/fw_printenv.lock'
+
+platform_do_upgrade() {
+       case "$(board_name)" in
+       *)
+               default_do_upgrade "$1"
+               ;;
+       esac
+}
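
Since platform_do_upgrade() above falls through to default_do_upgrade
for every board, a plain sysupgrade call is all that is needed once
flashable (non-initramfs) images become usable; a hedged usage sketch
with an illustrative image name:

  # On the running device; -n discards the current configuration:
  sysupgrade -n /tmp/openwrt-qualcommbe-ipq95xx-qcom_rdp433-squashfs-sysupgrade.bin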
diff --git a/target/linux/qualcommbe/ipq95xx/config-default b/target/linux/qualcommbe/ipq95xx/config-default
new file mode 100644
index 0000000..3c20e22
--- /dev/null
+++ b/target/linux/qualcommbe/ipq95xx/config-default
@@ -0,0 +1,48 @@
+CONFIG_ARM_PSCI_CPUIDLE_DOMAIN=y
+CONFIG_DT_IDLE_GENPD=y
+CONFIG_GRO_CELLS=y
+CONFIG_IPQ_GCC_9574=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+# CONFIG_MFD_HI6421_SPMI is not set
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_NET_DEVLINK=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_QCA8K=y
+CONFIG_NET_DSA_TAG_QCA=y
+# CONFIG_NVMEM_SPMI_SDAM is not set
+CONFIG_PHYLINK=y
+CONFIG_PINCTRL_IPQ9574=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+# CONFIG_PM8916_WATCHDOG is not set
+CONFIG_PM_GENERIC_DOMAINS=y
+CONFIG_PM_GENERIC_DOMAINS_OF=y
+# CONFIG_POWER_RESET_QCOM_PON is not set
+CONFIG_QCA83XX_PHY=y
+CONFIG_QCOM_APM=y
+# CONFIG_QCOM_COINCELL is not set
+CONFIG_QCOM_GDSC=y
+CONFIG_QCOM_SPMI_ADC5=y
+# CONFIG_QCOM_SPMI_RRADC is not set
+CONFIG_QCOM_VADC_COMMON=y
+CONFIG_REGMAP_SPMI=y
+CONFIG_REGULATOR_CPR3=y
+# CONFIG_REGULATOR_CPR3_NPU is not set
+CONFIG_REGULATOR_CPR4_APSS=y
+# CONFIG_REGULATOR_QCOM_LABIBB is not set
+CONFIG_REGULATOR_QCOM_SPMI=y
+# CONFIG_REGULATOR_QCOM_USB_VBUS is not set
+CONFIG_REGULATOR_USERSPACE_CONSUMER=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_SPMI=y
+# CONFIG_SPMI_HISI3670 is not set
+CONFIG_SPMI_MSM_PMIC_ARB=y
+# CONFIG_SPMI_PMIC_CLKDIV is not set
+CONFIG_SPI_QPIC_SNAND=y
+CONFIG_IPQ_CMN_PLL=y
+CONFIG_IPQ_NSSCC_9574=y
+CONFIG_IPQ_NSSCC_QCA8K=y
+CONFIG_QCOM_PPE=y
+CONFIG_QCOM_IPA=y
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=y
diff --git a/target/linux/qualcommbe/ipq95xx/target.mk b/target/linux/qualcommbe/ipq95xx/target.mk
new file mode 100644
index 0000000..0e8463e
--- /dev/null
+++ b/target/linux/qualcommbe/ipq95xx/target.mk
@@ -0,0 +1,7 @@
+SUBTARGET:=ipq95xx
+BOARDNAME:=Qualcomm Atheros IPQ95xx
+DEFAULT_PACKAGES += 
+
+define Target/Description
+       Build firmware images for Qualcomm Atheros IPQ95XX based boards.
+endef
diff --git a/target/linux/qualcommbe/patches-6.6/001-v6.8-arm64-dts-qcom-ipq9574-Add-common-RDP-dtsi-file.patch b/target/linux/qualcommbe/patches-6.6/001-v6.8-arm64-dts-qcom-ipq9574-Add-common-RDP-dtsi-file.patch
new file mode 100644
index 0000000..dc41c33
--- /dev/null
+++ b/target/linux/qualcommbe/patches-6.6/001-v6.8-arm64-dts-qcom-ipq9574-Add-common-RDP-dtsi-file.patch
@@ -0,0 +1,601 @@
+From 0e8527d076cfb3fa55777a2ece735852fcf3e850 Mon Sep 17 00:00:00 2001
+From: Anusha Rao <quic_anusha@quicinc.com>
+Date: Wed, 27 Sep 2023 12:13:18 +0530
+Subject: [PATCH] arm64: dts: qcom: ipq9574: Add common RDP dtsi file
+
+Add a dtsi file to include interfaces that are common
+across RDPs.
+
+Signed-off-by: Anusha Rao <quic_anusha@quicinc.com>
+Signed-off-by: Kathiravan Thirumoorthy <quic_kathirav@quicinc.com>
+Link: https://lore.kernel.org/r/20230927-common-rdp-v3-1-3d07b3ff6d42@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ .../boot/dts/qcom/ipq9574-rdp-common.dtsi     | 125 ++++++++++++++++++
+ arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts   |  63 +--------
+ arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts   |  91 +------------
+ arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts   |  65 +--------
+ arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts   |  65 +--------
+ arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts   |  66 +--------
+ 6 files changed, 130 insertions(+), 345 deletions(-)
+ create mode 100644 arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+new file mode 100644
+index 000000000000..40a7aefd0540
+--- /dev/null
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -0,0 +1,125 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * IPQ9574 RDP board common device tree source
++ *
++ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/dts-v1/;
++
++#include "ipq9574.dtsi"
++
++/ {
++      aliases {
++              serial0 = &blsp1_uart2;
++      };
++
++      chosen {
++              stdout-path = "serial0:115200n8";
++      };
++
++      regulator_fixed_3p3: s3300 {
++              compatible = "regulator-fixed";
++              regulator-min-microvolt = <3300000>;
++              regulator-max-microvolt = <3300000>;
++              regulator-boot-on;
++              regulator-always-on;
++              regulator-name = "fixed_3p3";
++      };
++
++      regulator_fixed_0p925: s0925 {
++              compatible = "regulator-fixed";
++              regulator-min-microvolt = <925000>;
++              regulator-max-microvolt = <925000>;
++              regulator-boot-on;
++              regulator-always-on;
++              regulator-name = "fixed_0p925";
++      };
++};
++
++&blsp1_spi0 {
++      pinctrl-0 = <&spi_0_pins>;
++      pinctrl-names = "default";
++      status = "okay";
++
++      flash@0 {
++              compatible = "micron,n25q128a11", "jedec,spi-nor";
++              reg = <0>;
++              #address-cells = <1>;
++              #size-cells = <1>;
++              spi-max-frequency = <50000000>;
++      };
++};
++
++&blsp1_uart2 {
++      pinctrl-0 = <&uart2_pins>;
++      pinctrl-names = "default";
++      status = "okay";
++};
++
++&rpm_requests {
++      regulators {
++              compatible = "qcom,rpm-mp5496-regulators";
++
++              ipq9574_s1: s1 {
++              /*
++               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
++               * During regulator registration, kernel not knowing the initial voltage,
++               * considers it as zero and brings up the regulators with minimum supported voltage.
++               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
++               * the regulators are brought up with 725mV which is sufficient for all the
++               * corner parts to operate at 800MHz
++               */
++                      regulator-min-microvolt = <725000>;
++                      regulator-max-microvolt = <1075000>;
++              };
++
++              mp5496_l2: l2 {
++                      regulator-min-microvolt = <1800000>;
++                      regulator-max-microvolt = <1800000>;
++                      regulator-always-on;
++                      regulator-boot-on;
++              };
++      };
++};
++
++&sleep_clk {
++      clock-frequency = <32000>;
++};
++
++&tlmm {
++      spi_0_pins: spi-0-state {
++              pins = "gpio11", "gpio12", "gpio13", "gpio14";
++              function = "blsp0_spi";
++              drive-strength = <8>;
++              bias-disable;
++      };
++};
++
++&usb_0_dwc3 {
++      dr_mode = "host";
++};
++
++&usb_0_qmpphy {
++      vdda-pll-supply = <&mp5496_l2>;
++      vdda-phy-supply = <&regulator_fixed_0p925>;
++
++      status = "okay";
++};
++
++&usb_0_qusbphy {
++      vdd-supply = <&regulator_fixed_0p925>;
++      vdda-pll-supply = <&mp5496_l2>;
++      vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
++
++      status = "okay";
++};
++
++&usb3 {
++      status = "okay";
++};
++
++&xo_board_clk {
++      clock-frequency = <24000000>;
++};
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts
+index 2b093e02637b..f4f9199d4ab1 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp418.dts
+@@ -8,58 +8,12 @@
+ /dts-v1/;
+-#include "ipq9574.dtsi"
++#include "ipq9574-rdp-common.dtsi"
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C2";
+       compatible = "qcom,ipq9574-ap-al02-c2", "qcom,ipq9574";
+-      aliases {
+-              serial0 = &blsp1_uart2;
+-      };
+-
+-      chosen {
+-              stdout-path = "serial0:115200n8";
+-      };
+-};
+-
+-&blsp1_spi0 {
+-      pinctrl-0 = <&spi_0_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-
+-      flash@0 {
+-              compatible = "micron,n25q128a11", "jedec,spi-nor";
+-              reg = <0>;
+-              #address-cells = <1>;
+-              #size-cells = <1>;
+-              spi-max-frequency = <50000000>;
+-      };
+-};
+-
+-&blsp1_uart2 {
+-      pinctrl-0 = <&uart2_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-};
+-
+-&rpm_requests {
+-      regulators {
+-              compatible = "qcom,rpm-mp5496-regulators";
+-
+-              ipq9574_s1: s1 {
+-              /*
+-               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+-               * During regulator registration, kernel not knowing the initial voltage,
+-               * considers it as zero and brings up the regulators with minimum supported voltage.
+-               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+-               * the regulators are brought up with 725mV which is sufficient for all the
+-               * corner parts to operate at 800MHz
+-               */
+-                      regulator-min-microvolt = <725000>;
+-                      regulator-max-microvolt = <1075000>;
+-              };
+-      };
+ };
+ &sdhc_1 {
+@@ -74,10 +28,6 @@ &sdhc_1 {
+       status = "okay";
+ };
+-&sleep_clk {
+-      clock-frequency = <32000>;
+-};
+-
+ &tlmm {
+       sdc_default_state: sdc-default-state {
+               clk-pins {
+@@ -110,15 +60,4 @@ rclk-pins {
+                       bias-pull-down;
+               };
+       };
+-
+-      spi_0_pins: spi-0-state {
+-              pins = "gpio11", "gpio12", "gpio13", "gpio14";
+-              function = "blsp0_spi";
+-              drive-strength = <8>;
+-              bias-disable;
+-      };
+-};
+-
+-&xo_board_clk {
+-      clock-frequency = <24000000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+index 877026ccc6e2..1bb8d96c9a82 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+@@ -8,69 +8,11 @@
+ /dts-v1/;
+-#include "ipq9574.dtsi"
++#include "ipq9574-rdp-common.dtsi"
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C7";
+       compatible = "qcom,ipq9574-ap-al02-c7", "qcom,ipq9574";
+-
+-      aliases {
+-              serial0 = &blsp1_uart2;
+-      };
+-
+-      chosen {
+-              stdout-path = "serial0:115200n8";
+-      };
+-
+-      regulator_fixed_3p3: s3300 {
+-              compatible = "regulator-fixed";
+-              regulator-min-microvolt = <3300000>;
+-              regulator-max-microvolt = <3300000>;
+-              regulator-boot-on;
+-              regulator-always-on;
+-              regulator-name = "fixed_3p3";
+-      };
+-
+-      regulator_fixed_0p925: s0925 {
+-              compatible = "regulator-fixed";
+-              regulator-min-microvolt = <925000>;
+-              regulator-max-microvolt = <925000>;
+-              regulator-boot-on;
+-              regulator-always-on;
+-              regulator-name = "fixed_0p925";
+-      };
+-};
+-
+-&blsp1_uart2 {
+-      pinctrl-0 = <&uart2_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-};
+-
+-&rpm_requests {
+-      regulators {
+-              compatible = "qcom,rpm-mp5496-regulators";
+-
+-              ipq9574_s1: s1 {
+-              /*
+-               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+-               * During regulator registration, kernel not knowing the initial voltage,
+-               * considers it as zero and brings up the regulators with minimum supported voltage.
+-               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+-               * the regulators are brought up with 725mV which is sufficient for all the
+-               * corner parts to operate at 800MHz
+-               */
+-                      regulator-min-microvolt = <725000>;
+-                      regulator-max-microvolt = <1075000>;
+-              };
+-
+-              mp5496_l2: l2 {
+-                      regulator-min-microvolt = <1800000>;
+-                      regulator-max-microvolt = <1800000>;
+-                      regulator-always-on;
+-                      regulator-boot-on;
+-              };
+-      };
+ };
+ &sdhc_1 {
+@@ -85,10 +27,6 @@ &sdhc_1 {
+       status = "okay";
+ };
+-&sleep_clk {
+-      clock-frequency = <32000>;
+-};
+-
+ &tlmm {
+       sdc_default_state: sdc-default-state {
+               clk-pins {
+@@ -122,30 +60,3 @@ rclk-pins {
+               };
+       };
+ };
+-
+-&usb_0_dwc3 {
+-      dr_mode = "host";
+-};
+-
+-&usb_0_qmpphy {
+-      vdda-pll-supply = <&mp5496_l2>;
+-      vdda-phy-supply = <&regulator_fixed_0p925>;
+-
+-      status = "okay";
+-};
+-
+-&usb_0_qusbphy {
+-      vdd-supply = <&regulator_fixed_0p925>;
+-      vdda-pll-supply = <&mp5496_l2>;
+-      vdda-phy-dpdm-supply = <&regulator_fixed_3p3>;
+-
+-      status = "okay";
+-};
+-
+-&usb3 {
+-      status = "okay";
+-};
+-
+-&xo_board_clk {
+-      clock-frequency = <24000000>;
+-};
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts
+index c8fa54e1a62c..d36d1078763e 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp449.dts
+@@ -8,73 +8,10 @@
+ /dts-v1/;
+-#include "ipq9574.dtsi"
++#include "ipq9574-rdp-common.dtsi"
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C6";
+       compatible = "qcom,ipq9574-ap-al02-c6", "qcom,ipq9574";
+-      aliases {
+-              serial0 = &blsp1_uart2;
+-      };
+-
+-      chosen {
+-              stdout-path = "serial0:115200n8";
+-      };
+-};
+-
+-&blsp1_spi0 {
+-      pinctrl-0 = <&spi_0_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-
+-      flash@0 {
+-              compatible = "micron,n25q128a11", "jedec,spi-nor";
+-              reg = <0>;
+-              #address-cells = <1>;
+-              #size-cells = <1>;
+-              spi-max-frequency = <50000000>;
+-      };
+-};
+-
+-&blsp1_uart2 {
+-      pinctrl-0 = <&uart2_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-};
+-
+-&rpm_requests {
+-      regulators {
+-              compatible = "qcom,rpm-mp5496-regulators";
+-
+-              ipq9574_s1: s1 {
+-              /*
+-               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+-               * During regulator registration, kernel not knowing the initial voltage,
+-               * considers it as zero and brings up the regulators with minimum supported voltage.
+-               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+-               * the regulators are brought up with 725mV which is sufficient for all the
+-               * corner parts to operate at 800MHz
+-               */
+-                      regulator-min-microvolt = <725000>;
+-                      regulator-max-microvolt = <1075000>;
+-              };
+-      };
+-};
+-
+-&sleep_clk {
+-      clock-frequency = <32000>;
+-};
+-
+-&tlmm {
+-      spi_0_pins: spi-0-state {
+-              pins = "gpio11", "gpio12", "gpio13", "gpio14";
+-              function = "blsp0_spi";
+-              drive-strength = <8>;
+-              bias-disable;
+-      };
+-};
+-
+-&xo_board_clk {
+-      clock-frequency = <24000000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts
+index f01de6628c3b..c30c9fbedf26 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp453.dts
+@@ -8,73 +8,10 @@
+ /dts-v1/;
+-#include "ipq9574.dtsi"
++#include "ipq9574-rdp-common.dtsi"
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C8";
+       compatible = "qcom,ipq9574-ap-al02-c8", "qcom,ipq9574";
+-      aliases {
+-              serial0 = &blsp1_uart2;
+-      };
+-
+-      chosen {
+-              stdout-path = "serial0:115200n8";
+-      };
+-};
+-
+-&blsp1_spi0 {
+-      pinctrl-0 = <&spi_0_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-
+-      flash@0 {
+-              compatible = "micron,n25q128a11", "jedec,spi-nor";
+-              reg = <0>;
+-              #address-cells = <1>;
+-              #size-cells = <1>;
+-              spi-max-frequency = <50000000>;
+-      };
+-};
+-
+-&blsp1_uart2 {
+-      pinctrl-0 = <&uart2_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-};
+-
+-&rpm_requests {
+-      regulators {
+-              compatible = "qcom,rpm-mp5496-regulators";
+-
+-              ipq9574_s1: s1 {
+-              /*
+-               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+-               * During regulator registration, kernel not knowing the initial voltage,
+-               * considers it as zero and brings up the regulators with minimum supported voltage.
+-               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+-               * the regulators are brought up with 725mV which is sufficient for all the
+-               * corner parts to operate at 800MHz
+-               */
+-                      regulator-min-microvolt = <725000>;
+-                      regulator-max-microvolt = <1075000>;
+-              };
+-      };
+-};
+-
+-&sleep_clk {
+-      clock-frequency = <32000>;
+-};
+-
+-&tlmm {
+-      spi_0_pins: spi-0-state {
+-              pins = "gpio11", "gpio12", "gpio13", "gpio14";
+-              function = "blsp0_spi";
+-              drive-strength = <8>;
+-              bias-disable;
+-      };
+-};
+-
+-&xo_board_clk {
+-      clock-frequency = <24000000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts
+index 6efae3426cb8..0dc382f5d5ec 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp454.dts
+@@ -8,73 +8,9 @@
+ /dts-v1/;
+-#include "ipq9574.dtsi"
++#include "ipq9574-rdp-common.dtsi"
+ / {
+       model = "Qualcomm Technologies, Inc. IPQ9574/AP-AL02-C9";
+       compatible = "qcom,ipq9574-ap-al02-c9", "qcom,ipq9574";
+-
+-      aliases {
+-              serial0 = &blsp1_uart2;
+-      };
+-
+-      chosen {
+-              stdout-path = "serial0:115200n8";
+-      };
+-};
+-
+-&blsp1_spi0 {
+-      pinctrl-0 = <&spi_0_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-
+-      flash@0 {
+-              compatible = "micron,n25q128a11", "jedec,spi-nor";
+-              reg = <0>;
+-              #address-cells = <1>;
+-              #size-cells = <1>;
+-              spi-max-frequency = <50000000>;
+-      };
+-};
+-
+-&blsp1_uart2 {
+-      pinctrl-0 = <&uart2_pins>;
+-      pinctrl-names = "default";
+-      status = "okay";
+-};
+-
+-&rpm_requests {
+-      regulators {
+-              compatible = "qcom,rpm-mp5496-regulators";
+-
+-              ipq9574_s1: s1 {
+-              /*
+-               * During kernel bootup, the SoC runs at 800MHz with 875mV set by the bootloaders.
+-               * During regulator registration, kernel not knowing the initial voltage,
+-               * considers it as zero and brings up the regulators with minimum supported voltage.
+-               * Update the regulator-min-microvolt with SVS voltage of 725mV so that
+-               * the regulators are brought up with 725mV which is sufficient for all the
+-               * corner parts to operate at 800MHz
+-               */
+-                      regulator-min-microvolt = <725000>;
+-                      regulator-max-microvolt = <1075000>;
+-              };
+-      };
+-};
+-
+-&sleep_clk {
+-      clock-frequency = <32000>;
+-};
+-
+-&tlmm {
+-      spi_0_pins: spi-0-state {
+-              pins = "gpio11", "gpio12", "gpio13", "gpio14";
+-              function = "blsp0_spi";
+-              drive-strength = <8>;
+-              bias-disable;
+-      };
+-};
+-
+-&xo_board_clk {
+-      clock-frequency = <24000000>;
+ };
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/002-01-v6.11-dt-bindings-clock-add-qca8386-qca8084-clock-and-rese.patch b/target/linux/qualcommbe/patches-6.6/002-01-v6.11-dt-bindings-clock-add-qca8386-qca8084-clock-and-rese.patch
new file mode 100644
index 0000000..0e300ca
--- /dev/null
+++ b/target/linux/qualcommbe/patches-6.6/002-01-v6.11-dt-bindings-clock-add-qca8386-qca8084-clock-and-rese.patch
@@ -0,0 +1,307 @@
+From 80bbd1c355d661678d2a25bd36e739b6925e7a4e Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 5 Jun 2024 20:45:39 +0800
+Subject: [PATCH] dt-bindings: clock: add qca8386/qca8084 clock and reset
+ definitions
+
+QCA8386/QCA8084 includes the clock & reset controller that is
+accessed by MDIO bus. Two work modes are supported, qca8386 works
+as switch mode, qca8084 works as PHY mode.
+
+Reviewed-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Link: https://lore.kernel.org/r/20240605124541.2711467-3-quic_luoj@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ .../bindings/clock/qcom,qca8k-nsscc.yaml      |  86 +++++++++++++++
+ include/dt-bindings/clock/qcom,qca8k-nsscc.h  | 101 ++++++++++++++++++
+ include/dt-bindings/reset/qcom,qca8k-nsscc.h  |  76 +++++++++++++
+ 3 files changed, 263 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml
+ create mode 100644 include/dt-bindings/clock/qcom,qca8k-nsscc.h
+ create mode 100644 include/dt-bindings/reset/qcom,qca8k-nsscc.h
+
+diff --git a/Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml b/Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml
+new file mode 100644
+index 000000000000..61473385da2d
+--- /dev/null
++++ b/Documentation/devicetree/bindings/clock/qcom,qca8k-nsscc.yaml
+@@ -0,0 +1,86 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/clock/qcom,qca8k-nsscc.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm NSS Clock & Reset Controller on QCA8386/QCA8084
++
++maintainers:
++  - Bjorn Andersson <andersson@kernel.org>
++  - Luo Jie <quic_luoj@quicinc.com>
++
++description: |
++  Qualcomm NSS clock control module provides the clocks and resets
++  on QCA8386(switch mode)/QCA8084(PHY mode)
++
++  See also::
++    include/dt-bindings/clock/qcom,qca8k-nsscc.h
++    include/dt-bindings/reset/qcom,qca8k-nsscc.h
++
++properties:
++  compatible:
++    oneOf:
++      - const: qcom,qca8084-nsscc
++      - items:
++          - enum:
++              - qcom,qca8082-nsscc
++              - qcom,qca8085-nsscc
++              - qcom,qca8384-nsscc
++              - qcom,qca8385-nsscc
++              - qcom,qca8386-nsscc
++          - const: qcom,qca8084-nsscc
++
++  clocks:
++    items:
++      - description: Chip reference clock source
++      - description: UNIPHY0 RX 312P5M/125M clock source
++      - description: UNIPHY0 TX 312P5M/125M clock source
++      - description: UNIPHY1 RX 312P5M/125M clock source
++      - description: UNIPHY1 TX 312P5M/125M clock source
++      - description: UNIPHY1 RX 312P5M clock source
++      - description: UNIPHY1 TX 312P5M clock source
++
++  reg:
++    items:
++      - description: MDIO bus address for Clock & Reset Controller register
++
++  reset-gpios:
++    description: GPIO connected to the chip
++    maxItems: 1
++
++required:
++  - compatible
++  - clocks
++  - reg
++  - reset-gpios
++
++allOf:
++  - $ref: qcom,gcc.yaml#
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    #include <dt-bindings/gpio/gpio.h>
++    mdio {
++      #address-cells = <1>;
++      #size-cells = <0>;
++
++      clock-controller@18 {
++        compatible = "qcom,qca8084-nsscc";
++        reg = <0x18>;
++        reset-gpios = <&tlmm 51 GPIO_ACTIVE_LOW>;
++        clocks = <&pcs0_pll>,
++                 <&qca8k_uniphy0_rx>,
++                 <&qca8k_uniphy0_tx>,
++                 <&qca8k_uniphy1_rx>,
++                 <&qca8k_uniphy1_tx>,
++                 <&qca8k_uniphy1_rx312p5m>,
++                 <&qca8k_uniphy1_tx312p5m>;
++        #clock-cells = <1>;
++        #reset-cells = <1>;
++        #power-domain-cells = <1>;
++      };
++    };
++...
+diff --git a/include/dt-bindings/clock/qcom,qca8k-nsscc.h b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
+new file mode 100644
+index 000000000000..0ac3e4c69a1a
+--- /dev/null
++++ b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
+@@ -0,0 +1,101 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
++#define _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
++
++#define NSS_CC_SWITCH_CORE_CLK_SRC                            0
++#define NSS_CC_SWITCH_CORE_CLK                                        1
++#define NSS_CC_APB_BRIDGE_CLK                                 2
++#define NSS_CC_MAC0_TX_CLK_SRC                                        3
++#define NSS_CC_MAC0_TX_DIV_CLK_SRC                            4
++#define NSS_CC_MAC0_TX_CLK                                    5
++#define NSS_CC_MAC0_TX_SRDS1_CLK                              6
++#define NSS_CC_MAC0_RX_CLK_SRC                                        7
++#define NSS_CC_MAC0_RX_DIV_CLK_SRC                            8
++#define NSS_CC_MAC0_RX_CLK                                    9
++#define NSS_CC_MAC0_RX_SRDS1_CLK                              10
++#define NSS_CC_MAC1_TX_CLK_SRC                                        11
++#define NSS_CC_MAC1_TX_DIV_CLK_SRC                            12
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC            13
++#define NSS_CC_MAC1_SRDS1_CH0_RX_CLK                          14
++#define NSS_CC_MAC1_TX_CLK                                    15
++#define NSS_CC_MAC1_GEPHY0_TX_CLK                             16
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK                    17
++#define NSS_CC_MAC1_RX_CLK_SRC                                        18
++#define NSS_CC_MAC1_RX_DIV_CLK_SRC                            19
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC            20
++#define NSS_CC_MAC1_SRDS1_CH0_TX_CLK                          21
++#define NSS_CC_MAC1_RX_CLK                                    22
++#define NSS_CC_MAC1_GEPHY0_RX_CLK                             23
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK                    24
++#define NSS_CC_MAC2_TX_CLK_SRC                                        25
++#define NSS_CC_MAC2_TX_DIV_CLK_SRC                            26
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC            27
++#define NSS_CC_MAC2_SRDS1_CH1_RX_CLK                          28
++#define NSS_CC_MAC2_TX_CLK                                    29
++#define NSS_CC_MAC2_GEPHY1_TX_CLK                             30
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK                    31
++#define NSS_CC_MAC2_RX_CLK_SRC                                        32
++#define NSS_CC_MAC2_RX_DIV_CLK_SRC                            33
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC            34
++#define NSS_CC_MAC2_SRDS1_CH1_TX_CLK                          35
++#define NSS_CC_MAC2_RX_CLK                                    36
++#define NSS_CC_MAC2_GEPHY1_RX_CLK                             37
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK                    38
++#define NSS_CC_MAC3_TX_CLK_SRC                                        39
++#define NSS_CC_MAC3_TX_DIV_CLK_SRC                            40
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC            41
++#define NSS_CC_MAC3_SRDS1_CH2_RX_CLK                          42
++#define NSS_CC_MAC3_TX_CLK                                    43
++#define NSS_CC_MAC3_GEPHY2_TX_CLK                             44
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK                    45
++#define NSS_CC_MAC3_RX_CLK_SRC                                        46
++#define NSS_CC_MAC3_RX_DIV_CLK_SRC                            47
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC            48
++#define NSS_CC_MAC3_SRDS1_CH2_TX_CLK                          49
++#define NSS_CC_MAC3_RX_CLK                                    50
++#define NSS_CC_MAC3_GEPHY2_RX_CLK                             51
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK                    52
++#define NSS_CC_MAC4_TX_CLK_SRC                                        53
++#define NSS_CC_MAC4_TX_DIV_CLK_SRC                            54
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC            55
++#define NSS_CC_MAC4_SRDS1_CH3_RX_CLK                          56
++#define NSS_CC_MAC4_TX_CLK                                    57
++#define NSS_CC_MAC4_GEPHY3_TX_CLK                             58
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK                    59
++#define NSS_CC_MAC4_RX_CLK_SRC                                        60
++#define NSS_CC_MAC4_RX_DIV_CLK_SRC                            61
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC            62
++#define NSS_CC_MAC4_SRDS1_CH3_TX_CLK                          63
++#define NSS_CC_MAC4_RX_CLK                                    64
++#define NSS_CC_MAC4_GEPHY3_RX_CLK                             65
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK                    66
++#define NSS_CC_MAC5_TX_CLK_SRC                                        67
++#define NSS_CC_MAC5_TX_DIV_CLK_SRC                            68
++#define NSS_CC_MAC5_TX_SRDS0_CLK                              69
++#define NSS_CC_MAC5_TX_CLK                                    70
++#define NSS_CC_MAC5_RX_CLK_SRC                                        71
++#define NSS_CC_MAC5_RX_DIV_CLK_SRC                            72
++#define NSS_CC_MAC5_RX_SRDS0_CLK                              73
++#define NSS_CC_MAC5_RX_CLK                                    74
++#define NSS_CC_MAC5_TX_SRDS0_CLK_SRC                          75
++#define NSS_CC_MAC5_RX_SRDS0_CLK_SRC                          76
++#define NSS_CC_AHB_CLK_SRC                                    77
++#define NSS_CC_AHB_CLK                                                78
++#define NSS_CC_SEC_CTRL_AHB_CLK                                       79
++#define NSS_CC_TLMM_CLK                                               80
++#define NSS_CC_TLMM_AHB_CLK                                   81
++#define NSS_CC_CNOC_AHB_CLK                                   82
++#define NSS_CC_MDIO_AHB_CLK                                   83
++#define NSS_CC_MDIO_MASTER_AHB_CLK                            84
++#define NSS_CC_SYS_CLK_SRC                                    85
++#define NSS_CC_SRDS0_SYS_CLK                                  86
++#define NSS_CC_SRDS1_SYS_CLK                                  87
++#define NSS_CC_GEPHY0_SYS_CLK                                 88
++#define NSS_CC_GEPHY1_SYS_CLK                                 89
++#define NSS_CC_GEPHY2_SYS_CLK                                 90
++#define NSS_CC_GEPHY3_SYS_CLK                                 91
++#endif
+diff --git a/include/dt-bindings/reset/qcom,qca8k-nsscc.h b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
+new file mode 100644
+index 000000000000..c71167a3bd41
+--- /dev/null
++++ b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
+@@ -0,0 +1,76 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
++#define _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
++
++#define NSS_CC_SWITCH_CORE_ARES                               1
++#define NSS_CC_APB_BRIDGE_ARES                                2
++#define NSS_CC_MAC0_TX_ARES                           3
++#define NSS_CC_MAC0_TX_SRDS1_ARES                     4
++#define NSS_CC_MAC0_RX_ARES                           5
++#define NSS_CC_MAC0_RX_SRDS1_ARES                     6
++#define NSS_CC_MAC1_SRDS1_CH0_RX_ARES                 7
++#define NSS_CC_MAC1_TX_ARES                           8
++#define NSS_CC_MAC1_GEPHY0_TX_ARES                    9
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES           10
++#define NSS_CC_MAC1_SRDS1_CH0_TX_ARES                 11
++#define NSS_CC_MAC1_RX_ARES                           12
++#define NSS_CC_MAC1_GEPHY0_RX_ARES                    13
++#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES           14
++#define NSS_CC_MAC2_SRDS1_CH1_RX_ARES                 15
++#define NSS_CC_MAC2_TX_ARES                           16
++#define NSS_CC_MAC2_GEPHY1_TX_ARES                    17
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES           18
++#define NSS_CC_MAC2_SRDS1_CH1_TX_ARES                 19
++#define NSS_CC_MAC2_RX_ARES                           20
++#define NSS_CC_MAC2_GEPHY1_RX_ARES                    21
++#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES           22
++#define NSS_CC_MAC3_SRDS1_CH2_RX_ARES                 23
++#define NSS_CC_MAC3_TX_ARES                           24
++#define NSS_CC_MAC3_GEPHY2_TX_ARES                    25
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES           26
++#define NSS_CC_MAC3_SRDS1_CH2_TX_ARES                 27
++#define NSS_CC_MAC3_RX_ARES                           28
++#define NSS_CC_MAC3_GEPHY2_RX_ARES                    29
++#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES           30
++#define NSS_CC_MAC4_SRDS1_CH3_RX_ARES                 31
++#define NSS_CC_MAC4_TX_ARES                           32
++#define NSS_CC_MAC4_GEPHY3_TX_ARES                    33
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES           34
++#define NSS_CC_MAC4_SRDS1_CH3_TX_ARES                 35
++#define NSS_CC_MAC4_RX_ARES                           36
++#define NSS_CC_MAC4_GEPHY3_RX_ARES                    37
++#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES           38
++#define NSS_CC_MAC5_TX_ARES                           39
++#define NSS_CC_MAC5_TX_SRDS0_ARES                     40
++#define NSS_CC_MAC5_RX_ARES                           41
++#define NSS_CC_MAC5_RX_SRDS0_ARES                     42
++#define NSS_CC_AHB_ARES                                       43
++#define NSS_CC_SEC_CTRL_AHB_ARES                      44
++#define NSS_CC_TLMM_ARES                              45
++#define NSS_CC_TLMM_AHB_ARES                          46
++#define NSS_CC_CNOC_AHB_ARES                          47
++#define NSS_CC_MDIO_AHB_ARES                          48
++#define NSS_CC_MDIO_MASTER_AHB_ARES                   49
++#define NSS_CC_SRDS0_SYS_ARES                         50
++#define NSS_CC_SRDS1_SYS_ARES                         51
++#define NSS_CC_GEPHY0_SYS_ARES                                52
++#define NSS_CC_GEPHY1_SYS_ARES                                53
++#define NSS_CC_GEPHY2_SYS_ARES                                54
++#define NSS_CC_GEPHY3_SYS_ARES                                55
++#define NSS_CC_SEC_CTRL_ARES                          56
++#define NSS_CC_SEC_CTRL_SENSE_ARES                    57
++#define NSS_CC_SLEEP_ARES                             58
++#define NSS_CC_DEBUG_ARES                             59
++#define NSS_CC_GEPHY0_ARES                            60
++#define NSS_CC_GEPHY1_ARES                            61
++#define NSS_CC_GEPHY2_ARES                            62
++#define NSS_CC_GEPHY3_ARES                            63
++#define NSS_CC_DSP_ARES                                       64
++#define NSS_CC_GEPHY_FULL_ARES                                65
++#define NSS_CC_GLOBAL_ARES                            66
++#define NSS_CC_XPCS_ARES                              67
++#endif
+-- 
+2.45.2
+
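
A board DTS then consumes the IDs from these headers along the lines of
the following sketch (the &nsscc label and the consumer node are
hypothetical, not taken from the patches; cell counts follow the
binding's #clock-cells = <1> and #reset-cells = <1>):

    #include <dt-bindings/clock/qcom,qca8k-nsscc.h>
    #include <dt-bindings/reset/qcom,qca8k-nsscc.h>

    ethernet-phy@1 {
        reg = <1>;
        clocks = <&nsscc NSS_CC_GEPHY0_SYS_CLK>;
        resets = <&nsscc NSS_CC_GEPHY0_ARES>;
    };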
diff --git a/target/linux/qualcommbe/patches-6.6/002-02-v6.11-clk-qcom-add-clock-controller-driver-for-qca8386-qca.patch b/target/linux/qualcommbe/patches-6.6/002-02-v6.11-clk-qcom-add-clock-controller-driver-for-qca8386-qca.patch
new file mode 100644
index 0000000..d831bb6
--- /dev/null
@@ -0,0 +1,2296 @@
+From 2441b965c4c7adae0b4a7825f7acb67d44c3cd38 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 5 Jun 2024 20:45:41 +0800
+Subject: [PATCH] clk: qcom: add clock controller driver for qca8386/qca8084
+
+The clock controller driver for qca8386/qca8084 is registered
+as an MDIO device, and its hardware registers are accessed over
+the MDIO bus that is normally used to reach generic PHY devices.
+This differs from the existing qcom clock controller drivers,
+which ioremap their clock registers; nsscc-qca8k is instead
+accessed via an MDIO bus.
+
+The MDIO bus is shared between qca8386/qca8084 and other PHY
+devices, so the mutex lock mdio_bus->mdio_lock should be used
+instead of the regmap's own mutex lock.
+
+To access the hardware clock registers of qca8386/qca8084,
+a special MDIO frame sequence needs to be sent to the
+device.
+
+Enable the reference clock before resetting the clock controller;
+the reference clock rate is fixed at 50 MHz.
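+
+For reference, reading one 32-bit clock register then amounts to
+two 16-bit MDIO transactions serialized by mdio_lock, roughly as
+in this sketch (hypothetical helper, not the exact driver code):
+
+  static int qca8k_mdio_read32(struct mii_bus *bus, int phy,
+                               u32 reg, u32 *val)
+  {
+          int lo, hi;
+
+          mutex_lock(&bus->mdio_lock);
+          /* the special frame/page-select sequence goes here */
+          lo = __mdiobus_read(bus, phy, reg & QCA8K_CLK_REG_MASK);
+          hi = __mdiobus_read(bus, phy, (reg & QCA8K_CLK_REG_MASK) |
+                                        QCA8K_REG_DATA_UPPER_16_BITS);
+          mutex_unlock(&bus->mdio_lock);
+
+          if (lo < 0 || hi < 0)
+                  return -EIO;
+          *val = ((u32)hi << 16) | (u16)lo;
+          return 0;
+  }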
+
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Link: https://lore.kernel.org/r/20240605124541.2711467-5-quic_luoj@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/Kconfig       |    9 +
+ drivers/clk/qcom/Makefile      |    1 +
+ drivers/clk/qcom/nsscc-qca8k.c | 2221 ++++++++++++++++++++++++++++++++
+ 3 files changed, 2231 insertions(+)
+ create mode 100644 drivers/clk/qcom/nsscc-qca8k.c
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 20ba2eeb24ec..4432b1cce478 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -249,6 +249,15 @@ config IPQ_GCC_9574
+         i2c, USB, SD/eMMC, etc. Select this for the root clock
+         of ipq9574.
+ 
++config IPQ_NSSCC_QCA8K
++      tristate "QCA8K (QCA8386 or QCA8084) NSS Clock Controller"
++      depends on MDIO_BUS || COMPILE_TEST
++      help
++        Support for the NSS (Network SubSystem) clock controller on
++        the qca8386/qca8084 chip.
++        Say Y or M if you want to use the network features of the
++        switch or PHY device. Select this for the root clock of qca8k.
++
+ config MSM_GCC_8660
+       tristate "MSM8660 Global Clock Controller"
+       depends on ARM || COMPILE_TEST
+diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
+index b7de8600dc3d..0241f00689bc 100644
+--- a/drivers/clk/qcom/Makefile
++++ b/drivers/clk/qcom/Makefile
+@@ -36,6 +36,7 @@ obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+ obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+ obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+ obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
++obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
+ obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
+ obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+diff --git a/drivers/clk/qcom/nsscc-qca8k.c b/drivers/clk/qcom/nsscc-qca8k.c
+new file mode 100644
+index 000000000000..5c8324e2bcca
+--- /dev/null
++++ b/drivers/clk/qcom/nsscc-qca8k.c
+@@ -0,0 +1,2221 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/regmap.h>
++#include <linux/phy.h>
++#include <linux/mdio.h>
++#include <linux/clk.h>
++#include <linux/gpio/consumer.h>
++
++#include <dt-bindings/clock/qcom,qca8k-nsscc.h>
++#include <dt-bindings/reset/qcom,qca8k-nsscc.h>
++
++#include "clk-branch.h"
++#include "clk-rcg.h"
++#include "clk-regmap.h"
++#include "clk-regmap-divider.h"
++#include "clk-regmap-mux.h"
++#include "common.h"
++#include "reset.h"
++
++#define QCA8K_CLK_REG_BASE            0x800000
++#define QCA8K_HIGH_ADDR_PREFIX                0x18
++#define QCA8K_LOW_ADDR_PREFIX         0x10
++#define QCA8K_CFG_PAGE_REG            0xc
++#define QCA8K_CLK_REG_MASK            GENMASK(4, 0)
++#define QCA8K_CLK_PHY_ADDR_MASK               GENMASK(7, 5)
++#define QCA8K_CLK_PAGE_MASK           GENMASK(23, 8)
++#define QCA8K_REG_DATA_UPPER_16_BITS  BIT(1)
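++
++/*
++ * Register offsets are carried over MDIO: per the masks above,
++ * bits [4:0] select the MDIO register, bits [7:5] the PHY address
++ * and bits [23:8] a page latched through QCA8K_CFG_PAGE_REG, with
++ * the 0x18/0x10 prefixes forming the high and low MDIO addresses
++ * of the split access.
++ */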
++
++enum {
++      DT_XO,
++      DT_UNIPHY0_RX_CLK,
++      DT_UNIPHY0_TX_CLK,
++      DT_UNIPHY1_RX_CLK,
++      DT_UNIPHY1_TX_CLK,
++      DT_UNIPHY1_RX312P5M_CLK,
++      DT_UNIPHY1_TX312P5M_CLK,
++};
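++
++/*
++ * Indices into the DT "clocks" property; the order matches the
++ * seven clock items listed in the qcom,qca8k-nsscc binding.
++ */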
++
++enum {
++      P_XO,
++      P_UNIPHY0_RX,
++      P_UNIPHY0_TX,
++      P_UNIPHY1_RX,
++      P_UNIPHY1_TX,
++      P_UNIPHY1_RX312P5M,
++      P_UNIPHY1_TX312P5M,
++      P_MAC4_RX_DIV,
++      P_MAC4_TX_DIV,
++      P_MAC5_RX_DIV,
++      P_MAC5_TX_DIV,
++};
++
++static const struct clk_parent_data nss_cc_uniphy1_tx312p5m_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY1_TX312P5M_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy1_tx312p5m_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_TX312P5M, 1 },
++};
++
++static struct clk_rcg2 nss_cc_switch_core_clk_src = {
++      .cmd_rcgr = 0x0,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_switch_core_clk_src",
++              .parent_data = nss_cc_uniphy1_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_data),
++              .ops = &clk_rcg2_mux_closest_ops,
++      },
++};
++
++static struct clk_branch nss_cc_switch_core_clk = {
++      .halt_reg = 0x8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_switch_core_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_switch_core_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_apb_bridge_clk = {
++      .halt_reg = 0x10,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x10,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_apb_bridge_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_switch_core_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy1_tx_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY1_TX_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy1_tx_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_TX, 2 },
++};
++
++static struct clk_rcg2 nss_cc_mac0_tx_clk_src = {
++      .cmd_rcgr = 0x14,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac0_tx_clk_src",
++              .parent_data = nss_cc_uniphy1_tx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx_data),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_mux_closest_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac0_tx_div_clk_src = {
++      .reg = 0x1c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac0_tx_clk = {
++      .halt_reg = 0x20,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x20,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac0_tx_srds1_clk = {
++      .halt_reg = 0x24,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x24,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_tx_srds1_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy1_rx_tx_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY1_RX_CLK },
++      { .index = DT_UNIPHY1_TX_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy1_rx_tx_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_RX, 1 },
++      { P_UNIPHY1_TX, 2 },
++};
++
++static struct clk_rcg2 nss_cc_mac0_rx_clk_src = {
++      .cmd_rcgr = 0x28,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_rx_tx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac0_rx_clk_src",
++              .parent_data = nss_cc_uniphy1_rx_tx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx_data),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_mux_closest_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac0_rx_div_clk_src = {
++      .reg = 0x30,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac0_rx_clk = {
++      .halt_reg = 0x34,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x34,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac0_rx_srds1_clk = {
++      .halt_reg = 0x3c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x3c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac0_rx_srds1_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac0_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy1_rx_tx312p5m_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY1_TX312P5M_CLK },
++      { .index = DT_UNIPHY1_RX312P5M_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy1_rx_tx312p5m_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_TX312P5M, 6 },
++      { P_UNIPHY1_RX312P5M, 7 },
++};
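++
++/*
++ * The second field of each entry is the hardware source-select
++ * value programmed into the RCG mux for that parent.
++ */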
++
++static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_25[] = {
++      C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 12.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_125[] = {
++      C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 2.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac1_tx_clk_src_312p5[] = {
++      C(P_UNIPHY1_TX312P5M, 1, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_mac1_tx_clk_src[] = {
++      FM(25000000, ftbl_nss_cc_mac1_tx_clk_src_25),
++      FMS(50000000, P_XO, 1, 0, 0),
++      FM(125000000, ftbl_nss_cc_mac1_tx_clk_src_125),
++      FM(312500000, ftbl_nss_cc_mac1_tx_clk_src_312p5),
++      { }
++};
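++
++/*
++ * Each FM() entry above lists alternative parent/divider pairs
++ * for one target rate (312.5 MHz / 12.5 = 25 MHz from either
++ * UNIPHY1 source), from which the freq-multi rcg2 ops pick a
++ * usable parent; FMS() pins a rate to one source (50 MHz XO).
++ */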
++
++static struct clk_rcg2 nss_cc_mac1_tx_clk_src = {
++      .cmd_rcgr = 0x40,
++      .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac1_tx_clk_src",
++              .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac1_tx_div_clk_src = {
++      .reg = 0x48,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src = {
++      .reg = 0x4c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_srds1_ch0_rx_clk = {
++      .halt_reg = 0x50,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x50,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_tx_clk = {
++      .halt_reg = 0x54,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x54,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_gephy0_tx_clk = {
++      .halt_reg = 0x58,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x58,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_gephy0_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_srds1_ch0_xgmii_rx_clk = {
++      .halt_reg = 0x5c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x5c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_xgmii_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy1_tx312p5m_prx_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY1_TX312P5M_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy1_tx312p5m_prx_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_TX312P5M, 6 },
++};
++
++static const struct freq_tbl ftbl_nss_cc_mac1_rx_clk_src[] = {
++      F(25000000, P_UNIPHY1_TX312P5M, 12.5, 0, 0),
++      F(50000000, P_XO, 1, 0, 0),
++      F(125000000, P_UNIPHY1_TX312P5M, 2.5, 0, 0),
++      F(312500000, P_UNIPHY1_TX312P5M, 1, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_mac1_rx_clk_src = {
++      .cmd_rcgr = 0x60,
++      .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac1_rx_clk_src",
++              .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac1_rx_div_clk_src = {
++      .reg = 0x68,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src = {
++      .reg = 0x6c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_srds1_ch0_tx_clk = {
++      .halt_reg = 0x70,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x70,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_rx_clk = {
++      .halt_reg = 0x74,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x74,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_gephy0_rx_clk = {
++      .halt_reg = 0x78,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x78,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_gephy0_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac1_srds1_ch0_xgmii_tx_clk = {
++      .halt_reg = 0x7c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x7c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac1_srds1_ch0_xgmii_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_rcg2 nss_cc_mac2_tx_clk_src = {
++      .cmd_rcgr = 0x80,
++      .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac2_tx_clk_src",
++              .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac2_tx_div_clk_src = {
++      .reg = 0x88,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src = {
++      .reg = 0x8c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_srds1_ch1_rx_clk = {
++      .halt_reg = 0x90,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x90,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_tx_clk = {
++      .halt_reg = 0x94,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x94,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_gephy1_tx_clk = {
++      .halt_reg = 0x98,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x98,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_gephy1_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_srds1_ch1_xgmii_rx_clk = {
++      .halt_reg = 0x9c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x9c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_xgmii_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_rcg2 nss_cc_mac2_rx_clk_src = {
++      .cmd_rcgr = 0xa0,
++      .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac2_rx_clk_src",
++              .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac2_rx_div_clk_src = {
++      .reg = 0xa8,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src = {
++      .reg = 0xac,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_srds1_ch1_tx_clk = {
++      .halt_reg = 0xb0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xb0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_rx_clk = {
++      .halt_reg = 0xb4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xb4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_gephy1_rx_clk = {
++      .halt_reg = 0xb8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xb8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_gephy1_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac2_srds1_ch1_xgmii_tx_clk = {
++      .halt_reg = 0xbc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xbc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac2_srds1_ch1_xgmii_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_rcg2 nss_cc_mac3_tx_clk_src = {
++      .cmd_rcgr = 0xc0,
++      .freq_multi_tbl = ftbl_nss_cc_mac1_tx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_rx_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac3_tx_clk_src",
++              .parent_data = nss_cc_uniphy1_rx_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_rx_tx312p5m_data),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac3_tx_div_clk_src = {
++      .reg = 0xc8,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src = {
++      .reg = 0xcc,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_srds1_ch2_rx_clk = {
++      .halt_reg = 0xd0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xd0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_tx_clk = {
++      .halt_reg = 0xd4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xd4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_gephy2_tx_clk = {
++      .halt_reg = 0xd8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xd8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_gephy2_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_srds1_ch2_xgmii_rx_clk = {
++      .halt_reg = 0xdc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xdc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_xgmii_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_rcg2 nss_cc_mac3_rx_clk_src = {
++      .cmd_rcgr = 0xe0,
++      .freq_tbl = ftbl_nss_cc_mac1_rx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx312p5m_prx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac3_rx_clk_src",
++              .parent_data = nss_cc_uniphy1_tx312p5m_prx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_prx_data),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac3_rx_div_clk_src = {
++      .reg = 0xe8,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src = {
++      .reg = 0xec,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_srds1_ch2_tx_clk = {
++      .halt_reg = 0xf0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xf0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_rx_clk = {
++      .halt_reg = 0xf4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xf4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_gephy2_rx_clk = {
++      .halt_reg = 0xf8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xf8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_gephy2_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac3_srds1_ch2_xgmii_tx_clk = {
++      .halt_reg = 0xfc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xfc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac3_srds1_ch2_xgmii_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY0_RX_CLK },
++      { .index = DT_UNIPHY1_TX312P5M_CLK },
++      { .index = DT_UNIPHY1_RX312P5M_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY0_RX, 1 },
++      { P_UNIPHY1_TX312P5M, 3 },
++      { P_UNIPHY1_RX312P5M, 7 },
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_25[] = {
++      C(P_UNIPHY0_RX, 12.5, 0, 0),
++      C(P_UNIPHY0_RX, 5, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 12.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_125[] = {
++      C(P_UNIPHY0_RX, 1, 0, 0),
++      C(P_UNIPHY0_RX, 2.5, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 2.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_tx_clk_src_312p5[] = {
++      C(P_UNIPHY0_RX, 1, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 1, 0, 0),
++      C(P_UNIPHY1_RX312P5M, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_mac4_tx_clk_src[] = {
++      FM(25000000, ftbl_nss_cc_mac4_tx_clk_src_25),
++      FMS(50000000, P_XO, 1, 0, 0),
++      FM(125000000, ftbl_nss_cc_mac4_tx_clk_src_125),
++      FM(312500000, ftbl_nss_cc_mac4_tx_clk_src_312p5),
++      { }
++};
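++
++/*
++ * MAC4 can also clock from UNIPHY0, whose rate depends on the
++ * interface mode, so each target rate lists dividers for both a
++ * 312.5 MHz and a 125 MHz UNIPHY0 parent (312.5 / 12.5 =
++ * 125 / 5 = 25 MHz).
++ */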
++
++static struct clk_rcg2 nss_cc_mac4_tx_clk_src = {
++      .cmd_rcgr = 0x100,
++      .freq_multi_tbl = ftbl_nss_cc_mac4_tx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac4_tx_clk_src",
++              .parent_data = nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy0_rx_uniphy1_rx_tx312p5m_data),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac4_tx_div_clk_src = {
++      .reg = 0x108,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src = {
++      .reg = 0x10c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_srds1_ch3_rx_clk = {
++      .halt_reg = 0x110,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x110,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_tx_clk = {
++      .halt_reg = 0x114,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x114,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_gephy3_tx_clk = {
++      .halt_reg = 0x118,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x118,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_gephy3_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_srds1_ch3_xgmii_rx_clk = {
++      .halt_reg = 0x11c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x11c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_xgmii_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy0_tx_uniphy1_tx312p5m_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY0_TX_CLK },
++      { .index = DT_UNIPHY1_TX312P5M_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy0_tx_uniphy1_tx312p5m_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY0_TX, 2 },
++      { P_UNIPHY1_TX312P5M, 3 },
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_25[] = {
++      C(P_UNIPHY0_TX, 12.5, 0, 0),
++      C(P_UNIPHY0_TX, 5, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 12.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_125[] = {
++      C(P_UNIPHY0_TX, 1, 0, 0),
++      C(P_UNIPHY0_TX, 2.5, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_mac4_rx_clk_src_312p5[] = {
++      C(P_UNIPHY0_TX, 1, 0, 0),
++      C(P_UNIPHY1_TX312P5M, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_mac4_rx_clk_src[] = {
++      FM(25000000, ftbl_nss_cc_mac4_rx_clk_src_25),
++      FMS(50000000, P_XO, 1, 0, 0),
++      FM(125000000, ftbl_nss_cc_mac4_rx_clk_src_125),
++      FM(312500000, ftbl_nss_cc_mac4_rx_clk_src_312p5),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_mac4_rx_clk_src = {
++      .cmd_rcgr = 0x120,
++      .freq_multi_tbl = ftbl_nss_cc_mac4_rx_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy0_tx_uniphy1_tx312p5m_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac4_rx_clk_src",
++              .parent_data = nss_cc_uniphy0_tx_uniphy1_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy0_tx_uniphy1_tx312p5m_data),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac4_rx_div_clk_src = {
++      .reg = 0x128,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src = {
++      .reg = 0x12c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_srds1_ch3_tx_clk = {
++      .halt_reg = 0x130,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x130,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_rx_clk = {
++      .halt_reg = 0x134,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x134,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_gephy3_rx_clk = {
++      .halt_reg = 0x138,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x138,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_gephy3_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac4_srds1_ch3_xgmii_tx_clk = {
++      .halt_reg = 0x13c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x13c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac4_srds1_ch3_xgmii_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy0_tx_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY0_TX_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy0_tx_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY0_TX, 2 },
++};
++
++static struct clk_rcg2 nss_cc_mac5_tx_clk_src = {
++      .cmd_rcgr = 0x140,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy0_tx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac5_tx_clk_src",
++              .parent_data = nss_cc_uniphy0_tx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy0_tx_data),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_mux_closest_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac5_tx_div_clk_src = {
++      .reg = 0x148,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_tx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_tx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac5_tx_clk = {
++      .halt_reg = 0x14c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x14c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_tx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_uniphy0_rx_tx_data[] = {
++      { .index = DT_XO },
++      { .index = DT_UNIPHY0_RX_CLK },
++      { .index = DT_UNIPHY0_TX_CLK },
++};
++
++static const struct parent_map nss_cc_uniphy0_rx_tx_map[] = {
++      { P_XO, 0 },
++      { P_UNIPHY0_RX, 1 },
++      { P_UNIPHY0_TX, 2 },
++};
++
++static struct clk_rcg2 nss_cc_mac5_rx_clk_src = {
++      .cmd_rcgr = 0x154,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy0_rx_tx_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_mac5_rx_clk_src",
++              .parent_data = nss_cc_uniphy0_rx_tx_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy0_rx_tx_data),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_mux_closest_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_mac5_rx_div_clk_src = {
++      .reg = 0x15c,
++      .shift = 0,
++      .width = 4,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_rx_div_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_rx_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_div_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac5_rx_clk = {
++      .halt_reg = 0x160,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x160,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_rx_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct parent_map nss_cc_mac4_rx_div_mac5_tx_div_map[] = {
++      { P_MAC4_RX_DIV, 0 },
++      { P_MAC5_TX_DIV, 1 },
++};
++
++static struct clk_regmap_mux nss_cc_mac5_tx_srds0_clk_src = {
++      .reg = 0x300,
++      .shift = 0,
++      .width = 1,
++      .parent_map = nss_cc_mac4_rx_div_mac5_tx_div_map,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_tx_srds0_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_rx_div_clk_src.clkr.hw,
++                              &nss_cc_mac5_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 2,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_mux_closest_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac5_tx_srds0_clk = {
++      .halt_reg = 0x150,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x150,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_tx_srds0_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_tx_srds0_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct parent_map nss_cc_mac4_tx_div_mac5_rx_div_map[] = {
++      { P_MAC4_TX_DIV, 0 },
++      { P_MAC5_RX_DIV, 1 },
++};
++
++static struct clk_regmap_mux nss_cc_mac5_rx_srds0_clk_src = {
++      .reg = 0x300,
++      .shift = 1,
++      .width = 1,
++      .parent_map = nss_cc_mac4_tx_div_mac5_rx_div_map,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_rx_srds0_clk_src",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac4_tx_div_clk_src.clkr.hw,
++                              &nss_cc_mac5_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 2,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_regmap_mux_closest_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mac5_rx_srds0_clk = {
++      .halt_reg = 0x164,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x164,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mac5_rx_srds0_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_mac5_rx_srds0_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct parent_map nss_cc_uniphy1_tx312p5m_map2[] = {
++      { P_XO, 0 },
++      { P_UNIPHY1_TX312P5M, 2 },
++};
++
++static const struct freq_tbl ftbl_nss_cc_ahb_clk_src[] = {
++      F(50000000, P_XO, 1, 0, 0),
++      F(104170000, P_UNIPHY1_TX312P5M, 3, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_ahb_clk_src = {
++      .cmd_rcgr = 0x168,
++      .freq_tbl = ftbl_nss_cc_ahb_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_uniphy1_tx312p5m_map2,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ahb_clk_src",
++              .parent_data = nss_cc_uniphy1_tx312p5m_data,
++              .num_parents = ARRAY_SIZE(nss_cc_uniphy1_tx312p5m_data),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_branch nss_cc_ahb_clk = {
++      .halt_reg = 0x170,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x170,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_sec_ctrl_ahb_clk = {
++      .halt_reg = 0x174,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x174,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_sec_ctrl_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_tlmm_clk = {
++      .halt_reg = 0x178,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x178,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_tlmm_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_tlmm_ahb_clk = {
++      .halt_reg = 0x190,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x190,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_tlmm_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_cnoc_ahb_clk = {
++      .halt_reg = 0x194,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x194,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_cnoc_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mdio_ahb_clk = {
++      .halt_reg = 0x198,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x198,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mdio_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_mdio_master_ahb_clk = {
++      .halt_reg = 0x19c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x19c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_mdio_master_ahb_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_ahb_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static const struct clk_parent_data nss_cc_xo_data[] = {
++      { .index = DT_XO },
++};
++
++static const struct parent_map nss_cc_xo_map[] = {
++      { P_XO, 0 },
++};
++
++static const struct freq_tbl ftbl_nss_cc_sys_clk_src[] = {
++      F(25000000, P_XO, 2, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_sys_clk_src = {
++      .cmd_rcgr = 0x1a0,
++      .freq_tbl = ftbl_nss_cc_sys_clk_src,
++      .hid_width = 5,
++      .parent_map = nss_cc_xo_map,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_sys_clk_src",
++              .parent_data = nss_cc_xo_data,
++              .num_parents = ARRAY_SIZE(nss_cc_xo_data),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_branch nss_cc_srds0_sys_clk = {
++      .halt_reg = 0x1a8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1a8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_srds0_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_srds1_sys_clk = {
++      .halt_reg = 0x1ac,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1ac,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_srds1_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_gephy0_sys_clk = {
++      .halt_reg = 0x1b0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1b0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_gephy0_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_gephy1_sys_clk = {
++      .halt_reg = 0x1b4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1b4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_gephy1_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_gephy2_sys_clk = {
++      .halt_reg = 0x1b8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1b8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_gephy2_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_gephy3_sys_clk = {
++      .halt_reg = 0x1bc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1bc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_gephy3_sys_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &nss_cc_sys_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_branch2_prepare_ops,
++              },
++      },
++};
++
++static struct clk_regmap *nss_cc_qca8k_clocks[] = {
++      [NSS_CC_SWITCH_CORE_CLK_SRC] = &nss_cc_switch_core_clk_src.clkr,
++      [NSS_CC_SWITCH_CORE_CLK] = &nss_cc_switch_core_clk.clkr,
++      [NSS_CC_APB_BRIDGE_CLK] = &nss_cc_apb_bridge_clk.clkr,
++      [NSS_CC_MAC0_TX_CLK_SRC] = &nss_cc_mac0_tx_clk_src.clkr,
++      [NSS_CC_MAC0_TX_DIV_CLK_SRC] = &nss_cc_mac0_tx_div_clk_src.clkr,
++      [NSS_CC_MAC0_TX_CLK] = &nss_cc_mac0_tx_clk.clkr,
++      [NSS_CC_MAC0_TX_SRDS1_CLK] = &nss_cc_mac0_tx_srds1_clk.clkr,
++      [NSS_CC_MAC0_RX_CLK_SRC] = &nss_cc_mac0_rx_clk_src.clkr,
++      [NSS_CC_MAC0_RX_DIV_CLK_SRC] = &nss_cc_mac0_rx_div_clk_src.clkr,
++      [NSS_CC_MAC0_RX_CLK] = &nss_cc_mac0_rx_clk.clkr,
++      [NSS_CC_MAC0_RX_SRDS1_CLK] = &nss_cc_mac0_rx_srds1_clk.clkr,
++      [NSS_CC_MAC1_TX_CLK_SRC] = &nss_cc_mac1_tx_clk_src.clkr,
++      [NSS_CC_MAC1_TX_DIV_CLK_SRC] = &nss_cc_mac1_tx_div_clk_src.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC] =
++              &nss_cc_mac1_srds1_ch0_xgmii_rx_div_clk_src.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_RX_CLK] = &nss_cc_mac1_srds1_ch0_rx_clk.clkr,
++      [NSS_CC_MAC1_TX_CLK] = &nss_cc_mac1_tx_clk.clkr,
++      [NSS_CC_MAC1_GEPHY0_TX_CLK] = &nss_cc_mac1_gephy0_tx_clk.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK] = &nss_cc_mac1_srds1_ch0_xgmii_rx_clk.clkr,
++      [NSS_CC_MAC1_RX_CLK_SRC] = &nss_cc_mac1_rx_clk_src.clkr,
++      [NSS_CC_MAC1_RX_DIV_CLK_SRC] = &nss_cc_mac1_rx_div_clk_src.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC] =
++              &nss_cc_mac1_srds1_ch0_xgmii_tx_div_clk_src.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_TX_CLK] = &nss_cc_mac1_srds1_ch0_tx_clk.clkr,
++      [NSS_CC_MAC1_RX_CLK] = &nss_cc_mac1_rx_clk.clkr,
++      [NSS_CC_MAC1_GEPHY0_RX_CLK] = &nss_cc_mac1_gephy0_rx_clk.clkr,
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK] = &nss_cc_mac1_srds1_ch0_xgmii_tx_clk.clkr,
++      [NSS_CC_MAC2_TX_CLK_SRC] = &nss_cc_mac2_tx_clk_src.clkr,
++      [NSS_CC_MAC2_TX_DIV_CLK_SRC] = &nss_cc_mac2_tx_div_clk_src.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC] =
++              &nss_cc_mac2_srds1_ch1_xgmii_rx_div_clk_src.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_RX_CLK] = &nss_cc_mac2_srds1_ch1_rx_clk.clkr,
++      [NSS_CC_MAC2_TX_CLK] = &nss_cc_mac2_tx_clk.clkr,
++      [NSS_CC_MAC2_GEPHY1_TX_CLK] = &nss_cc_mac2_gephy1_tx_clk.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK] = &nss_cc_mac2_srds1_ch1_xgmii_rx_clk.clkr,
++      [NSS_CC_MAC2_RX_CLK_SRC] = &nss_cc_mac2_rx_clk_src.clkr,
++      [NSS_CC_MAC2_RX_DIV_CLK_SRC] = &nss_cc_mac2_rx_div_clk_src.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC] =
++              &nss_cc_mac2_srds1_ch1_xgmii_tx_div_clk_src.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_TX_CLK] = &nss_cc_mac2_srds1_ch1_tx_clk.clkr,
++      [NSS_CC_MAC2_RX_CLK] = &nss_cc_mac2_rx_clk.clkr,
++      [NSS_CC_MAC2_GEPHY1_RX_CLK] = &nss_cc_mac2_gephy1_rx_clk.clkr,
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK] = &nss_cc_mac2_srds1_ch1_xgmii_tx_clk.clkr,
++      [NSS_CC_MAC3_TX_CLK_SRC] = &nss_cc_mac3_tx_clk_src.clkr,
++      [NSS_CC_MAC3_TX_DIV_CLK_SRC] = &nss_cc_mac3_tx_div_clk_src.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC] =
++              &nss_cc_mac3_srds1_ch2_xgmii_rx_div_clk_src.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_RX_CLK] = &nss_cc_mac3_srds1_ch2_rx_clk.clkr,
++      [NSS_CC_MAC3_TX_CLK] = &nss_cc_mac3_tx_clk.clkr,
++      [NSS_CC_MAC3_GEPHY2_TX_CLK] = &nss_cc_mac3_gephy2_tx_clk.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK] = &nss_cc_mac3_srds1_ch2_xgmii_rx_clk.clkr,
++      [NSS_CC_MAC3_RX_CLK_SRC] = &nss_cc_mac3_rx_clk_src.clkr,
++      [NSS_CC_MAC3_RX_DIV_CLK_SRC] = &nss_cc_mac3_rx_div_clk_src.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC] =
++              &nss_cc_mac3_srds1_ch2_xgmii_tx_div_clk_src.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_TX_CLK] = &nss_cc_mac3_srds1_ch2_tx_clk.clkr,
++      [NSS_CC_MAC3_RX_CLK] = &nss_cc_mac3_rx_clk.clkr,
++      [NSS_CC_MAC3_GEPHY2_RX_CLK] = &nss_cc_mac3_gephy2_rx_clk.clkr,
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK] = &nss_cc_mac3_srds1_ch2_xgmii_tx_clk.clkr,
++      [NSS_CC_MAC4_TX_CLK_SRC] = &nss_cc_mac4_tx_clk_src.clkr,
++      [NSS_CC_MAC4_TX_DIV_CLK_SRC] = &nss_cc_mac4_tx_div_clk_src.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC] =
++              &nss_cc_mac4_srds1_ch3_xgmii_rx_div_clk_src.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_RX_CLK] = &nss_cc_mac4_srds1_ch3_rx_clk.clkr,
++      [NSS_CC_MAC4_TX_CLK] = &nss_cc_mac4_tx_clk.clkr,
++      [NSS_CC_MAC4_GEPHY3_TX_CLK] = &nss_cc_mac4_gephy3_tx_clk.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK] = &nss_cc_mac4_srds1_ch3_xgmii_rx_clk.clkr,
++      [NSS_CC_MAC4_RX_CLK_SRC] = &nss_cc_mac4_rx_clk_src.clkr,
++      [NSS_CC_MAC4_RX_DIV_CLK_SRC] = &nss_cc_mac4_rx_div_clk_src.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC] =
++              &nss_cc_mac4_srds1_ch3_xgmii_tx_div_clk_src.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_TX_CLK] = &nss_cc_mac4_srds1_ch3_tx_clk.clkr,
++      [NSS_CC_MAC4_RX_CLK] = &nss_cc_mac4_rx_clk.clkr,
++      [NSS_CC_MAC4_GEPHY3_RX_CLK] = &nss_cc_mac4_gephy3_rx_clk.clkr,
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK] = &nss_cc_mac4_srds1_ch3_xgmii_tx_clk.clkr,
++      [NSS_CC_MAC5_TX_CLK_SRC] = &nss_cc_mac5_tx_clk_src.clkr,
++      [NSS_CC_MAC5_TX_DIV_CLK_SRC] = &nss_cc_mac5_tx_div_clk_src.clkr,
++      [NSS_CC_MAC5_TX_SRDS0_CLK] = &nss_cc_mac5_tx_srds0_clk.clkr,
++      [NSS_CC_MAC5_TX_CLK] = &nss_cc_mac5_tx_clk.clkr,
++      [NSS_CC_MAC5_RX_CLK_SRC] = &nss_cc_mac5_rx_clk_src.clkr,
++      [NSS_CC_MAC5_RX_DIV_CLK_SRC] = &nss_cc_mac5_rx_div_clk_src.clkr,
++      [NSS_CC_MAC5_RX_SRDS0_CLK] = &nss_cc_mac5_rx_srds0_clk.clkr,
++      [NSS_CC_MAC5_RX_CLK] = &nss_cc_mac5_rx_clk.clkr,
++      [NSS_CC_MAC5_TX_SRDS0_CLK_SRC] = &nss_cc_mac5_tx_srds0_clk_src.clkr,
++      [NSS_CC_MAC5_RX_SRDS0_CLK_SRC] = &nss_cc_mac5_rx_srds0_clk_src.clkr,
++      [NSS_CC_AHB_CLK_SRC] = &nss_cc_ahb_clk_src.clkr,
++      [NSS_CC_AHB_CLK] = &nss_cc_ahb_clk.clkr,
++      [NSS_CC_SEC_CTRL_AHB_CLK] = &nss_cc_sec_ctrl_ahb_clk.clkr,
++      [NSS_CC_TLMM_CLK] = &nss_cc_tlmm_clk.clkr,
++      [NSS_CC_TLMM_AHB_CLK] = &nss_cc_tlmm_ahb_clk.clkr,
++      [NSS_CC_CNOC_AHB_CLK] = &nss_cc_cnoc_ahb_clk.clkr,
++      [NSS_CC_MDIO_AHB_CLK] = &nss_cc_mdio_ahb_clk.clkr,
++      [NSS_CC_MDIO_MASTER_AHB_CLK] = &nss_cc_mdio_master_ahb_clk.clkr,
++      [NSS_CC_SYS_CLK_SRC] = &nss_cc_sys_clk_src.clkr,
++      [NSS_CC_SRDS0_SYS_CLK] = &nss_cc_srds0_sys_clk.clkr,
++      [NSS_CC_SRDS1_SYS_CLK] = &nss_cc_srds1_sys_clk.clkr,
++      [NSS_CC_GEPHY0_SYS_CLK] = &nss_cc_gephy0_sys_clk.clkr,
++      [NSS_CC_GEPHY1_SYS_CLK] = &nss_cc_gephy1_sys_clk.clkr,
++      [NSS_CC_GEPHY2_SYS_CLK] = &nss_cc_gephy2_sys_clk.clkr,
++      [NSS_CC_GEPHY3_SYS_CLK] = &nss_cc_gephy3_sys_clk.clkr,
++};
++
++static const struct qcom_reset_map nss_cc_qca8k_resets[] = {
++      [NSS_CC_SWITCH_CORE_ARES] = { 0xc, 2 },
++      [NSS_CC_APB_BRIDGE_ARES] = { 0x10, 2 },
++      [NSS_CC_MAC0_TX_ARES] = { 0x20, 2 },
++      [NSS_CC_MAC0_TX_SRDS1_ARES] = { 0x24, 2 },
++      [NSS_CC_MAC0_RX_ARES] = { 0x34, 2 },
++      [NSS_CC_MAC0_RX_SRDS1_ARES] = { 0x3c, 2 },
++      [NSS_CC_MAC1_SRDS1_CH0_RX_ARES] = { 0x50, 2 },
++      [NSS_CC_MAC1_TX_ARES] = { 0x54, 2 },
++      [NSS_CC_MAC1_GEPHY0_TX_ARES] = { 0x58, 2 },
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES] = { 0x5c, 2 },
++      [NSS_CC_MAC1_SRDS1_CH0_TX_ARES] = { 0x70, 2 },
++      [NSS_CC_MAC1_RX_ARES] = { 0x74, 2 },
++      [NSS_CC_MAC1_GEPHY0_RX_ARES] = { 0x78, 2 },
++      [NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES] = { 0x7c, 2 },
++      [NSS_CC_MAC2_SRDS1_CH1_RX_ARES] = { 0x90, 2 },
++      [NSS_CC_MAC2_TX_ARES] = { 0x94, 2 },
++      [NSS_CC_MAC2_GEPHY1_TX_ARES] = { 0x98, 2 },
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES] = { 0x9c, 2 },
++      [NSS_CC_MAC2_SRDS1_CH1_TX_ARES] = { 0xb0, 2 },
++      [NSS_CC_MAC2_RX_ARES] = { 0xb4, 2 },
++      [NSS_CC_MAC2_GEPHY1_RX_ARES] = { 0xb8, 2 },
++      [NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES] = { 0xbc, 2 },
++      [NSS_CC_MAC3_SRDS1_CH2_RX_ARES] = { 0xd0, 2 },
++      [NSS_CC_MAC3_TX_ARES] = { 0xd4, 2 },
++      [NSS_CC_MAC3_GEPHY2_TX_ARES] = { 0xd8, 2 },
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES] = { 0xdc, 2 },
++      [NSS_CC_MAC3_SRDS1_CH2_TX_ARES] = { 0xf0, 2 },
++      [NSS_CC_MAC3_RX_ARES] = { 0xf4, 2 },
++      [NSS_CC_MAC3_GEPHY2_RX_ARES] = { 0xf8, 2 },
++      [NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES] = { 0xfc, 2 },
++      [NSS_CC_MAC4_SRDS1_CH3_RX_ARES] = { 0x110, 2 },
++      [NSS_CC_MAC4_TX_ARES] = { 0x114, 2 },
++      [NSS_CC_MAC4_GEPHY3_TX_ARES] = { 0x118, 2 },
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES] = { 0x11c, 2 },
++      [NSS_CC_MAC4_SRDS1_CH3_TX_ARES] = { 0x130, 2 },
++      [NSS_CC_MAC4_RX_ARES] = { 0x134, 2 },
++      [NSS_CC_MAC4_GEPHY3_RX_ARES] = { 0x138, 2 },
++      [NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES] = { 0x13c, 2 },
++      [NSS_CC_MAC5_TX_ARES] = { 0x14c, 2 },
++      [NSS_CC_MAC5_TX_SRDS0_ARES] = { 0x150, 2 },
++      [NSS_CC_MAC5_RX_ARES] = { 0x160, 2 },
++      [NSS_CC_MAC5_RX_SRDS0_ARES] = { 0x164, 2 },
++      [NSS_CC_AHB_ARES] = { 0x170, 2 },
++      [NSS_CC_SEC_CTRL_AHB_ARES] = { 0x174, 2 },
++      [NSS_CC_TLMM_ARES] = { 0x178, 2 },
++      [NSS_CC_TLMM_AHB_ARES] = { 0x190, 2 },
++      [NSS_CC_CNOC_AHB_ARES] = { 0x194, 2 }, /* reset CNOC AHB & APB */
++      [NSS_CC_MDIO_AHB_ARES] = { 0x198, 2 },
++      [NSS_CC_MDIO_MASTER_AHB_ARES] = { 0x19c, 2 },
++      [NSS_CC_SRDS0_SYS_ARES] = { 0x1a8, 2 },
++      [NSS_CC_SRDS1_SYS_ARES] = { 0x1ac, 2 },
++      [NSS_CC_GEPHY0_SYS_ARES] = { 0x1b0, 2 },
++      [NSS_CC_GEPHY1_SYS_ARES] = { 0x1b4, 2 },
++      [NSS_CC_GEPHY2_SYS_ARES] = { 0x1b8, 2 },
++      [NSS_CC_GEPHY3_SYS_ARES] = { 0x1bc, 2 },
++      [NSS_CC_SEC_CTRL_ARES] = { 0x1c8, 2 },
++      [NSS_CC_SEC_CTRL_SENSE_ARES] = { 0x1d0, 2 },
++      [NSS_CC_SLEEP_ARES] = { 0x1e0, 2 },
++      [NSS_CC_DEBUG_ARES] = { 0x1e8, 2 },
++      [NSS_CC_GEPHY0_ARES] = { 0x304, 0 },
++      [NSS_CC_GEPHY1_ARES] = { 0x304, 1 },
++      [NSS_CC_GEPHY2_ARES] = { 0x304, 2 },
++      [NSS_CC_GEPHY3_ARES] = { 0x304, 3 },
++      [NSS_CC_DSP_ARES] = { 0x304, 4 },
++      [NSS_CC_GEPHY_FULL_ARES] = { .reg = 0x304, .bitmask = GENMASK(4, 0) },
++      [NSS_CC_GLOBAL_ARES] = { 0x308, 0 },
++      [NSS_CC_XPCS_ARES] = { 0x30c, 0 },
++};
++
++/* For each read/write of a clock register, three MDIO frames are sent to
++ * the device.
++ *
++ * 1. The high part [23:8] of the register address is packaged into the first
++ *    MDIO frame to select the page.
++ * 2. The low part [7:0] of the register address is packaged into the second
++ *    MDIO frame together with the low 16 bits of data to read/write.
++ * 3. The low part [7:0] of the register address is packaged into the last
++ *    MDIO frame together with the high 16 bits of data to read/write.
++ *
++ * The clause 22 MDIO frame format used by the device is shown below.
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
++ * | ST| OP|   ADDR  |   REG   | TA|             DATA              |
++ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
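++ *
++ * For example (illustrative values only): writing 0xAABBCCDD to register
++ * address 0x123456 first selects page 0x1234 (bits [23:8]), then sends the
++ * low 16 bits 0xCCDD and the high 16 bits 0xAABB in the two data frames.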
++ */
++static inline void convert_reg_to_mii_addr(u32 regaddr, u16 *reg, u16 *phy_addr, u16 *page)
++{
++      *reg = FIELD_GET(QCA8K_CLK_REG_MASK, regaddr);
++      *phy_addr = FIELD_GET(QCA8K_CLK_PHY_ADDR_MASK, regaddr) | QCA8K_LOW_ADDR_PREFIX;
++      *page = FIELD_GET(QCA8K_CLK_PAGE_MASK, regaddr);
++}
++
++static int qca8k_mii_read(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u32 *val)
++{
++      int ret, data;
++
++      ret = __mdiobus_read(bus, switch_phy_id, reg);
++      if (ret >= 0) {
++              data = ret;
++
++              ret = __mdiobus_read(bus, switch_phy_id, (reg | QCA8K_REG_DATA_UPPER_16_BITS));
++              if (ret >= 0)
++                      *val = data | ret << 16;
++      }
++
++      if (ret < 0)
++              dev_err_ratelimited(&bus->dev, "fail to read qca8k mii register\n");
++
++      return ret < 0 ? ret : 0;
++}
++
++static void qca8k_mii_write(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u32 val)
++{
++      int ret;
++
++      ret = __mdiobus_write(bus, switch_phy_id, reg, lower_16_bits(val));
++      if (ret >= 0)
++              ret = __mdiobus_write(bus, switch_phy_id, (reg | QCA8K_REG_DATA_UPPER_16_BITS),
++                                    upper_16_bits(val));
++
++      if (ret < 0)
++              dev_err_ratelimited(&bus->dev, "fail to write qca8k mii register\n");
++}
++
++static int qca8k_mii_page_set(struct mii_bus *bus, u16 switch_phy_id, u32 reg, u16 page)
++{
++      int ret;
++
++      ret = __mdiobus_write(bus, switch_phy_id, reg, page);
++      if (ret < 0)
++              dev_err_ratelimited(&bus->dev, "fail to set page\n");
++
++      return ret;
++}
++
++static int qca8k_regmap_read(void *context, unsigned int regaddr, unsigned int *val)
++{
++      struct mii_bus *bus = context;
++      u16 reg, phy_addr, page;
++      int ret;
++
++      regaddr += QCA8K_CLK_REG_BASE;
++      convert_reg_to_mii_addr(regaddr, &reg, &phy_addr, &page);
++
++      mutex_lock(&bus->mdio_lock);
++      ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
++      if (ret < 0)
++              goto qca8k_read_exit;
++
++      ret = qca8k_mii_read(bus, phy_addr, reg, val);
++
++qca8k_read_exit:
++      mutex_unlock(&bus->mdio_lock);
++      return ret;
++}
++
++static int qca8k_regmap_write(void *context, unsigned int regaddr, unsigned int val)
++{
++      struct mii_bus *bus = context;
++      u16 reg, phy_addr, page;
++      int ret;
++
++      regaddr += QCA8K_CLK_REG_BASE;
++      convert_reg_to_mii_addr(regaddr, &reg, &phy_addr, &page);
++
++      mutex_lock(&bus->mdio_lock);
++      ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
++      if (ret < 0)
++              goto qca8k_write_exit;
++
++      qca8k_mii_write(bus, phy_addr, reg, val);
++
++qca8k_write_exit:
++      mutex_unlock(&bus->mdio_lock);
++      return ret;
++}
++
++static int qca8k_regmap_update_bits(void *context, unsigned int regaddr,
++                                  unsigned int mask, unsigned int value)
++{
++      struct mii_bus *bus = context;
++      u16 reg, phy_addr, page;
++      int ret;
++      u32 val;
++
++      regaddr += QCA8K_CLK_REG_BASE;
++      convert_reg_to_mii_addr(regaddr, &reg, &phy_addr, &page);
++
++      mutex_lock(&bus->mdio_lock);
++      ret = qca8k_mii_page_set(bus, QCA8K_HIGH_ADDR_PREFIX, QCA8K_CFG_PAGE_REG, page);
++      if (ret < 0)
++              goto qca8k_update_exit;
++
++      ret = qca8k_mii_read(bus, phy_addr, reg, &val);
++      if (ret < 0)
++              goto qca8k_update_exit;
++
++      val &= ~mask;
++      val |= value;
++      qca8k_mii_write(bus, phy_addr, reg, val);
++
++qca8k_update_exit:
++      mutex_unlock(&bus->mdio_lock);
++      return ret;
++}
++
++static const struct regmap_config nss_cc_qca8k_regmap_config = {
++      .reg_bits = 12,
++      .reg_stride = 4,
++      .val_bits = 32,
++      .max_register = 0x30c,
++      .reg_read = qca8k_regmap_read,
++      .reg_write = qca8k_regmap_write,
++      .reg_update_bits = qca8k_regmap_update_bits,
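++      /* Locking is handled by the MDIO accessors above via bus->mdio_lock */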
++      .disable_locking = true,
++};
++
++static const struct qcom_cc_desc nss_cc_qca8k_desc = {
++      .config = &nss_cc_qca8k_regmap_config,
++      .clks = nss_cc_qca8k_clocks,
++      .num_clks = ARRAY_SIZE(nss_cc_qca8k_clocks),
++      .resets = nss_cc_qca8k_resets,
++      .num_resets = ARRAY_SIZE(nss_cc_qca8k_resets),
++};
++
++/*
++ * The reference clock of QCA8k NSSCC needs to be enabled to make sure
++ * the GPIO reset takes effect.
++ */
++static int nss_cc_qca8k_clock_enable_and_reset(struct device *dev)
++{
++      struct gpio_desc *gpiod;
++      struct clk *clk;
++
++      clk = devm_clk_get_enabled(dev, NULL);
++      if (IS_ERR(clk))
++              return PTR_ERR(clk);
++
++      gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
++      if (IS_ERR(gpiod)) {
++              return PTR_ERR(gpiod);
++      } else if (gpiod) {
++              msleep(100);
++              gpiod_set_value_cansleep(gpiod, 0);
++      }
++
++      return 0;
++}
++
++static int nss_cc_qca8k_probe(struct mdio_device *mdiodev)
++{
++      struct regmap *regmap;
++      int ret;
++
++      ret = nss_cc_qca8k_clock_enable_and_reset(&mdiodev->dev);
++      if (ret)
++              return dev_err_probe(&mdiodev->dev, ret, "Failed to reset NSSCC\n");
++
++      regmap = devm_regmap_init(&mdiodev->dev, NULL, mdiodev->bus, nss_cc_qca8k_desc.config);
++      if (IS_ERR(regmap))
++              return dev_err_probe(&mdiodev->dev, PTR_ERR(regmap), "Failed to init regmap\n");
++
++      return qcom_cc_really_probe(&mdiodev->dev, &nss_cc_qca8k_desc, regmap);
++}
++
++static const struct of_device_id nss_cc_qca8k_match_table[] = {
++      { .compatible = "qcom,qca8084-nsscc" },
++      { }
++};
++MODULE_DEVICE_TABLE(of, nss_cc_qca8k_match_table);
++
++static struct mdio_driver nss_cc_qca8k_driver = {
++      .mdiodrv.driver = {
++              .name = "qcom,qca8k-nsscc",
++              .of_match_table = nss_cc_qca8k_match_table,
++      },
++      .probe = nss_cc_qca8k_probe,
++};
++
++mdio_module_driver(nss_cc_qca8k_driver);
++
++MODULE_DESCRIPTION("QCOM NSS_CC QCA8K Driver");
++MODULE_LICENSE("GPL");
+-- 
+2.45.2
+
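A minimal consumer sketch (hypothetical device and clock name, not part of
this patch): since the NSSCC branches above gate through
clk_branch2_prepare_ops, the enable happens in .prepare and may sleep while
the MDIO frames go out, so consumers must use the prepare path rather than
enabling from atomic context:

	#include <linux/clk.h>

	static int example_enable_port_clk(struct device *dev)
	{
		/* "mac4_tx" is an illustrative clock-names entry */
		struct clk *tx = devm_clk_get(dev, "mac4_tx");

		if (IS_ERR(tx))
			return PTR_ERR(tx);

		/* May sleep: the branch gate is toggled over the MDIO bus */
		return clk_prepare_enable(tx);
	}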
diff --git a/target/linux/qualcommbe/patches-6.6/003-v6.11-arm64-dts-qcom-ipq9574-add-MDIO-bus.patch b/target/linux/qualcommbe/patches-6.6/003-v6.11-arm64-dts-qcom-ipq9574-add-MDIO-bus.patch
new file mode 100644 (file)
index 0000000..90a7ad3
--- /dev/null
@@ -0,0 +1,39 @@
+From e60ac570137b42ef61a01a6b26133a8e2d7e8d4b Mon Sep 17 00:00:00 2001
+From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Date: Mon, 6 May 2024 21:47:58 -0500
+Subject: [PATCH] arm64: dts: qcom: ipq9574: add MDIO bus
+
+The IPQ95xx uses an IPQ4019-compatible MDIO controller that is already
+supported. Add a DT node to expose it.
+
+Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Link: https://lore.kernel.org/r/20240507024758.2810514-2-mr.nuke.me@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 7f2e5cbf3bbb..ded02bc39275 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -232,6 +232,16 @@ rng: rng@e3000 {
+                       clock-names = "core";
+               };
++              mdio: mdio@90000 {
++                      compatible = "qcom,ipq9574-mdio", "qcom,ipq4019-mdio";
++                      reg = <0x00090000 0x64>;
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      clocks = <&gcc GCC_MDIO_AHB_CLK>;
++                      clock-names = "gcc_mdio_ahb_clk";
++                      status = "disabled";
++              };
++
+               qfprom: efuse@a4000 {
+                       compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
+                       reg = <0x000a4000 0x5a1>;
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/004-01-v.610-clk-qcom-clk-rcg-introduce-support-for-multiple-conf.patch b/target/linux/qualcommbe/patches-6.6/004-01-v.610-clk-qcom-clk-rcg-introduce-support-for-multiple-conf.patch
new file mode 100644 (file)
index 0000000..c75cd16
--- /dev/null
@@ -0,0 +1,88 @@
+From d06b1043644a1831ab141bbee2669002bba15b0f Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 20 Dec 2023 23:17:22 +0100
+Subject: [PATCH 1/2] clk: qcom: clk-rcg: introduce support for multiple conf
+ for same freq
+
+Some RCG frequencies can be reached by multiple configurations.
+
+We currently declare multiple configurations for the same frequency, but
+that is not supported and the first configuration is always taken.
+
+These multiple configurations are needed because, depending on the
+current parent configuration, a different configuration may be required
+to reach the same frequency.
+
+To handle this, introduce three new macros: C, FM and FMS.
+
+- C is used to declare a freq_conf where src, pre_div, m and n are
+  provided.
+
+- FM is used to declare a freq_multi_tbl with the frequency and an
+  array of confs holding all the configurations for that frequency.
+
+- FMS is used to declare a freq_multi_tbl with the frequency and an
+  array with a single conf built from the provided src, pre_div, m and n.
+
+Struct clk_rcg2 is changed to add a union referencing either a simple
+freq_tbl or a complex freq_multi_tbl.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Acked-by: Stephen Boyd <sboyd@kernel.org>
+Link: https://lore.kernel.org/r/20231220221724.3822-2-ansuelsmth@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/clk-rcg.h | 23 ++++++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index e6d84c8c7989..c50e6616d02c 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -17,6 +17,23 @@ struct freq_tbl {
+       u16 n;
+ };
++#define C(s, h, m, n) { (s), (2 * (h) - 1), (m), (n) }
++#define FM(f, confs) { (f), ARRAY_SIZE(confs), (confs) }
++#define FMS(f, s, h, m, n) { (f), 1, (const struct freq_conf []){ C(s, h, m, n) } }
++
++struct freq_conf {
++      u8 src;
++      u8 pre_div;
++      u16 m;
++      u16 n;
++};
++
++struct freq_multi_tbl {
++      unsigned long freq;
++      size_t num_confs;
++      const struct freq_conf *confs;
++};
++
+ /**
+  * struct mn - M/N:D counter
+  * @mnctr_en_bit: bit to enable mn counter
+@@ -138,6 +155,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
+  * @safe_src_index: safe src index value
+  * @parent_map: map from software's parent index to hardware's src_sel field
+  * @freq_tbl: frequency table
++ * @freq_multi_tbl: frequency table for clocks reachable with multiple RCG confs
+  * @clkr: regmap clock handle
+  * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
+  * @parked_cfg: cached value of the CFG register for parked RCGs
+@@ -149,7 +167,10 @@ struct clk_rcg2 {
+       u8                      hid_width;
+       u8                      safe_src_index;
+       const struct parent_map *parent_map;
+-      const struct freq_tbl   *freq_tbl;
++      union {
++              const struct freq_tbl           *freq_tbl;
++              const struct freq_multi_tbl     *freq_multi_tbl;
++      };
+       struct clk_regmap       clkr;
+       u8                      cfg_off;
+       u32                     parked_cfg;
+-- 
+2.45.2
+
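A quick sketch of the new macros in use (illustrative table, not from this
patch; note that C() stores the pre-divider as 2 * h - 1, so a fractional
divider of 2.5 is encoded as the register value 4):

	/* 125 MHz is reachable from two different parents */
	static const struct freq_conf ftbl_example_125[] = {
		C(P_UNIPHY0_RX, 2.5, 0, 0),	/* expands to { P_UNIPHY0_RX, 4, 0, 0 } */
		C(P_UNIPHY1_TX312P5M, 2.5, 0, 0),
	};

	static const struct freq_multi_tbl ftbl_example[] = {
		FM(125000000, ftbl_example_125),
		FMS(50000000, P_XO, 1, 0, 0),	/* single conf, declared inline */
		{ }
	};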
diff --git a/target/linux/qualcommbe/patches-6.6/004-02-v6.10-clk-qcom-clk-rcg2-add-support-for-rcg2-freq-multi-op.patch b/target/linux/qualcommbe/patches-6.6/004-02-v6.10-clk-qcom-clk-rcg2-add-support-for-rcg2-freq-multi-op.patch
new file mode 100644 (file)
index 0000000..94451fd
--- /dev/null
@@ -0,0 +1,307 @@
+From 89da22456af0762477d8c1345fdd17961b3ada80 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 20 Dec 2023 23:17:23 +0100
+Subject: [PATCH 2/2] clk: qcom: clk-rcg2: add support for rcg2 freq multi ops
+
+Some RCG frequencies can be reached by multiple configurations.
+
+Add clk_rcg2_fm_ops to support these special RCG configurations.
+
+These alternative ops select the frequency using a CEIL policy.
+
+Once the target frequency is found, the correct config is selected by
+calculating the final rate for each candidate config (using its
+defined parent and divider values) and choosing the one that is
+closest to the requested rate.
+
+These checks are skipped if there is just one config for the requested
+freq.
+
+qcom_find_freq_multi is added to search for the freq in the new struct
+freq_multi_tbl.
+__clk_rcg2_select_conf is used to select the correct conf by simulating
+the final clock.
+If a conf can't be found because its parent is not reachable, a WARN is
+printed and -EINVAL is returned.
+
+Tested-by: Wei Lei <quic_leiwei@quicinc.com>
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+Acked-by: Stephen Boyd <sboyd@kernel.org>
+Link: https://lore.kernel.org/r/20231220221724.3822-3-ansuelsmth@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/clk-rcg.h  |   1 +
+ drivers/clk/qcom/clk-rcg2.c | 166 ++++++++++++++++++++++++++++++++++++
+ drivers/clk/qcom/common.c   |  18 ++++
+ drivers/clk/qcom/common.h   |   2 +
+ 4 files changed, 187 insertions(+)
+
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index c50e6616d02c..d7414361e432 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -190,6 +190,7 @@ struct clk_rcg2_gfx3d {
+ extern const struct clk_ops clk_rcg2_ops;
+ extern const struct clk_ops clk_rcg2_floor_ops;
++extern const struct clk_ops clk_rcg2_fm_ops;
+ extern const struct clk_ops clk_rcg2_mux_closest_ops;
+ extern const struct clk_ops clk_edp_pixel_ops;
+ extern const struct clk_ops clk_byte_ops;
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 5183c74b074f..9b3aaa7f20ac 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -260,6 +260,115 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+       return 0;
+ }
++static const struct freq_conf *
++__clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
++                     unsigned long req_rate)
++{
++      unsigned long rate_diff, best_rate_diff = ULONG_MAX;
++      const struct freq_conf *conf, *best_conf = NULL;
++      struct clk_rcg2 *rcg = to_clk_rcg2(hw);
++      const char *name = clk_hw_get_name(hw);
++      unsigned long parent_rate, rate;
++      struct clk_hw *p;
++      int index, i;
++
++      /* Exit early if only one config is defined */
++      if (f->num_confs == 1) {
++              best_conf = f->confs;
++              goto exit;
++      }
++
++      /* Search each provided config for the one nearest the wanted rate */
++      for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
++              index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
++              if (index < 0)
++                      continue;
++
++              p = clk_hw_get_parent_by_index(hw, index);
++              if (!p)
++                      continue;
++
++              parent_rate = clk_hw_get_rate(p);
++              rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
++
++              if (rate == req_rate) {
++                      best_conf = conf;
++                      goto exit;
++              }
++
++              rate_diff = abs_diff(req_rate, rate);
++              if (rate_diff < best_rate_diff) {
++                      best_rate_diff = rate_diff;
++                      best_conf = conf;
++              }
++      }
++
++      /*
++       * Very unlikely. Warn if we couldn't find a correct config
++       * because no parent could be found for any config.
++       */
++      if (unlikely(!best_conf)) {
++              WARN(1, "%s: can't find a configuration for rate %lu\n",
++                   name, req_rate);
++              return ERR_PTR(-EINVAL);
++      }
++
++exit:
++      return best_conf;
++}
++
++static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
++                                     struct clk_rate_request *req)
++{
++      unsigned long clk_flags, rate = req->rate;
++      struct clk_rcg2 *rcg = to_clk_rcg2(hw);
++      const struct freq_conf *conf;
++      struct clk_hw *p;
++      int index;
++
++      f = qcom_find_freq_multi(f, rate);
++      if (!f || !f->confs)
++              return -EINVAL;
++
++      conf = __clk_rcg2_select_conf(hw, f, rate);
++      if (IS_ERR(conf))
++              return PTR_ERR(conf);
++      index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
++      if (index < 0)
++              return index;
++
++      clk_flags = clk_hw_get_flags(hw);
++      p = clk_hw_get_parent_by_index(hw, index);
++      if (!p)
++              return -EINVAL;
++
++      if (clk_flags & CLK_SET_RATE_PARENT) {
++              rate = f->freq;
++              if (conf->pre_div) {
++                      if (!rate)
++                              rate = req->rate;
++                      rate /= 2;
++                      rate *= conf->pre_div + 1;
++              }
++
++              if (conf->n) {
++                      u64 tmp = rate;
++
++                      tmp = tmp * conf->n;
++                      do_div(tmp, conf->m);
++                      rate = tmp;
++              }
++      } else {
++              rate = clk_hw_get_rate(p);
++      }
++
++      req->best_parent_hw = p;
++      req->best_parent_rate = rate;
++      req->rate = f->freq;
++
++      return 0;
++}
++
+ static int clk_rcg2_determine_rate(struct clk_hw *hw,
+                                  struct clk_rate_request *req)
+ {
+@@ -276,6 +385,14 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
+ }
++static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
++                                    struct clk_rate_request *req)
++{
++      struct clk_rcg2 *rcg = to_clk_rcg2(hw);
++
++      return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
++}
++
+ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+                               u32 *_cfg)
+ {
+@@ -371,6 +488,30 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+       return clk_rcg2_configure(rcg, f);
+ }
++static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
++{
++      struct clk_rcg2 *rcg = to_clk_rcg2(hw);
++      const struct freq_multi_tbl *f;
++      const struct freq_conf *conf;
++      struct freq_tbl f_tbl = {};
++
++      f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
++      if (!f || !f->confs)
++              return -EINVAL;
++
++      conf = __clk_rcg2_select_conf(hw, f, rate);
++      if (IS_ERR(conf))
++              return PTR_ERR(conf);
++
++      f_tbl.freq = f->freq;
++      f_tbl.src = conf->src;
++      f_tbl.pre_div = conf->pre_div;
++      f_tbl.m = conf->m;
++      f_tbl.n = conf->n;
++
++      return clk_rcg2_configure(rcg, &f_tbl);
++}
++
+ static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
+                           unsigned long parent_rate)
+ {
+@@ -383,6 +524,12 @@ static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+       return __clk_rcg2_set_rate(hw, rate, FLOOR);
+ }
++static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
++                              unsigned long parent_rate)
++{
++      return __clk_rcg2_fm_set_rate(hw, rate);
++}
++
+ static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
+               unsigned long rate, unsigned long parent_rate, u8 index)
+ {
+@@ -395,6 +542,12 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
+       return __clk_rcg2_set_rate(hw, rate, FLOOR);
+ }
++static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
++              unsigned long rate, unsigned long parent_rate, u8 index)
++{
++      return __clk_rcg2_fm_set_rate(hw, rate);
++}
++
+ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+ {
+       struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+@@ -505,6 +658,19 @@ const struct clk_ops clk_rcg2_floor_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
++const struct clk_ops clk_rcg2_fm_ops = {
++      .is_enabled = clk_rcg2_is_enabled,
++      .get_parent = clk_rcg2_get_parent,
++      .set_parent = clk_rcg2_set_parent,
++      .recalc_rate = clk_rcg2_recalc_rate,
++      .determine_rate = clk_rcg2_fm_determine_rate,
++      .set_rate = clk_rcg2_fm_set_rate,
++      .set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
++      .get_duty_cycle = clk_rcg2_get_duty_cycle,
++      .set_duty_cycle = clk_rcg2_set_duty_cycle,
++};
++EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
++
+ const struct clk_ops clk_rcg2_mux_closest_ops = {
+       .determine_rate = __clk_mux_determine_rate_closest,
+       .get_parent = clk_rcg2_get_parent,
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index 75f09e6e057e..48f81e3a5e80 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -41,6 +41,24 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
+ }
+ EXPORT_SYMBOL_GPL(qcom_find_freq);
++const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
++                                                unsigned long rate)
++{
++      if (!f)
++              return NULL;
++
++      if (!f->freq)
++              return f;
++
++      for (; f->freq; f++)
++              if (rate <= f->freq)
++                      return f;
++
++      /* Default to our fastest rate */
++      return f - 1;
++}
++EXPORT_SYMBOL_GPL(qcom_find_freq_multi);
++
+ const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+                                           unsigned long rate)
+ {
+diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
+index 9c8f7b798d9f..2d4a8a837e6c 100644
+--- a/drivers/clk/qcom/common.h
++++ b/drivers/clk/qcom/common.h
+@@ -45,6 +45,8 @@ extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
+                                            unsigned long rate);
+ extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,
+                                                  unsigned long rate);
++extern const struct freq_multi_tbl *qcom_find_freq_multi(const struct freq_multi_tbl *f,
++                                                       unsigned long rate);
+ extern void
+ qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
+ extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/005-v6.11-clk-qcom-branch-Add-clk_branch2_prepare_ops.patch b/target/linux/qualcommbe/patches-6.6/005-v6.11-clk-qcom-branch-Add-clk_branch2_prepare_ops.patch
new file mode 100644 (file)
index 0000000..7790af5
--- /dev/null
@@ -0,0 +1,52 @@
+From 7311bbfff31c4961c57d94c165fa843f155f8236 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 5 Jun 2024 20:45:38 +0800
+Subject: [PATCH] clk: qcom: branch: Add clk_branch2_prepare_ops
+
+Add clk_branch2_prepare_ops to support clock controllers whose
+hardware registers are accessed over an MDIO bus, where a
+spinlock cannot be used because MDIO operations may sleep.
+
+When clk_branch2_prepare_ops is used, the clock is enabled by
+.prepare instead of .enable.
+
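+A branch clock behind such a bus can then reference these ops from
+its init data; a minimal, hypothetical sketch (register offsets and
+names here are illustrative, not taken from a real driver):
+
+  static struct clk_branch example_mdio_clk = {
+          .halt_reg = 0x0010,
+          .halt_check = BRANCH_HALT,
+          .clkr = {
+                  .enable_reg = 0x0010,
+                  .enable_mask = BIT(0),
+                  .hw.init = &(struct clk_init_data) {
+                          .name = "example_mdio_clk",
+                          /* prepare/unprepare may sleep, so this is
+                           * safe for a regmap backed by MDIO accessors
+                           */
+                          .ops = &clk_branch2_prepare_ops,
+                  },
+          },
+  };
+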
+Acked-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Link: https://lore.kernel.org/r/20240605124541.2711467-2-quic_luoj@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/clk-branch.c | 7 +++++++
+ drivers/clk/qcom/clk-branch.h | 1 +
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
+index c1dba33ac31a..229480c5b075 100644
+--- a/drivers/clk/qcom/clk-branch.c
++++ b/drivers/clk/qcom/clk-branch.c
+@@ -191,3 +191,10 @@ const struct clk_ops clk_branch_simple_ops = {
+       .is_enabled = clk_is_enabled_regmap,
+ };
+ EXPORT_SYMBOL_GPL(clk_branch_simple_ops);
++
++const struct clk_ops clk_branch2_prepare_ops = {
++      .prepare = clk_branch2_enable,
++      .unprepare = clk_branch2_disable,
++      .is_prepared = clk_is_enabled_regmap,
++};
++EXPORT_SYMBOL_GPL(clk_branch2_prepare_ops);
+diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
+index f1b3b635ff32..292756435f53 100644
+--- a/drivers/clk/qcom/clk-branch.h
++++ b/drivers/clk/qcom/clk-branch.h
+@@ -109,6 +109,7 @@ extern const struct clk_ops clk_branch2_ops;
+ extern const struct clk_ops clk_branch_simple_ops;
+ extern const struct clk_ops clk_branch2_aon_ops;
++extern const struct clk_ops clk_branch2_prepare_ops;
+ #define to_clk_branch(_hw) \
+       container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/006-v6.11-clk-qcom-common-commonize-qcom_cc_really_probe.patch b/target/linux/qualcommbe/patches-6.6/006-v6.11-clk-qcom-common-commonize-qcom_cc_really_probe.patch
new file mode 100644 (file)
index 0000000..d5db371
--- /dev/null
@@ -0,0 +1,1337 @@
+From 9f93a0a428606341da25bf2a00244701b58e08b9 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 5 Jun 2024 20:45:40 +0800
+Subject: [PATCH] clk: qcom: common: commonize qcom_cc_really_probe
+
+The qcom_cc_really_probe wrapper takes a platform device as its
+parameter, which limits it to platform drivers.
+
+The qca8k clock controller driver, however, is registered as an
+MDIO device while still following the qcom clock framework.
+
+To make qcom_cc_really_probe common, update it to take a struct
+device as its parameter, so that it can be used both by the
+existing platform devices and by the newly added MDIO device.
+
+Also update the current clock controller drivers to pass
+&pdev->dev when calling qcom_cc_really_probe.
+
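+With a struct device parameter, a clock controller that is not a
+platform device can reuse the same probe path. A hypothetical
+sketch of an MDIO device probe (the example_* bus, config and
+descriptor names are placeholders):
+
+  static int example_cc_mdio_probe(struct mdio_device *mdiodev)
+  {
+          struct regmap *regmap;
+
+          /* regmap ops here may sleep, pairing with prepare-based ops */
+          regmap = devm_regmap_init(&mdiodev->dev, &example_regmap_bus,
+                                    mdiodev, &example_regmap_config);
+          if (IS_ERR(regmap))
+                  return PTR_ERR(regmap);
+
+          return qcom_cc_really_probe(&mdiodev->dev, &example_cc_desc,
+                                      regmap);
+  }
+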
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Tested-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Link: https://lore.kernel.org/r/20240605124541.2711467-4-quic_luoj@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/apss-ipq6018.c        | 2 +-
+ drivers/clk/qcom/camcc-sc7180.c        | 2 +-
+ drivers/clk/qcom/camcc-sc7280.c        | 2 +-
+ drivers/clk/qcom/camcc-sc8280xp.c      | 2 +-
+ drivers/clk/qcom/camcc-sdm845.c        | 2 +-
+ drivers/clk/qcom/camcc-sm6350.c        | 2 +-
+ drivers/clk/qcom/camcc-sm7150.c        | 2 +-
+ drivers/clk/qcom/camcc-sm8250.c        | 2 +-
+ drivers/clk/qcom/camcc-sm8450.c        | 2 +-
+ drivers/clk/qcom/camcc-sm8550.c        | 2 +-
+ drivers/clk/qcom/camcc-x1e80100.c      | 2 +-
+ drivers/clk/qcom/common.c              | 7 +++----
+ drivers/clk/qcom/common.h              | 2 +-
+ drivers/clk/qcom/dispcc-qcm2290.c      | 2 +-
+ drivers/clk/qcom/dispcc-sc7180.c       | 2 +-
+ drivers/clk/qcom/dispcc-sc7280.c       | 2 +-
+ drivers/clk/qcom/dispcc-sc8280xp.c     | 2 +-
+ drivers/clk/qcom/dispcc-sdm845.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm6115.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm6125.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm6350.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm6375.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm7150.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm8250.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm8450.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm8550.c       | 2 +-
+ drivers/clk/qcom/dispcc-sm8650.c       | 2 +-
+ drivers/clk/qcom/dispcc-x1e80100.c     | 2 +-
+ drivers/clk/qcom/ecpricc-qdu1000.c     | 2 +-
+ drivers/clk/qcom/gcc-ipq5018.c         | 2 +-
+ drivers/clk/qcom/gcc-ipq6018.c         | 2 +-
+ drivers/clk/qcom/gcc-ipq8074.c         | 2 +-
+ drivers/clk/qcom/gcc-mdm9607.c         | 2 +-
+ drivers/clk/qcom/gcc-mdm9615.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8917.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8939.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8953.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8976.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8996.c         | 2 +-
+ drivers/clk/qcom/gcc-msm8998.c         | 2 +-
+ drivers/clk/qcom/gcc-qcm2290.c         | 2 +-
+ drivers/clk/qcom/gcc-qcs404.c          | 2 +-
+ drivers/clk/qcom/gcc-qdu1000.c         | 2 +-
+ drivers/clk/qcom/gcc-sa8775p.c         | 2 +-
+ drivers/clk/qcom/gcc-sc7180.c          | 2 +-
+ drivers/clk/qcom/gcc-sc7280.c          | 2 +-
+ drivers/clk/qcom/gcc-sc8180x.c         | 2 +-
+ drivers/clk/qcom/gcc-sc8280xp.c        | 2 +-
+ drivers/clk/qcom/gcc-sdm660.c          | 2 +-
+ drivers/clk/qcom/gcc-sdm845.c          | 2 +-
+ drivers/clk/qcom/gcc-sdx55.c           | 2 +-
+ drivers/clk/qcom/gcc-sdx65.c           | 2 +-
+ drivers/clk/qcom/gcc-sdx75.c           | 2 +-
+ drivers/clk/qcom/gcc-sm4450.c          | 2 +-
+ drivers/clk/qcom/gcc-sm6115.c          | 2 +-
+ drivers/clk/qcom/gcc-sm6125.c          | 2 +-
+ drivers/clk/qcom/gcc-sm6350.c          | 2 +-
+ drivers/clk/qcom/gcc-sm6375.c          | 2 +-
+ drivers/clk/qcom/gcc-sm7150.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8150.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8250.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8350.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8450.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8550.c          | 2 +-
+ drivers/clk/qcom/gcc-sm8650.c          | 2 +-
+ drivers/clk/qcom/gcc-x1e80100.c        | 2 +-
+ drivers/clk/qcom/gpucc-msm8998.c       | 2 +-
+ drivers/clk/qcom/gpucc-sa8775p.c       | 2 +-
+ drivers/clk/qcom/gpucc-sc7180.c        | 2 +-
+ drivers/clk/qcom/gpucc-sc7280.c        | 2 +-
+ drivers/clk/qcom/gpucc-sc8280xp.c      | 2 +-
+ drivers/clk/qcom/gpucc-sdm660.c        | 2 +-
+ drivers/clk/qcom/gpucc-sdm845.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm6115.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm6125.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm6350.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm6375.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8150.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8250.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8350.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8450.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8550.c        | 2 +-
+ drivers/clk/qcom/gpucc-sm8650.c        | 2 +-
+ drivers/clk/qcom/gpucc-x1e80100.c      | 2 +-
+ drivers/clk/qcom/lcc-ipq806x.c         | 2 +-
+ drivers/clk/qcom/lcc-msm8960.c         | 2 +-
+ drivers/clk/qcom/lpassaudiocc-sc7280.c | 4 ++--
+ drivers/clk/qcom/lpasscorecc-sc7180.c  | 2 +-
+ drivers/clk/qcom/lpasscorecc-sc7280.c  | 2 +-
+ drivers/clk/qcom/mmcc-msm8960.c        | 2 +-
+ drivers/clk/qcom/mmcc-msm8974.c        | 2 +-
+ drivers/clk/qcom/mmcc-msm8994.c        | 2 +-
+ drivers/clk/qcom/mmcc-msm8996.c        | 2 +-
+ drivers/clk/qcom/mmcc-msm8998.c        | 2 +-
+ drivers/clk/qcom/mmcc-sdm660.c         | 2 +-
+ drivers/clk/qcom/tcsrcc-sm8550.c       | 2 +-
+ drivers/clk/qcom/videocc-sc7180.c      | 2 +-
+ drivers/clk/qcom/videocc-sc7280.c      | 2 +-
+ drivers/clk/qcom/videocc-sdm845.c      | 2 +-
+ drivers/clk/qcom/videocc-sm7150.c      | 2 +-
+ drivers/clk/qcom/videocc-sm8150.c      | 2 +-
+ drivers/clk/qcom/videocc-sm8250.c      | 2 +-
+ drivers/clk/qcom/videocc-sm8350.c      | 2 +-
+ drivers/clk/qcom/videocc-sm8450.c      | 2 +-
+ drivers/clk/qcom/videocc-sm8550.c      | 2 +-
+ 105 files changed, 108 insertions(+), 109 deletions(-)
+
+diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c
+index e6295b832686..c89d126ebac3 100644
+--- a/drivers/clk/qcom/apss-ipq6018.c
++++ b/drivers/clk/qcom/apss-ipq6018.c
+@@ -123,7 +123,7 @@ static int apss_ipq6018_probe(struct platform_device *pdev)
+       if (!regmap)
+               return -ENODEV;
+-      return qcom_cc_really_probe(pdev, &apss_ipq6018_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &apss_ipq6018_desc, regmap);
+ }
+ static struct platform_driver apss_ipq6018_driver = {
+diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
+index da29d3b953ee..accd257632df 100644
+--- a/drivers/clk/qcom/camcc-sc7280.c
++++ b/drivers/clk/qcom/camcc-sc7280.c
+@@ -2481,7 +2481,7 @@ static int cam_cc_sc7280_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+       clk_lucid_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+-      return qcom_cc_really_probe(pdev, &cam_cc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &cam_cc_sc7280_desc, regmap);
+ }
+ static struct platform_driver cam_cc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
+index 8466d03e0d05..40022a10f8c0 100644
+--- a/drivers/clk/qcom/camcc-sdm845.c
++++ b/drivers/clk/qcom/camcc-sdm845.c
+@@ -1735,7 +1735,7 @@ static int cam_cc_sdm845_probe(struct platform_device *pdev)
+       cam_cc_pll_config.l = 0x14;
+       clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll_config);
+-      return qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &cam_cc_sdm845_desc, regmap);
+ }
+ static struct platform_driver cam_cc_sdm845_driver = {
+diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
+index c6fe684aa780..f6634cc8663e 100644
+--- a/drivers/clk/qcom/camcc-sm6350.c
++++ b/drivers/clk/qcom/camcc-sm6350.c
+@@ -1879,7 +1879,7 @@ static int camcc_sm6350_probe(struct platform_device *pdev)
+       clk_agera_pll_configure(&camcc_pll2, regmap, &camcc_pll2_config);
+       clk_fabia_pll_configure(&camcc_pll3, regmap, &camcc_pll3_config);
+-      return qcom_cc_really_probe(pdev, &camcc_sm6350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &camcc_sm6350_desc, regmap);
+ }
+ static struct platform_driver camcc_sm6350_driver = {
+diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
+index 96103eeda586..34d2f17520dc 100644
+--- a/drivers/clk/qcom/camcc-sm8250.c
++++ b/drivers/clk/qcom/camcc-sm8250.c
+@@ -2433,7 +2433,7 @@ static int cam_cc_sm8250_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+       clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+-      return qcom_cc_really_probe(pdev, &cam_cc_sm8250_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8250_desc, regmap);
+ }
+ static struct platform_driver cam_cc_sm8250_driver = {
+diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
+index 51338a2884d2..26b78eed15ef 100644
+--- a/drivers/clk/qcom/camcc-sm8450.c
++++ b/drivers/clk/qcom/camcc-sm8450.c
+@@ -2839,7 +2839,7 @@ static int cam_cc_sm8450_probe(struct platform_device *pdev)
+       clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+       clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+-      return qcom_cc_really_probe(pdev, &cam_cc_sm8450_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
+ }
+ static struct platform_driver cam_cc_sm8450_driver = {
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index 1e79f05d5226..c92e10c60322 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -252,11 +252,10 @@ static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+       return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
+ }
+-int qcom_cc_really_probe(struct platform_device *pdev,
++int qcom_cc_really_probe(struct device *dev,
+                        const struct qcom_cc_desc *desc, struct regmap *regmap)
+ {
+       int i, ret;
+-      struct device *dev = &pdev->dev;
+       struct qcom_reset_controller *reset;
+       struct qcom_cc *cc;
+       struct gdsc_desc *scd;
+@@ -333,7 +332,7 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+-      return qcom_cc_really_probe(pdev, desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, desc, regmap);
+ }
+ EXPORT_SYMBOL_GPL(qcom_cc_probe);
+@@ -351,7 +350,7 @@ int qcom_cc_probe_by_index(struct platform_device *pdev, int index,
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+-      return qcom_cc_really_probe(pdev, desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, desc, regmap);
+ }
+ EXPORT_SYMBOL_GPL(qcom_cc_probe_by_index);
+diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
+index 2d4a8a837e6c..d048bdeeba10 100644
+--- a/drivers/clk/qcom/common.h
++++ b/drivers/clk/qcom/common.h
+@@ -60,7 +60,7 @@ extern int qcom_cc_register_sleep_clk(struct device *dev);
+ extern struct regmap *qcom_cc_map(struct platform_device *pdev,
+                                 const struct qcom_cc_desc *desc);
+-extern int qcom_cc_really_probe(struct platform_device *pdev,
++extern int qcom_cc_really_probe(struct device *dev,
+                               const struct qcom_cc_desc *desc,
+                               struct regmap *regmap);
+ extern int qcom_cc_probe(struct platform_device *pdev,
+diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
+index 654a10d53e5c..449ffea2295d 100644
+--- a/drivers/clk/qcom/dispcc-qcm2290.c
++++ b/drivers/clk/qcom/dispcc-qcm2290.c
+@@ -522,7 +522,7 @@ static int disp_cc_qcm2290_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0x604c); /* DISP_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &disp_cc_qcm2290_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_qcm2290_desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
+               return ret;
+diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
+index 38d7859981c7..4710247be530 100644
+--- a/drivers/clk/qcom/dispcc-sc7180.c
++++ b/drivers/clk/qcom/dispcc-sc7180.c
+@@ -713,7 +713,7 @@ static int disp_cc_sc7180_probe(struct platform_device *pdev)
+       clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll_config);
+-      return qcom_cc_really_probe(pdev, &disp_cc_sc7180_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sc7180_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sc7180_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
+index fbeb8fccb99a..db0745954894 100644
+--- a/drivers/clk/qcom/dispcc-sc7280.c
++++ b/drivers/clk/qcom/dispcc-sc7280.c
+@@ -881,7 +881,7 @@ static int disp_cc_sc7280_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0x5008); /* DISP_CC_XO_CLK */
+-      return qcom_cc_really_probe(pdev, &disp_cc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sc7280_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sc8280xp.c b/drivers/clk/qcom/dispcc-sc8280xp.c
+index 91172f5b2f15..f1ca9ae0b33f 100644
+--- a/drivers/clk/qcom/dispcc-sc8280xp.c
++++ b/drivers/clk/qcom/dispcc-sc8280xp.c
+@@ -3172,7 +3172,7 @@ static int disp_cc_sc8280xp_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(clkr_to_alpha_clk_pll(desc->clks[DISP_CC_PLL1]), regmap, &disp_cc_pll1_config);
+       clk_lucid_pll_configure(clkr_to_alpha_clk_pll(desc->clks[DISP_CC_PLL2]), regmap, &disp_cc_pll2_config);
+-      ret = qcom_cc_really_probe(pdev, desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register display clock controller\n");
+               goto out_pm_runtime_put;
+diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
+index b84fdd17c3d8..e6139e8f74dc 100644
+--- a/drivers/clk/qcom/dispcc-sdm845.c
++++ b/drivers/clk/qcom/dispcc-sdm845.c
+@@ -863,7 +863,7 @@ static int disp_cc_sdm845_probe(struct platform_device *pdev)
+       /* Enable hardware clock gating for DSI and MDP clocks */
+       regmap_update_bits(regmap, 0x8000, 0x7f0, 0x7f0);
+-      return qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sdm845_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sdm845_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
+index bd07f26af35a..939887f82ecc 100644
+--- a/drivers/clk/qcom/dispcc-sm6115.c
++++ b/drivers/clk/qcom/dispcc-sm6115.c
+@@ -586,7 +586,7 @@ static int disp_cc_sm6115_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0x604c); /* DISP_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &disp_cc_sm6115_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6115_desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
+               return ret;
+diff --git a/drivers/clk/qcom/dispcc-sm6125.c b/drivers/clk/qcom/dispcc-sm6125.c
+index 85e07731cce2..51c7492816fb 100644
+--- a/drivers/clk/qcom/dispcc-sm6125.c
++++ b/drivers/clk/qcom/dispcc-sm6125.c
+@@ -682,7 +682,7 @@ static int disp_cc_sm6125_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+-      return qcom_cc_really_probe(pdev, &disp_cc_sm6125_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6125_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sm6125_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
+index f712cbef9456..50facb36701a 100644
+--- a/drivers/clk/qcom/dispcc-sm6350.c
++++ b/drivers/clk/qcom/dispcc-sm6350.c
+@@ -761,7 +761,7 @@ static int disp_cc_sm6350_probe(struct platform_device *pdev)
+       clk_fabia_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+-      return qcom_cc_really_probe(pdev, &disp_cc_sm6350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6350_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sm6350_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sm6375.c b/drivers/clk/qcom/dispcc-sm6375.c
+index 2d42f85f184b..167dd369a794 100644
+--- a/drivers/clk/qcom/dispcc-sm6375.c
++++ b/drivers/clk/qcom/dispcc-sm6375.c
+@@ -583,7 +583,7 @@ static int disp_cc_sm6375_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+-      return qcom_cc_really_probe(pdev, &disp_cc_sm6375_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &disp_cc_sm6375_desc, regmap);
+ }
+ static struct platform_driver disp_cc_sm6375_driver = {
+diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
+index 43307c8a342c..5a09009b7289 100644
+--- a/drivers/clk/qcom/dispcc-sm8250.c
++++ b/drivers/clk/qcom/dispcc-sm8250.c
+@@ -1366,7 +1366,7 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0x605c); /* DISP_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8250_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
+index 5d028871624e..d1d3f60789ee 100644
+--- a/drivers/clk/qcom/dispcc-sm8450.c
++++ b/drivers/clk/qcom/dispcc-sm8450.c
+@@ -1778,7 +1778,7 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8450_desc, regmap);
+       if (ret)
+               goto err_put_rpm;
+diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
+index 88f9347ab77c..31ae46f180a5 100644
+--- a/drivers/clk/qcom/dispcc-sm8550.c
++++ b/drivers/clk/qcom/dispcc-sm8550.c
+@@ -1771,7 +1771,7 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
+       /* Keep some clocks always-on */
+       qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &disp_cc_sm8550_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8550_desc, regmap);
+       if (ret)
+               goto err_put_rpm;
+diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
+index c1732d70e3a2..70f5dcb96700 100644
+--- a/drivers/clk/qcom/gcc-ipq5018.c
++++ b/drivers/clk/qcom/gcc-ipq5018.c
+@@ -3698,7 +3698,7 @@ static int gcc_ipq5018_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
+-      return qcom_cc_really_probe(pdev, &ipq5018_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &ipq5018_desc, regmap);
+ }
+ static struct platform_driver gcc_ipq5018_driver = {
+diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
+index 7e69de34c310..9e5885101366 100644
+--- a/drivers/clk/qcom/gcc-ipq6018.c
++++ b/drivers/clk/qcom/gcc-ipq6018.c
+@@ -4642,7 +4642,7 @@ static int gcc_ipq6018_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+                               &nss_crypto_pll_config);
+-      return qcom_cc_really_probe(pdev, &gcc_ipq6018_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_ipq6018_desc, regmap);
+ }
+ static struct platform_driver gcc_ipq6018_driver = {
+diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
+index d2be56c5892d..32fd01ef469a 100644
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -4760,7 +4760,7 @@ static int gcc_ipq8074_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&nss_crypto_pll_main, regmap,
+                               &nss_crypto_pll_config);
+-      return qcom_cc_really_probe(pdev, &gcc_ipq8074_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_ipq8074_desc, regmap);
+ }
+ static struct platform_driver gcc_ipq8074_driver = {
+diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
+index fb290e73ce94..6e6068b168e6 100644
+--- a/drivers/clk/qcom/gcc-mdm9607.c
++++ b/drivers/clk/qcom/gcc-mdm9607.c
+@@ -1604,7 +1604,7 @@ static int gcc_mdm9607_probe(struct platform_device *pdev)
+       /* Vote for GPLL0 to turn on. Needed by acpuclock. */
+       regmap_update_bits(regmap, 0x45000, BIT(0), BIT(0));
+-      return qcom_cc_really_probe(pdev, &gcc_mdm9607_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_mdm9607_desc, regmap);
+ }
+ static struct platform_driver gcc_mdm9607_driver = {
+diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c
+index aec7c4a1d3de..33987b957737 100644
+--- a/drivers/clk/qcom/gcc-mdm9615.c
++++ b/drivers/clk/qcom/gcc-mdm9615.c
+@@ -1736,7 +1736,7 @@ static int gcc_mdm9615_probe(struct platform_device *pdev)
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+-      return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_mdm9615_desc, regmap);
+ }
+ static struct platform_driver gcc_mdm9615_driver = {
+diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
+index f2b8729e4198..3e2a2ae2ee6e 100644
+--- a/drivers/clk/qcom/gcc-msm8917.c
++++ b/drivers/clk/qcom/gcc-msm8917.c
+@@ -3270,7 +3270,7 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
+-      return qcom_cc_really_probe(pdev, gcc_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, gcc_desc, regmap);
+ }
+ static const struct of_device_id gcc_msm8917_match_table[] = {
+diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
+index 7b9a3e99b589..7431c9a65044 100644
+--- a/drivers/clk/qcom/gcc-msm8939.c
++++ b/drivers/clk/qcom/gcc-msm8939.c
+@@ -4108,7 +4108,7 @@ static int gcc_msm8939_probe(struct platform_device *pdev)
+       clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
+       clk_pll_configure_sr_hpm_lp(&gpll4, regmap, &gpll4_config, true);
+-      return qcom_cc_really_probe(pdev, &gcc_msm8939_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_msm8939_desc, regmap);
+ }
+ static struct platform_driver gcc_msm8939_driver = {
+diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
+index 7563bff58118..855a61966f3e 100644
+--- a/drivers/clk/qcom/gcc-msm8953.c
++++ b/drivers/clk/qcom/gcc-msm8953.c
+@@ -4220,7 +4220,7 @@ static int gcc_msm8953_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&gpll3_early, regmap, &gpll3_early_config);
+-      return qcom_cc_really_probe(pdev, &gcc_msm8953_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_msm8953_desc, regmap);
+ }
+ static const struct of_device_id gcc_msm8953_match_table[] = {
+diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c
+index 7fac0ca594aa..399f22033c29 100644
+--- a/drivers/clk/qcom/gcc-msm8976.c
++++ b/drivers/clk/qcom/gcc-msm8976.c
+@@ -4129,7 +4129,7 @@ static int gcc_msm8976_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_msm8976_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_msm8976_desc, regmap);
+ }
+ static struct platform_driver gcc_msm8976_driver = {
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index e7b03a17514a..4fc667b94cf2 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -3620,7 +3620,7 @@ static int gcc_msm8996_probe(struct platform_device *pdev)
+        */
+       regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+-      return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_msm8996_desc, regmap);
+ }
+ static struct platform_driver gcc_msm8996_driver = {
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index 5f8c87c1793f..90b66caba2cd 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -3292,7 +3292,7 @@ static int gcc_msm8998_probe(struct platform_device *pdev)
+       regmap_write(regmap, GCC_MMSS_MISC, 0x10003);
+       regmap_write(regmap, GCC_GPU_MISC, 0x10003);
+-      return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_msm8998_desc, regmap);
+ }
+ static const struct of_device_id gcc_msm8998_match_table[] = {
+diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
+index 48995e50c6bd..9a6703365e61 100644
+--- a/drivers/clk/qcom/gcc-qcm2290.c
++++ b/drivers/clk/qcom/gcc-qcm2290.c
+@@ -2994,7 +2994,7 @@ static int gcc_qcm2290_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+       clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+-      return qcom_cc_really_probe(pdev, &gcc_qcm2290_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_qcm2290_desc, regmap);
+ }
+ static struct platform_driver gcc_qcm2290_driver = {
+diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
+index a39c4990b29d..c3cfd572e7c1 100644
+--- a/drivers/clk/qcom/gcc-qcs404.c
++++ b/drivers/clk/qcom/gcc-qcs404.c
+@@ -2824,7 +2824,7 @@ static int gcc_qcs404_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config);
+-      return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_qcs404_desc, regmap);
+ }
+ static struct platform_driver gcc_qcs404_driver = {
+diff --git a/drivers/clk/qcom/gcc-qdu1000.c b/drivers/clk/qcom/gcc-qdu1000.c
+index 9f42d2601464..dbe9e9437939 100644
+--- a/drivers/clk/qcom/gcc-qdu1000.c
++++ b/drivers/clk/qcom/gcc-qdu1000.c
+@@ -2674,7 +2674,7 @@ static int gcc_qdu1000_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      ret = qcom_cc_really_probe(pdev, &gcc_qdu1000_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &gcc_qdu1000_desc, regmap);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register GCC clocks\n");
+diff --git a/drivers/clk/qcom/gcc-sa8775p.c b/drivers/clk/qcom/gcc-sa8775p.c
+index 5bcbfbf52cb9..9f31ce4cea18 100644
+--- a/drivers/clk/qcom/gcc-sa8775p.c
++++ b/drivers/clk/qcom/gcc-sa8775p.c
+@@ -4753,7 +4753,7 @@ static int gcc_sa8775p_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x34004); /* GCC_VIDEO_AHB_CLK */
+       qcom_branch_set_clk_en(regmap, 0x34024); /* GCC_VIDEO_XO_CLK */
+-      return qcom_cc_really_probe(pdev, &gcc_sa8775p_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sa8775p_desc, regmap);
+ }
+ static struct platform_driver gcc_sa8775p_driver = {
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index 6a5f785c0ced..4a49ad7a9e5b 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -2458,7 +2458,7 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sc7180_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sc7180_desc, regmap);
+ }
+ static struct platform_driver gcc_sc7180_driver = {
+diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
+index b937d513b814..4502926a2691 100644
+--- a/drivers/clk/qcom/gcc-sc7280.c
++++ b/drivers/clk/qcom/gcc-sc7280.c
+@@ -3481,7 +3481,7 @@ static int gcc_sc7280_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sc7280_desc, regmap);
+ }
+ static struct platform_driver gcc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c
+index ad905affd376..ad135bfa4c76 100644
+--- a/drivers/clk/qcom/gcc-sc8180x.c
++++ b/drivers/clk/qcom/gcc-sc8180x.c
+@@ -4623,7 +4623,7 @@ static int gcc_sc8180x_probe(struct platform_device *pdev)
+       regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+       regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+-      return qcom_cc_really_probe(pdev, &gcc_sc8180x_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sc8180x_desc, regmap);
+ }
+ static struct platform_driver gcc_sc8180x_driver = {
+diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
+index 082d7b5504eb..5f11760cf73f 100644
+--- a/drivers/clk/qcom/gcc-sc8280xp.c
++++ b/drivers/clk/qcom/gcc-sc8280xp.c
+@@ -7558,7 +7558,7 @@ static int gcc_sc8280xp_probe(struct platform_device *pdev)
+       if (ret)
+               goto err_put_rpm;
+-      ret = qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &gcc_sc8280xp_desc, regmap);
+       if (ret)
+               goto err_put_rpm;
+diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
+index c4fe70871b6d..df79298a1a25 100644
+--- a/drivers/clk/qcom/gcc-sdm660.c
++++ b/drivers/clk/qcom/gcc-sdm660.c
+@@ -2474,7 +2474,7 @@ static int gcc_sdm660_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sdm660_desc, regmap);
+ }
+ static struct platform_driver gcc_sdm660_driver = {
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index ea4c3bf4fb9b..dc3aa7014c3e 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -4011,7 +4011,7 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
+               return ret;
+       gcc_desc = of_device_get_match_data(&pdev->dev);
+-      return qcom_cc_really_probe(pdev, gcc_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, gcc_desc, regmap);
+ }
+ static struct platform_driver gcc_sdm845_driver = {
+diff --git a/drivers/clk/qcom/gcc-sdx55.c b/drivers/clk/qcom/gcc-sdx55.c
+index 26279b8d321a..84c507656e8f 100644
+--- a/drivers/clk/qcom/gcc-sdx55.c
++++ b/drivers/clk/qcom/gcc-sdx55.c
+@@ -1616,7 +1616,7 @@ static int gcc_sdx55_probe(struct platform_device *pdev)
+       regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21)); /* GCC_CPUSS_AHB_CLK */
+       regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22)); /* GCC_CPUSS_GNOC_CLK */
+-      return qcom_cc_really_probe(pdev, &gcc_sdx55_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sdx55_desc, regmap);
+ }
+ static struct platform_driver gcc_sdx55_driver = {
+diff --git a/drivers/clk/qcom/gcc-sdx65.c b/drivers/clk/qcom/gcc-sdx65.c
+index 8fde6463574b..fe297c606f97 100644
+--- a/drivers/clk/qcom/gcc-sdx65.c
++++ b/drivers/clk/qcom/gcc-sdx65.c
+@@ -1580,7 +1580,7 @@ static int gcc_sdx65_probe(struct platform_device *pdev)
+       regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21)); /* GCC_CPUSS_AHB_CLK */
+       regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22)); /* GCC_CPUSS_GNOC_CLK */
+-      return qcom_cc_really_probe(pdev, &gcc_sdx65_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sdx65_desc, regmap);
+ }
+ static struct platform_driver gcc_sdx65_driver = {
+diff --git a/drivers/clk/qcom/gcc-sdx75.c b/drivers/clk/qcom/gcc-sdx75.c
+index c51338f08ef1..453a6bf8e878 100644
+--- a/drivers/clk/qcom/gcc-sdx75.c
++++ b/drivers/clk/qcom/gcc-sdx75.c
+@@ -2940,7 +2940,7 @@ static int gcc_sdx75_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x3e004); /* GCC_AHB_PCIE_LINK_CLK */
+       qcom_branch_set_clk_en(regmap, 0x3e008); /* GCC_XO_PCIE_LINK_CLK */
+-      return qcom_cc_really_probe(pdev, &gcc_sdx75_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sdx75_desc, regmap);
+ }
+ static struct platform_driver gcc_sdx75_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
+index 167e344ad399..4c3804701e24 100644
+--- a/drivers/clk/qcom/gcc-sm6115.c
++++ b/drivers/clk/qcom/gcc-sm6115.c
+@@ -3513,7 +3513,7 @@ static int gcc_sm6115_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+       clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+-      return qcom_cc_really_probe(pdev, &gcc_sm6115_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm6115_desc, regmap);
+ }
+ static struct platform_driver gcc_sm6115_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c
+index da554efee2ce..07bb1e5c4a30 100644
+--- a/drivers/clk/qcom/gcc-sm6125.c
++++ b/drivers/clk/qcom/gcc-sm6125.c
+@@ -4161,7 +4161,7 @@ static int gcc_sm6125_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sm6125_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm6125_desc, regmap);
+ }
+ static struct platform_driver gcc_sm6125_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23..0dcc8eeb77e6 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -2559,7 +2559,7 @@ static int gcc_sm6350_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sm6350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm6350_desc, regmap);
+ }
+ static struct platform_driver gcc_sm6350_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm6375.c b/drivers/clk/qcom/gcc-sm6375.c
+index ac1ed2d728f9..f47dc2808095 100644
+--- a/drivers/clk/qcom/gcc-sm6375.c
++++ b/drivers/clk/qcom/gcc-sm6375.c
+@@ -3892,7 +3892,7 @@ static int gcc_sm6375_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&gpll8, regmap, &gpll8_config);
+       clk_zonda_pll_configure(&gpll9, regmap, &gpll9_config);
+-      return qcom_cc_really_probe(pdev, &gcc_sm6375_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm6375_desc, regmap);
+ }
+ static struct platform_driver gcc_sm6375_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm7150.c b/drivers/clk/qcom/gcc-sm7150.c
+index b0c50ebb86be..7eabaf0e1b57 100644
+--- a/drivers/clk/qcom/gcc-sm7150.c
++++ b/drivers/clk/qcom/gcc-sm7150.c
+@@ -3017,7 +3017,7 @@ static int gcc_sm7150_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sm7150_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm7150_desc, regmap);
+ }
+ static struct platform_driver gcc_sm7150_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
+index 1f748141d12c..cefceb780889 100644
+--- a/drivers/clk/qcom/gcc-sm8150.c
++++ b/drivers/clk/qcom/gcc-sm8150.c
+@@ -3797,7 +3797,7 @@ static int gcc_sm8150_probe(struct platform_device *pdev)
+       if (ret)
+               dev_err_probe(&pdev->dev, ret, "Failed to register with DFS!\n");
+-      return qcom_cc_really_probe(pdev, &gcc_sm8150_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm8150_desc, regmap);
+ }
+ static struct platform_driver gcc_sm8150_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c
+index e630bfa2d0c1..991cd8b8d597 100644
+--- a/drivers/clk/qcom/gcc-sm8250.c
++++ b/drivers/clk/qcom/gcc-sm8250.c
+@@ -3656,7 +3656,7 @@ static int gcc_sm8250_probe(struct platform_device *pdev)
+       if (ret)
+               return ret;
+-      return qcom_cc_really_probe(pdev, &gcc_sm8250_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm8250_desc, regmap);
+ }
+ static struct platform_driver gcc_sm8250_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
+index fc0402e8a2a7..2d94f3046b71 100644
+--- a/drivers/clk/qcom/gcc-sm8350.c
++++ b/drivers/clk/qcom/gcc-sm8350.c
+@@ -3822,7 +3822,7 @@ static int gcc_sm8350_probe(struct platform_device *pdev)
+       /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+       regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
+-      return qcom_cc_really_probe(pdev, &gcc_sm8350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm8350_desc, regmap);
+ }
+ static struct platform_driver gcc_sm8350_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
+index e86c58bc5e48..639a9a955914 100644
+--- a/drivers/clk/qcom/gcc-sm8450.c
++++ b/drivers/clk/qcom/gcc-sm8450.c
+@@ -3289,7 +3289,7 @@ static int gcc_sm8450_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */
+       qcom_branch_set_clk_en(regmap, 0x42028); /* GCC_VIDEO_XO_CLK */
+-      return qcom_cc_really_probe(pdev, &gcc_sm8450_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm8450_desc, regmap);
+ }
+ static struct platform_driver gcc_sm8450_driver = {
+diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
+index 26d7349e7642..7944ddb4b47d 100644
+--- a/drivers/clk/qcom/gcc-sm8550.c
++++ b/drivers/clk/qcom/gcc-sm8550.c
+@@ -3364,7 +3364,7 @@ static int gcc_sm8550_probe(struct platform_device *pdev)
+       /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
+       regmap_write(regmap, 0x52024, 0x0);
+-      return qcom_cc_really_probe(pdev, &gcc_sm8550_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gcc_sm8550_desc, regmap);
+ }
+ static struct platform_driver gcc_sm8550_driver = {
+diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
+index 7b1cb44e31b2..9efeab2691ba 100644
+--- a/drivers/clk/qcom/gpucc-msm8998.c
++++ b/drivers/clk/qcom/gpucc-msm8998.c
+@@ -334,7 +334,7 @@ static int gpucc_msm8998_probe(struct platform_device *pdev)
+       /* tweak droop detector (GPUCC_GPU_DD_WRAP_CTRL) to reduce leakage */
+       regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(0), BIT(0));
+-      return qcom_cc_really_probe(pdev, &gpucc_msm8998_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpucc_msm8998_desc, regmap);
+ }
+ static struct platform_driver gpucc_msm8998_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
+index 1167c42da39d..ac7552b146c7 100644
+--- a/drivers/clk/qcom/gpucc-sa8775p.c
++++ b/drivers/clk/qcom/gpucc-sa8775p.c
+@@ -598,7 +598,7 @@ static int gpu_cc_sa8775p_probe(struct platform_device *pdev)
+       clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+       clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sa8775p_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sa8775p_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sa8775p_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
+index 66f5b48cbf87..08f3983d016f 100644
+--- a/drivers/clk/qcom/gpucc-sc7180.c
++++ b/drivers/clk/qcom/gpucc-sc7180.c
+@@ -241,7 +241,7 @@ static int gpu_cc_sc7180_probe(struct platform_device *pdev)
+       value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc7180_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sc7180_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c
+index ebda57eac979..bd699a624517 100644
+--- a/drivers/clk/qcom/gpucc-sc7280.c
++++ b/drivers/clk/qcom/gpucc-sc7280.c
+@@ -469,7 +469,7 @@ static int gpu_cc_sc7280_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x1098); /* GPUCC_CX_GMU_CLK */
+       regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13));
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc7280_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
+index 3611d2d1823d..c96be61e3f47 100644
+--- a/drivers/clk/qcom/gpucc-sc8280xp.c
++++ b/drivers/clk/qcom/gpucc-sc8280xp.c
+@@ -449,7 +449,7 @@ static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x1170); /* GPU_CC_CB_CLK */
+       qcom_branch_set_clk_en(regmap, 0x109c); /* GPU_CC_CXO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_sc8280xp_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+       return ret;
+diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
+index a52d98b7cf4c..3ae1b80e38d9 100644
+--- a/drivers/clk/qcom/gpucc-sdm660.c
++++ b/drivers/clk/qcom/gpucc-sdm660.c
+@@ -330,7 +330,7 @@ static int gpucc_sdm660_probe(struct platform_device *pdev)
+       gpu_pll_config.alpha_hi = 0x8a;
+       clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap, &gpu_pll_config);
+-      return qcom_cc_really_probe(pdev, &gpucc_sdm660_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpucc_sdm660_desc, regmap);
+ }
+ static struct platform_driver gpucc_sdm660_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
+index c87c3215dfe3..ef26690cf504 100644
+--- a/drivers/clk/qcom/gpucc-sdm845.c
++++ b/drivers/clk/qcom/gpucc-sdm845.c
+@@ -192,7 +192,7 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev)
+       value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sdm845_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sdm845_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm6115.c b/drivers/clk/qcom/gpucc-sm6115.c
+index 9793dd9a2596..d43c86cf73a5 100644
+--- a/drivers/clk/qcom/gpucc-sm6115.c
++++ b/drivers/clk/qcom/gpucc-sm6115.c
+@@ -488,7 +488,7 @@ static int gpu_cc_sm6115_probe(struct platform_device *pdev)
+       qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
+       qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm6115_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6115_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm6115_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm6125.c b/drivers/clk/qcom/gpucc-sm6125.c
+index b719a48fe706..ed6a6e505801 100644
+--- a/drivers/clk/qcom/gpucc-sm6125.c
++++ b/drivers/clk/qcom/gpucc-sm6125.c
+@@ -409,7 +409,7 @@ static int gpu_cc_sm6125_probe(struct platform_device *pdev)
+       qcom_branch_set_force_mem_core(regmap, gpu_cc_gx_gfx3d_clk, true);
+       qcom_branch_set_force_periph_on(regmap, gpu_cc_gx_gfx3d_clk, true);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm6125_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6125_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm6125_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
+index 0bcbba2a2943..1e12ad8948db 100644
+--- a/drivers/clk/qcom/gpucc-sm6350.c
++++ b/drivers/clk/qcom/gpucc-sm6350.c
+@@ -502,7 +502,7 @@ static int gpu_cc_sm6350_probe(struct platform_device *pdev)
+       value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm6350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm6350_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm6350_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm6375.c b/drivers/clk/qcom/gpucc-sm6375.c
+index 4e9a30a080d3..41f59024143e 100644
+--- a/drivers/clk/qcom/gpucc-sm6375.c
++++ b/drivers/clk/qcom/gpucc-sm6375.c
+@@ -455,7 +455,7 @@ static int gpucc_sm6375_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&gpucc_pll0, regmap, &gpucc_pll0_config);
+       clk_lucid_pll_configure(&gpucc_pll1, regmap, &gpucc_pll1_config);
+-      ret = qcom_cc_really_probe(pdev, &gpucc_sm6375_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &gpucc_sm6375_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+       return ret;
+diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
+index 135601629cba..d711464a71b6 100644
+--- a/drivers/clk/qcom/gpucc-sm8150.c
++++ b/drivers/clk/qcom/gpucc-sm8150.c
+@@ -304,7 +304,7 @@ static int gpu_cc_sm8150_probe(struct platform_device *pdev)
+       clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8150_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm8150_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
+index 012bd1380f55..113b486a6d2f 100644
+--- a/drivers/clk/qcom/gpucc-sm8250.c
++++ b/drivers/clk/qcom/gpucc-sm8250.c
+@@ -320,7 +320,7 @@ static int gpu_cc_sm8250_probe(struct platform_device *pdev)
+       value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+       regmap_update_bits(regmap, 0x1098, mask, value);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm8250_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8250_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm8250_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm8350.c b/drivers/clk/qcom/gpucc-sm8350.c
+index 9437d316d145..f6bb8244dd40 100644
+--- a/drivers/clk/qcom/gpucc-sm8350.c
++++ b/drivers/clk/qcom/gpucc-sm8350.c
+@@ -604,7 +604,7 @@ static int gpu_cc_sm8350_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+       clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm8350_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8350_desc, regmap);
+ }
+ static const struct of_device_id gpu_cc_sm8350_match_table[] = {
+diff --git a/drivers/clk/qcom/gpucc-sm8450.c b/drivers/clk/qcom/gpucc-sm8450.c
+index 7b329a803289..b3c5d6923cd2 100644
+--- a/drivers/clk/qcom/gpucc-sm8450.c
++++ b/drivers/clk/qcom/gpucc-sm8450.c
+@@ -751,7 +751,7 @@ static int gpu_cc_sm8450_probe(struct platform_device *pdev)
+       clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+       clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm8450_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8450_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm8450_driver = {
+diff --git a/drivers/clk/qcom/gpucc-sm8550.c b/drivers/clk/qcom/gpucc-sm8550.c
+index 4fc69c6026e5..7486edf56160 100644
+--- a/drivers/clk/qcom/gpucc-sm8550.c
++++ b/drivers/clk/qcom/gpucc-sm8550.c
+@@ -579,7 +579,7 @@ static int gpu_cc_sm8550_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+       qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+-      return qcom_cc_really_probe(pdev, &gpu_cc_sm8550_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8550_desc, regmap);
+ }
+ static struct platform_driver gpu_cc_sm8550_driver = {
+diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
+index fa8cda63cf20..bf5320a43e8c 100644
+--- a/drivers/clk/qcom/lcc-ipq806x.c
++++ b/drivers/clk/qcom/lcc-ipq806x.c
+@@ -454,7 +454,7 @@ static int lcc_ipq806x_probe(struct platform_device *pdev)
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+-      return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &lcc_ipq806x_desc, regmap);
+ }
+ static struct platform_driver lcc_ipq806x_driver = {
+diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
+index e725e7b9c456..d53bf315e9c3 100644
+--- a/drivers/clk/qcom/lcc-msm8960.c
++++ b/drivers/clk/qcom/lcc-msm8960.c
+@@ -481,7 +481,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+-      return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &lcc_msm8960_desc, regmap);
+ }
+ static struct platform_driver lcc_msm8960_driver = {
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index c43d0b1af7f7..45e726477086 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -772,7 +772,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
+       regmap_write(regmap, 0x4, 0x3b);
+       regmap_write(regmap, 0x8, 0xff05);
+-      ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
+               goto exit;
+@@ -847,7 +847,7 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config);
+-      ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &lpass_aon_cc_sc7280_desc, regmap);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n");
+               goto exit;
+diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
+index 8ac72d26087e..726c6378752f 100644
+--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
++++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
+@@ -411,7 +411,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
+       clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
+                               &lpass_lpaaudio_dig_pll_config);
+-      ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7180_desc, regmap);
+       pm_runtime_mark_last_busy(&pdev->dev);
+ exit:
+diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
+index a2f1e6ad6da4..b0888cd2460b 100644
+--- a/drivers/clk/qcom/lpasscorecc-sc7280.c
++++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
+@@ -406,7 +406,7 @@ static int lpass_core_cc_sc7280_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&lpass_core_cc_dig_pll, regmap, &lpass_core_cc_dig_pll_config);
+-      return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7280_desc, regmap);
+ }
+ static struct platform_driver lpass_core_cc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
+index 50638ab341ec..1061322534c4 100644
+--- a/drivers/clk/qcom/mmcc-msm8960.c
++++ b/drivers/clk/qcom/mmcc-msm8960.c
+@@ -3122,7 +3122,7 @@ static int mmcc_msm8960_probe(struct platform_device *pdev)
+       clk_pll_configure_sr(&pll15, regmap, &pll15_config, false);
+-      return qcom_cc_really_probe(pdev, match->data, regmap);
++      return qcom_cc_really_probe(&pdev->dev, match->data, regmap);
+ }
+ static struct platform_driver mmcc_msm8960_driver = {
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index 36f460b78be2..d5bcb09ebd0c 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -2768,7 +2768,7 @@ static int mmcc_msm8974_probe(struct platform_device *pdev)
+               msm8226_clock_override();
+       }
+-      return qcom_cc_really_probe(pdev, desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, desc, regmap);
+ }
+ static struct platform_driver mmcc_msm8974_driver = {
+diff --git a/drivers/clk/qcom/mmcc-msm8994.c b/drivers/clk/qcom/mmcc-msm8994.c
+index f19080cf715b..78e5083eaf0f 100644
+--- a/drivers/clk/qcom/mmcc-msm8994.c
++++ b/drivers/clk/qcom/mmcc-msm8994.c
+@@ -2602,7 +2602,7 @@ static int mmcc_msm8994_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&mmpll3_early, regmap, &mmpll_p_config);
+       clk_alpha_pll_configure(&mmpll5_early, regmap, &mmpll_p_config);
+-      return qcom_cc_really_probe(pdev, &mmcc_msm8994_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8994_desc, regmap);
+ }
+ static struct platform_driver mmcc_msm8994_driver = {
+diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
+index 92287d40c3a5..1a32c6eb8217 100644
+--- a/drivers/clk/qcom/mmcc-msm8996.c
++++ b/drivers/clk/qcom/mmcc-msm8996.c
+@@ -3626,7 +3626,7 @@ static int mmcc_msm8996_probe(struct platform_device *pdev)
+       /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
+       regmap_update_bits(regmap, 0x5054, BIT(15), 0);
+-      return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8996_desc, regmap);
+ }
+ static struct platform_driver mmcc_msm8996_driver = {
+diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
+index 275fb3b71ede..5738445a8656 100644
+--- a/drivers/clk/qcom/mmcc-msm8998.c
++++ b/drivers/clk/qcom/mmcc-msm8998.c
+@@ -2866,7 +2866,7 @@ static int mmcc_msm8998_probe(struct platform_device *pdev)
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+-      return qcom_cc_really_probe(pdev, &mmcc_msm8998_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &mmcc_msm8998_desc, regmap);
+ }
+ static struct platform_driver mmcc_msm8998_driver = {
+diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
+index 4b8380c2d648..98ba5b4518fb 100644
+--- a/drivers/clk/qcom/mmcc-sdm660.c
++++ b/drivers/clk/qcom/mmcc-sdm660.c
+@@ -2847,7 +2847,7 @@ static int mmcc_660_probe(struct platform_device *pdev)
+       clk_alpha_pll_configure(&mmpll8, regmap, &mmpll8_config);
+       clk_alpha_pll_configure(&mmpll10, regmap, &mmpll10_config);
+-      return qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &mmcc_660_desc, regmap);
+ }
+ static struct platform_driver mmcc_660_driver = {
+diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
+index 552a3eb1fd91..e5e8f2e82b94 100644
+--- a/drivers/clk/qcom/tcsrcc-sm8550.c
++++ b/drivers/clk/qcom/tcsrcc-sm8550.c
+@@ -166,7 +166,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+-      return qcom_cc_really_probe(pdev, &tcsr_cc_sm8550_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &tcsr_cc_sm8550_desc, regmap);
+ }
+ static struct platform_driver tcsr_cc_sm8550_driver = {
+diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
+index ae0f812f83e8..d7f845480396 100644
+--- a/drivers/clk/qcom/videocc-sc7180.c
++++ b/drivers/clk/qcom/videocc-sc7180.c
+@@ -226,7 +226,7 @@ static int video_cc_sc7180_probe(struct platform_device *pdev)
+       /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+       regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+-      return qcom_cc_really_probe(pdev, &video_cc_sc7180_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &video_cc_sc7180_desc, regmap);
+ }
+ static struct platform_driver video_cc_sc7180_driver = {
+diff --git a/drivers/clk/qcom/videocc-sc7280.c b/drivers/clk/qcom/videocc-sc7280.c
+index 119a3ed6eb6a..317b325d6daf 100644
+--- a/drivers/clk/qcom/videocc-sc7280.c
++++ b/drivers/clk/qcom/videocc-sc7280.c
+@@ -305,7 +305,7 @@ static int video_cc_sc7280_probe(struct platform_device *pdev)
+       clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
+-      return qcom_cc_really_probe(pdev, &video_cc_sc7280_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &video_cc_sc7280_desc, regmap);
+ }
+ static struct platform_driver video_cc_sc7280_driver = {
+diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
+index 80095a283a86..f77a07779477 100644
+--- a/drivers/clk/qcom/videocc-sdm845.c
++++ b/drivers/clk/qcom/videocc-sdm845.c
+@@ -329,7 +329,7 @@ static int video_cc_sdm845_probe(struct platform_device *pdev)
+       clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+-      return qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &video_cc_sdm845_desc, regmap);
+ }
+ static struct platform_driver video_cc_sdm845_driver = {
+diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
+index 554631aa279b..daab3237eec1 100644
+--- a/drivers/clk/qcom/videocc-sm8150.c
++++ b/drivers/clk/qcom/videocc-sm8150.c
+@@ -262,7 +262,7 @@ static int video_cc_sm8150_probe(struct platform_device *pdev)
+       /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+       regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+-      return qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap);
++      return qcom_cc_really_probe(&pdev->dev, &video_cc_sm8150_desc, regmap);
+ }
+ static struct platform_driver video_cc_sm8150_driver = {
+diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
+index 914eddd0ae15..d7e0c32284c1 100644
+--- a/drivers/clk/qcom/videocc-sm8250.c
++++ b/drivers/clk/qcom/videocc-sm8250.c
+@@ -387,7 +387,7 @@ static int video_cc_sm8250_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0xe58); /* VIDEO_CC_AHB_CLK */
+       qcom_branch_set_clk_en(regmap, 0xeec); /* VIDEO_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8250_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
+index f7aec28d4c87..5bd6fe3e1298 100644
+--- a/drivers/clk/qcom/videocc-sm8350.c
++++ b/drivers/clk/qcom/videocc-sm8350.c
+@@ -562,7 +562,7 @@ static int video_cc_sm8350_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0xe58); /* VIDEO_CC_AHB_CLK */
+       qcom_branch_set_clk_en(regmap, video_cc_xo_clk_cbcr); /* VIDEO_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &video_cc_sm8350_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8350_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+       return ret;
+diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
+index 67df40f16423..ed9163d64244 100644
+--- a/drivers/clk/qcom/videocc-sm8450.c
++++ b/drivers/clk/qcom/videocc-sm8450.c
+@@ -428,7 +428,7 @@ static int video_cc_sm8450_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x8130); /* VIDEO_CC_SLEEP_CLK */
+       qcom_branch_set_clk_en(regmap, 0x8114); /* VIDEO_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &video_cc_sm8450_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8450_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
+index d73f747d2474..4d36362db1d0 100644
+--- a/drivers/clk/qcom/videocc-sm8550.c
++++ b/drivers/clk/qcom/videocc-sm8550.c
+@@ -433,7 +433,7 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
+       qcom_branch_set_clk_en(regmap, 0x8140); /* VIDEO_CC_SLEEP_CLK */
+       qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
+-      ret = qcom_cc_really_probe(pdev, &video_cc_sm8550_desc, regmap);
++      ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8550_desc, regmap);
+       pm_runtime_put(&pdev->dev);
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/007-v6.11-net-phy-introduce-core-support-for-phy-mode-10g-qxgm.patch b/target/linux/qualcommbe/patches-6.6/007-v6.11-net-phy-introduce-core-support-for-phy-mode-10g-qxgm.patch
new file mode 100644 (file)
index 0000000..8b0f93a
--- /dev/null
@@ -0,0 +1,141 @@
+From 777b8afb8179155353ec14b1d8153122410aba29 Mon Sep 17 00:00:00 2001
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+Date: Sat, 15 Jun 2024 20:00:27 +0800
+Subject: [PATCH] net: phy: introduce core support for phy-mode = "10g-qxgmii"
+
+10G-QXGMII is a MAC-to-PHY interface defined by the USXGMII multiport
+specification. It uses the same signaling as USXGMII, but it multiplexes
+4 ports over the link, resulting in a maximum speed of 2.5G per port.
+
+Some in-tree SoCs like the NXP LS1028A use "usxgmii" when they mean
+either the single-port USXGMII or the quad-port 10G-QXGMII variant, and
+they have gotten away just fine with that thus far. But there is a need to
+distinguish between the two as far as SerDes drivers are concerned.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+ Documentation/networking/phy.rst | 6 ++++++
+ drivers/net/phy/phy-core.c       | 1 +
+ drivers/net/phy/phylink.c        | 9 ++++++++-
+ include/linux/phy.h              | 4 ++++
+ include/linux/phylink.h          | 1 +
+ 5 files changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
+index 1283240d7620..f64641417c54 100644
+--- a/Documentation/networking/phy.rst
++++ b/Documentation/networking/phy.rst
+@@ -327,6 +327,12 @@ Some of the interface modes are described below:
+     This is the Penta SGMII mode, it is similar to QSGMII but it combines 5
+     SGMII lines into a single link compared to 4 on QSGMII.
++``PHY_INTERFACE_MODE_10G_QXGMII``
++    Represents the 10G-QXGMII PHY-MAC interface as defined by the Cisco USXGMII
++    Multiport Copper Interface document. It supports 4 ports over a 10.3125 GHz
++    SerDes lane, each port having speeds of 2.5G / 1G / 100M / 10M achieved
++    through symbol replication. The PCS expects the standard USXGMII code word.
++
+ Pause frames / flow control
+ ===========================
+diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
+index 15f349e5995a..a235ea2264a7 100644
+--- a/drivers/net/phy/phy-core.c
++++ b/drivers/net/phy/phy-core.c
+@@ -141,6 +141,7 @@ int phy_interface_num_ports(phy_interface_t interface)
+               return 1;
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_QUSGMII:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
+               return 4;
+       case PHY_INTERFACE_MODE_PSGMII:
+               return 5;
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 02427378acfd..6c24c48dcf0f 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -231,6 +231,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
+               return SPEED_1000;
+       case PHY_INTERFACE_MODE_2500BASEX:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
+               return SPEED_2500;
+       case PHY_INTERFACE_MODE_5GBASER:
+@@ -500,7 +501,11 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
+       switch (interface) {
+       case PHY_INTERFACE_MODE_USXGMII:
+-              caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
++              caps |= MAC_10000FD | MAC_5000FD;
++              fallthrough;
++
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              caps |= MAC_2500FD;
+               fallthrough;
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+@@ -926,6 +931,7 @@ static int phylink_parse_mode(struct phylink *pl,
+               case PHY_INTERFACE_MODE_5GBASER:
+               case PHY_INTERFACE_MODE_25GBASER:
+               case PHY_INTERFACE_MODE_USXGMII:
++              case PHY_INTERFACE_MODE_10G_QXGMII:
+               case PHY_INTERFACE_MODE_10GKR:
+               case PHY_INTERFACE_MODE_10GBASER:
+               case PHY_INTERFACE_MODE_XLGMII:
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index e6e83304558e..205fccfc0f60 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -128,6 +128,7 @@ extern const int phy_10gbit_features_array[1];
+  * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+  * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+  * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
++ * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+  * @PHY_INTERFACE_MODE_MAX: Book keeping
+  *
+  * Describes the interface between the MAC and PHY.
+@@ -168,6 +169,7 @@ typedef enum {
+       PHY_INTERFACE_MODE_10GKR,
+       PHY_INTERFACE_MODE_QUSGMII,
+       PHY_INTERFACE_MODE_1000BASEKX,
++      PHY_INTERFACE_MODE_10G_QXGMII,
+       PHY_INTERFACE_MODE_MAX,
+ } phy_interface_t;
+@@ -289,6 +291,8 @@ static inline const char *phy_modes(phy_interface_t interface)
+               return "100base-x";
+       case PHY_INTERFACE_MODE_QUSGMII:
+               return "qusgmii";
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              return "10g-qxgmii";
+       default:
+               return "unknown";
+       }
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index a30a692acc32..2381e07429a2 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -124,6 +130,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_QUSGMII:
+       case PHY_INTERFACE_MODE_USXGMII:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
+               /* These protocols are designed for use with a PHY which
+                * communicates its negotiation result back to the MAC via
+                * inband communication. Note: there exist PHYs that run
+@@ -654,6 +654,7 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface)
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_USXGMII:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
+               return 1600000;
+       case PHY_INTERFACE_MODE_1000BASEX:
+-- 
+2.45.2
+
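For a MAC driver, adopting the new mode is mostly declarative: mark the interface as supported in the phylink config and let phylink_get_capabilities() derive the per-mode limits added above. A minimal sketch, assuming a hypothetical driver whose foo_priv private structure embeds a struct phylink_config:

    #include <linux/phylink.h>

    /* Hypothetical init: advertise 10G-QXGMII next to the usual modes.
     * With the hunks above, phylink reports MAC_2500FD for this mode.
     */
    static void foo_phylink_init(struct foo_priv *priv)
    {
            priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
                            MAC_ASYM_PAUSE | MAC_10 | MAC_100 |
                            MAC_1000FD | MAC_2500FD;

            __set_bit(PHY_INTERFACE_MODE_SGMII,
                      priv->phylink_config.supported_interfaces);
            __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
                      priv->phylink_config.supported_interfaces);
    }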
diff --git a/target/linux/qualcommbe/patches-6.6/008-v6.9-clk-Provide-managed-helper-to-get-and-enable-bulk-cl.patch b/target/linux/qualcommbe/patches-6.6/008-v6.9-clk-Provide-managed-helper-to-get-and-enable-bulk-cl.patch
new file mode 100644 (file)
index 0000000..ab61f0b
--- /dev/null
@@ -0,0 +1,114 @@
+From 265b07df758a998f60cf5b5aec6bd72ca676655e Mon Sep 17 00:00:00 2001
+From: Shradha Todi <shradha.t@samsung.com>
+Date: Tue, 20 Feb 2024 14:10:45 +0530
+Subject: [PATCH] clk: Provide managed helper to get and enable bulk clocks
+
+Provide a managed devm_clk_bulk* wrapper to get and enable all
+bulk clocks in order to simplify drivers that keep all clocks
+enabled for the duration of driver operation.
+
+Suggested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Shradha Todi <shradha.t@samsung.com>
+Link: https://lore.kernel.org/r/20240220084046.23786-2-shradha.t@samsung.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+---
+ drivers/clk/clk-devres.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ include/linux/clk.h      | 22 ++++++++++++++++++++++
+ 2 files changed, 62 insertions(+)
+
+diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
+index 737aa70e2cb3..90e6078fb6e1 100644
+--- a/drivers/clk/clk-devres.c
++++ b/drivers/clk/clk-devres.c
+@@ -182,6 +182,46 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
++static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
++{
++      struct clk_bulk_devres *devres = res;
++
++      clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
++      clk_bulk_put_all(devres->num_clks, devres->clks);
++}
++
++int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
++                                            struct clk_bulk_data **clks)
++{
++      struct clk_bulk_devres *devres;
++      int ret;
++
++      devres = devres_alloc(devm_clk_bulk_release_all_enable,
++                            sizeof(*devres), GFP_KERNEL);
++      if (!devres)
++              return -ENOMEM;
++
++      ret = clk_bulk_get_all(dev, &devres->clks);
++      if (ret > 0) {
++              *clks = devres->clks;
++              devres->num_clks = ret;
++      } else {
++              devres_free(devres);
++              return ret;
++      }
++
++      ret = clk_bulk_prepare_enable(devres->num_clks, *clks);
++      if (!ret) {
++              devres_add(dev, devres);
++      } else {
++              clk_bulk_put_all(devres->num_clks, devres->clks);
++              devres_free(devres);
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
++
+ static int devm_clk_match(struct device *dev, void *res, void *data)
+ {
+       struct clk **c = res;
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 06f1b292f8a0..0f44d3863de2 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -478,6 +478,22 @@ int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
+ int __must_check devm_clk_bulk_get_all(struct device *dev,
+                                      struct clk_bulk_data **clks);
++/**
++ * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
++ * @dev: device for clock "consumer"
++ * @clks: pointer to the clk_bulk_data table of consumer
++ *
++ * Returns success (0) or negative errno.
++ *
++ * This helper function allows drivers to get all clocks of the
++ * consumer and enables them in one operation with management.
++ * The clks will automatically be disabled and freed when the device
++ * is unbound.
++ */
++
++int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
++                                            struct clk_bulk_data **clks);
++
+ /**
+  * devm_clk_get - lookup and obtain a managed reference to a clock producer.
+  * @dev: device for clock "consumer"
+@@ -968,6 +984,12 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+       return 0;
+ }
++static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
++                                              struct clk_bulk_data **clks)
++{
++      return 0;
++}
++
+ static inline struct clk *devm_get_clk_from_child(struct device *dev,
+                               struct device_node *np, const char *con_id)
+ {
+-- 
+2.45.2
+
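As introduced here, the helper returns 0 on success, so the caller never learns how many clocks were acquired; the follow-up patch below addresses that. A minimal probe sketch, assuming a hypothetical foo platform driver:

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk_bulk_data *clks;
            int ret;

            /* Gets, prepares and enables every clock listed for the
             * device; all are disabled and released again on unbind.
             */
            ret = devm_clk_bulk_get_all_enable(&pdev->dev, &clks);
            if (ret)
                    return dev_err_probe(&pdev->dev, ret,
                                         "failed to enable clocks\n");

            return 0;
    }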
diff --git a/target/linux/qualcommbe/patches-6.6/009-v6.13-clk-Provide-devm_clk_bulk_get_all_enabled-helper.patch b/target/linux/qualcommbe/patches-6.6/009-v6.13-clk-Provide-devm_clk_bulk_get_all_enabled-helper.patch
new file mode 100644 (file)
index 0000000..8d9bd9c
--- /dev/null
@@ -0,0 +1,125 @@
+From 51e32e897539663957f7a0950f66b48f8896efee Mon Sep 17 00:00:00 2001
+From: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+Date: Sat, 19 Oct 2024 14:16:00 +0300
+Subject: [PATCH] clk: Provide devm_clk_bulk_get_all_enabled() helper
+
+Commit 265b07df758a ("clk: Provide managed helper to get and enable bulk
+clocks") added devm_clk_bulk_get_all_enable() function, but missed to
+return the number of clocks stored in the clk_bulk_data table referenced
+by the clks argument.  Without knowing the number, it's not possible to
+iterate these clocks when needed, hence the argument is useless and
+could have been simply removed.
+
+Introduce the devm_clk_bulk_get_all_enabled() variant, which is consistent
+with devm_clk_bulk_get_all() in terms of the returned value:
+
+ > 0 if one or more clocks have been stored
+ = 0 if there are no clocks
+ < 0 if an error occurred
+
+Moreover, the naming is consistent with devm_clk_get_enabled(), i.e. use
+the past form of 'enable'.
+
+To reduce code duplication and improve patch readability, make
+devm_clk_bulk_get_all_enable() use the new helper, as suggested by
+Stephen Boyd.
+
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+Link: https://lore.kernel.org/r/20241019-clk_bulk_ena_fix-v4-1-57f108f64e70@collabora.com
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+---
+ drivers/clk/clk-devres.c |  9 +++++----
+ include/linux/clk.h      | 21 ++++++++++++++++-----
+ 2 files changed, 21 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
+index 82ae1f26e634..5368d92d9b39 100644
+--- a/drivers/clk/clk-devres.c
++++ b/drivers/clk/clk-devres.c
+@@ -218,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
+       clk_bulk_put_all(devres->num_clks, devres->clks);
+ }
+-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+-                                            struct clk_bulk_data **clks)
++int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
++                                             struct clk_bulk_data **clks)
+ {
+       struct clk_bulk_devres *devres;
+       int ret;
+@@ -244,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+       } else {
+               clk_bulk_put_all(devres->num_clks, devres->clks);
+               devres_free(devres);
++              return ret;
+       }
+-      return ret;
++      return devres->num_clks;
+ }
+-EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
++EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
+ static int devm_clk_match(struct device *dev, void *res, void *data)
+ {
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 851a0f2cf42c..1dcee6d701e4 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -496,11 +496,13 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
+                                      struct clk_bulk_data **clks);
+ /**
+- * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
++ * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
+  * @dev: device for clock "consumer"
+  * @clks: pointer to the clk_bulk_data table of consumer
+  *
+- * Returns success (0) or negative errno.
++ * Returns a positive value for the number of clocks obtained while the
++ * clock references are stored in the clk_bulk_data table in @clks field.
++ * Returns 0 if there're none and a negative value if something failed.
+  *
+  * This helper function allows drivers to get all clocks of the
+  * consumer and enables them in one operation with management.
+@@ -508,8 +510,8 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
+  * is unbound.
+  */
+-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+-                                            struct clk_bulk_data **clks);
++int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
++                                             struct clk_bulk_data **clks);
+ /**
+  * devm_clk_get - lookup and obtain a managed reference to a clock producer.
+@@ -1034,7 +1036,7 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+       return 0;
+ }
+-static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
++static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+                                               struct clk_bulk_data **clks)
+ {
+       return 0;
+@@ -1136,6 +1138,15 @@ static inline void clk_restore_context(void) {}
+ #endif
++/* Deprecated. Use devm_clk_bulk_get_all_enabled() */
++static inline int __must_check
++devm_clk_bulk_get_all_enable(struct device *dev, struct clk_bulk_data **clks)
++{
++      int ret = devm_clk_bulk_get_all_enabled(dev, clks);
++
++      return ret > 0 ? 0 : ret;
++}
++
+ /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
+ static inline int clk_prepare_enable(struct clk *clk)
+ {
+-- 
+2.45.2
+
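With the corrected return convention the acquired clocks can be iterated. A sketch under the same hypothetical foo driver as above:

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk_bulk_data *clks;
            int num, i;

            num = devm_clk_bulk_get_all_enabled(&pdev->dev, &clks);
            if (num < 0)
                    return num;

            /* Unlike the deprecated _enable variant, the count is known */
            for (i = 0; i < num; i++)
                    dev_dbg(&pdev->dev, "clock %s at %lu Hz\n",
                            clks[i].id, clk_get_rate(clks[i].clk));

            return 0;
    }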
diff --git a/target/linux/qualcommbe/patches-6.6/010-01-v6.11-dt-bindings-clock-Add-PCIe-pipe-related-clocks-for-I.patch b/target/linux/qualcommbe/patches-6.6/010-01-v6.11-dt-bindings-clock-Add-PCIe-pipe-related-clocks-for-I.patch
new file mode 100644 (file)
index 0000000..3580795
--- /dev/null
@@ -0,0 +1,31 @@
+From 475beea0b9f631656b5cc39429a39696876af613 Mon Sep 17 00:00:00 2001
+From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Date: Tue, 30 Apr 2024 23:07:43 -0500
+Subject: [PATCH] dt-bindings: clock: Add PCIe pipe related clocks for IPQ9574
+
+Add defines for the missing PCIe PIPE clocks.
+
+Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20240501040800.1542805-2-mr.nuke.me@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ include/dt-bindings/clock/qcom,ipq9574-gcc.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+index 08fd3a37acaa..52123c5a09fa 100644
+--- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
++++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+@@ -216,4 +216,8 @@
+ #define GCC_CRYPTO_AHB_CLK                            207
+ #define GCC_USB0_PIPE_CLK                             208
+ #define GCC_USB0_SLEEP_CLK                            209
++#define GCC_PCIE0_PIPE_CLK                            210
++#define GCC_PCIE1_PIPE_CLK                            211
++#define GCC_PCIE2_PIPE_CLK                            212
++#define GCC_PCIE3_PIPE_CLK                            213
+ #endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/010-02-v6.11-clk-qcom-gcc-ipq9574-Add-PCIe-pipe-clocks.patch b/target/linux/qualcommbe/patches-6.6/010-02-v6.11-clk-qcom-gcc-ipq9574-Add-PCIe-pipe-clocks.patch
new file mode 100644 (file)
index 0000000..05a634a
--- /dev/null
@@ -0,0 +1,134 @@
+From a8fe85d40ffe5ec0fd2f557932ffee902be35b38 Mon Sep 17 00:00:00 2001
+From: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Date: Tue, 30 Apr 2024 23:07:44 -0500
+Subject: [PATCH] clk: qcom: gcc-ipq9574: Add PCIe pipe clocks
+
+The IPQ9574 has four PCIe "pipe" clocks. These clocks are required by
+PCIe PHYs. Port the pipe clocks from the downstream 5.4 kernel.
+
+Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20240501040800.1542805-3-mr.nuke.me@gmail.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/gcc-ipq9574.c | 76 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 0a3f846695b8..bc3e17f34295 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -1569,6 +1569,24 @@ static struct clk_regmap_phy_mux pcie0_pipe_clk_src = {
+       },
+ };
++static struct clk_branch gcc_pcie0_pipe_clk = {
++      .halt_reg = 0x28044,
++      .halt_check = BRANCH_HALT_DELAY,
++      .clkr = {
++              .enable_reg = 0x28044,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "gcc_pcie0_pipe_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &pcie0_pipe_clk_src.clkr.hw
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
+ static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
+       .reg = 0x29064,
+       .clkr = {
+@@ -1583,6 +1601,24 @@ static struct clk_regmap_phy_mux pcie1_pipe_clk_src = {
+       },
+ };
++static struct clk_branch gcc_pcie1_pipe_clk = {
++      .halt_reg = 0x29044,
++      .halt_check = BRANCH_HALT_DELAY,
++      .clkr = {
++              .enable_reg = 0x29044,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "gcc_pcie1_pipe_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &pcie1_pipe_clk_src.clkr.hw
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
+ static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
+       .reg = 0x2a064,
+       .clkr = {
+@@ -1597,6 +1633,24 @@ static struct clk_regmap_phy_mux pcie2_pipe_clk_src = {
+       },
+ };
++static struct clk_branch gcc_pcie2_pipe_clk = {
++      .halt_reg = 0x2a044,
++      .halt_check = BRANCH_HALT_DELAY,
++      .clkr = {
++              .enable_reg = 0x2a044,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "gcc_pcie2_pipe_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &pcie2_pipe_clk_src.clkr.hw
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
+ static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
+       .reg = 0x2b064,
+       .clkr = {
+@@ -1611,6 +1665,24 @@ static struct clk_regmap_phy_mux pcie3_pipe_clk_src = {
+       },
+ };
++static struct clk_branch gcc_pcie3_pipe_clk = {
++      .halt_reg = 0x2b044,
++      .halt_check = BRANCH_HALT_DELAY,
++      .clkr = {
++              .enable_reg = 0x2b044,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "gcc_pcie3_pipe_clk",
++                      .parent_hws = (const struct clk_hw *[]) {
++                              &pcie3_pipe_clk_src.clkr.hw
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
+ static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+       F(24000000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0, 8, 0, 0),
+@@ -4141,6 +4213,10 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
+       [GCC_SNOC_PCIE1_1LANE_S_CLK] = &gcc_snoc_pcie1_1lane_s_clk.clkr,
+       [GCC_SNOC_PCIE2_2LANE_S_CLK] = &gcc_snoc_pcie2_2lane_s_clk.clkr,
+       [GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
++      [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
++      [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
++      [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
++      [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
+ };
+ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+-- 
+2.45.2
+
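The new branches are meant to be consumed by the PCIe controller and PHY nodes rather than enabled unconditionally. On the consumer side this reduces to an ordinary named clock lookup; a sketch follows, where the "pipe" con-id is an assumption based on the usual Qualcomm PHY convention, not something this patch mandates:

    static int foo_pcie_enable_pipe(struct device *dev)
    {
            struct clk *pipe_clk;

            /* DT side (illustrative):
             *   clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
             *   clock-names = "pipe";
             */
            pipe_clk = devm_clk_get(dev, "pipe");
            if (IS_ERR(pipe_clk))
                    return PTR_ERR(pipe_clk);

            return clk_prepare_enable(pipe_clk);
    }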
diff --git a/target/linux/qualcommbe/patches-6.6/011-v6.11-arm64-dts-qcom-ipq9574-drop-power-domain-cells-prope.patch b/target/linux/qualcommbe/patches-6.6/011-v6.11-arm64-dts-qcom-ipq9574-drop-power-domain-cells-prope.patch
new file mode 100644 (file)
index 0000000..ed8e3e9
--- /dev/null
@@ -0,0 +1,32 @@
+From ef3308cf52553522d619a858a72a68f82432865b Mon Sep 17 00:00:00 2001
+From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Date: Wed, 29 May 2024 17:47:10 +0300
+Subject: [PATCH] arm64: dts: qcom: ipq9574: drop #power-domain-cells property
+ of GCC
+
+On IPQ9574 the Global Clock Controller (GCC) doesn't provide power
+domains. Drop the #power-domain-cells property from the controller
+device node.
+
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Link: https://lore.kernel.org/r/20240529-qcom-gdscs-v2-12-69c63d0ae1e7@linaro.org
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index ded02bc39275..d21937b09b4b 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -315,7 +315,6 @@ gcc: clock-controller@1800000 {
+                                <0>;
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+-                      #power-domain-cells = <1>;
+               };
+               tcsr_mutex: hwlock@1905000 {
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/012-01-v6.11-interconnect-icc-clk-Specify-master-slave-ids.patch b/target/linux/qualcommbe/patches-6.6/012-01-v6.11-interconnect-icc-clk-Specify-master-slave-ids.patch
new file mode 100644 (file)
index 0000000..eae3398
--- /dev/null
@@ -0,0 +1,85 @@
+From f45b94ffc5f1204b35b5c695ed265b1385951616 Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:09 +0530
+Subject: [PATCH] interconnect: icc-clk: Specify master/slave ids
+
+Presently, icc-clk driver autogenerates the master and slave ids.
+However, devices with multiple nodes on the interconnect could
+have other constraints and may not match with the auto generated
+node ids.
+
+Hence, modify the driver to use the master/slave ids provided by
+the caller instead of auto generating.
+
+Also, update clk-cbf-8996 accordingly.
+
+Acked-by: Georgi Djakov <djakov@kernel.org>
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Link: https://lore.kernel.org/r/20240430064214.2030013-2-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/clk-cbf-8996.c  | 7 ++++++-
+ drivers/interconnect/icc-clk.c   | 6 +++---
+ include/linux/interconnect-clk.h | 2 ++
+ 3 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
+index 76bf523431b8..f5fd1ff9c6c9 100644
+--- a/drivers/clk/qcom/clk-cbf-8996.c
++++ b/drivers/clk/qcom/clk-cbf-8996.c
+@@ -226,7 +226,12 @@ static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct cl
+       struct device *dev = &pdev->dev;
+       struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
+       const struct icc_clk_data data[] = {
+-              { .clk = clk, .name = "cbf", },
++              {
++                      .clk = clk,
++                      .name = "cbf",
++                      .master_id = MASTER_CBF_M4M,
++                      .slave_id = SLAVE_CBF_M4M,
++              },
+       };
+       struct icc_provider *provider;
+diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
+index d787f2ea36d9..2be193fd7d8f 100644
+--- a/drivers/interconnect/icc-clk.c
++++ b/drivers/interconnect/icc-clk.c
+@@ -108,7 +108,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
+       for (i = 0, j = 0; i < num_clocks; i++) {
+               qp->clocks[i].clk = data[i].clk;
+-              node = icc_node_create(first_id + j);
++              node = icc_node_create(first_id + data[i].master_id);
+               if (IS_ERR(node)) {
+                       ret = PTR_ERR(node);
+                       goto err;
+@@ -118,10 +118,10 @@ struct icc_provider *icc_clk_register(struct device *dev,
+               node->data = &qp->clocks[i];
+               icc_node_add(node, provider);
+               /* link to the next node, slave */
+-              icc_link_create(node, first_id + j + 1);
++              icc_link_create(node, first_id + data[i].slave_id);
+               onecell->nodes[j++] = node;
+-              node = icc_node_create(first_id + j);
++              node = icc_node_create(first_id + data[i].slave_id);
+               if (IS_ERR(node)) {
+                       ret = PTR_ERR(node);
+                       goto err;
+diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
+index 0cd80112bea5..170898faaacb 100644
+--- a/include/linux/interconnect-clk.h
++++ b/include/linux/interconnect-clk.h
+@@ -11,6 +11,8 @@ struct device;
+ struct icc_clk_data {
+       struct clk *clk;
+       const char *name;
++      unsigned int master_id;
++      unsigned int slave_id;
+ };
+ struct icc_provider *icc_clk_register(struct device *dev,
+-- 
+2.45.2
+
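After this change a node id is first_id plus the caller-supplied master/slave id, instead of a consecutive counter. A sketch of a provider passing explicit ids; MASTER_FOO, SLAVE_FOO and FOO_FIRST_ID are illustrative constants, not part of this patch:

    static int foo_icc_register(struct device *dev, struct clk *noc_clk)
    {
            const struct icc_clk_data data[] = {
                    {
                            .clk = noc_clk,
                            .name = "foo-noc",
                            .master_id = MASTER_FOO,
                            .slave_id = SLAVE_FOO,
                    },
            };
            struct icc_provider *provider;

            /* Nodes land at FOO_FIRST_ID + MASTER_FOO and
             * FOO_FIRST_ID + SLAVE_FOO.
             */
            provider = icc_clk_register(dev, FOO_FIRST_ID,
                                        ARRAY_SIZE(data), data);

            return PTR_ERR_OR_ZERO(provider);
    }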
diff --git a/target/linux/qualcommbe/patches-6.6/012-02-v6.11-dt-bindings-interconnect-Add-Qualcomm-IPQ9574-suppor.patch b/target/linux/qualcommbe/patches-6.6/012-02-v6.11-dt-bindings-interconnect-Add-Qualcomm-IPQ9574-suppor.patch
new file mode 100644 (file)
index 0000000..f822003
--- /dev/null
@@ -0,0 +1,106 @@
+From d1f1570f3d6db5d35642092a671812e62bfba79d Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:10 +0530
+Subject: [PATCH] dt-bindings: interconnect: Add Qualcomm IPQ9574 support
+
+Add interconnect-cells to the clock provider so that it can be
+used as an icc provider.
+
+Add master/slave ids for Qualcomm IPQ9574 Network-On-Chip
+interfaces. This will be used by the gcc-ipq9574 driver
+to provide interconnect services using the
+icc-clk framework.
+
+Acked-by: Georgi Djakov <djakov@kernel.org>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Link: https://lore.kernel.org/r/20240430064214.2030013-3-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ .../bindings/clock/qcom,ipq9574-gcc.yaml      |  3 +
+ .../dt-bindings/interconnect/qcom,ipq9574.h   | 59 +++++++++++++++++++
+ 2 files changed, 62 insertions(+)
+ create mode 100644 include/dt-bindings/interconnect/qcom,ipq9574.h
+
+diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
+index 944a0ea79cd6..824781cbdf34 100644
+--- a/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
++++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-gcc.yaml
+@@ -33,6 +33,9 @@ properties:
+       - description: PCIE30 PHY3 pipe clock source
+       - description: USB3 PHY pipe clock source
++  '#interconnect-cells':
++    const: 1
++
+ required:
+   - compatible
+   - clocks
+diff --git a/include/dt-bindings/interconnect/qcom,ipq9574.h b/include/dt-bindings/interconnect/qcom,ipq9574.h
+new file mode 100644
+index 000000000000..42019335c7dd
+--- /dev/null
++++ b/include/dt-bindings/interconnect/qcom,ipq9574.h
+@@ -0,0 +1,59 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++#ifndef INTERCONNECT_QCOM_IPQ9574_H
++#define INTERCONNECT_QCOM_IPQ9574_H
++
++#define MASTER_ANOC_PCIE0             0
++#define SLAVE_ANOC_PCIE0              1
++#define MASTER_SNOC_PCIE0             2
++#define SLAVE_SNOC_PCIE0              3
++#define MASTER_ANOC_PCIE1             4
++#define SLAVE_ANOC_PCIE1              5
++#define MASTER_SNOC_PCIE1             6
++#define SLAVE_SNOC_PCIE1              7
++#define MASTER_ANOC_PCIE2             8
++#define SLAVE_ANOC_PCIE2              9
++#define MASTER_SNOC_PCIE2             10
++#define SLAVE_SNOC_PCIE2              11
++#define MASTER_ANOC_PCIE3             12
++#define SLAVE_ANOC_PCIE3              13
++#define MASTER_SNOC_PCIE3             14
++#define SLAVE_SNOC_PCIE3              15
++#define MASTER_USB                    16
++#define SLAVE_USB                     17
++#define MASTER_USB_AXI                        18
++#define SLAVE_USB_AXI                 19
++#define MASTER_NSSNOC_NSSCC           20
++#define SLAVE_NSSNOC_NSSCC            21
++#define MASTER_NSSNOC_SNOC_0          22
++#define SLAVE_NSSNOC_SNOC_0           23
++#define MASTER_NSSNOC_SNOC_1          24
++#define SLAVE_NSSNOC_SNOC_1           25
++#define MASTER_NSSNOC_PCNOC_1         26
++#define SLAVE_NSSNOC_PCNOC_1          27
++#define MASTER_NSSNOC_QOSGEN_REF      28
++#define SLAVE_NSSNOC_QOSGEN_REF               29
++#define MASTER_NSSNOC_TIMEOUT_REF     30
++#define SLAVE_NSSNOC_TIMEOUT_REF      31
++#define MASTER_NSSNOC_XO_DCD          32
++#define SLAVE_NSSNOC_XO_DCD           33
++#define MASTER_NSSNOC_ATB             34
++#define SLAVE_NSSNOC_ATB              35
++#define MASTER_MEM_NOC_NSSNOC         36
++#define SLAVE_MEM_NOC_NSSNOC          37
++#define MASTER_NSSNOC_MEMNOC          38
++#define SLAVE_NSSNOC_MEMNOC           39
++#define MASTER_NSSNOC_MEM_NOC_1               40
++#define SLAVE_NSSNOC_MEM_NOC_1                41
++
++#define MASTER_NSSNOC_PPE             0
++#define SLAVE_NSSNOC_PPE              1
++#define MASTER_NSSNOC_PPE_CFG         2
++#define SLAVE_NSSNOC_PPE_CFG          3
++#define MASTER_NSSNOC_NSS_CSR         4
++#define SLAVE_NSSNOC_NSS_CSR          5
++#define MASTER_NSSNOC_IMEM_QSB                6
++#define SLAVE_NSSNOC_IMEM_QSB         7
++#define MASTER_NSSNOC_IMEM_AHB                8
++#define SLAVE_NSSNOC_IMEM_AHB         9
++
++#endif /* INTERCONNECT_QCOM_IPQ9574_H */
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/012-03-v6.11-interconnect-icc-clk-Add-devm_icc_clk_register.patch b/target/linux/qualcommbe/patches-6.6/012-03-v6.11-interconnect-icc-clk-Add-devm_icc_clk_register.patch
new file mode 100644 (file)
index 0000000..ae86828
--- /dev/null
@@ -0,0 +1,63 @@
+From d3153113619216e87038a20bebf82582f9be10e7 Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:11 +0530
+Subject: [PATCH] interconnect: icc-clk: Add devm_icc_clk_register
+
+Wrap icc_clk_register to create devm_icc_clk_register to be
+able to release the resources properly.
+
+Acked-by: Georgi Djakov <djakov@kernel.org>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Link: https://lore.kernel.org/r/20240430064214.2030013-4-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/interconnect/icc-clk.c   | 18 ++++++++++++++++++
+ include/linux/interconnect-clk.h |  2 ++
+ 2 files changed, 20 insertions(+)
+
+diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
+index 2be193fd7d8f..f788db15cd76 100644
+--- a/drivers/interconnect/icc-clk.c
++++ b/drivers/interconnect/icc-clk.c
+@@ -148,6 +148,24 @@ struct icc_provider *icc_clk_register(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(icc_clk_register);
++static void devm_icc_release(void *res)
++{
++      icc_clk_unregister(res);
++}
++
++int devm_icc_clk_register(struct device *dev, unsigned int first_id,
++                        unsigned int num_clocks, const struct icc_clk_data *data)
++{
++      struct icc_provider *prov;
++
++      prov = icc_clk_register(dev, first_id, num_clocks, data);
++      if (IS_ERR(prov))
++              return PTR_ERR(prov);
++
++      return devm_add_action_or_reset(dev, devm_icc_release, prov);
++}
++EXPORT_SYMBOL_GPL(devm_icc_clk_register);
++
+ /**
+  * icc_clk_unregister() - unregister a previously registered clk interconnect provider
+  * @provider: provider returned by icc_clk_register()
+diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
+index 170898faaacb..9bcee3e9c56c 100644
+--- a/include/linux/interconnect-clk.h
++++ b/include/linux/interconnect-clk.h
+@@ -19,6 +19,8 @@ struct icc_provider *icc_clk_register(struct device *dev,
+                                     unsigned int first_id,
+                                     unsigned int num_clocks,
+                                     const struct icc_clk_data *data);
++int devm_icc_clk_register(struct device *dev, unsigned int first_id,
++                        unsigned int num_clocks, const struct icc_clk_data *data);
+ void icc_clk_unregister(struct icc_provider *provider);
+ #endif
+-- 
+2.45.2
+
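With the devm variant a driver no longer needs a remove path calling icc_clk_unregister(); registration is tied to the device lifetime. A sketch continuing the hypothetical provider above:

    static int foo_icc_register(struct device *dev, struct clk *noc_clk)
    {
            const struct icc_clk_data data[] = {
                    { .clk = noc_clk, .name = "foo-noc",
                      .master_id = MASTER_FOO, .slave_id = SLAVE_FOO, },
            };

            /* Unregistered automatically when the driver unbinds */
            return devm_icc_clk_register(dev, FOO_FIRST_ID,
                                         ARRAY_SIZE(data), data);
    }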
diff --git a/target/linux/qualcommbe/patches-6.6/012-04-v6.11-clk-qcom-common-Add-interconnect-clocks-support.patch b/target/linux/qualcommbe/patches-6.6/012-04-v6.11-clk-qcom-common-Add-interconnect-clocks-support.patch
new file mode 100644 (file)
index 0000000..e0af1be
--- /dev/null
@@ -0,0 +1,115 @@
+From 8737ec830ee32162858af7c1504169b05b313ab1 Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:12 +0530
+Subject: [PATCH] clk: qcom: common: Add interconnect clocks support
+
+Unlike MSM platforms that manage NoC related clocks and scaling
+from RPM, IPQ SoCs don't involve RPM in managing NoC related
+clocks and there is no NoC scaling.
+
+However, there is a requirement to enable some NoC interface
+clocks for accessing the peripheral controllers present on
+these NoCs. Though exposing these as normal clocks would work,
+having a minimalistic interconnect driver to handle these clocks
+would make it consistent with other Qualcomm platforms resulting
+in common code paths. This is similar to msm8996-cbf's usage of
+icc-clk framework.
+
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Link: https://lore.kernel.org/r/20240430064214.2030013-5-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/common.c | 35 ++++++++++++++++++++++++++++++++++-
+ drivers/clk/qcom/common.h |  9 +++++++++
+ 2 files changed, 43 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
+index c92e10c60322..ea3788ba46f7 100644
+--- a/drivers/clk/qcom/common.c
++++ b/drivers/clk/qcom/common.c
+@@ -8,6 +8,7 @@
+ #include <linux/regmap.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk-provider.h>
++#include <linux/interconnect-clk.h>
+ #include <linux/reset-controller.h>
+ #include <linux/of.h>
+@@ -252,6 +253,38 @@ static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
+       return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
+ }
++static int qcom_cc_icc_register(struct device *dev,
++                              const struct qcom_cc_desc *desc)
++{
++      struct icc_clk_data *icd;
++      struct clk_hw *hws;
++      int i;
++
++      if (!IS_ENABLED(CONFIG_INTERCONNECT_CLK))
++              return 0;
++
++      if (!desc->icc_hws)
++              return 0;
++
++      icd = devm_kcalloc(dev, desc->num_icc_hws, sizeof(*icd), GFP_KERNEL);
++      if (!icd)
++              return -ENOMEM;
++
++      for (i = 0; i < desc->num_icc_hws; i++) {
++              icd[i].master_id = desc->icc_hws[i].master_id;
++              icd[i].slave_id = desc->icc_hws[i].slave_id;
++              hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
++              icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
++              if (!icd[i].clk)
++                      return dev_err_probe(dev, -ENOENT,
++                                           "(%d) clock entry is null\n", i);
++              icd[i].name = clk_hw_get_name(hws);
++      }
++
++      return devm_icc_clk_register(dev, desc->icc_first_node_id,
++                                                   desc->num_icc_hws, icd);
++}
++
+ int qcom_cc_really_probe(struct device *dev,
+                        const struct qcom_cc_desc *desc, struct regmap *regmap)
+ {
+@@ -320,7 +353,7 @@ int qcom_cc_really_probe(struct device *dev,
+       if (ret)
+               return ret;
+-      return 0;
++      return qcom_cc_icc_register(dev, desc);
+ }
+ EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
+index d048bdeeba10..7e57f8fe8ea6 100644
+--- a/drivers/clk/qcom/common.h
++++ b/drivers/clk/qcom/common.h
+@@ -19,6 +19,12 @@ struct clk_hw;
+ #define PLL_VOTE_FSM_ENA      BIT(20)
+ #define PLL_VOTE_FSM_RESET    BIT(21)
++struct qcom_icc_hws_data {
++      int master_id;
++      int slave_id;
++      int clk_id;
++};
++
+ struct qcom_cc_desc {
+       const struct regmap_config *config;
+       struct clk_regmap **clks;
+@@ -29,6 +35,9 @@ struct qcom_cc_desc {
+       size_t num_gdscs;
+       struct clk_hw **clk_hws;
+       size_t num_clk_hws;
++      struct qcom_icc_hws_data *icc_hws;
++      size_t num_icc_hws;
++      unsigned int icc_first_node_id;
+ };
+ /**
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/012-05-v6.11-clk-qcom-ipq9574-Use-icc-clk-for-enabling-NoC-relate.patch b/target/linux/qualcommbe/patches-6.6/012-05-v6.11-clk-qcom-ipq9574-Use-icc-clk-for-enabling-NoC-relate.patch
new file mode 100644 (file)
index 0000000..bbcecdc
--- /dev/null
@@ -0,0 +1,106 @@
+From 23711cabe122ef55bcb2e5c3e3835b5a2a688fc0 Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:13 +0530
+Subject: [PATCH] clk: qcom: ipq9574: Use icc-clk for enabling NoC related
+ clocks
+
+Use the icc-clk framework to enable a few clocks to be able to
+create paths and use the peripherals connected on those NoCs.
+
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20240430064214.2030013-6-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/Kconfig       |  2 ++
+ drivers/clk/qcom/gcc-ipq9574.c | 33 +++++++++++++++++++++++++++++++++
+ 2 files changed, 35 insertions(+)
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 1231eae51556..11ae28430dad 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -14,6 +14,8 @@ menuconfig COMMON_CLK_QCOM
+       select RATIONAL
+       select REGMAP_MMIO
+       select RESET_CONTROLLER
++      select INTERCONNECT
++      select INTERCONNECT_CLK
+ if COMMON_CLK_QCOM
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index bc3e17f34295..f08a447370bd 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -4,6 +4,8 @@
+  */
+ #include <linux/clk-provider.h>
++#include <linux/interconnect-clk.h>
++#include <linux/interconnect-provider.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+@@ -12,6 +14,7 @@
+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+ #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
++#include <dt-bindings/interconnect/qcom,ipq9574.h>
+ #include "clk-alpha-pll.h"
+ #include "clk-branch.h"
+@@ -4377,6 +4380,32 @@ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+       [GCC_WCSS_Q6_TBU_BCR] = { 0x12054, 0 },
+ };
++#define IPQ_APPS_ID                   9574    /* some unique value */
++
++static struct qcom_icc_hws_data icc_ipq9574_hws[] = {
++      { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
++      { MASTER_SNOC_PCIE0, SLAVE_SNOC_PCIE0, GCC_SNOC_PCIE0_1LANE_S_CLK },
++      { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
++      { MASTER_SNOC_PCIE1, SLAVE_SNOC_PCIE1, GCC_SNOC_PCIE1_1LANE_S_CLK },
++      { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
++      { MASTER_SNOC_PCIE2, SLAVE_SNOC_PCIE2, GCC_SNOC_PCIE2_2LANE_S_CLK },
++      { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
++      { MASTER_SNOC_PCIE3, SLAVE_SNOC_PCIE3, GCC_SNOC_PCIE3_2LANE_S_CLK },
++      { MASTER_USB, SLAVE_USB, GCC_SNOC_USB_CLK },
++      { MASTER_USB_AXI, SLAVE_USB_AXI, GCC_ANOC_USB_AXI_CLK },
++      { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
++      { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
++      { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
++      { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
++      { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
++      { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
++      { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
++      { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
++      { MASTER_MEM_NOC_NSSNOC, SLAVE_MEM_NOC_NSSNOC, GCC_MEM_NOC_NSSNOC_CLK },
++      { MASTER_NSSNOC_MEMNOC, SLAVE_NSSNOC_MEMNOC, GCC_NSSNOC_MEMNOC_CLK },
++      { MASTER_NSSNOC_MEM_NOC_1, SLAVE_NSSNOC_MEM_NOC_1, GCC_NSSNOC_MEM_NOC_1_CLK },
++};
++
+ static const struct of_device_id gcc_ipq9574_match_table[] = {
+       { .compatible = "qcom,ipq9574-gcc" },
+       { }
+@@ -4399,6 +4428,9 @@ static const struct qcom_cc_desc gcc_ipq9574_desc = {
+       .num_resets = ARRAY_SIZE(gcc_ipq9574_resets),
+       .clk_hws = gcc_ipq9574_hws,
+       .num_clk_hws = ARRAY_SIZE(gcc_ipq9574_hws),
++      .icc_hws = icc_ipq9574_hws,
++      .num_icc_hws = ARRAY_SIZE(icc_ipq9574_hws),
++      .icc_first_node_id = IPQ_APPS_ID,
+ };
+ static int gcc_ipq9574_probe(struct platform_device *pdev)
+@@ -4411,6 +4443,7 @@ static struct platform_driver gcc_ipq9574_driver = {
+       .driver = {
+               .name   = "qcom,gcc-ipq9574",
+               .of_match_table = gcc_ipq9574_match_table,
++              .sync_state = icc_sync_state,
+       },
+ };
+-- 
+2.45.2
+
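With GCC acting as the provider, a peripheral enables its NoC interface clock through the standard interconnect API instead of a clock handle. A consumer-side sketch; the "usb-axi" path name and the bandwidth value are illustrative assumptions:

    #include <linux/interconnect.h>

    static int foo_enable_noc_path(struct device *dev)
    {
            struct icc_path *path;

            /* DT side (illustrative):
             *   interconnects = <&gcc MASTER_USB_AXI &gcc SLAVE_USB_AXI>;
             *   interconnect-names = "usb-axi";
             */
            path = devm_of_icc_get(dev, "usb-axi");
            if (IS_ERR(path))
                    return PTR_ERR(path);

            /* Any non-zero request enables the underlying branch clock;
             * there is no real NoC scaling on IPQ9574.
             */
            return icc_set_bw(path, 0, 1);
    }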
diff --git a/target/linux/qualcommbe/patches-6.6/012-06-v6.11-arm64-dts-qcom-ipq9574-Add-icc-provider-ability-to-g.patch b/target/linux/qualcommbe/patches-6.6/012-06-v6.11-arm64-dts-qcom-ipq9574-Add-icc-provider-ability-to-g.patch
new file mode 100644 (file)
index 0000000..b5f9c0f
--- /dev/null
@@ -0,0 +1,46 @@
+From 5d0ab61a700214366dfcca5893b87655261e8c94 Mon Sep 17 00:00:00 2001
+From: Varadarajan Narayanan <quic_varada@quicinc.com>
+Date: Tue, 30 Apr 2024 12:12:14 +0530
+Subject: [PATCH] arm64: dts: qcom: ipq9574: Add icc provider ability to gcc
+
+IPQ SoCs don't involve RPM in managing NoC related clocks and
+there is no NoC scaling. Linux itself handles these clocks.
+However, these should not be exposed as plain clocks; instead,
+align with other Qualcomm SoCs that handle these clocks from an
+interconnect provider.
+
+Hence add icc provider capability to the gcc node so that
+peripherals can use the interconnect facility to enable these
+clocks.
+
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Link: https://lore.kernel.org/r/20240430064214.2030013-7-quic_varada@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 04ba09a9156c..48dfafea46a7 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -8,6 +8,7 @@
+ #include <dt-bindings/clock/qcom,apss-ipq.h>
+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
++#include <dt-bindings/interconnect/qcom,ipq9574.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+ #include <dt-bindings/thermal/thermal.h>
+@@ -315,6 +316,7 @@ gcc: clock-controller@1800000 {
+                                <0>;
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
++                      #interconnect-cells = <1>;
+               };
+               tcsr_mutex: hwlock@1905000 {
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/100-02-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch b/target/linux/qualcommbe/patches-6.6/100-02-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
new file mode 100644 (file)
index 0000000..595ef4d
--- /dev/null
@@ -0,0 +1,1064 @@
+From 1142a905d3450a40ac03cbe1426b16cd9650c5f7 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Thu, 4 Apr 2024 16:19:43 +0530
+Subject: [PATCH v10 2/8] mtd: rawnand: qcom: cleanup qcom_nandc driver
+
+Clean up the qcom_nandc driver as follows:
+
+- Remove the register value indirection API
+
+- Remove the set_reg() API
+
+- Convert the read_loc_first & read_loc_last macros to functions
+
+- Rename multiple variables
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change
+
+Change in [v9]
+
+* Changed type of cmd1, vld to u32 from __le32 in qcom_nand_controller
+  structure
+* Changed type of cfg0, cfg1, cfg0_raw, cfg1_raw, clrflashstatus,
+  ecc_buf_cfg, ecc_bch_cfg, clrreadstatus to u32 in qcom_nand_host
+  structure
+* In nandc_set_read_loc_first() api added cpu_to_le32() macro to fix
+  compilation warning reported by kernel test bot
+* In nandc_set_read_loc_last() api added cpu_to_le32() macro to fix
+  compilation warning reported by kernel test bot
+* Changed data type of cw_offset, read_size, is_last_read_loc to
+  u32 in nandc_set_read_loc() api to fix compilation warning reported
+  by kernel test bot
+* In set_address() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In update_rw_regs() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In qcom_op_cmd_mapping() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In qcom_read_status_exec() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In qcom_read_id_type_exec() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In qcom_misc_cmd_type_exec() api added cpu_to_le32() macro to fix compilation
+  warning reported by kernel test bot
+* In qcom_param_page_type_exec() api added cpu_to_le32() macro to fix
+  compilation warning reported by kernel test bot 
+
+Change in [v8]
+
+* Fixed compilation warning reported by kernel test robot
+* Added "chip" description in nandc_set_read_loc_first()
+* Added "chip" description in nandc_set_read_loc_last()
+* Changed data type of read_location0, read_location1, 
+  read_location2, read_location3, read_location_last0,
+  read_location_last1, read_location_last2, read_location_last3,
+  addr0, addr1, cmd, cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg,
+  clrflashstatus, clrreadstatus, orig_cmd1, orig_vld to 
+  __le32 to fix compilation warning reported by kernel test robot
+Change in [v7]
+
+* No change
+
+Change in [v6]
+
+* No change
+
+Change in [v5]
+
+* Cleaned up the raw NAND driver.
+
+* Removed register value indirection.
+
+* Removed the set_reg() API.
+
+Change in [v4]
+
+* This patch was not included in [v4]
+
+Change in [v3]
+
+* This patch was not included in [v3]
+
+Change in [v2]
+
+* This patch was not included in [v2]
+
+Change in [v1]
+
+* This patch was not included in [v1]
+
+ drivers/mtd/nand/raw/qcom_nandc.c | 506 ++++++++++++++----------------
+ 1 file changed, 229 insertions(+), 277 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -189,17 +189,6 @@
+ #define       ECC_BCH_4BIT    BIT(2)
+ #define       ECC_BCH_8BIT    BIT(3)
+-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)   \
+-nandc_set_reg(chip, reg,                      \
+-            ((cw_offset) << READ_LOCATION_OFFSET) |           \
+-            ((read_size) << READ_LOCATION_SIZE) |                     \
+-            ((is_last_read_loc) << READ_LOCATION_LAST))
+-
+-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)    \
+-nandc_set_reg(chip, reg,                      \
+-            ((cw_offset) << READ_LOCATION_OFFSET) |           \
+-            ((read_size) << READ_LOCATION_SIZE) |                     \
+-            ((is_last_read_loc) << READ_LOCATION_LAST))
+ /*
+  * Returns the actual register address for all NAND_DEV_ registers
+  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,                   \
+  * @tx_sgl_start - start index in data sgl for tx.
+  * @rx_sgl_pos - current index in data sgl for rx.
+  * @rx_sgl_start - start index in data sgl for rx.
+- * @wait_second_completion - wait for second DMA desc completion before making
+- *                         the NAND transfer completion.
+  */
+ struct bam_transaction {
+       struct bam_cmd_element *bam_ce;
+@@ -275,7 +262,6 @@ struct bam_transaction {
+       u32 tx_sgl_start;
+       u32 rx_sgl_pos;
+       u32 rx_sgl_start;
+-      bool wait_second_completion;
+ };
+ /*
+@@ -471,9 +457,9 @@ struct qcom_op {
+       unsigned int data_instr_idx;
+       unsigned int rdy_timeout_ms;
+       unsigned int rdy_delay_ns;
+-      u32 addr1_reg;
+-      u32 addr2_reg;
+-      u32 cmd_reg;
++      __le32 addr1_reg;
++      __le32 addr2_reg;
++      __le32 cmd_reg;
+       u8 flag;
+ };
+@@ -549,17 +535,17 @@ struct qcom_nand_host {
+  * among different NAND controllers.
+  * @ecc_modes - ecc mode for NAND
+  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @is_bam - whether NAND controller is using BAM
+- * @is_qpic - whether NAND CTRL is part of qpic IP
+- * @qpic_v2 - flag to indicate QPIC IP version 2
++ * @supports_bam - whether NAND controller is using BAM
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
+  * @use_codeword_fixup - whether NAND has different layout for boot partitions
+  */
+ struct qcom_nandc_props {
+       u32 ecc_modes;
+       u32 dev_cmd_reg_start;
+-      bool is_bam;
+-      bool is_qpic;
+-      bool qpic_v2;
++      bool supports_bam;
++      bool nandc_part_of_qpic;
++      bool qpic_version2;
+       bool use_codeword_fixup;
+ };
+@@ -613,19 +599,18 @@ static void clear_bam_transaction(struct
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+-      if (!nandc->props->is_bam)
++      if (!nandc->props->supports_bam)
+               return;
+       bam_txn->bam_ce_pos = 0;
+       bam_txn->bam_ce_start = 0;
+       bam_txn->cmd_sgl_pos = 0;
+       bam_txn->cmd_sgl_start = 0;
+       bam_txn->tx_sgl_pos = 0;
+       bam_txn->tx_sgl_start = 0;
+       bam_txn->rx_sgl_pos = 0;
+       bam_txn->rx_sgl_start = 0;
+       bam_txn->last_data_desc = NULL;
+-      bam_txn->wait_second_completion = false;
+       sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+                     QPIC_PER_CW_CMD_SGL);
+@@ -640,17 +618,7 @@ static void qpic_bam_dma_done(void *data
+ {
+       struct bam_transaction *bam_txn = data;
+-      /*
+-       * In case of data transfer with NAND, 2 callbacks will be generated.
+-       * One for command channel and another one for data channel.
+-       * If current transaction has data descriptors
+-       * (i.e. wait_second_completion is true), then set this to false
+-       * and wait for second DMA descriptor completion.
+-       */
+-      if (bam_txn->wait_second_completion)
+-              bam_txn->wait_second_completion = false;
+-      else
+-              complete(&bam_txn->txn_done);
++      complete(&bam_txn->txn_done);
+ }
+ static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+@@ -676,10 +644,9 @@ static inline void nandc_write(struct qc
+       iowrite32(val, nandc->base + offset);
+ }
+-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+-                                        bool is_cpu)
++static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+-      if (!nandc->props->is_bam)
++      if (!nandc->props->supports_bam)
+               return;
+       if (is_cpu)
+@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
+                                          DMA_FROM_DEVICE);
+ }
+-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+-{
+-      switch (offset) {
+-      case NAND_FLASH_CMD:
+-              return &regs->cmd;
+-      case NAND_ADDR0:
+-              return &regs->addr0;
+-      case NAND_ADDR1:
+-              return &regs->addr1;
+-      case NAND_FLASH_CHIP_SELECT:
+-              return &regs->chip_sel;
+-      case NAND_EXEC_CMD:
+-              return &regs->exec;
+-      case NAND_FLASH_STATUS:
+-              return &regs->clrflashstatus;
+-      case NAND_DEV0_CFG0:
+-              return &regs->cfg0;
+-      case NAND_DEV0_CFG1:
+-              return &regs->cfg1;
+-      case NAND_DEV0_ECC_CFG:
+-              return &regs->ecc_bch_cfg;
+-      case NAND_READ_STATUS:
+-              return &regs->clrreadstatus;
+-      case NAND_DEV_CMD1:
+-              return &regs->cmd1;
+-      case NAND_DEV_CMD1_RESTORE:
+-              return &regs->orig_cmd1;
+-      case NAND_DEV_CMD_VLD:
+-              return &regs->vld;
+-      case NAND_DEV_CMD_VLD_RESTORE:
+-              return &regs->orig_vld;
+-      case NAND_EBI2_ECC_BUF_CFG:
+-              return &regs->ecc_buf_cfg;
+-      case NAND_READ_LOCATION_0:
+-              return &regs->read_location0;
+-      case NAND_READ_LOCATION_1:
+-              return &regs->read_location1;
+-      case NAND_READ_LOCATION_2:
+-              return &regs->read_location2;
+-      case NAND_READ_LOCATION_3:
+-              return &regs->read_location3;
+-      case NAND_READ_LOCATION_LAST_CW_0:
+-              return &regs->read_location_last0;
+-      case NAND_READ_LOCATION_LAST_CW_1:
+-              return &regs->read_location_last1;
+-      case NAND_READ_LOCATION_LAST_CW_2:
+-              return &regs->read_location_last2;
+-      case NAND_READ_LOCATION_LAST_CW_3:
+-              return &regs->read_location_last3;
+-      default:
+-              return NULL;
+-      }
+-}
+-
+-static void nandc_set_reg(struct nand_chip *chip, int offset,
+-                        u32 val)
+-{
+-      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      struct nandc_regs *regs = nandc->regs;
+-      __le32 *reg;
+-
+-      reg = offset_to_nandc_reg(regs, offset);
+-
+-      if (reg)
+-              *reg = cpu_to_le32(val);
+-}
+-
+ /* Helper to check the code word, whether it is last cw or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+       return cw == (ecc->steps - 1);
+ }
++/**
++ * nandc_set_read_loc_first() - to set read location first register
++ * @chip:             NAND Private Flash Chip Data
++ * @reg_base:         location register base
++ * @cw_offset:                code word offset
++ * @read_size:                code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location register value
++ */
++static void nandc_set_read_loc_first(struct nand_chip *chip,
++                                   int reg_base, u32 cw_offset,
++                                   u32 read_size, u32 is_last_read_loc)
++{
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) |
++                ((is_last_read_loc) << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg_base == NAND_READ_LOCATION_0)
++              nandc->regs->read_location0 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_1)
++              nandc->regs->read_location1 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_2)
++              nandc->regs->read_location2 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_3)
++              nandc->regs->read_location3 = locreg_val;
++}
++
++/**
++ * nandc_set_read_loc_last - to set read location last register
++ * @chip:             NAND Private Flash Chip Data
++ * @reg_base:         location register base
++ * @cw_offset:                code word offset
++ * @read_size:                code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location last register value
++ */
++static void nandc_set_read_loc_last(struct nand_chip *chip,
++                                  int reg_base, u32 cw_offset,
++                                  u32 read_size, u32 is_last_read_loc)
++{
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) |
++                ((is_last_read_loc) << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
++              nandc->regs->read_location_last0 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
++              nandc->regs->read_location_last1 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
++              nandc->regs->read_location_last2 = locreg_val;
++      else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
++              nandc->regs->read_location_last3 = locreg_val;
++}
++
+ /* helper to configure location register values */
+ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+-                             int cw_offset, int read_size, int is_last_read_loc)
++                             u32 cw_offset, u32 read_size, u32 is_last_read_loc)
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int reg_base = NAND_READ_LOCATION_0;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+               reg_base = NAND_READ_LOCATION_LAST_CW_0;
+       reg_base += reg * 4;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+               return nandc_set_read_loc_last(chip, reg_base, cw_offset,
+                               read_size, is_last_read_loc);
+       else
+@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
+ static void set_address(struct qcom_nand_host *host, u16 column, int page)
+ {
+       struct nand_chip *chip = &host->chip;
++      struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       if (chip->options & NAND_BUSWIDTH_16)
+               column >>= 1;
+-      nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
+-      nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
++      nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
++      nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
+ }
+ /*
+@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
+ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
+ {
+       struct nand_chip *chip = &host->chip;
+-      u32 cmd, cfg0, cfg1, ecc_bch_cfg;
++      __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       if (read) {
+               if (host->use_ecc)
+-                      cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
++                      cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
+               else
+-                      cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
++                      cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+       } else {
+-              cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
++              cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
+       }
+       if (host->use_ecc) {
+-              cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+-                              (num_cw - 1) << CW_PER_PAGE;
++              cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE);
+-              cfg1 = host->cfg1;
+-              ecc_bch_cfg = host->ecc_bch_cfg;
++              cfg1 = cpu_to_le32(host->cfg1);
++              ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
+       } else {
+-              cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+-                              (num_cw - 1) << CW_PER_PAGE;
++              cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE);
+-              cfg1 = host->cfg1_raw;
+-              ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++              cfg1 = cpu_to_le32(host->cfg1_raw);
++              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+       }
+-      nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
+-      nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
+-      nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
+-      nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+-      if (!nandc->props->qpic_v2)
+-              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+-      nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
+-      nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = cmd;
++      nandc->regs->cfg0 = cfg0;
++      nandc->regs->cfg1 = cfg1;
++      nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
++
++      if (!nandc->props->qpic_version2)
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
++
++      nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
++      nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
++      nandc->regs->exec = cpu_to_le32(1);
+       if (read)
+               nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
+@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
+       if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+               first = dev_cmd_reg_addr(nandc, first);
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+                                            num_regs, flags);
+@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
+  * write_reg_dma:     prepares a descriptor to write a given number of
+  *                    contiguous registers
+  *
++ * @vaddr:            contiguous memory from where register values will
++ *                    be written
+  * @first:            offset of the first register in the contiguous block
+  * @num_regs:         number of registers to write
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                       int num_regs, unsigned int flags)
++static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                       int first, int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+-      struct nandc_regs *regs = nandc->regs;
+-      void *vaddr;
+-
+-      vaddr = offset_to_nandc_reg(regs, first);
+-
+-      if (first == NAND_ERASED_CW_DETECT_CFG) {
+-              if (flags & NAND_ERASED_CW_SET)
+-                      vaddr = &regs->erased_cw_detect_cfg_set;
+-              else
+-                      vaddr = &regs->erased_cw_detect_cfg_clr;
+-      }
+       if (first == NAND_EXEC_CMD)
+               flags |= NAND_BAM_NWD;
+@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
+       if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+                                            num_regs, flags);
+@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
+ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+                        const u8 *vaddr, int size, unsigned int flags)
+ {
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+       return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
+ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+                         const u8 *vaddr, int size, unsigned int flags)
+ {
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+       return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-      if (!nandc->props->qpic_v2)
+-              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-      write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+-                    NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      if (!nandc->props->qpic_version2)
++              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+-      int reg = NAND_READ_LOCATION_0;
++      __le32 *reg = &nandc->regs->read_location0;
+-      if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+-              reg = NAND_READ_LOCATION_LAST_CW_0;
++      if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
++              reg = &nandc->regs->read_location_last0;
+-      if (nandc->props->is_bam)
+-              write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
++      if (nandc->props->supports_bam)
++              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       if (use_ecc) {
+               read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-      if (!nandc->props->qpic_v2)
+-              write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
++      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      if (!nandc->props->qpic_version2)
++              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+                             NAND_BAM_NEXT_SGL);
+ }
+@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+-      write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+ /* helpers to submit/free our list of dma descriptors */
+@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+       int ret = 0;
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+                       ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+                       if (ret)
+@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
+       list_for_each_entry(desc, &nandc->desc_list, node)
+               cookie = dmaengine_submit(desc->dma_desc);
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+               bam_txn->last_cmd_desc->callback_param = bam_txn;
+-              if (bam_txn->last_data_desc) {
+-                      bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+-                      bam_txn->last_data_desc->callback_param = bam_txn;
+-                      bam_txn->wait_second_completion = true;
+-              }
+               dma_async_issue_pending(nandc->tx_chan);
+               dma_async_issue_pending(nandc->rx_chan);
+@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
+       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+               list_del(&desc->node);
+-              if (nandc->props->is_bam)
++              if (nandc->props->supports_bam)
+                       dma_unmap_sg(nandc->dev, desc->bam_sgl,
+                                    desc->sgl_cnt, desc->dir);
+               else
+@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
+ static void clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+       nandc->reg_read_pos = 0;
+-      nandc_read_buffer_sync(nandc, false);
++      nandc_dev_to_mem(nandc, false);
+ }
+ /*
+@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       int i;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < cw_cnt; i++) {
+               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       clear_read_regs(nandc);
+       host->use_ecc = false;
+-      if (nandc->props->qpic_v2)
++      if (nandc->props->qpic_version2)
+               raw_cw = ecc->steps - 1;
+       clear_bam_transaction(nandc);
+@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+               oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+       }
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
+               read_loc += data_size1;
+@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
+       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+       buf = (struct read_stats *)nandc->reg_read_buf;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < ecc->steps; i++, buf++) {
+               u32 flash, buffer, erased_cw;
+@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
+                       oob_size = host->ecc_bytes_hw + host->spare_bytes;
+               }
+-              if (nandc->props->is_bam) {
++              if (nandc->props->supports_bam) {
+                       if (data_buf && oob_buf) {
+                               nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
+                               nandc_set_read_loc(chip, i, 1, data_size,
+@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
+       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+       /* Free the initially allocated BAM transaction for reading the ONFI params */
+-      if (nandc->props->is_bam)
++      if (nandc->props->supports_bam)
+               free_bam_transaction(nandc);
+       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+                                    cwperpage);
+       /* Now allocate the BAM transaction based on updated max_cwperpage */
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc->bam_txn = alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
+                               | ecc_mode << ECC_MODE
+                               | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+-      if (!nandc->props->qpic_v2)
++      if (!nandc->props->qpic_version2)
+               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+       host->clrflashstatus = FS_READY_BSY_N;
+@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
+               cmd = OP_FETCH_ID;
+               break;
+       case NAND_CMD_PARAM:
+-              if (nandc->props->qpic_v2)
++              if (nandc->props->qpic_version2)
+                       cmd = OP_PAGE_READ_ONFI_READ;
+               else
+                       cmd = OP_PAGE_READ;
+@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
+                       if (ret < 0)
+                               return ret;
+-                      q_op->cmd_reg = ret;
++                      q_op->cmd_reg = cpu_to_le32(ret);
+                       q_op->rdy_delay_ns = instr->delay_ns;
+                       break;
+@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
+                       addrs = &instr->ctx.addr.addrs[offset];
+                       for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+-                              q_op->addr1_reg |= addrs[i] << (i * 8);
++                              q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
+                       if (naddrs > 4)
+-                              q_op->addr2_reg |= addrs[4];
++                              q_op->addr2_reg |= cpu_to_le32(addrs[4]);
+                       q_op->rdy_delay_ns = instr->delay_ns;
+                       break;
+@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+       u32 flash;
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       do {
+               flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       ret = submit_descs(nandc);
+@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
+               goto err_out;
+       }
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < num_cw; i++) {
+               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-      nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-      nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+-                    nandc->props->is_bam ? 0 : DM_EN);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->addr0 = q_op.addr1_reg;
++      nandc->regs->addr1 = q_op.addr2_reg;
++      nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
++      nandc->regs->exec = cpu_to_le32(1);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-      write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+       op_id = q_op.data_instr_idx;
+       len = nand_subop_get_data_len(subop, op_id);
+-      nandc_read_buffer_sync(nandc, true);
++      nandc_dev_to_mem(nandc, true);
+       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ err_out:
+@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
+       if (q_op.flag == OP_PROGRAM_PAGE) {
+               goto wait_rdy;
+-      } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+-              q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+-              nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-              nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-              nandc_set_reg(chip, NAND_DEV0_CFG0,
+-                            host->cfg0_raw & ~(7 << CW_PER_PAGE));
+-              nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
++      } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
++              q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
++              nandc->regs->addr0 = q_op.addr1_reg;
++              nandc->regs->addr1 = q_op.addr2_reg;
++              nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
++              nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
+               instrs = 3;
+-      } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
++      } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
+               return 0;
+       }
+@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+-      if (q_op.cmd_reg == OP_BLOCK_ERASE)
+-              write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++      if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
++              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       ret = submit_descs(nandc);
+@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(str
+       if (ret)
+               return ret;
+-      q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
++      q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(str
+       clear_read_regs(nandc);
+       clear_bam_transaction(nandc);
+-      nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
++      nandc->regs->cmd = q_op.cmd_reg;
++      nandc->regs->addr0 = 0;
++      nandc->regs->addr1 = 0;
++
++      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
++                          | 512 << UD_SIZE_BYTES
++                          | 5 << NUM_ADDR_CYCLES
++                          | 0 << SPARE_SIZE_BYTES);
++
++      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
++                          | 0 << CS_ACTIVE_BSY
++                          | 17 << BAD_BLOCK_BYTE_NUM
++                          | 1 << BAD_BLOCK_IN_SPARE_AREA
++                          | 2 << WR_RD_BSY_GAP
++                          | 0 << WIDE_FLASH
++                          | 1 << DEV0_CFG1_ECC_DISABLE);
+-      nandc_set_reg(chip, NAND_ADDR0, 0);
+-      nandc_set_reg(chip, NAND_ADDR1, 0);
+-      nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+-                                      | 512 << UD_SIZE_BYTES
+-                                      | 5 << NUM_ADDR_CYCLES
+-                                      | 0 << SPARE_SIZE_BYTES);
+-      nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+-                                      | 0 << CS_ACTIVE_BSY
+-                                      | 17 << BAD_BLOCK_BYTE_NUM
+-                                      | 1 << BAD_BLOCK_IN_SPARE_AREA
+-                                      | 2 << WR_RD_BSY_GAP
+-                                      | 0 << WIDE_FLASH
+-                                      | 1 << DEV0_CFG1_ECC_DISABLE);
+-      if (!nandc->props->qpic_v2)
+-              nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
++      if (!nandc->props->qpic_version2)
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+-      if (!nandc->props->qpic_v2) {
+-              nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+-                            (nandc->vld & ~READ_START_VLD));
+-              nandc_set_reg(chip, NAND_DEV_CMD1,
+-                            (nandc->cmd1 & ~(0xFF << READ_ADDR))
+-                            | NAND_CMD_PARAM << READ_ADDR);
++      if (!nandc->props->qpic_version2) {
++              nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
++              nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
++                                  | NAND_CMD_PARAM << READ_ADDR);
+       }
+-      nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-      if (!nandc->props->qpic_v2) {
+-              nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+-              nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
++      nandc->regs->exec = cpu_to_le32(1);
++
++      if (!nandc->props->qpic_version2) {
++              nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
++              nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
+       }
+       instr = q_op.data_instr;
+@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(str
+       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+-      if (!nandc->props->qpic_v2) {
+-              write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+-              write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++      if (!nandc->props->qpic_version2) {
++              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+       }
+       nandc->buf_count = len;
+@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(str
+                     nandc->buf_count, 0);
+       /* restore CMD1 and VLD regs */
+-      if (!nandc->props->qpic_v2) {
+-              write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+-              write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
++      if (!nandc->props->qpic_version2) {
++              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++                            NAND_BAM_NEXT_SGL);
+       }
+       ret = submit_descs(nandc);
+@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops
+ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ {
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+                       dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+                                        MAX_REG_RD *
+@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_
+       if (!nandc->reg_read_buf)
+               return -ENOMEM;
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nandc->reg_read_dma =
+                       dma_map_single(nandc->dev, nandc->reg_read_buf,
+                                      MAX_REG_RD *
+@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_
+       u32 nand_ctrl;
+       /* kill onenand */
+-      if (!nandc->props->is_qpic)
++      if (!nandc->props->nandc_part_of_qpic)
+               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+-      if (!nandc->props->qpic_v2)
++      if (!nandc->props->qpic_version2)
+               nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+                           NAND_DEV_CMD_VLD_VAL);
+       /* enable ADM or BAM DMA */
+-      if (nandc->props->is_bam) {
++      if (nandc->props->supports_bam) {
+               nand_ctrl = nandc_read(nandc, NAND_CTRL);
+               /*
+@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_
+       }
+       /* save the original values of these registers */
+-      if (!nandc->props->qpic_v2) {
++      if (!nandc->props->qpic_version2) {
+               nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+               nandc->vld = NAND_DEV_CMD_VLD_VAL;
+       }
+@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct pl
+       struct device_node *np = nandc->dev->of_node;
+       int ret;
+-      if (!nandc->props->is_bam) {
++      if (!nandc->props->supports_bam) {
+               ret = of_property_read_u32(np, "qcom,cmd-crci",
+                                          &nandc->cmd_crci);
+               if (ret) {
+@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct pla
+ static const struct qcom_nandc_props ipq806x_nandc_props = {
+       .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+-      .is_bam = false,
++      .supports_bam = false,
+       .use_codeword_fixup = true,
+       .dev_cmd_reg_start = 0x0,
+ };
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
+       .dev_cmd_reg_start = 0x0,
+ };
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
+       .dev_cmd_reg_start = 0x7000,
+ };
+ static const struct qcom_nandc_props sdx55_nandc_props = {
+       .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-      .is_bam = true,
+-      .is_qpic = true,
+-      .qpic_v2 = true,
++      .supports_bam = true,
++      .nandc_part_of_qpic = true,
++      .qpic_version2 = true,
+       .dev_cmd_reg_start = 0x7000,
+ };
diff --git a/target/linux/qualcommbe/patches-6.6/100-03-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch b/target/linux/qualcommbe/patches-6.6/100-03-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
new file mode 100644 (file)
index 0000000..e34bd28
--- /dev/null
@@ -0,0 +1,919 @@
+From dde50ed4a7bdb79b4bb408781d3e4846d4c49f0a Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 11 Sep 2024 11:13:42 +0530
+Subject: [PATCH v10 3/8] mtd: rawnand: qcom: Add qcom prefix to common api
+
+Add the qcom prefix to all the APIs that will be shared
+by the SPI NAND driver and the raw NAND driver.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change
+
+Change in [v9]
+
+* No change
+
+Change in [v8]
+
+* No change
+
+Change in [v7]
+
+* No change
+
+Change in [v6]
+
+* No change
+
+Change in [v5]
+
+* Add qcom_ prefix to all common APIs.
+
+Change in [v4]
+
+* This patch was not included in [v4]
+
+Change in [v3]
+
+* This patch was not included in [v3]
+
+Change in [v2]
+
+* This patch was not included in [v2]
+
+Change in [v1]
+
+* This patch was not included in [v1]
+
+ drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
+ 1 file changed, 160 insertions(+), 160 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -53,7 +53,7 @@
+ #define       NAND_READ_LOCATION_LAST_CW_2    0xf48
+ #define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
+-/* dummy register offsets, used by write_reg_dma */
++/* dummy register offsets, used by qcom_write_reg_dma */
+ #define       NAND_DEV_CMD1_RESTORE           0xdead
+ #define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
+@@ -211,7 +211,7 @@
+ /*
+  * Flags used in DMA descriptor preparation helper functions
+- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+  */
+ /* Don't set the EOT in current tx BAM sgl */
+ #define NAND_BAM_NO_EOT                       BIT(0)
+@@ -550,7 +550,7 @@ struct qcom_nandc_props {
+ };
+ /* Frees the BAM transaction memory */
+-static void free_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
+ /* Allocates and Initializes the BAM transaction */
+ static struct bam_transaction *
+-alloc_bam_transaction(struct qcom_nand_controller *nandc)
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn;
+       size_t bam_txn_size;
+@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
+ }
+ /* Clears the BAM transaction indexes */
+-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
+ }
+ /* Callback for DMA descriptor completion */
+-static void qpic_bam_dma_done(void *data)
++static void qcom_qpic_bam_dma_done(void *data)
+ {
+       struct bam_transaction *bam_txn = data;
+@@ -644,7 +644,7 @@ static inline void nandc_write(struct qc
+       iowrite32(val, nandc->base + offset);
+ }
+-static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+       if (!nandc->props->supports_bam)
+               return;
+@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
+  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+  * which will be submitted to DMA engine.
+  */
+-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-                                struct dma_chan *chan,
+-                                unsigned long flags)
++static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                                     struct dma_chan *chan,
++                                     unsigned long flags)
+ {
+       struct desc_info *desc;
+       struct scatterlist *sgl;
+@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
+  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+  * after the current command element.
+  */
+-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-                               int reg_off, const void *vaddr,
+-                               int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                                    int reg_off, const void *vaddr,
++                                    int size, unsigned int flags)
+ {
+       int bam_ce_size;
+       int i, ret;
+@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
+               bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+               if (flags & NAND_BAM_NWD) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                   DMA_PREP_FENCE |
+-                                                   DMA_PREP_CMD);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_FENCE |
++                                                        DMA_PREP_CMD);
+                       if (ret)
+                               return ret;
+               }
+@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
+  * Prepares the data descriptor for BAM DMA which will be used for NAND
+  * data reads and writes.
+  */
+-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-                                const void *vaddr,
+-                                int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                                     const void *vaddr, int size, unsigned int flags)
+ {
+       int ret;
+       struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
+                * is not set, form the DMA descriptor
+                */
+               if (!(flags & NAND_BAM_NO_EOT)) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                   DMA_PREP_INTERRUPT);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
+                       if (ret)
+                               return ret;
+               }
+@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
+       return 0;
+ }
+-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-                           int reg_off, const void *vaddr, int size,
+-                           bool flow_control)
++static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++                                int reg_off, const void *vaddr, int size,
++                                bool flow_control)
+ {
+       struct desc_info *desc;
+       struct dma_async_tx_descriptor *dma_desc;
+@@ -1069,15 +1068,15 @@ err:
+ }
+ /*
+- * read_reg_dma:      prepares a descriptor to read a given number of
++ * qcom_read_reg_dma: prepares a descriptor to read a given number of
+  *                    contiguous registers to the reg_read_buf pointer
+  *
+  * @first:            offset of the first register in the contiguous block
+  * @num_regs:         number of registers to read
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                      int num_regs, unsigned int flags)
++static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++                           int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+       void *vaddr;
+@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
+               first = dev_cmd_reg_addr(nandc, first);
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+                                            num_regs, flags);
+       if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+               flow_control = true;
+-      return prep_adm_dma_desc(nandc, true, first, vaddr,
++      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
+ }
+ /*
+- * write_reg_dma:     prepares a descriptor to write a given number of
++ * qcom_write_reg_dma:        prepares a descriptor to write a given number of
+  *                    contiguous registers
+  *
+  * @vaddr:            contiguous memory from where register values will
+  *                    be written
+  * @num_regs:         number of registers to write
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-                       int first, int num_regs, unsigned int flags)
++static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                            int first, int num_regs, unsigned int flags)
+ {
+       bool flow_control = false;
+@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
+               first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+                                            num_regs, flags);
+       if (first == NAND_FLASH_CMD)
+               flow_control = true;
+-      return prep_adm_dma_desc(nandc, false, first, vaddr,
++      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+                                num_regs * sizeof(u32), flow_control);
+ }
+ /*
+- * read_data_dma:     prepares a DMA descriptor to transfer data from the
++ * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
+  *                    controller's internal buffer to the buffer 'vaddr'
+  *
+  * @reg_off:          offset within the controller's data buffer
+@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
+  * @size:             DMA transaction size in bytes
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                       const u8 *vaddr, int size, unsigned int flags)
++static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                            const u8 *vaddr, int size, unsigned int flags)
+ {
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-      return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ }
+ /*
+- * write_data_dma:    prepares a DMA descriptor to transfer data from
++ * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
+  *                    'vaddr' to the controller's internal buffer
+  *
+  * @reg_off:          offset within the controller's data buffer
+@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
+  * @size:             DMA transaction size in bytes
+  * @flags:            flags to control DMA descriptor preparation
+  */
+-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                        const u8 *vaddr, int size, unsigned int flags)
++static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                             const u8 *vaddr, int size, unsigned int flags)
+ {
+       if (nandc->props->supports_bam)
+-              return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-      return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ }
+ /*
+@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+       if (!nandc->props->qpic_version2)
+-              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+-                    NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+-                    NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
+               reg = &nandc->regs->read_location_last0;
+       if (nandc->props->supports_bam)
+-              write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+       if (use_ecc) {
+-              read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+-              read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+-                           NAND_BAM_NEXT_SGL);
++              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++              qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++                                NAND_BAM_NEXT_SGL);
+       } else {
+-              read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++              qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+       }
+ }
+@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-      write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+       if (!nandc->props->qpic_version2)
+-              write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+-                            NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
++                                 NAND_BAM_NEXT_SGL);
+ }
+ /*
+@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
+ {
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+-      write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++      qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++                         NAND_BAM_NEXT_SGL);
+ }
+ /* helpers to submit/free our list of dma descriptors */
+-static int submit_descs(struct qcom_nand_controller *nandc)
++static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+ {
+       struct desc_info *desc, *n;
+       dma_cookie_t cookie = 0;
+@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
+       if (nandc->props->supports_bam) {
+               if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+               if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                 DMA_PREP_INTERRUPT);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+               if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-                      ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                 DMA_PREP_CMD);
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_CMD);
+                       if (ret)
+                               goto err_unmap_free_desc;
+               }
+@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
+               cookie = dmaengine_submit(desc->dma_desc);
+       if (nandc->props->supports_bam) {
+-              bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
++              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+               bam_txn->last_cmd_desc->callback_param = bam_txn;
+               dma_async_issue_pending(nandc->tx_chan);
+@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
+ err_unmap_free_desc:
+       /*
+        * Unmap the dma sg_list and free the desc allocated by both
+-       * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
++       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+        */
+       list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+               list_del(&desc->node);
+@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
+ }
+ /* reset the register read buffer for next NAND operation */
+-static void clear_read_regs(struct qcom_nand_controller *nandc)
++static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+       nandc->reg_read_pos = 0;
+-      nandc_dev_to_mem(nandc, false);
++      qcom_nandc_dev_to_mem(nandc, false);
+ }
+ /*
+@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
+       struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+       int i;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < cw_cnt; i++) {
+               u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       nand_read_page_op(chip, page, 0, NULL, 0);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       host->use_ecc = false;
+       if (nandc->props->qpic_version2)
+               raw_cw = ecc->steps - 1;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       set_address(host, host->cw_size * cw, page);
+       update_rw_regs(host, 1, true, raw_cw);
+       config_nand_page_read(chip);
+@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+       config_nand_cw_read(chip, false, raw_cw);
+-      read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
++      qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+       reg_off += data_size1;
+-      read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
++      qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+       reg_off += oob_size1;
+-      read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
++      qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+       reg_off += data_size2;
+-      read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++      qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+               return ret;
+@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
+       u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+       buf = (struct read_stats *)nandc->reg_read_buf;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < ecc->steps; i++, buf++) {
+               u32 flash, buffer, erased_cw;
+@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
+               config_nand_cw_read(chip, true, i);
+               if (data_buf)
+-                      read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+-                                    data_size, 0);
++                      qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
++                                         data_size, 0);
+               /*
+                * when ecc is enabled, the controller doesn't read the real
+@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
+                       for (j = 0; j < host->bbm_size; j++)
+                               *oob_buf++ = 0xff;
+-                      read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-                                    oob_buf, oob_size, 0);
++                      qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
++                                         oob_buf, oob_size, 0);
+               }
+               if (data_buf)
+@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
+                       oob_buf += oob_size;
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to read page/oob\n");
+               return ret;
+@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
+       int size;
+       int ret;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       size = host->use_ecc ? host->cw_data : host->cw_size;
+@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
+       config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
+-      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
++      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret)
+               dev_err(nandc->dev, "failed to copy last codeword\n");
+@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+       host->use_ecc = true;
+-      clear_read_regs(nandc);
++      qcom_clear_read_regs(nandc);
+       set_address(host, 0, page);
+       update_rw_regs(host, ecc->steps, true, 0);
+       data_buf = buf;
+       oob_buf = oob_required ? chip->oob_poi : NULL;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       return read_page_ecc(host, data_buf, oob_buf, page);
+ }
+@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
+       if (host->nr_boot_partitions)
+               qcom_nandc_codeword_fixup(host, page);
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       host->use_ecc = true;
+       set_address(host, 0, page);
+@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
+       set_address(host, 0, page);
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       data_buf = (u8 *)buf;
+       oob_buf = chip->oob_poi;
+@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
+                       oob_size = ecc->bytes;
+               }
+-              write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+-                             i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
++              qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
++                                  i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+               /*
+                * when ECC is enabled, we don't really need to write anything
+@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
+               if (qcom_nandc_is_last_cw(ecc, i)) {
+                       oob_buf += host->bbm_size;
+-                      write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-                                     oob_buf, oob_size, 0);
++                      qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
++                                          oob_buf, oob_size, 0);
+               }
+               config_nand_cw_write(chip);
+@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
+               oob_buf += oob_size;
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write page\n");
+               return ret;
+@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
+               qcom_nandc_codeword_fixup(host, page);
+       nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       data_buf = (u8 *)buf;
+       oob_buf = chip->oob_poi;
+@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
+                       oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+               }
+-              write_data_dma(nandc, reg_off, data_buf, data_size1,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
++                                  NAND_BAM_NO_EOT);
+               reg_off += data_size1;
+               data_buf += data_size1;
+-              write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
++                                  NAND_BAM_NO_EOT);
+               reg_off += oob_size1;
+               oob_buf += oob_size1;
+-              write_data_dma(nandc, reg_off, data_buf, data_size2,
+-                             NAND_BAM_NO_EOT);
++              qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
++                                  NAND_BAM_NO_EOT);
+               reg_off += data_size2;
+               data_buf += data_size2;
+-              write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
++              qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+               oob_buf += oob_size2;
+               config_nand_cw_write(chip);
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write raw page\n");
+               return ret;
+@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
+               qcom_nandc_codeword_fixup(host, page);
+       host->use_ecc = true;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       /* calculate the data and oob size for the last codeword/step */
+       data_size = ecc->size - ((ecc->steps - 1) << 2);
+@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
+       update_rw_regs(host, 1, false, 0);
+       config_nand_page_write(chip);
+-      write_data_dma(nandc, FLASH_BUF_ACC,
+-                     nandc->data_buffer, data_size + oob_size, 0);
++      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++                          nandc->data_buffer, data_size + oob_size, 0);
+       config_nand_cw_write(chip);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to write oob\n");
+               return ret;
+@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
+        */
+       host->use_ecc = false;
+-      clear_bam_transaction(nandc);
++      qcom_clear_bam_transaction(nandc);
+       ret = copy_last_cw(host, page);
+       if (ret)
+               goto err;
+@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int page, ret;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       /*
+        * to mark the BBM as bad, we flash the entire last codeword with 0s.
+@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
+       update_rw_regs(host, 1, false, ecc->steps - 1);
+       config_nand_page_write(chip);
+-      write_data_dma(nandc, FLASH_BUF_ACC,
+-                     nandc->data_buffer, host->cw_size, 0);
++      qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++                          nandc->data_buffer, host->cw_size, 0);
+       config_nand_cw_write(chip);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure to update BBM\n");
+               return ret;
+@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
+       mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+       /* Free the initially allocated BAM transaction for reading the ONFI params */
+       if (nandc->props->supports_bam)
+-              free_bam_transaction(nandc);
++              qcom_free_bam_transaction(nandc);
+       nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+                                    cwperpage);
+       /* Now allocate the BAM transaction based on updated max_cwperpage */
+       if (nandc->props->supports_bam) {
+-              nandc->bam_txn = alloc_bam_transaction(nandc);
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+                               "failed to allocate bam transaction\n");
+@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+       unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+       u32 flash;
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       do {
+               flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting status descriptor\n");
+               goto err_out;
+       }
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       for (i = 0; i < num_cw; i++) {
+               flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->addr0 = q_op.addr1_reg;
+@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
+       nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+               goto err_out;
+@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+       op_id = q_op.data_instr_idx;
+       len = nand_subop_get_data_len(subop, op_id);
+-      nandc_dev_to_mem(nandc, true);
++      qcom_nandc_dev_to_mem(nandc, true);
+       memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ err_out:
+@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->exec = cpu_to_le32(1);
+-      write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+       if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+-              write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+-      write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-      read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+               goto err_out;
+@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
+       nandc->buf_count = 0;
+       nandc->buf_start = 0;
+       host->use_ecc = false;
+-      clear_read_regs(nandc);
+-      clear_bam_transaction(nandc);
++      qcom_clear_read_regs(nandc);
++      qcom_clear_bam_transaction(nandc);
+       nandc->regs->cmd = q_op.cmd_reg;
+       nandc->regs->addr0 = 0;
+@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
+       nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+       if (!nandc->props->qpic_version2) {
+-              write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+-              write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++              qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+       }
+       nandc->buf_count = len;
+@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
+       config_nand_single_cw_page_read(chip, false, 0);
+-      read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+-                    nandc->buf_count, 0);
++      qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
++                         nandc->buf_count, 0);
+       /* restore CMD1 and VLD regs */
+       if (!nandc->props->qpic_version2) {
+-              write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+-              write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+-                            NAND_BAM_NEXT_SGL);
++              qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++              qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++                                 NAND_BAM_NEXT_SGL);
+       }
+-      ret = submit_descs(nandc);
++      ret = qcom_submit_descs(nandc);
+       if (ret) {
+               dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+               goto err_out;
+@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
+                * maximum codeword size
+                */
+               nandc->max_cwperpage = 1;
+-              nandc->bam_txn = alloc_bam_transaction(nandc);
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+                               "failed to allocate bam transaction\n");
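The rename above is mechanical, but it fixes the signatures that the common file added below will export. As a minimal sketch of a caller sequence built from the call sites visible in the hunks above (the function name and the opcode choice are illustrative, and error handling is trimmed):

static int status_read_sketch(struct qcom_nand_controller *nandc)
{
	int ret;

	/* reset the register read buffer and the BAM transaction state */
	qcom_clear_read_regs(nandc);
	qcom_clear_bam_transaction(nandc);

	nandc->regs->cmd = cpu_to_le32(OP_FETCH_ID);	/* illustrative opcode */
	nandc->regs->exec = cpu_to_le32(1);

	/* queue the register writes and the status read, then run the DMA */
	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	ret = qcom_submit_descs(nandc);
	if (ret)
		return ret;

	/* sync the DMA buffer before the CPU reads the returned status */
	qcom_nandc_dev_to_mem(nandc, true);
	return le32_to_cpu(nandc->reg_read_buf[0]);
}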
diff --git a/target/linux/qualcommbe/patches-6.6/100-04-mtd-nand-Add-qpic_common-API-file.patch b/target/linux/qualcommbe/patches-6.6/100-04-mtd-nand-Add-qpic_common-API-file.patch
new file mode 100644 (file)
index 0000000..93dca8a
--- /dev/null
@@ -0,0 +1,2517 @@
+From b00c2f583e54aa8bed2044e5b1898d9accd45415 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 11 Sep 2024 17:20:22 +0530
+Subject: [PATCH v10 4/8] mtd: nand: Add qpic_common API file
+
+Add the qpic_common.c file, which holds all the common
+QPIC APIs that will be used by both the QPIC raw NAND
+driver and the QPIC SPI NAND driver.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change
+
+Change in [v9]
+
+* No change
+
+Change in [v8]
+
+* Removed "inline" from qcom_nandc_dev_to_mem()
+
+Change in [v7]
+
+* Removed partition.h
+
+* Updated commit message heading
+
+* Made CONFIG_MTD_NAND_QCOM as bool
+
+Change in [v6]
+
+* Made changes to select qpic_common.c based on either
+  CONFIG_MTD_NAND_QCOM=y or CONFIG_SPI_QPIC_SNAND=y
+
+* Removed rawnand.h from qpic_common.c
+
+* Changed the nand_controller variable to a pointer type.
+
+Change in [v5]
+
+* Removed multiple DMA callbacks to avoid a race condition
+
+Change in [v4]
+
+* Added kernel doc for all common APIs as per the kernel doc
+  standard
+
+* Added QPIC_COMMON config to build qpic_common.c
+
+Change in [v3]
+
+* Added original copy right
+
+* Removed all EXPORT_SYMBOL()
+
+* Made this common api file more generic
+
+* Added qcom_ prefix to all APIs in this file
+
+* Removed devm_kfree and added kfree
+
+* Moved to_qcom_nand_controller() to raw nand driver
+  since it was only used by the raw nand driver and thus not
+  needed in the common file
+
+* Added kernel doc for all APIs
+
+* Made variable declarations follow reverse christmas tree
+  order in the prep_adm_dma_desc() function
+
+* Added if(!ret) condition in prep_adm_dma_desc()
+  function
+
+* Initialized slave_conf to 0 at declaration
+
+Change in [v2]
+
+* Posted initial support for common api file
+
+Change in [v1]
+
+* Posted as RFC patch for design review
+
+ drivers/mtd/nand/Makefile            |    4 +
+ drivers/mtd/nand/qpic_common.c       |  738 +++++++++++++++++
+ drivers/mtd/nand/raw/Kconfig         |    2 +-
+ drivers/mtd/nand/raw/qcom_nandc.c    | 1092 +-------------------------
+ include/linux/mtd/nand-qpic-common.h |  468 +++++++++++
+ 5 files changed, 1223 insertions(+), 1081 deletions(-)
+ create mode 100644 drivers/mtd/nand/qpic_common.c
+ create mode 100644 include/linux/mtd/nand-qpic-common.h
+
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -5,6 +5,10 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
++ifeq ($(CONFIG_MTD_NAND_QCOM),y)
++obj-y += qpic_common.o
++endif
++
+ obj-y += onenand/
+ obj-y += raw/
+ obj-y += spi/
+--- /dev/null
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -0,0 +1,745 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++
++/**
++ * qcom_free_bam_transaction() - Frees the BAM transaction memory
++ * @nandc: qpic nand controller
++ *
++ * This function frees the BAM transaction memory
++ */
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      kfree(bam_txn);
++}
++
++/**
++ * qcom_alloc_bam_transaction() - allocate BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate and initialize the BAM transaction structure
++ */
++struct bam_transaction *
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn;
++      size_t bam_txn_size;
++      unsigned int num_cw = nandc->max_cwperpage;
++      void *bam_txn_buf;
++
++      bam_txn_size =
++              sizeof(*bam_txn) + num_cw *
++              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
++              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
++              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
++
++      bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
++      if (!bam_txn_buf)
++              return NULL;
++
++      bam_txn = bam_txn_buf;
++      bam_txn_buf += sizeof(*bam_txn);
++
++      bam_txn->bam_ce = bam_txn_buf;
++      bam_txn_buf +=
++              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
++
++      bam_txn->cmd_sgl = bam_txn_buf;
++      bam_txn_buf +=
++              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
++
++      bam_txn->data_sgl = bam_txn_buf;
++
++      init_completion(&bam_txn->txn_done);
++
++      return bam_txn;
++}
++
++/**
++ * qcom_clear_bam_transaction() - Clears the BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will clear the BAM transaction indexes.
++ */
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
++{
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      if (!nandc->props->supports_bam)
++              return;
++
++      bam_txn->bam_ce_pos = 0;
++      bam_txn->bam_ce_start = 0;
++      bam_txn->cmd_sgl_pos = 0;
++      bam_txn->cmd_sgl_start = 0;
++      bam_txn->tx_sgl_pos = 0;
++      bam_txn->tx_sgl_start = 0;
++      bam_txn->rx_sgl_pos = 0;
++      bam_txn->rx_sgl_start = 0;
++      bam_txn->last_data_desc = NULL;
++
++      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
++                    QPIC_PER_CW_CMD_SGL);
++      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
++                    QPIC_PER_CW_DATA_SGL);
++
++      reinit_completion(&bam_txn->txn_done);
++}
++
++/**
++ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
++ * @data: data pointer
++ *
++ * This function is a callback for DMA descriptor completion
++ */
++void qcom_qpic_bam_dma_done(void *data)
++{
++      struct bam_transaction *bam_txn = data;
++
++      complete(&bam_txn->txn_done);
++}
++
++/**
++ * qcom_nandc_dev_to_mem() - Sync the register read buffer for cpu or device
++ * @nandc: qpic nand controller
++ * @is_cpu: sync for the cpu (true) or the device (false)
++ *
++ * This function syncs the register read buffer for the cpu or the device
++ */
++inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++{
++      if (!nandc->props->supports_bam)
++              return;
++
++      if (is_cpu)
++              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
++                                      MAX_REG_RD *
++                                      sizeof(*nandc->reg_read_buf),
++                                      DMA_FROM_DEVICE);
++      else
++              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
++                                         MAX_REG_RD *
++                                         sizeof(*nandc->reg_read_buf),
++                                         DMA_FROM_DEVICE);
++}
++
++/**
++ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
++ * @nandc: qpic nand controller
++ * @chan: dma channel
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function maps the scatter gather list for DMA transfer and forms the
++ * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
++ * descriptor queue, which will be submitted to the DMA engine.
++ */
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                              struct dma_chan *chan, unsigned long flags)
++{
++      struct desc_info *desc;
++      struct scatterlist *sgl;
++      unsigned int sgl_cnt;
++      int ret;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++      enum dma_transfer_direction dir_eng;
++      struct dma_async_tx_descriptor *dma_desc;
++
++      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++      if (!desc)
++              return -ENOMEM;
++
++      if (chan == nandc->cmd_chan) {
++              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
++              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
++              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      } else if (chan == nandc->tx_chan) {
++              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
++              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
++              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      } else {
++              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
++              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
++              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
++              dir_eng = DMA_DEV_TO_MEM;
++              desc->dir = DMA_FROM_DEVICE;
++      }
++
++      sg_mark_end(sgl + sgl_cnt - 1);
++      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++      if (ret == 0) {
++              dev_err(nandc->dev, "failure in mapping desc\n");
++              kfree(desc);
++              return -ENOMEM;
++      }
++
++      desc->sgl_cnt = sgl_cnt;
++      desc->bam_sgl = sgl;
++
++      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
++                                         flags);
++
++      if (!dma_desc) {
++              dev_err(nandc->dev, "failure in prep desc\n");
++              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++              kfree(desc);
++              return -EINVAL;
++      }
++
++      desc->dma_desc = dma_desc;
++
++      /* update last data/command descriptor */
++      if (chan == nandc->cmd_chan)
++              bam_txn->last_cmd_desc = dma_desc;
++      else
++              bam_txn->last_data_desc = dma_desc;
++
++      list_add_tail(&desc->node, &nandc->desc_list);
++
++      return 0;
++}
++
++/**
++ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares the command descriptor for BAM DMA
++ * which will be used for NAND register reads and writes.
++ */
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                             int reg_off, const void *vaddr,
++                             int size, unsigned int flags)
++{
++      int bam_ce_size;
++      int i, ret;
++      struct bam_cmd_element *bam_ce_buffer;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
++
++      /* fill the command desc */
++      for (i = 0; i < size; i++) {
++              if (read)
++                      bam_prep_ce(&bam_ce_buffer[i],
++                                  nandc_reg_phys(nandc, reg_off + 4 * i),
++                                  BAM_READ_COMMAND,
++                                  reg_buf_dma_addr(nandc,
++                                                   (__le32 *)vaddr + i));
++              else
++                      bam_prep_ce_le32(&bam_ce_buffer[i],
++                                       nandc_reg_phys(nandc, reg_off + 4 * i),
++                                       BAM_WRITE_COMMAND,
++                                       *((__le32 *)vaddr + i));
++      }
++
++      bam_txn->bam_ce_pos += size;
++
++      /* use the separate sgl after this command */
++      if (flags & NAND_BAM_NEXT_SGL) {
++              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
++              bam_ce_size = (bam_txn->bam_ce_pos -
++                              bam_txn->bam_ce_start) *
++                              sizeof(struct bam_cmd_element);
++              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
++                         bam_ce_buffer, bam_ce_size);
++              bam_txn->cmd_sgl_pos++;
++              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
++
++              if (flags & NAND_BAM_NWD) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_FENCE | DMA_PREP_CMD);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
++/**
++ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares the data descriptor for BAM DMA which
++ * will be used for NAND data reads and writes.
++ */
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                              const void *vaddr, int size, unsigned int flags)
++{
++      int ret;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++
++      if (read) {
++              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
++                         vaddr, size);
++              bam_txn->rx_sgl_pos++;
++      } else {
++              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
++                         vaddr, size);
++              bam_txn->tx_sgl_pos++;
++
++              /*
++               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
++               * is not set, form the DMA descriptor
++               */
++              if (!(flags & NAND_BAM_NO_EOT)) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
++/**
++ * qcom_prep_adm_dma_desc() - Prepare a descriptor for ADM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: ADM DMA transaction size in bytes
++ * @flow_control: flow controller
++ *
++ * This function prepares a descriptor for ADM DMA
++ */
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++                         int reg_off, const void *vaddr, int size,
++                         bool flow_control)
++{
++      struct qcom_adm_peripheral_config periph_conf = {};
++      struct dma_async_tx_descriptor *dma_desc;
++      struct dma_slave_config slave_conf = {0};
++      enum dma_transfer_direction dir_eng;
++      struct desc_info *desc;
++      struct scatterlist *sgl;
++      int ret;
++
++      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++      if (!desc)
++              return -ENOMEM;
++
++      sgl = &desc->adm_sgl;
++
++      sg_init_one(sgl, vaddr, size);
++
++      if (read) {
++              dir_eng = DMA_DEV_TO_MEM;
++              desc->dir = DMA_FROM_DEVICE;
++      } else {
++              dir_eng = DMA_MEM_TO_DEV;
++              desc->dir = DMA_TO_DEVICE;
++      }
++
++      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
++      if (!ret) {
++              ret = -ENOMEM;
++              goto err;
++      }
++
++      slave_conf.device_fc = flow_control;
++      if (read) {
++              slave_conf.src_maxburst = 16;
++              slave_conf.src_addr = nandc->base_dma + reg_off;
++              if (nandc->data_crci) {
++                      periph_conf.crci = nandc->data_crci;
++                      slave_conf.peripheral_config = &periph_conf;
++                      slave_conf.peripheral_size = sizeof(periph_conf);
++              }
++      } else {
++              slave_conf.dst_maxburst = 16;
++              slave_conf.dst_addr = nandc->base_dma + reg_off;
++              if (nandc->cmd_crci) {
++                      periph_conf.crci = nandc->cmd_crci;
++                      slave_conf.peripheral_config = &periph_conf;
++                      slave_conf.peripheral_size = sizeof(periph_conf);
++              }
++      }
++
++      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
++      if (ret) {
++              dev_err(nandc->dev, "failed to configure dma channel\n");
++              goto err;
++      }
++
++      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
++      if (!dma_desc) {
++              dev_err(nandc->dev, "failed to prepare desc\n");
++              ret = -EINVAL;
++              goto err;
++      }
++
++      desc->dma_desc = dma_desc;
++
++      list_add_tail(&desc->node, &nandc->desc_list);
++
++      return 0;
++err:
++      kfree(desc);
++
++      return ret;
++}
++
++/**
++ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
++ * @nandc: qpic nand controller
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to read
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a descriptor to read a given number of
++ * contiguous registers to the reg_read_buf pointer.
++ */
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++                    int num_regs, unsigned int flags)
++{
++      bool flow_control = false;
++      void *vaddr;
++
++      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
++      nandc->reg_read_pos += num_regs;
++
++      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
++              first = dev_cmd_reg_addr(nandc, first);
++
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++                                           num_regs, flags);
++
++      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
++              flow_control = true;
++
++      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
++                                    num_regs * sizeof(u32), flow_control);
++}
++
++/**
++ * qcom_write_reg_dma() - write a given number of registers
++ * @nandc: qpic nand controller
++ * @vaddr: contiguous memory from which the register values will
++ *       be written
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to write
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a descriptor to write a given number of
++ * contiguous registers
++ */
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++                     int first, int num_regs, unsigned int flags)
++{
++      bool flow_control = false;
++
++      if (first == NAND_EXEC_CMD)
++              flags |= NAND_BAM_NWD;
++
++      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
++              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
++
++      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
++              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
++
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++                                                num_regs, flags);
++
++      if (first == NAND_FLASH_CMD)
++              flow_control = true;
++
++      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
++                                    num_regs * sizeof(u32), flow_control);
++}
++
++/**
++ * qcom_read_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a DMA descriptor to transfer data from the
++ * controller's internal buffer to the buffer 'vaddr'
++ */
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                     const u8 *vaddr, int size, unsigned int flags)
++{
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++
++      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++}
++
++/**
++ * qcom_write_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to read from
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function prepares a DMA descriptor to transfer data from
++ * 'vaddr' to the controller's internal buffer
++ */
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++                      const u8 *vaddr, int size, unsigned int flags)
++{
++      if (nandc->props->supports_bam)
++              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++
++      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++}
++
++/**
++ * qcom_submit_descs() - submit dma descriptor
++ * @nandc: qpic nand controller
++ *
++ * This function submits all the prepared DMA descriptors,
++ * either command or data descriptors
++ */
++int qcom_submit_descs(struct qcom_nand_controller *nandc)
++{
++      struct desc_info *desc, *n;
++      dma_cookie_t cookie = 0;
++      struct bam_transaction *bam_txn = nandc->bam_txn;
++      int ret = 0;
++
++      if (nandc->props->supports_bam) {
++              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++
++              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++                                                        DMA_PREP_INTERRUPT);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++
++              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
++                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++                                                        DMA_PREP_CMD);
++                      if (ret)
++                              goto err_unmap_free_desc;
++              }
++      }
++
++      list_for_each_entry(desc, &nandc->desc_list, node)
++              cookie = dmaengine_submit(desc->dma_desc);
++
++      if (nandc->props->supports_bam) {
++              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
++              bam_txn->last_cmd_desc->callback_param = bam_txn;
++
++              dma_async_issue_pending(nandc->tx_chan);
++              dma_async_issue_pending(nandc->rx_chan);
++              dma_async_issue_pending(nandc->cmd_chan);
++
++              if (!wait_for_completion_timeout(&bam_txn->txn_done,
++                                               QPIC_NAND_COMPLETION_TIMEOUT))
++                      ret = -ETIMEDOUT;
++      } else {
++              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
++                      ret = -ETIMEDOUT;
++      }
++
++err_unmap_free_desc:
++      /*
++       * Unmap the dma sg_list and free the desc allocated by both
++       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
++       */
++      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
++              list_del(&desc->node);
++
++              if (nandc->props->supports_bam)
++                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
++                                   desc->sgl_cnt, desc->dir);
++              else
++                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
++                                   desc->dir);
++
++              kfree(desc);
++      }
++
++      return ret;
++}
++
++/**
++ * qcom_clear_read_regs() - reset the read register buffer
++ * @nandc: qpic nand controller
++ *
++ * This function resets the register read buffer for the next NAND operation
++ */
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
++{
++      nandc->reg_read_pos = 0;
++      qcom_nandc_dev_to_mem(nandc, false);
++}
++
++/**
++ * qcom_nandc_unalloc() - free the qpic nand controller resources
++ * @nandc: qpic nand controller
++ *
++ * This function frees the resources allocated for the qpic nand controller
++ */
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
++{
++      if (nandc->props->supports_bam) {
++              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
++                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
++                                       MAX_REG_RD *
++                                       sizeof(*nandc->reg_read_buf),
++                                       DMA_FROM_DEVICE);
++
++              if (nandc->tx_chan)
++                      dma_release_channel(nandc->tx_chan);
++
++              if (nandc->rx_chan)
++                      dma_release_channel(nandc->rx_chan);
++
++              if (nandc->cmd_chan)
++                      dma_release_channel(nandc->cmd_chan);
++      } else {
++              if (nandc->chan)
++                      dma_release_channel(nandc->chan);
++      }
++}
++
++/**
++ * qcom_nandc_alloc() - Allocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function allocates the memory and DMA channels for the qpic nand controller
++ */
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
++{
++      int ret;
++
++      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
++      if (ret) {
++              dev_err(nandc->dev, "failed to set DMA mask\n");
++              return ret;
++      }
++
++      /*
++       * we use the internal buffer for reading ONFI params, reading small
++       * data like ID and status, and performing read-copy-write operations
++       * when writing to a codeword partially. 532 is the maximum possible
++       * size of a codeword for our nand controller
++       */
++      nandc->buf_size = 532;
++
++      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
++      if (!nandc->data_buffer)
++              return -ENOMEM;
++
++      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
++      if (!nandc->regs)
++              return -ENOMEM;
++
++      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
++                                         sizeof(*nandc->reg_read_buf),
++                                         GFP_KERNEL);
++      if (!nandc->reg_read_buf)
++              return -ENOMEM;
++
++      if (nandc->props->supports_bam) {
++              nandc->reg_read_dma =
++                      dma_map_single(nandc->dev, nandc->reg_read_buf,
++                                     MAX_REG_RD *
++                                     sizeof(*nandc->reg_read_buf),
++                                     DMA_FROM_DEVICE);
++              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
++                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
++                      return -EIO;
++              }
++
++              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
++              if (IS_ERR(nandc->tx_chan)) {
++                      ret = PTR_ERR(nandc->tx_chan);
++                      nandc->tx_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "tx DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
++              if (IS_ERR(nandc->rx_chan)) {
++                      ret = PTR_ERR(nandc->rx_chan);
++                      nandc->rx_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "rx DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
++              if (IS_ERR(nandc->cmd_chan)) {
++                      ret = PTR_ERR(nandc->cmd_chan);
++                      nandc->cmd_chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "cmd DMA channel request failed\n");
++                      goto unalloc;
++              }
++
++              /*
++               * Initially allocate BAM transaction to read ONFI param page.
++               * After detecting all the devices, this BAM transaction will
++               * be freed and the next BAM transaction will be allocated with
++               * maximum codeword size
++               */
++              nandc->max_cwperpage = 1;
++              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
++              if (!nandc->bam_txn) {
++                      dev_err(nandc->dev,
++                              "failed to allocate bam transaction\n");
++                      ret = -ENOMEM;
++                      goto unalloc;
++              }
++      } else {
++              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
++              if (IS_ERR(nandc->chan)) {
++                      ret = PTR_ERR(nandc->chan);
++                      nandc->chan = NULL;
++                      dev_err_probe(nandc->dev, ret,
++                                    "rxtx DMA channel request failed\n");
++                      return ret;
++              }
++      }
++
++      INIT_LIST_HEAD(&nandc->desc_list);
++      INIT_LIST_HEAD(&nandc->host_list);
++
++      return 0;
++unalloc:
++      qcom_nandc_unalloc(nandc);
++      return ret;
++}
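A minimal sketch of how a controller driver is expected to pair these two entry points, assuming standard platform-driver boilerplate (the my_nandc_* names and the drvdata handling are illustrative, not part of this patch):

static int my_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	nandc->dev = &pdev->dev;
	/* nandc->props, register mappings, etc. would be set up here */

	ret = qcom_nandc_alloc(nandc);	/* buffers, DMA channels, BAM txn */
	if (ret)
		return ret;		/* partial setup is undone internally */

	platform_set_drvdata(pdev, nandc);
	return 0;
}

static void my_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);

	qcom_nandc_unalloc(nandc);	/* releases channels and DMA mappings */
}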
+--- a/drivers/mtd/nand/raw/Kconfig
++++ b/drivers/mtd/nand/raw/Kconfig
+@@ -330,7 +330,7 @@ config MTD_NAND_HISI504
+         Enables support for NAND controller on Hisilicon SoC Hip04.
+ config MTD_NAND_QCOM
+-      tristate "QCOM NAND controller"
++      bool "QCOM NAND controller"
+       depends on ARCH_QCOM || COMPILE_TEST
+       depends on HAS_IOMEM
+       help
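The tristate-to-bool switch matches the build integration above: qpic_common.o is linked with obj-y only when CONFIG_MTD_NAND_QCOM=y, and the v3 changelog notes that all EXPORT_SYMBOL() annotations were removed from the common file, so a modular (=m) qcom_nandc would have no way to reach these helpers. Making the option bool keeps the raw NAND driver built in alongside the common code.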
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -15,417 +15,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+-
+-/* NANDc reg offsets */
+-#define       NAND_FLASH_CMD                  0x00
+-#define       NAND_ADDR0                      0x04
+-#define       NAND_ADDR1                      0x08
+-#define       NAND_FLASH_CHIP_SELECT          0x0c
+-#define       NAND_EXEC_CMD                   0x10
+-#define       NAND_FLASH_STATUS               0x14
+-#define       NAND_BUFFER_STATUS              0x18
+-#define       NAND_DEV0_CFG0                  0x20
+-#define       NAND_DEV0_CFG1                  0x24
+-#define       NAND_DEV0_ECC_CFG               0x28
+-#define       NAND_AUTO_STATUS_EN             0x2c
+-#define       NAND_DEV1_CFG0                  0x30
+-#define       NAND_DEV1_CFG1                  0x34
+-#define       NAND_READ_ID                    0x40
+-#define       NAND_READ_STATUS                0x44
+-#define       NAND_DEV_CMD0                   0xa0
+-#define       NAND_DEV_CMD1                   0xa4
+-#define       NAND_DEV_CMD2                   0xa8
+-#define       NAND_DEV_CMD_VLD                0xac
+-#define       SFLASHC_BURST_CFG               0xe0
+-#define       NAND_ERASED_CW_DETECT_CFG       0xe8
+-#define       NAND_ERASED_CW_DETECT_STATUS    0xec
+-#define       NAND_EBI2_ECC_BUF_CFG           0xf0
+-#define       FLASH_BUF_ACC                   0x100
+-
+-#define       NAND_CTRL                       0xf00
+-#define       NAND_VERSION                    0xf08
+-#define       NAND_READ_LOCATION_0            0xf20
+-#define       NAND_READ_LOCATION_1            0xf24
+-#define       NAND_READ_LOCATION_2            0xf28
+-#define       NAND_READ_LOCATION_3            0xf2c
+-#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
+-#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
+-#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
+-#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
+-
+-/* dummy register offsets, used by qcom_write_reg_dma */
+-#define       NAND_DEV_CMD1_RESTORE           0xdead
+-#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
+-
+-/* NAND_FLASH_CMD bits */
+-#define       PAGE_ACC                        BIT(4)
+-#define       LAST_PAGE                       BIT(5)
+-
+-/* NAND_FLASH_CHIP_SELECT bits */
+-#define       NAND_DEV_SEL                    0
+-#define       DM_EN                           BIT(2)
+-
+-/* NAND_FLASH_STATUS bits */
+-#define       FS_OP_ERR                       BIT(4)
+-#define       FS_READY_BSY_N                  BIT(5)
+-#define       FS_MPU_ERR                      BIT(8)
+-#define       FS_DEVICE_STS_ERR               BIT(16)
+-#define       FS_DEVICE_WP                    BIT(23)
+-
+-/* NAND_BUFFER_STATUS bits */
+-#define       BS_UNCORRECTABLE_BIT            BIT(8)
+-#define       BS_CORRECTABLE_ERR_MSK          0x1f
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define       DISABLE_STATUS_AFTER_WRITE      4
+-#define       CW_PER_PAGE                     6
+-#define       UD_SIZE_BYTES                   9
+-#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
+-#define       ECC_PARITY_SIZE_BYTES_RS        19
+-#define       SPARE_SIZE_BYTES                23
+-#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
+-#define       NUM_ADDR_CYCLES                 27
+-#define       STATUS_BFR_READ                 30
+-#define       SET_RD_MODE_AFTER_STATUS        31
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define       DEV0_CFG1_ECC_DISABLE           0
+-#define       WIDE_FLASH                      1
+-#define       NAND_RECOVERY_CYCLES            2
+-#define       CS_ACTIVE_BSY                   5
+-#define       BAD_BLOCK_BYTE_NUM              6
+-#define       BAD_BLOCK_IN_SPARE_AREA         16
+-#define       WR_RD_BSY_GAP                   17
+-#define       ENABLE_BCH_ECC                  27
+-
+-/* NAND_DEV0_ECC_CFG bits */
+-#define       ECC_CFG_ECC_DISABLE             0
+-#define       ECC_SW_RESET                    1
+-#define       ECC_MODE                        4
+-#define       ECC_PARITY_SIZE_BYTES_BCH       8
+-#define       ECC_NUM_DATA_BYTES              16
+-#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
+-#define       ECC_FORCE_CLK_OPEN              30
+-
+-/* NAND_DEV_CMD1 bits */
+-#define       READ_ADDR                       0
+-
+-/* NAND_DEV_CMD_VLD bits */
+-#define       READ_START_VLD                  BIT(0)
+-#define       READ_STOP_VLD                   BIT(1)
+-#define       WRITE_START_VLD                 BIT(2)
+-#define       ERASE_START_VLD                 BIT(3)
+-#define       SEQ_READ_START_VLD              BIT(4)
+-
+-/* NAND_EBI2_ECC_BUF_CFG bits */
+-#define       NUM_STEPS                       0
+-
+-/* NAND_ERASED_CW_DETECT_CFG bits */
+-#define       ERASED_CW_ECC_MASK              1
+-#define       AUTO_DETECT_RES                 0
+-#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
+-#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
+-#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
+-#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
+-#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
+-
+-/* NAND_ERASED_CW_DETECT_STATUS bits */
+-#define       PAGE_ALL_ERASED                 BIT(7)
+-#define       CODEWORD_ALL_ERASED             BIT(6)
+-#define       PAGE_ERASED                     BIT(5)
+-#define       CODEWORD_ERASED                 BIT(4)
+-#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
+-#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+-
+-/* NAND_READ_LOCATION_n bits */
+-#define READ_LOCATION_OFFSET          0
+-#define READ_LOCATION_SIZE            16
+-#define READ_LOCATION_LAST            31
+-
+-/* Version Mask */
+-#define       NAND_VERSION_MAJOR_MASK         0xf0000000
+-#define       NAND_VERSION_MAJOR_SHIFT        28
+-#define       NAND_VERSION_MINOR_MASK         0x0fff0000
+-#define       NAND_VERSION_MINOR_SHIFT        16
+-
+-/* NAND OP_CMDs */
+-#define       OP_PAGE_READ                    0x2
+-#define       OP_PAGE_READ_WITH_ECC           0x3
+-#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
+-#define       OP_PAGE_READ_ONFI_READ          0x5
+-#define       OP_PROGRAM_PAGE                 0x6
+-#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
+-#define       OP_PROGRAM_PAGE_SPARE           0x9
+-#define       OP_BLOCK_ERASE                  0xa
+-#define       OP_CHECK_STATUS                 0xc
+-#define       OP_FETCH_ID                     0xb
+-#define       OP_RESET_DEVICE                 0xd
+-
+-/* Default Value for NAND_DEV_CMD_VLD */
+-#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
+-                                       ERASE_START_VLD | SEQ_READ_START_VLD)
+-
+-/* NAND_CTRL bits */
+-#define       BAM_MODE_EN                     BIT(0)
+-
+-/*
+- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+- * the driver calls the chunks 'step' or 'codeword' interchangeably
+- */
+-#define       NANDC_STEP_SIZE                 512
+-
+-/*
+- * the largest page size we support is 8K, this will have 16 steps/codewords
+- * of 512 bytes each
+- */
+-#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
+-
+-/* we read at most 3 registers per codeword scan */
+-#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
+-
+-/* ECC modes supported by the controller */
+-#define       ECC_NONE        BIT(0)
+-#define       ECC_RS_4BIT     BIT(1)
+-#define       ECC_BCH_4BIT    BIT(2)
+-#define       ECC_BCH_8BIT    BIT(3)
+-
+-/*
+- * Returns the actual register address for all NAND_DEV_ registers
+- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+- */
+-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+-
+-/* Returns the NAND register physical address */
+-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+-
+-/* Returns the dma address for reg read buffer */
+-#define reg_buf_dma_addr(chip, vaddr) \
+-      ((chip)->reg_read_dma + \
+-      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+-
+-#define QPIC_PER_CW_CMD_ELEMENTS      32
+-#define QPIC_PER_CW_CMD_SGL           32
+-#define QPIC_PER_CW_DATA_SGL          8
+-
+-#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
+-
+-/*
+- * Flags used in DMA descriptor preparation helper functions
+- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+- */
+-/* Don't set the EOT in current tx BAM sgl */
+-#define NAND_BAM_NO_EOT                       BIT(0)
+-/* Set the NWD flag in current BAM sgl */
+-#define NAND_BAM_NWD                  BIT(1)
+-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+-#define NAND_BAM_NEXT_SGL             BIT(2)
+-/*
+- * Erased codeword status is being used two times in single transfer so this
+- * flag will determine the current value of erased codeword status register
+- */
+-#define NAND_ERASED_CW_SET            BIT(4)
+-
+-#define MAX_ADDRESS_CYCLE             5
+-
+-/*
+- * This data type corresponds to the BAM transaction which will be used for all
+- * NAND transfers.
+- * @bam_ce - the array of BAM command elements
+- * @cmd_sgl - sgl for NAND BAM command pipe
+- * @data_sgl - sgl for NAND BAM consumer/producer pipe
+- * @last_data_desc - last DMA desc in data channel (tx/rx).
+- * @last_cmd_desc - last DMA desc in command channel.
+- * @txn_done - completion for NAND transfer.
+- * @bam_ce_pos - the index in bam_ce which is available for next sgl
+- * @bam_ce_start - the index in bam_ce which marks the start position ce
+- *               for current sgl. It will be used for size calculation
+- *               for current sgl
+- * @cmd_sgl_pos - current index in command sgl.
+- * @cmd_sgl_start - start index in command sgl.
+- * @tx_sgl_pos - current index in data sgl for tx.
+- * @tx_sgl_start - start index in data sgl for tx.
+- * @rx_sgl_pos - current index in data sgl for rx.
+- * @rx_sgl_start - start index in data sgl for rx.
+- */
+-struct bam_transaction {
+-      struct bam_cmd_element *bam_ce;
+-      struct scatterlist *cmd_sgl;
+-      struct scatterlist *data_sgl;
+-      struct dma_async_tx_descriptor *last_data_desc;
+-      struct dma_async_tx_descriptor *last_cmd_desc;
+-      struct completion txn_done;
+-      u32 bam_ce_pos;
+-      u32 bam_ce_start;
+-      u32 cmd_sgl_pos;
+-      u32 cmd_sgl_start;
+-      u32 tx_sgl_pos;
+-      u32 tx_sgl_start;
+-      u32 rx_sgl_pos;
+-      u32 rx_sgl_start;
+-};
+-
+-/*
+- * This data type corresponds to the nand dma descriptor
+- * @dma_desc - low level DMA engine descriptor
+- * @list - list for desc_info
+- *
+- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+- *          ADM
+- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+- * @dir - DMA transfer direction
+- */
+-struct desc_info {
+-      struct dma_async_tx_descriptor *dma_desc;
+-      struct list_head node;
+-
+-      union {
+-              struct scatterlist adm_sgl;
+-              struct {
+-                      struct scatterlist *bam_sgl;
+-                      int sgl_cnt;
+-              };
+-      };
+-      enum dma_data_direction dir;
+-};
+-
+-/*
+- * holds the current register values that we want to write. acts as a contiguous
+- * chunk of memory which we use to write the controller registers through DMA.
+- */
+-struct nandc_regs {
+-      __le32 cmd;
+-      __le32 addr0;
+-      __le32 addr1;
+-      __le32 chip_sel;
+-      __le32 exec;
+-
+-      __le32 cfg0;
+-      __le32 cfg1;
+-      __le32 ecc_bch_cfg;
+-
+-      __le32 clrflashstatus;
+-      __le32 clrreadstatus;
+-
+-      __le32 cmd1;
+-      __le32 vld;
+-
+-      __le32 orig_cmd1;
+-      __le32 orig_vld;
+-
+-      __le32 ecc_buf_cfg;
+-      __le32 read_location0;
+-      __le32 read_location1;
+-      __le32 read_location2;
+-      __le32 read_location3;
+-      __le32 read_location_last0;
+-      __le32 read_location_last1;
+-      __le32 read_location_last2;
+-      __le32 read_location_last3;
+-
+-      __le32 erased_cw_detect_cfg_clr;
+-      __le32 erased_cw_detect_cfg_set;
+-};
+-
+-/*
+- * NAND controller data struct
+- *
+- * @dev:                      parent device
+- *
+- * @base:                     MMIO base
+- *
+- * @core_clk:                 controller clock
+- * @aon_clk:                  another controller clock
+- *
+- * @regs:                     a contiguous chunk of memory for DMA register
+- *                            writes. contains the register values to be
+- *                            written to controller
+- *
+- * @props:                    properties of current NAND controller,
+- *                            initialized via DT match data
+- *
+- * @controller:                       base controller structure
+- * @host_list:                        list containing all the chips attached to the
+- *                            controller
+- *
+- * @chan:                     dma channel
+- * @cmd_crci:                 ADM DMA CRCI for command flow control
+- * @data_crci:                        ADM DMA CRCI for data flow control
+- *
+- * @desc_list:                        DMA descriptor list (list of desc_infos)
+- *
+- * @data_buffer:              our local DMA buffer for page read/writes,
+- *                            used when we can't use the buffer provided
+- *                            by upper layers directly
+- * @reg_read_buf:             local buffer for reading back registers via DMA
+- *
+- * @base_phys:                        physical base address of controller registers
+- * @base_dma:                 dma base address of controller registers
+- * @reg_read_dma:             contains dma address for register read buffer
+- *
+- * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
+- *                            functions
+- * @max_cwperpage:            maximum QPIC codewords required. calculated
+- *                            from all connected NAND devices pagesize
+- *
+- * @reg_read_pos:             marker for data read in reg_read_buf
+- *
+- * @cmd1/vld:                 some fixed controller register values
+- *
+- * @exec_opwrite:             flag to select correct number of code word
+- *                            while reading status
+- */
+-struct qcom_nand_controller {
+-      struct device *dev;
+-
+-      void __iomem *base;
+-
+-      struct clk *core_clk;
+-      struct clk *aon_clk;
+-
+-      struct nandc_regs *regs;
+-      struct bam_transaction *bam_txn;
+-
+-      const struct qcom_nandc_props *props;
+-
+-      struct nand_controller controller;
+-      struct list_head host_list;
+-
+-      union {
+-              /* will be used only by QPIC for BAM DMA */
+-              struct {
+-                      struct dma_chan *tx_chan;
+-                      struct dma_chan *rx_chan;
+-                      struct dma_chan *cmd_chan;
+-              };
+-
+-              /* will be used only by EBI2 for ADM DMA */
+-              struct {
+-                      struct dma_chan *chan;
+-                      unsigned int cmd_crci;
+-                      unsigned int data_crci;
+-              };
+-      };
+-
+-      struct list_head desc_list;
+-
+-      u8              *data_buffer;
+-      __le32          *reg_read_buf;
+-
+-      phys_addr_t base_phys;
+-      dma_addr_t base_dma;
+-      dma_addr_t reg_read_dma;
+-
+-      int             buf_size;
+-      int             buf_count;
+-      int             buf_start;
+-      unsigned int    max_cwperpage;
+-
+-      int reg_read_pos;
+-
+-      u32 cmd1, vld;
+-      bool exec_opwrite;
+-};
++#include <linux/mtd/nand-qpic-common.h>
+ /*
+  * NAND special boot partitions
+@@ -530,104 +120,6 @@ struct qcom_nand_host {
+       bool bch_enabled;
+ };
+-/*
+- * This data type corresponds to the NAND controller properties which varies
+- * among different NAND controllers.
+- * @ecc_modes - ecc mode for NAND
+- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @supports_bam - whether NAND controller is using BAM
+- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+- * @qpic_version2 - flag to indicate QPIC IP version 2
+- * @use_codeword_fixup - whether NAND has different layout for boot partitions
+- */
+-struct qcom_nandc_props {
+-      u32 ecc_modes;
+-      u32 dev_cmd_reg_start;
+-      bool supports_bam;
+-      bool nandc_part_of_qpic;
+-      bool qpic_version2;
+-      bool use_codeword_fixup;
+-};
+-
+-/* Frees the BAM transaction memory */
+-static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      devm_kfree(nandc->dev, bam_txn);
+-}
+-
+-/* Allocates and Initializes the BAM transaction */
+-static struct bam_transaction *
+-qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn;
+-      size_t bam_txn_size;
+-      unsigned int num_cw = nandc->max_cwperpage;
+-      void *bam_txn_buf;
+-
+-      bam_txn_size =
+-              sizeof(*bam_txn) + num_cw *
+-              ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+-              (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+-              (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+-
+-      bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+-      if (!bam_txn_buf)
+-              return NULL;
+-
+-      bam_txn = bam_txn_buf;
+-      bam_txn_buf += sizeof(*bam_txn);
+-
+-      bam_txn->bam_ce = bam_txn_buf;
+-      bam_txn_buf +=
+-              sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+-
+-      bam_txn->cmd_sgl = bam_txn_buf;
+-      bam_txn_buf +=
+-              sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+-
+-      bam_txn->data_sgl = bam_txn_buf;
+-
+-      init_completion(&bam_txn->txn_done);
+-
+-      return bam_txn;
+-}
+-
+-/* Clears the BAM transaction indexes */
+-static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      if (!nandc->props->supports_bam)
+-              return;
+-
+-      bam_txn->bam_ce_pos = 0;
+-      bam_txn->bam_ce_start = 0;
+-      bam_txn->cmd_sgl_pos = 0;
+-      bam_txn->cmd_sgl_start = 0;
+-      bam_txn->tx_sgl_pos = 0;
+-      bam_txn->tx_sgl_start = 0;
+-      bam_txn->rx_sgl_pos = 0;
+-      bam_txn->rx_sgl_start = 0;
+-      bam_txn->last_data_desc = NULL;
+-
+-      sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+-                    QPIC_PER_CW_CMD_SGL);
+-      sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+-                    QPIC_PER_CW_DATA_SGL);
+-
+-      reinit_completion(&bam_txn->txn_done);
+-}
+-
+-/* Callback for DMA descriptor completion */
+-static void qcom_qpic_bam_dma_done(void *data)
+-{
+-      struct bam_transaction *bam_txn = data;
+-
+-      complete(&bam_txn->txn_done);
+-}
+-
+ static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+       return container_of(chip, struct qcom_nand_host, chip);
+@@ -629,8 +128,8 @@ static inline struct qcom_nand_host *to_
+ static inline struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+-      return container_of(chip->controller, struct qcom_nand_controller,
+-                          controller);
++      return (struct qcom_nand_controller *)
++              ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
+ }
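
This rewrite drops container_of() because struct nand_controller is no longer embedded in struct qcom_nand_controller. As the probe hunk further down shows, both structs are now carved out of a single devm_kzalloc(), with the nand_controller placed immediately behind the qcom_nand_controller, so stepping back by sizeof(struct qcom_nand_controller) recovers the outer structure. A standalone sketch of that layout trick, using generic inner/outer names rather than the driver's types:

    #include <stdio.h>
    #include <stdlib.h>

    struct inner { const void *ops; };

    struct outer {
            int id;
            struct inner *ctrl;     /* points into the same allocation */
    };

    int main(void)
    {
            /* one allocation: outer immediately followed by inner */
            struct outer *o = calloc(1, sizeof(*o) + sizeof(struct inner));

            if (!o)
                    return 1;
            o->ctrl = (struct inner *)&o[1];
            o->id = 42;

            /* recover the outer struct from the inner pointer, as
             * get_qcom_nand_controller() does: step back by sizeof(outer) */
            struct outer *back = (struct outer *)
                    ((char *)o->ctrl - sizeof(struct outer));

            printf("%d\n", back->id);       /* prints 42 */
            free(o);
            return 0;
    }

The arithmetic only holds because probe guarantees the adjacency; with an embedded member, container_of() would express the same relationship without manual pointer math.
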
+ static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+@@ -644,23 +143,6 @@ static inline void nandc_write(struct qc
+       iowrite32(val, nandc->base + offset);
+ }
+-static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+-{
+-      if (!nandc->props->supports_bam)
+-              return;
+-
+-      if (is_cpu)
+-              dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+-                                      MAX_REG_RD *
+-                                      sizeof(*nandc->reg_read_buf),
+-                                      DMA_FROM_DEVICE);
+-      else
+-              dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+-                                         MAX_REG_RD *
+-                                         sizeof(*nandc->reg_read_buf),
+-                                         DMA_FROM_DEVICE);
+-}
+-
+ /* Helper to check the code word, whether it is last cw or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
+ }
+ /*
+- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
+- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+- * which will be submitted to DMA engine.
+- */
+-static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-                                     struct dma_chan *chan,
+-                                     unsigned long flags)
+-{
+-      struct desc_info *desc;
+-      struct scatterlist *sgl;
+-      unsigned int sgl_cnt;
+-      int ret;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-      enum dma_transfer_direction dir_eng;
+-      struct dma_async_tx_descriptor *dma_desc;
+-
+-      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-      if (!desc)
+-              return -ENOMEM;
+-
+-      if (chan == nandc->cmd_chan) {
+-              sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+-              sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+-              bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      } else if (chan == nandc->tx_chan) {
+-              sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+-              sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+-              bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      } else {
+-              sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+-              sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+-              bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+-              dir_eng = DMA_DEV_TO_MEM;
+-              desc->dir = DMA_FROM_DEVICE;
+-      }
+-
+-      sg_mark_end(sgl + sgl_cnt - 1);
+-      ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-      if (ret == 0) {
+-              dev_err(nandc->dev, "failure in mapping desc\n");
+-              kfree(desc);
+-              return -ENOMEM;
+-      }
+-
+-      desc->sgl_cnt = sgl_cnt;
+-      desc->bam_sgl = sgl;
+-
+-      dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+-                                         flags);
+-
+-      if (!dma_desc) {
+-              dev_err(nandc->dev, "failure in prep desc\n");
+-              dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-              kfree(desc);
+-              return -EINVAL;
+-      }
+-
+-      desc->dma_desc = dma_desc;
+-
+-      /* update last data/command descriptor */
+-      if (chan == nandc->cmd_chan)
+-              bam_txn->last_cmd_desc = dma_desc;
+-      else
+-              bam_txn->last_data_desc = dma_desc;
+-
+-      list_add_tail(&desc->node, &nandc->desc_list);
+-
+-      return 0;
+-}
+-
+-/*
+- * Prepares the command descriptor for BAM DMA which will be used for NAND
+- * register reads and writes. The command descriptor requires the command
+- * to be formed in command element type so this function uses the command
+- * element from bam transaction ce array and fills the same with required
+- * data. A single SGL can contain multiple command elements so
+- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+- * after the current command element.
+- */
+-static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-                                    int reg_off, const void *vaddr,
+-                                    int size, unsigned int flags)
+-{
+-      int bam_ce_size;
+-      int i, ret;
+-      struct bam_cmd_element *bam_ce_buffer;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+-
+-      /* fill the command desc */
+-      for (i = 0; i < size; i++) {
+-              if (read)
+-                      bam_prep_ce(&bam_ce_buffer[i],
+-                                  nandc_reg_phys(nandc, reg_off + 4 * i),
+-                                  BAM_READ_COMMAND,
+-                                  reg_buf_dma_addr(nandc,
+-                                                   (__le32 *)vaddr + i));
+-              else
+-                      bam_prep_ce_le32(&bam_ce_buffer[i],
+-                                       nandc_reg_phys(nandc, reg_off + 4 * i),
+-                                       BAM_WRITE_COMMAND,
+-                                       *((__le32 *)vaddr + i));
+-      }
+-
+-      bam_txn->bam_ce_pos += size;
+-
+-      /* use the separate sgl after this command */
+-      if (flags & NAND_BAM_NEXT_SGL) {
+-              bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+-              bam_ce_size = (bam_txn->bam_ce_pos -
+-                              bam_txn->bam_ce_start) *
+-                              sizeof(struct bam_cmd_element);
+-              sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+-                         bam_ce_buffer, bam_ce_size);
+-              bam_txn->cmd_sgl_pos++;
+-              bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+-
+-              if (flags & NAND_BAM_NWD) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                        DMA_PREP_FENCE |
+-                                                        DMA_PREP_CMD);
+-                      if (ret)
+-                              return ret;
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-/*
+- * Prepares the data descriptor for BAM DMA which will be used for NAND
+- * data reads and writes.
+- */
+-static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-                                     const void *vaddr, int size, unsigned int flags)
+-{
+-      int ret;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-      if (read) {
+-              sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+-                         vaddr, size);
+-              bam_txn->rx_sgl_pos++;
+-      } else {
+-              sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+-                         vaddr, size);
+-              bam_txn->tx_sgl_pos++;
+-
+-              /*
+-               * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+-               * is not set, form the DMA descriptor
+-               */
+-              if (!(flags & NAND_BAM_NO_EOT)) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                        DMA_PREP_INTERRUPT);
+-                      if (ret)
+-                              return ret;
+-              }
+-      }
+-
+-      return 0;
+-}
+-
+-static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-                                int reg_off, const void *vaddr, int size,
+-                                bool flow_control)
+-{
+-      struct desc_info *desc;
+-      struct dma_async_tx_descriptor *dma_desc;
+-      struct scatterlist *sgl;
+-      struct dma_slave_config slave_conf;
+-      struct qcom_adm_peripheral_config periph_conf = {};
+-      enum dma_transfer_direction dir_eng;
+-      int ret;
+-
+-      desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-      if (!desc)
+-              return -ENOMEM;
+-
+-      sgl = &desc->adm_sgl;
+-
+-      sg_init_one(sgl, vaddr, size);
+-
+-      if (read) {
+-              dir_eng = DMA_DEV_TO_MEM;
+-              desc->dir = DMA_FROM_DEVICE;
+-      } else {
+-              dir_eng = DMA_MEM_TO_DEV;
+-              desc->dir = DMA_TO_DEVICE;
+-      }
+-
+-      ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+-      if (ret == 0) {
+-              ret = -ENOMEM;
+-              goto err;
+-      }
+-
+-      memset(&slave_conf, 0x00, sizeof(slave_conf));
+-
+-      slave_conf.device_fc = flow_control;
+-      if (read) {
+-              slave_conf.src_maxburst = 16;
+-              slave_conf.src_addr = nandc->base_dma + reg_off;
+-              if (nandc->data_crci) {
+-                      periph_conf.crci = nandc->data_crci;
+-                      slave_conf.peripheral_config = &periph_conf;
+-                      slave_conf.peripheral_size = sizeof(periph_conf);
+-              }
+-      } else {
+-              slave_conf.dst_maxburst = 16;
+-              slave_conf.dst_addr = nandc->base_dma + reg_off;
+-              if (nandc->cmd_crci) {
+-                      periph_conf.crci = nandc->cmd_crci;
+-                      slave_conf.peripheral_config = &periph_conf;
+-                      slave_conf.peripheral_size = sizeof(periph_conf);
+-              }
+-      }
+-
+-      ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+-      if (ret) {
+-              dev_err(nandc->dev, "failed to configure dma channel\n");
+-              goto err;
+-      }
+-
+-      dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+-      if (!dma_desc) {
+-              dev_err(nandc->dev, "failed to prepare desc\n");
+-              ret = -EINVAL;
+-              goto err;
+-      }
+-
+-      desc->dma_desc = dma_desc;
+-
+-      list_add_tail(&desc->node, &nandc->desc_list);
+-
+-      return 0;
+-err:
+-      kfree(desc);
+-
+-      return ret;
+-}
+-
+-/*
+- * qcom_read_reg_dma: prepares a descriptor to read a given number of
+- *                    contiguous registers to the reg_read_buf pointer
+- *
+- * @first:            offset of the first register in the contiguous block
+- * @num_regs:         number of registers to read
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-                           int num_regs, unsigned int flags)
+-{
+-      bool flow_control = false;
+-      void *vaddr;
+-
+-      vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+-      nandc->reg_read_pos += num_regs;
+-
+-      if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+-              first = dev_cmd_reg_addr(nandc, first);
+-
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+-                                           num_regs, flags);
+-
+-      if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+-              flow_control = true;
+-
+-      return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+-                               num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_write_reg_dma:        prepares a descriptor to write a given number of
+- *                    contiguous registers
+- *
+- * @vaddr:            contnigeous memory from where register value will
+- *                    be written
+- * @first:            offset of the first register in the contiguous block
+- * @num_regs:         number of registers to write
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-                            int first, int num_regs, unsigned int flags)
+-{
+-      bool flow_control = false;
+-
+-      if (first == NAND_EXEC_CMD)
+-              flags |= NAND_BAM_NWD;
+-
+-      if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+-              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+-
+-      if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+-              first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+-                                           num_regs, flags);
+-
+-      if (first == NAND_FLASH_CMD)
+-              flow_control = true;
+-
+-      return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+-                               num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_read_data_dma:        prepares a DMA descriptor to transfer data from the
+- *                    controller's internal buffer to the buffer 'vaddr'
+- *
+- * @reg_off:          offset within the controller's data buffer
+- * @vaddr:            virtual address of the buffer we want to write to
+- * @size:             DMA transaction size in bytes
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                            const u8 *vaddr, int size, unsigned int flags)
+-{
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-
+-      return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+-}
+-
+-/*
+- * qcom_write_data_dma:       prepares a DMA descriptor to transfer data from
+- *                    'vaddr' to the controller's internal buffer
+- *
+- * @reg_off:          offset within the controller's data buffer
+- * @vaddr:            virtual address of the buffer we want to read from
+- * @size:             DMA transaction size in bytes
+- * @flags:            flags to control DMA descriptor preparation
+- */
+-static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-                             const u8 *vaddr, int size, unsigned int flags)
+-{
+-      if (nandc->props->supports_bam)
+-              return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-
+-      return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+-}
+-
+-/*
+  * Helper to prepare DMA descriptors for configuring registers
+  * before reading a NAND page.
+  */
+@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
+                          NAND_BAM_NEXT_SGL);
+ }
+-/* helpers to submit/free our list of dma descriptors */
+-static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+-{
+-      struct desc_info *desc, *n;
+-      dma_cookie_t cookie = 0;
+-      struct bam_transaction *bam_txn = nandc->bam_txn;
+-      int ret = 0;
+-
+-      if (nandc->props->supports_bam) {
+-              if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-
+-              if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-                                                        DMA_PREP_INTERRUPT);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-
+-              if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-                      ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-                                                        DMA_PREP_CMD);
+-                      if (ret)
+-                              goto err_unmap_free_desc;
+-              }
+-      }
+-
+-      list_for_each_entry(desc, &nandc->desc_list, node)
+-              cookie = dmaengine_submit(desc->dma_desc);
+-
+-      if (nandc->props->supports_bam) {
+-              bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+-              bam_txn->last_cmd_desc->callback_param = bam_txn;
+-
+-              dma_async_issue_pending(nandc->tx_chan);
+-              dma_async_issue_pending(nandc->rx_chan);
+-              dma_async_issue_pending(nandc->cmd_chan);
+-
+-              if (!wait_for_completion_timeout(&bam_txn->txn_done,
+-                                               QPIC_NAND_COMPLETION_TIMEOUT))
+-                      ret = -ETIMEDOUT;
+-      } else {
+-              if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+-                      ret = -ETIMEDOUT;
+-      }
+-
+-err_unmap_free_desc:
+-      /*
+-       * Unmap the dma sg_list and free the desc allocated by both
+-       * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+-       */
+-      list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+-              list_del(&desc->node);
+-
+-              if (nandc->props->supports_bam)
+-                      dma_unmap_sg(nandc->dev, desc->bam_sgl,
+-                                   desc->sgl_cnt, desc->dir);
+-              else
+-                      dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+-                                   desc->dir);
+-
+-              kfree(desc);
+-      }
+-
+-      return ret;
+-}
+-
+-/* reset the register read buffer for next NAND operation */
+-static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+-{
+-      nandc->reg_read_pos = 0;
+-      qcom_nandc_dev_to_mem(nandc, false);
+-}
+-
+ /*
+  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
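
The removed qcom_submit_descs() also captures the two wait strategies side by side: the BAM path arms a completion that the last command descriptor's callback fires, bounded by QPIC_NAND_COMPLETION_TIMEOUT (2 seconds), while the ADM path simply polls the engine with dma_sync_wait(). A standalone pthreads analogue of the completion-with-timeout half (a userspace sketch, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int done;

    /* fires from another thread, like qcom_qpic_bam_dma_done() */
    static void *dma_callback(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            done = 1;                       /* complete(&txn_done) */
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            struct timespec deadline;
            pthread_t cb;
            int ret = 0;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 2;           /* like msecs_to_jiffies(2000) */

            pthread_create(&cb, NULL, dma_callback, NULL);

            /* like wait_for_completion_timeout(&txn_done, ...) */
            pthread_mutex_lock(&lock);
            while (!done && ret == 0)
                    ret = pthread_cond_timedwait(&cond, &lock, &deadline);
            pthread_mutex_unlock(&lock);

            puts(ret ? "-ETIMEDOUT" : "transfer done");
            pthread_join(cb, NULL);
            return ret ? 1 : 0;
    }
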
+@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops
+       .exec_op = qcom_nand_exec_op,
+ };
+-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+-{
+-      if (nandc->props->supports_bam) {
+-              if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+-                      dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+-                                       MAX_REG_RD *
+-                                       sizeof(*nandc->reg_read_buf),
+-                                       DMA_FROM_DEVICE);
+-
+-              if (nandc->tx_chan)
+-                      dma_release_channel(nandc->tx_chan);
+-
+-              if (nandc->rx_chan)
+-                      dma_release_channel(nandc->rx_chan);
+-
+-              if (nandc->cmd_chan)
+-                      dma_release_channel(nandc->cmd_chan);
+-      } else {
+-              if (nandc->chan)
+-                      dma_release_channel(nandc->chan);
+-      }
+-}
+-
+-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+-{
+-      int ret;
+-
+-      ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+-      if (ret) {
+-              dev_err(nandc->dev, "failed to set DMA mask\n");
+-              return ret;
+-      }
+-
+-      /*
+-       * we use the internal buffer for reading ONFI params, reading small
+-       * data like ID and status, and preforming read-copy-write operations
+-       * when writing to a codeword partially. 532 is the maximum possible
+-       * size of a codeword for our nand controller
+-       */
+-      nandc->buf_size = 532;
+-
+-      nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+-      if (!nandc->data_buffer)
+-              return -ENOMEM;
+-
+-      nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+-      if (!nandc->regs)
+-              return -ENOMEM;
+-
+-      nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+-                                         sizeof(*nandc->reg_read_buf),
+-                                         GFP_KERNEL);
+-      if (!nandc->reg_read_buf)
+-              return -ENOMEM;
+-
+-      if (nandc->props->supports_bam) {
+-              nandc->reg_read_dma =
+-                      dma_map_single(nandc->dev, nandc->reg_read_buf,
+-                                     MAX_REG_RD *
+-                                     sizeof(*nandc->reg_read_buf),
+-                                     DMA_FROM_DEVICE);
+-              if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+-                      dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+-                      return -EIO;
+-              }
+-
+-              nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+-              if (IS_ERR(nandc->tx_chan)) {
+-                      ret = PTR_ERR(nandc->tx_chan);
+-                      nandc->tx_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "tx DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+-              if (IS_ERR(nandc->rx_chan)) {
+-                      ret = PTR_ERR(nandc->rx_chan);
+-                      nandc->rx_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "rx DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+-              if (IS_ERR(nandc->cmd_chan)) {
+-                      ret = PTR_ERR(nandc->cmd_chan);
+-                      nandc->cmd_chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "cmd DMA channel request failed\n");
+-                      goto unalloc;
+-              }
+-
+-              /*
+-               * Initially allocate BAM transaction to read ONFI param page.
+-               * After detecting all the devices, this BAM transaction will
+-               * be freed and the next BAM transaction will be allocated with
+-               * maximum codeword size
+-               */
+-              nandc->max_cwperpage = 1;
+-              nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+-              if (!nandc->bam_txn) {
+-                      dev_err(nandc->dev,
+-                              "failed to allocate bam transaction\n");
+-                      ret = -ENOMEM;
+-                      goto unalloc;
+-              }
+-      } else {
+-              nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+-              if (IS_ERR(nandc->chan)) {
+-                      ret = PTR_ERR(nandc->chan);
+-                      nandc->chan = NULL;
+-                      dev_err_probe(nandc->dev, ret,
+-                                    "rxtx DMA channel request failed\n");
+-                      return ret;
+-              }
+-      }
+-
+-      INIT_LIST_HEAD(&nandc->desc_list);
+-      INIT_LIST_HEAD(&nandc->host_list);
+-
+-      nand_controller_init(&nandc->controller);
+-      nandc->controller.ops = &qcom_nandc_ops;
+-
+-      return 0;
+-unalloc:
+-      qcom_nandc_unalloc(nandc);
+-      return ret;
+-}
+-
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+       u32 nand_ctrl;
++      nand_controller_init(nandc->controller);
++      nandc->controller->ops = &qcom_nandc_ops;
++
+       /* kill onenand */
+       if (!nandc->props->nandc_part_of_qpic)
+               nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_regis
+       chip->legacy.block_bad          = qcom_nandc_block_bad;
+       chip->legacy.block_markbad      = qcom_nandc_block_markbad;
+-      chip->controller = &nandc->controller;
++      chip->controller = nandc->controller;
+       chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+                        NAND_SKIP_BBTSCAN;
+@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct pl
+ static int qcom_nandc_probe(struct platform_device *pdev)
+ {
+       struct qcom_nand_controller *nandc;
++      struct nand_controller *controller;
+       const void *dev_data;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int ret;
+-      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
++      nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
++                           GFP_KERNEL);
+       if (!nandc)
+               return -ENOMEM;
++      controller = (struct nand_controller *)&nandc[1];
+       platform_set_drvdata(pdev, nandc);
+       nandc->dev = dev;
++      nandc->controller = controller;
+       dev_data = of_device_get_match_data(dev);
+       if (!dev_data) {
+--- /dev/null
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -0,0 +1,468 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * QCOM QPIC common APIs header file
++ *
++ * Copyright (c) 2023 Qualcomm Inc.
++ * Authors:   Md sadre Alam   <quic_mdalam@quicinc.com>
++ *
++ */
++#ifndef __MTD_NAND_QPIC_COMMON_H__
++#define __MTD_NAND_QPIC_COMMON_H__
++
++/* NANDc reg offsets */
++#define       NAND_FLASH_CMD                  0x00
++#define       NAND_ADDR0                      0x04
++#define       NAND_ADDR1                      0x08
++#define       NAND_FLASH_CHIP_SELECT          0x0c
++#define       NAND_EXEC_CMD                   0x10
++#define       NAND_FLASH_STATUS               0x14
++#define       NAND_BUFFER_STATUS              0x18
++#define       NAND_DEV0_CFG0                  0x20
++#define       NAND_DEV0_CFG1                  0x24
++#define       NAND_DEV0_ECC_CFG               0x28
++#define       NAND_AUTO_STATUS_EN             0x2c
++#define       NAND_DEV1_CFG0                  0x30
++#define       NAND_DEV1_CFG1                  0x34
++#define       NAND_READ_ID                    0x40
++#define       NAND_READ_STATUS                0x44
++#define       NAND_DEV_CMD0                   0xa0
++#define       NAND_DEV_CMD1                   0xa4
++#define       NAND_DEV_CMD2                   0xa8
++#define       NAND_DEV_CMD_VLD                0xac
++#define       SFLASHC_BURST_CFG               0xe0
++#define       NAND_ERASED_CW_DETECT_CFG       0xe8
++#define       NAND_ERASED_CW_DETECT_STATUS    0xec
++#define       NAND_EBI2_ECC_BUF_CFG           0xf0
++#define       FLASH_BUF_ACC                   0x100
++
++#define       NAND_CTRL                       0xf00
++#define       NAND_VERSION                    0xf08
++#define       NAND_READ_LOCATION_0            0xf20
++#define       NAND_READ_LOCATION_1            0xf24
++#define       NAND_READ_LOCATION_2            0xf28
++#define       NAND_READ_LOCATION_3            0xf2c
++#define       NAND_READ_LOCATION_LAST_CW_0    0xf40
++#define       NAND_READ_LOCATION_LAST_CW_1    0xf44
++#define       NAND_READ_LOCATION_LAST_CW_2    0xf48
++#define       NAND_READ_LOCATION_LAST_CW_3    0xf4c
++
++/* dummy register offsets, used by qcom_write_reg_dma */
++#define       NAND_DEV_CMD1_RESTORE           0xdead
++#define       NAND_DEV_CMD_VLD_RESTORE        0xbeef
++
++/* NAND_FLASH_CMD bits */
++#define       PAGE_ACC                        BIT(4)
++#define       LAST_PAGE                       BIT(5)
++
++/* NAND_FLASH_CHIP_SELECT bits */
++#define       NAND_DEV_SEL                    0
++#define       DM_EN                           BIT(2)
++
++/* NAND_FLASH_STATUS bits */
++#define       FS_OP_ERR                       BIT(4)
++#define       FS_READY_BSY_N                  BIT(5)
++#define       FS_MPU_ERR                      BIT(8)
++#define       FS_DEVICE_STS_ERR               BIT(16)
++#define       FS_DEVICE_WP                    BIT(23)
++
++/* NAND_BUFFER_STATUS bits */
++#define       BS_UNCORRECTABLE_BIT            BIT(8)
++#define       BS_CORRECTABLE_ERR_MSK          0x1f
++
++/* NAND_DEVn_CFG0 bits */
++#define       DISABLE_STATUS_AFTER_WRITE      4
++#define       CW_PER_PAGE                     6
++#define       UD_SIZE_BYTES                   9
++#define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
++#define       ECC_PARITY_SIZE_BYTES_RS        19
++#define       SPARE_SIZE_BYTES                23
++#define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
++#define       NUM_ADDR_CYCLES                 27
++#define       STATUS_BFR_READ                 30
++#define       SET_RD_MODE_AFTER_STATUS        31
++
++/* NAND_DEVn_CFG0 bits */
++#define       DEV0_CFG1_ECC_DISABLE           0
++#define       WIDE_FLASH                      1
++#define       NAND_RECOVERY_CYCLES            2
++#define       CS_ACTIVE_BSY                   5
++#define       BAD_BLOCK_BYTE_NUM              6
++#define       BAD_BLOCK_IN_SPARE_AREA         16
++#define       WR_RD_BSY_GAP                   17
++#define       ENABLE_BCH_ECC                  27
++
++/* NAND_DEV0_ECC_CFG bits */
++#define       ECC_CFG_ECC_DISABLE             0
++#define       ECC_SW_RESET                    1
++#define       ECC_MODE                        4
++#define       ECC_PARITY_SIZE_BYTES_BCH       8
++#define       ECC_NUM_DATA_BYTES              16
++#define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
++#define       ECC_FORCE_CLK_OPEN              30
++
++/* NAND_DEV_CMD1 bits */
++#define       READ_ADDR                       0
++
++/* NAND_DEV_CMD_VLD bits */
++#define       READ_START_VLD                  BIT(0)
++#define       READ_STOP_VLD                   BIT(1)
++#define       WRITE_START_VLD                 BIT(2)
++#define       ERASE_START_VLD                 BIT(3)
++#define       SEQ_READ_START_VLD              BIT(4)
++
++/* NAND_EBI2_ECC_BUF_CFG bits */
++#define       NUM_STEPS                       0
++
++/* NAND_ERASED_CW_DETECT_CFG bits */
++#define       ERASED_CW_ECC_MASK              1
++#define       AUTO_DETECT_RES                 0
++#define       MASK_ECC                        BIT(ERASED_CW_ECC_MASK)
++#define       RESET_ERASED_DET                BIT(AUTO_DETECT_RES)
++#define       ACTIVE_ERASED_DET               (0 << AUTO_DETECT_RES)
++#define       CLR_ERASED_PAGE_DET             (RESET_ERASED_DET | MASK_ECC)
++#define       SET_ERASED_PAGE_DET             (ACTIVE_ERASED_DET | MASK_ECC)
++
++/* NAND_ERASED_CW_DETECT_STATUS bits */
++#define       PAGE_ALL_ERASED                 BIT(7)
++#define       CODEWORD_ALL_ERASED             BIT(6)
++#define       PAGE_ERASED                     BIT(5)
++#define       CODEWORD_ERASED                 BIT(4)
++#define       ERASED_PAGE                     (PAGE_ALL_ERASED | PAGE_ERASED)
++#define       ERASED_CW                       (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
++
++/* NAND_READ_LOCATION_n bits */
++#define READ_LOCATION_OFFSET          0
++#define READ_LOCATION_SIZE            16
++#define READ_LOCATION_LAST            31
++
++/* Version Mask */
++#define       NAND_VERSION_MAJOR_MASK         0xf0000000
++#define       NAND_VERSION_MAJOR_SHIFT        28
++#define       NAND_VERSION_MINOR_MASK         0x0fff0000
++#define       NAND_VERSION_MINOR_SHIFT        16
++
++/* NAND OP_CMDs */
++#define       OP_PAGE_READ                    0x2
++#define       OP_PAGE_READ_WITH_ECC           0x3
++#define       OP_PAGE_READ_WITH_ECC_SPARE     0x4
++#define       OP_PAGE_READ_ONFI_READ          0x5
++#define       OP_PROGRAM_PAGE                 0x6
++#define       OP_PAGE_PROGRAM_WITH_ECC        0x7
++#define       OP_PROGRAM_PAGE_SPARE           0x9
++#define       OP_BLOCK_ERASE                  0xa
++#define       OP_CHECK_STATUS                 0xc
++#define       OP_FETCH_ID                     0xb
++#define       OP_RESET_DEVICE                 0xd
++
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL          (READ_START_VLD | WRITE_START_VLD | \
++                                       ERASE_START_VLD | SEQ_READ_START_VLD)
++
++/* NAND_CTRL bits */
++#define       BAM_MODE_EN                     BIT(0)
++
++/*
++ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
++ * the driver calls the chunks 'step' or 'codeword' interchangeably
++ */
++#define       NANDC_STEP_SIZE                 512
++
++/*
++ * the largest page size we support is 8K, this will have 16 steps/codewords
++ * of 512 bytes each
++ */
++#define       MAX_NUM_STEPS                   (SZ_8K / NANDC_STEP_SIZE)
++
++/* we read at most 3 registers per codeword scan */
++#define       MAX_REG_RD                      (3 * MAX_NUM_STEPS)
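++
++/*
++ * Worked out: for the largest supported page (8K), SZ_8K / NANDC_STEP_SIZE
++ * gives MAX_NUM_STEPS = 16 codewords, so with up to 3 status registers
++ * read per codeword, MAX_REG_RD is 48 __le32 slots in reg_read_buf.
++ */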
++
++/* ECC modes supported by the controller */
++#define       ECC_NONE        BIT(0)
++#define       ECC_RS_4BIT     BIT(1)
++#define       ECC_BCH_4BIT    BIT(2)
++#define       ECC_BCH_8BIT    BIT(3)
++
++/*
++ * Returns the actual register address for all NAND_DEV_ registers
++ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
++ */
++#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++
++/* Returns the NAND register physical address */
++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
++
++/* Returns the dma address for reg read buffer */
++#define reg_buf_dma_addr(chip, vaddr) \
++      ((chip)->reg_read_dma + \
++      ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
++
++#define QPIC_PER_CW_CMD_ELEMENTS      32
++#define QPIC_PER_CW_CMD_SGL           32
++#define QPIC_PER_CW_DATA_SGL          8
++
++#define QPIC_NAND_COMPLETION_TIMEOUT  msecs_to_jiffies(2000)
++
++/*
++ * Flags used in DMA descriptor preparation helper functions
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
++ */
++/* Don't set the EOT in current tx BAM sgl */
++#define NAND_BAM_NO_EOT                       BIT(0)
++/* Set the NWD flag in current BAM sgl */
++#define NAND_BAM_NWD                  BIT(1)
++/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
++#define NAND_BAM_NEXT_SGL             BIT(2)
++/*
++ * Erased codeword status is being used two times in single transfer so this
++ * flag will determine the current value of erased codeword status register
++ */
++#define NAND_ERASED_CW_SET            BIT(4)
++
++#define MAX_ADDRESS_CYCLE             5
++
++/*
++ * This data type corresponds to the BAM transaction which will be used for all
++ * NAND transfers.
++ * @bam_ce - the array of BAM command elements
++ * @cmd_sgl - sgl for NAND BAM command pipe
++ * @data_sgl - sgl for NAND BAM consumer/producer pipe
++ * @last_data_desc - last DMA desc in data channel (tx/rx).
++ * @last_cmd_desc - last DMA desc in command channel.
++ * @txn_done - completion for NAND transfer.
++ * @bam_ce_pos - the index in bam_ce which is available for next sgl
++ * @bam_ce_start - the index in bam_ce marking the first command element
++ *               of the current sgl. It is used for size calculation
++ *               of the current sgl
++ * @cmd_sgl_pos - current index in command sgl.
++ * @cmd_sgl_start - start index in command sgl.
++ * @tx_sgl_pos - current index in data sgl for tx.
++ * @tx_sgl_start - start index in data sgl for tx.
++ * @rx_sgl_pos - current index in data sgl for rx.
++ * @rx_sgl_start - start index in data sgl for rx.
++ */
++struct bam_transaction {
++      struct bam_cmd_element *bam_ce;
++      struct scatterlist *cmd_sgl;
++      struct scatterlist *data_sgl;
++      struct dma_async_tx_descriptor *last_data_desc;
++      struct dma_async_tx_descriptor *last_cmd_desc;
++      struct completion txn_done;
++      u32 bam_ce_pos;
++      u32 bam_ce_start;
++      u32 cmd_sgl_pos;
++      u32 cmd_sgl_start;
++      u32 tx_sgl_pos;
++      u32 tx_sgl_start;
++      u32 rx_sgl_pos;
++      u32 rx_sgl_start;
++};
++
++/*
++ * This data type corresponds to the nand dma descriptor
++ * @dma_desc - low level DMA engine descriptor
++ * @node - list node linking this desc_info into the controller's desc_list
++ *
++ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
++ *          ADM
++ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
++ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
++ * @dir - DMA transfer direction
++ */
++struct desc_info {
++      struct dma_async_tx_descriptor *dma_desc;
++      struct list_head node;
++
++      union {
++              struct scatterlist adm_sgl;
++              struct {
++                      struct scatterlist *bam_sgl;
++                      int sgl_cnt;
++              };
++      };
++      enum dma_data_direction dir;
++};
++
++/*
++ * holds the current register values that we want to write. acts as a contiguous
++ * chunk of memory which we use to write the controller registers through DMA.
++ */
++struct nandc_regs {
++      __le32 cmd;
++      __le32 addr0;
++      __le32 addr1;
++      __le32 chip_sel;
++      __le32 exec;
++
++      __le32 cfg0;
++      __le32 cfg1;
++      __le32 ecc_bch_cfg;
++
++      __le32 clrflashstatus;
++      __le32 clrreadstatus;
++
++      __le32 cmd1;
++      __le32 vld;
++
++      __le32 orig_cmd1;
++      __le32 orig_vld;
++
++      __le32 ecc_buf_cfg;
++      __le32 read_location0;
++      __le32 read_location1;
++      __le32 read_location2;
++      __le32 read_location3;
++      __le32 read_location_last0;
++      __le32 read_location_last1;
++      __le32 read_location_last2;
++      __le32 read_location_last3;
++
++      __le32 erased_cw_detect_cfg_clr;
++      __le32 erased_cw_detect_cfg_set;
++};
++
++/*
++ * NAND controller data struct
++ *
++ * @dev:                      parent device
++ *
++ * @base:                     MMIO base
++ *
++ * @core_clk:                 controller clock
++ * @aon_clk:                  another controller clock
++ *
++ * @regs:                     a contiguous chunk of memory for DMA register
++ *                            writes. contains the register values to be
++ *                            written to controller
++ *
++ * @props:                    properties of current NAND controller,
++ *                            initialized via DT match data
++ *
++ * @controller:                       base controller structure
++ * @host_list:                        list containing all the chips attached to the
++ *                            controller
++ *
++ * @chan:                     dma channel
++ * @cmd_crci:                 ADM DMA CRCI for command flow control
++ * @data_crci:                        ADM DMA CRCI for data flow control
++ *
++ * @desc_list:                        DMA descriptor list (list of desc_infos)
++ *
++ * @data_buffer:              our local DMA buffer for page read/writes,
++ *                            used when we can't use the buffer provided
++ *                            by upper layers directly
++ * @reg_read_buf:             local buffer for reading back registers via DMA
++ *
++ * @base_phys:                        physical base address of controller registers
++ * @base_dma:                 dma base address of controller registers
++ * @reg_read_dma:             contains dma address for register read buffer
++ *
++ * @buf_size/count/start:     markers for chip->legacy.read_buf/write_buf
++ *                            functions
++ * @max_cwperpage:            maximum QPIC codewords required. calculated
++ *                            from all connected NAND devices pagesize
++ *
++ * @reg_read_pos:             marker for data read in reg_read_buf
++ *
++ * @cmd1/vld:                 some fixed controller register values
++ *
++ * @exec_opwrite:             flag to select correct number of code word
++ *                            while reading status
++ */
++struct qcom_nand_controller {
++      struct device *dev;
++
++      void __iomem *base;
++
++      struct clk *core_clk;
++      struct clk *aon_clk;
++
++      struct nandc_regs *regs;
++      struct bam_transaction *bam_txn;
++
++      const struct qcom_nandc_props *props;
++
++      struct nand_controller *controller;
++      struct list_head host_list;
++
++      union {
++              /* will be used only by QPIC for BAM DMA */
++              struct {
++                      struct dma_chan *tx_chan;
++                      struct dma_chan *rx_chan;
++                      struct dma_chan *cmd_chan;
++              };
++
++              /* will be used only by EBI2 for ADM DMA */
++              struct {
++                      struct dma_chan *chan;
++                      unsigned int cmd_crci;
++                      unsigned int data_crci;
++              };
++      };
++
++      struct list_head desc_list;
++
++      u8              *data_buffer;
++      __le32          *reg_read_buf;
++
++      phys_addr_t base_phys;
++      dma_addr_t base_dma;
++      dma_addr_t reg_read_dma;
++
++      int             buf_size;
++      int             buf_count;
++      int             buf_start;
++      unsigned int    max_cwperpage;
++
++      int reg_read_pos;
++
++      u32 cmd1, vld;
++      bool exec_opwrite;
++};
++
++/*
++ * This data type corresponds to the NAND controller properties which vary
++ * among different NAND controllers.
++ * @ecc_modes - ecc mode for NAND
++ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
++ * @supports_bam - whether NAND controller is using BAM
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
++ * @use_codeword_fixup - whether NAND has different layout for boot partitions
++ */
++struct qcom_nandc_props {
++      u32 ecc_modes;
++      u32 dev_cmd_reg_start;
++      bool supports_bam;
++      bool nandc_part_of_qpic;
++      bool qpic_version2;
++      bool use_codeword_fixup;
++};
++
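++/*
++ * Editorial sketch (not part of the patch): a driver is expected to
++ * instantiate these properties as per-SoC match data, e.g.
++ *
++ *    static const struct qcom_nandc_props example_props = {
++ *            .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
++ *            .dev_cmd_reg_start = 0x7000,
++ *            .supports_bam = true,
++ *            .nandc_part_of_qpic = true,
++ *            .qpic_version2 = true,
++ *    };
++ *
++ * and hand it out through the of_device_id data pointer. The values
++ * shown are illustrative only.
++ */
++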
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
++struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_qpic_bam_dma_done(void *data);
++void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++                              struct dma_chan *chan, unsigned long flags);
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++                             int reg_off, const void *vaddr, int size, unsigned int flags);
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++                              const void *vaddr, int size, unsigned int flags);
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
++                         const void *vaddr, int size, bool flow_control);
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
++                    unsigned int flags);
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
++                     int num_regs, unsigned int flags);
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++                     int size, unsigned int flags);
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++                      int size, unsigned int flags);
++int qcom_submit_descs(struct qcom_nand_controller *nandc);
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
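++
++/*
++ * Editorial sketch (not part of the patch) of the expected call flow
++ * for the helpers above: stage values in nandc->regs, queue DMA
++ * descriptors for them, submit in one go, then make any read-back data
++ * CPU-visible. Register offsets and flags are illustrative.
++ *
++ *    qcom_clear_read_regs(nandc);
++ *    qcom_clear_bam_transaction(nandc);
++ *    qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1,
++ *                       NAND_BAM_NEXT_SGL);
++ *    qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1,
++ *                       NAND_BAM_NEXT_SGL);
++ *    qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ *    ret = qcom_submit_descs(nandc);
++ *    qcom_nandc_dev_to_mem(nandc, true);
++ */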
++#endif
++
diff --git a/target/linux/qualcommbe/patches-6.6/100-05-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch b/target/linux/qualcommbe/patches-6.6/100-05-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
new file mode 100644 (file)
index 0000000..5dddc3d
--- /dev/null
@@ -0,0 +1,240 @@
+From 9c5b6453db27706f090ab06987394aabaaf24e1b Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 11 Sep 2024 12:50:42 +0530
+Subject: [PATCH v10 5/8] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
+
+Use the bitfield macros FIELD_PREP() and GENMASK() to
+do the shift and mask in one go. This makes the code
+more readable.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change
+
+Change in [v9]
+
+* In update_rw_regs() api added cpu_to_le32() macro to fix compilation
+  issue reported by kernel test bot
+* In qcom_param_page_type_exec() api added cpu_to_le32() macro to fix
+  compilation issue reported by kernel test bot  
+
+Change in [v8]
+
+* No change
+
+Change in [v7]
+
+* No change
+
+Change in [v6]
+
+* Added FIELD_PREP() and GENMASK() macro
+
+Change in [v5]
+
+* This patch was not included in [v1]
+
+Change in [v4]
+
+* This patch was not included in [v4]
+
+Change in [v3]
+
+* This patch was not included in [v3]
+
+Change in [v2]
+
+* This patch was not included in [v2]
+
+Change in [v1]
+
+* This patch was not included in [v1]
+
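+Editorial note (not part of the patch): with a GENMASK()-defined field,
+FIELD_PREP() derives the shift from the mask itself, so a field's
+position is encoded in exactly one place. A minimal before/after sketch
+using one of the fields touched below (the value 516 is illustrative):
+
+	#include <linux/bitfield.h>
+
+	#define UD_SIZE_BYTES		9
+	#define UD_SIZE_BYTES_MASK	GENMASK(18, 9)
+
+	/* before: open-coded shift against the bit position */
+	u32 cfg_old = 516 << UD_SIZE_BYTES;
+
+	/* after: FIELD_PREP() masks and shifts in one go */
+	u32 cfg_new = FIELD_PREP(UD_SIZE_BYTES_MASK, 516);
+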
+ drivers/mtd/nand/raw/qcom_nandc.c    | 97 ++++++++++++++--------------
+ include/linux/mtd/nand-qpic-common.h | 31 +++++----
+ 2 files changed, 67 insertions(+), 61 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
+                               (num_cw - 1) << CW_PER_PAGE);
+               cfg1 = cpu_to_le32(host->cfg1_raw);
+-              ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++              ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+       }
+       nandc->regs->cmd = cmd;
+@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
+       host->cw_size = host->cw_data + ecc->bytes;
+       bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+-      host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+-                              | host->cw_data << UD_SIZE_BYTES
+-                              | 0 << DISABLE_STATUS_AFTER_WRITE
+-                              | 5 << NUM_ADDR_CYCLES
+-                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+-                              | 0 << STATUS_BFR_READ
+-                              | 1 << SET_RD_MODE_AFTER_STATUS
+-                              | host->spare_bytes << SPARE_SIZE_BYTES;
+-
+-      host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+-                              | 0 <<  CS_ACTIVE_BSY
+-                              | bad_block_byte << BAD_BLOCK_BYTE_NUM
+-                              | 0 << BAD_BLOCK_IN_SPARE_AREA
+-                              | 2 << WR_RD_BSY_GAP
+-                              | wide_bus << WIDE_FLASH
+-                              | host->bch_enabled << ENABLE_BCH_ECC;
+-
+-      host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+-                              | host->cw_size << UD_SIZE_BYTES
+-                              | 5 << NUM_ADDR_CYCLES
+-                              | 0 << SPARE_SIZE_BYTES;
+-
+-      host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+-                              | 0 << CS_ACTIVE_BSY
+-                              | 17 << BAD_BLOCK_BYTE_NUM
+-                              | 1 << BAD_BLOCK_IN_SPARE_AREA
+-                              | 2 << WR_RD_BSY_GAP
+-                              | wide_bus << WIDE_FLASH
+-                              | 1 << DEV0_CFG1_ECC_DISABLE;
+-
+-      host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+-                              | 0 << ECC_SW_RESET
+-                              | host->cw_data << ECC_NUM_DATA_BYTES
+-                              | 1 << ECC_FORCE_CLK_OPEN
+-                              | ecc_mode << ECC_MODE
+-                              | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
++      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                   FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
++                   FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
++                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                   FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
++                   FIELD_PREP(STATUS_BFR_READ, 0) |
++                   FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
++
++      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                   FIELD_PREP(WIDE_FLASH, wide_bus) |
++                   FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
++
++      host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                       FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
++                       FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                       FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++      host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                       FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                       FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                       FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                       FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                       FIELD_PREP(WIDE_FLASH, wide_bus) |
++                       FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++      host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
++                          FIELD_PREP(ECC_SW_RESET, 0) |
++                          FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
++                          FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++                          FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
++                          FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+       if (!nandc->props->qpic_version2)
+               host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
+       nandc->regs->addr0 = 0;
+       nandc->regs->addr1 = 0;
+-      nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
+-                          | 512 << UD_SIZE_BYTES
+-                          | 5 << NUM_ADDR_CYCLES
+-                          | 0 << SPARE_SIZE_BYTES);
+-
+-      nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
+-                          | 0 << CS_ACTIVE_BSY
+-                          | 17 << BAD_BLOCK_BYTE_NUM
+-                          | 1 << BAD_BLOCK_IN_SPARE_AREA
+-                          | 2 << WR_RD_BSY_GAP
+-                          | 0 << WIDE_FLASH
+-                          | 1 << DEV0_CFG1_ECC_DISABLE);
++      host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++                   FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++                   FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++                   FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++      host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++                   FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                   FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                   FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                   FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++                   FIELD_PREP(WIDE_FLASH, 0) |
++                   FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+       if (!nandc->props->qpic_version2)
+-              nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++              nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+       /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+       if (!nandc->props->qpic_version2) {
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -70,35 +70,42 @@
+ #define       BS_CORRECTABLE_ERR_MSK          0x1f
+ /* NAND_DEVn_CFG0 bits */
+-#define       DISABLE_STATUS_AFTER_WRITE      4
++#define       DISABLE_STATUS_AFTER_WRITE      BIT(4)
+ #define       CW_PER_PAGE                     6
++#define       CW_PER_PAGE_MASK                GENMASK(8, 6)
+ #define       UD_SIZE_BYTES                   9
+ #define       UD_SIZE_BYTES_MASK              GENMASK(18, 9)
+-#define       ECC_PARITY_SIZE_BYTES_RS        19
++#define       ECC_PARITY_SIZE_BYTES_RS        GENMASK(22, 19)
+ #define       SPARE_SIZE_BYTES                23
+ #define       SPARE_SIZE_BYTES_MASK           GENMASK(26, 23)
+ #define       NUM_ADDR_CYCLES                 27
+-#define       STATUS_BFR_READ                 30
+-#define       SET_RD_MODE_AFTER_STATUS        31
++#define       NUM_ADDR_CYCLES_MASK            GENMASK(29, 27)
++#define       STATUS_BFR_READ                 BIT(30)
++#define       SET_RD_MODE_AFTER_STATUS        BIT(31)
+ /* NAND_DEVn_CFG0 bits */
+-#define       DEV0_CFG1_ECC_DISABLE           0
+-#define       WIDE_FLASH                      1
++#define       DEV0_CFG1_ECC_DISABLE           BIT(0)
++#define       WIDE_FLASH                      BIT(1)
+ #define       NAND_RECOVERY_CYCLES            2
+-#define       CS_ACTIVE_BSY                   5
++#define       NAND_RECOVERY_CYCLES_MASK       GENMASK(4, 2)
++#define       CS_ACTIVE_BSY                   BIT(5)
+ #define       BAD_BLOCK_BYTE_NUM              6
+-#define       BAD_BLOCK_IN_SPARE_AREA         16
++#define       BAD_BLOCK_BYTE_NUM_MASK         GENMASK(15, 6)
++#define       BAD_BLOCK_IN_SPARE_AREA         BIT(16)
+ #define       WR_RD_BSY_GAP                   17
+-#define       ENABLE_BCH_ECC                  27
++#define       WR_RD_BSY_GAP_MASK              GENMASK(22, 17)
++#define       ENABLE_BCH_ECC                  BIT(27)
+ /* NAND_DEV0_ECC_CFG bits */
+-#define       ECC_CFG_ECC_DISABLE             0
+-#define       ECC_SW_RESET                    1
++#define       ECC_CFG_ECC_DISABLE             BIT(0)
++#define       ECC_SW_RESET                    BIT(1)
+ #define       ECC_MODE                        4
++#define       ECC_MODE_MASK                   GENMASK(5, 4)
+ #define       ECC_PARITY_SIZE_BYTES_BCH       8
++#define       ECC_PARITY_SIZE_BYTES_BCH_MASK  GENMASK(12, 8)
+ #define       ECC_NUM_DATA_BYTES              16
+ #define       ECC_NUM_DATA_BYTES_MASK         GENMASK(25, 16)
+-#define       ECC_FORCE_CLK_OPEN              30
++#define       ECC_FORCE_CLK_OPEN              BIT(30)
+ /* NAND_DEV_CMD1 bits */
+ #define       READ_ADDR                       0
diff --git a/target/linux/qualcommbe/patches-6.6/100-06-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch b/target/linux/qualcommbe/patches-6.6/100-06-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch
new file mode 100644 (file)
index 0000000..72145db
--- /dev/null
@@ -0,0 +1,1893 @@
+From dc12953941ed3b8bc9eb8d47f8c7e74f54b47049 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Mon, 19 Aug 2024 11:05:18 +0530
+Subject: [PATCH v10 6/8] spi: spi-qpic: add driver for QCOM SPI NAND flash
+ Interface
+
+This driver implements support for the SPI-NAND mode of the QCOM NAND
+Flash Interface as a SPI-MEM controller with pipelined ECC capability.
+
+Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
+Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
+Co-developed-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* Fixed compilation warnings reported by kernel test robot.
+* Added depends on CONFIG_MTD
+* removed extra bracket from statement if (i == (num_cw - 1)) in 
+  qcom_spi_program_raw() api.
+
+Change in [v9]
+
+* Changed data type of addr1, addr2, cmd, to __le32 in qpic_spi_nand
+  structure
+* In qcom_spi_set_read_loc_first() api added cpu_to_le32() macro to fix
+  compilation warning
+* In qcom_spi_set_read_loc_last() api added cpu_to_le32() macro to fix
+  compilation warning
+* In qcom_spi_init() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_ecc_init_ctx_pipelined() api removed unused variables
+  reqs, user, step_size, strength and added cpu_to_le32() macro as well
+  to fix compilation warning
+* In qcom_spi_read_last_cw() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_check_error() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_read_page_ecc() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_read_page_oob() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_program_raw() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_program_ecc() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_program_oob() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_send_cmdaddr() api added cpu_to_le32() macro to fix compilation
+  warning
+* In qcom_spi_io_op() api added cpu_to_le32() macro to fix compilation
+  warning
+
+Change in [v8]
+
+* Included "bitfield.h" file to /spi-qpic-snand.c
+  to fix compilation warning reported by kernel test robot
+* Removed unused variable "steps" in 
+  qcom_spi_ecc_init_ctx_pipelined() to fix compilation warning
+
+Change in [v7]
+
+* Added read_oob() and write_oob() api
+
+* Handled offset value for oob layout
+
+* Made CONFIG_SPI_QPIC_SNAND as bool
+
+* Added macro ecceng_to_qspi()
+
+* Added FIELD_PREP() Macro in spi init
+
+* Added else condition in 
+  qcom_spi_ecc_finish_io_req_pipelined()
+  for corrected ecc
+
+* Handled multiple error condition for api
+  qcom_spi_cmd_mapping()
+
+* Fix typo for printing debug message
+  
+Change in [v6]
+
+* Added separate qpic_spi_nand{...} struct
+
+* moved qpic_ecc and qcom_ecc_stats struct to
+  spi-qpic-snand.c file, since its spi nand
+  specific
+
+* Added FIELD_PREP() and GENMASK() macro
+
+* Removed rawnand.h and partition.h from 
+  spi-qpic-snand.c
+
+* Removed oob_buff assignment form
+  qcom_spi_write_page_cache
+
+* Added qcom_nand_unalloc() in remove() path
+
+* Fixes all all comments
+
+Change in [v5]
+
+* Added raw_read() and raw_write() api
+
+* Updated commit message
+
+* Removed register indirection
+
+* Added qcom_spi_ prefix to all the api
+
+* Removed snand_set_reg() api.
+
+* Fixed nandbiterr issue
+
+* Removed hardcoded num_cw and made it variable
+
+* Removed hardcoded value for mtd pagesize
+
+* Added -ENOSUPPORT in cmd mapping for unsupported
+  commands
+
+* Replace if..else with switch..case statement
+
+Change in [v4]
+
+* No change
+
+Change in [v3]
+
+* Set SPI_QPIC_SNAND to n and added COMPILE_TEST in Kconfig
+
+* Made driver name sorted in Make file
+
+* Made comment like c++
+
+* Changed macro to functions, snandc_set_read_loc_last()
+  and snandc_set_read_loc_first()
+
+* Added error handling in snandc_set_reg()
+
+* Changed into normal conditional statement for
+  return snandc->ecc_stats.failed ? -EBADMSG :
+  snandc->ecc_stats.bitflips;
+
+* Remove cast of wbuf in qpic_snand_program_execute()
+  function
+
+* Made num_cw variable instead hardcoded value
+
+* changed if..else condition of function qpic_snand_io_op()
+  to switch..case statement
+
+* Added __devm_spi_alloc_controller() api instead of
+  devm_spi_alloc_master()
+
+* Disabling clock in remove path
+
+Change in [v2]
+
+* Added initial support for SPI-NAND driver
+
+Change in [v1]
+
+* Added RFC patch for design review
+
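+Editorial note (not part of the patch): the SPI-NAND core drives this
+controller through spi-mem operations. A PROGRAM EXECUTE, for example,
+reaches the driver as roughly the op below (the page value is
+hypothetical), which the exec_op path decodes into the QPIC command and
+address registers:
+
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINAND_PROGRAM_EXECUTE, 1),
+			   SPI_MEM_OP_ADDR(3, page, 1),
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_NO_DATA);
+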
+ drivers/mtd/nand/Makefile            |    5 +-
+ drivers/spi/Kconfig                  |    9 +
+ drivers/spi/Makefile                 |    1 +
+ drivers/spi/spi-qpic-snand.c         | 1634 ++++++++++++++++++++++++++
+ include/linux/mtd/nand-qpic-common.h |    7 +
+ 5 files changed, 1655 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/spi/spi-qpic-snand.c
+
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -7,8 +7,11 @@ obj-$(CONFIG_MTD_NAND_MTK_BMT)        += mtk_bm
+ ifeq ($(CONFIG_MTD_NAND_QCOM),y)
+ obj-y += qpic_common.o
++else
++ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
++obj-y   += qpic_common.o
++endif
+ endif
+-
+ obj-y += onenand/
+ obj-y += raw/
+ obj-y += spi/
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -870,6 +870,15 @@ config SPI_QCOM_QSPI
+       help
+         QSPI(Quad SPI) driver for Qualcomm QSPI controller.
++config SPI_QPIC_SNAND
++      bool "QPIC SNAND controller"
++      depends on ARCH_QCOM || COMPILE_TEST
++      depends on MTD
++      help
++        QPIC_SNAND (QPIC SPI NAND) driver for the Qualcomm QPIC controller.
++        The QPIC controller supports both parallel NAND and serial NAND.
++        This config enables the serial NAND driver for the QPIC controller.
++
+ config SPI_QUP
+       tristate "Qualcomm SPI controller with QUP interface"
+       depends on ARCH_QCOM || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX)           += spi-pxa2xx-
+ obj-$(CONFIG_SPI_PXA2XX_PCI)          += spi-pxa2xx-pci.o
+ obj-$(CONFIG_SPI_QCOM_GENI)           += spi-geni-qcom.o
+ obj-$(CONFIG_SPI_QCOM_QSPI)           += spi-qcom-qspi.o
++obj-$(CONFIG_SPI_QPIC_SNAND)            += spi-qpic-snand.o
+ obj-$(CONFIG_SPI_QUP)                 += spi-qup.o
+ obj-$(CONFIG_SPI_ROCKCHIP)            += spi-rockchip.o
+ obj-$(CONFIG_SPI_ROCKCHIP_SFC)                += spi-rockchip-sfc.o
+--- /dev/null
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -0,0 +1,1634 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ *
++ * Authors:
++ *    Md Sadre Alam <quic_mdalam@quicinc.com>
++ *    Sricharan R <quic_srichara@quicinc.com>
++ *    Varadarajan Narayanan <quic_varada@quicinc.com>
++ */
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++#include <linux/mtd/spinand.h>
++#include <linux/bitfield.h>
++
++#define NAND_FLASH_SPI_CFG            0xc0
++#define NAND_NUM_ADDR_CYCLES          0xc4
++#define NAND_BUSY_CHECK_WAIT_CNT      0xc8
++#define NAND_FLASH_FEATURES           0xf64
++
++/* QSPI NAND config reg bits */
++#define LOAD_CLK_CNTR_INIT_EN         BIT(28)
++#define CLK_CNTR_INIT_VAL_VEC         0x924
++#define CLK_CNTR_INIT_VAL_VEC_MASK    GENMASK(27, 16)
++#define FEA_STATUS_DEV_ADDR           0xc0
++#define FEA_STATUS_DEV_ADDR_MASK      GENMASK(15, 8)
++#define SPI_CFG                               BIT(0)
++#define SPI_NUM_ADDR                  0xDA4DB
++#define SPI_WAIT_CNT                  0x10
++#define QPIC_QSPI_NUM_CS              1
++#define SPI_TRANSFER_MODE_x1          BIT(29)
++#define SPI_TRANSFER_MODE_x4          (3 << 29)
++#define SPI_WP                                BIT(28)
++#define SPI_HOLD                      BIT(27)
++#define QPIC_SET_FEATURE              BIT(31)
++
++#define SPINAND_RESET                 0xff
++#define SPINAND_READID                        0x9f
++#define SPINAND_GET_FEATURE           0x0f
++#define SPINAND_SET_FEATURE           0x1f
++#define SPINAND_READ                  0x13
++#define SPINAND_ERASE                 0xd8
++#define SPINAND_WRITE_EN              0x06
++#define SPINAND_PROGRAM_EXECUTE               0x10
++#define SPINAND_PROGRAM_LOAD          0x84
++
++#define ACC_FEATURE                   0xe
++#define BAD_BLOCK_MARKER_SIZE         0x2
++#define OOB_BUF_SIZE                  128
++#define ecceng_to_qspi(eng)           container_of(eng, struct qpic_spi_nand, ecc_eng)
++
++struct qpic_snand_op {
++      u32 cmd_reg;
++      u32 addr1_reg;
++      u32 addr2_reg;
++};
++
++struct snandc_read_status {
++      __le32 snandc_flash;
++      __le32 snandc_buffer;
++      __le32 snandc_erased_cw;
++};
++
++/*
++ * ECC state struct
++ * @corrected:                number of bits corrected by ECC
++ * @bitflips:         maximum number of bitflips in a codeword
++ * @failed:           number of uncorrectable ECC failures
++ */
++struct qcom_ecc_stats {
++      u32 corrected;
++      u32 bitflips;
++      u32 failed;
++};
++
++struct qpic_ecc {
++      struct device *dev;
++      int ecc_bytes_hw;
++      int spare_bytes;
++      int bbm_size;
++      int ecc_mode;
++      int bytes;
++      int steps;
++      int step_size;
++      int strength;
++      int cw_size;
++      int cw_data;
++      u32 cfg0;
++      u32 cfg1;
++      u32 cfg0_raw;
++      u32 cfg1_raw;
++      u32 ecc_buf_cfg;
++      u32 ecc_bch_cfg;
++      u32 clrflashstatus;
++      u32 clrreadstatus;
++      bool bch_enabled;
++};
++
++struct qpic_spi_nand {
++      struct qcom_nand_controller *snandc;
++      struct spi_controller *ctlr;
++      struct mtd_info *mtd;
++      struct clk *iomacro_clk;
++      struct qpic_ecc *ecc;
++      struct qcom_ecc_stats ecc_stats;
++      struct nand_ecc_engine ecc_eng;
++      u8 *data_buf;
++      u8 *oob_buf;
++      u32 wlen;
++      __le32 addr1;
++      __le32 addr2;
++      __le32 cmd;
++      u32 num_cw;
++      bool oob_rw;
++      bool page_rw;
++      bool raw_rw;
++};
++
++static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
++                                      int reg, int cw_offset, int read_size,
++                                      int is_last_read_loc)
++{
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
++                << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg == NAND_READ_LOCATION_0)
++              snandc->regs->read_location0 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_1)
++              snandc->regs->read_location1 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_2)
++              snandc->regs->read_location2 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_3)
++              snandc->regs->read_location3 = locreg_val;
++}
++
++static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
++                                     int reg, int cw_offset, int read_size,
++                                     int is_last_read_loc)
++{
++      __le32 locreg_val;
++      u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++                ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
++                << READ_LOCATION_LAST));
++
++      locreg_val = cpu_to_le32(val);
++
++      if (reg == NAND_READ_LOCATION_LAST_CW_0)
++              snandc->regs->read_location_last0 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_LAST_CW_1)
++              snandc->regs->read_location_last1 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_LAST_CW_2)
++              snandc->regs->read_location_last2 = locreg_val;
++      else if (reg == NAND_READ_LOCATION_LAST_CW_3)
++              snandc->regs->read_location_last3 = locreg_val;
++}
++
++static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
++{
++      struct nand_ecc_engine *eng = nand->ecc.engine;
++      struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
++
++      return qspi->snandc;
++}
++
++static int qcom_spi_init(struct qcom_nand_controller *snandc)
++{
++      u32 snand_cfg_val = 0x0;
++      int ret;
++
++      snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
++                      FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
++                      FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
++                      FIELD_PREP(SPI_CFG, 0);
++
++      snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
++      snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
++      snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
++
++      snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
++      snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
++                         NAND_BAM_NEXT_SGL);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
++              return ret;
++      }
++
++      return ret;
++}
++
++static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
++                                struct mtd_oob_region *oobregion)
++{
++      struct nand_device *nand = mtd_to_nanddev(mtd);
++      struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++      struct qpic_ecc *qecc = snandc->qspi->ecc;
++
++      if (section > 1)
++              return -ERANGE;
++
++      oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
++      oobregion->offset = mtd->oobsize - oobregion->length;
++
++      return 0;
++}
++
++static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
++                                 struct mtd_oob_region *oobregion)
++{
++      struct nand_device *nand = mtd_to_nanddev(mtd);
++      struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++      struct qpic_ecc *qecc = snandc->qspi->ecc;
++
++      if (section)
++              return -ERANGE;
++
++      oobregion->length = qecc->steps * 4;
++      oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
++
++      return 0;
++}
++
++static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
++      .ecc = qcom_spi_ooblayout_ecc,
++      .free = qcom_spi_ooblayout_free,
++};
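++
++/*
++ * Editorial worked example (not part of the patch): with a 2048+64 page,
++ * cwperpage = 4 and bytes = 7 + 4 + 1 = 12, so the ECC region above is
++ * the last 11 bytes of OOB (offset 53, length 11) and the free region
++ * is 16 bytes at offset (4 - 1) * 12 + 1 = 37.
++ */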
++
++static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
++{
++      struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++      struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
++      struct mtd_info *mtd = nanddev_to_mtd(nand);
++      int cwperpage, bad_block_byte;
++      struct qpic_ecc *ecc_cfg;
++
++      cwperpage = mtd->writesize / NANDC_STEP_SIZE;
++      snandc->qspi->num_cw = cwperpage;
++
++      ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
++      if (!ecc_cfg)
++              return -ENOMEM;
++      snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
++                                      GFP_KERNEL);
++      if (!snandc->qspi->oob_buf) {
++              kfree(ecc_cfg);
++              return -ENOMEM;
++      }
++
++      memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
++
++      nand->ecc.ctx.priv = ecc_cfg;
++      snandc->qspi->mtd = mtd;
++
++      ecc_cfg->ecc_bytes_hw = 7;
++      ecc_cfg->spare_bytes = 4;
++      ecc_cfg->bbm_size = 1;
++      ecc_cfg->bch_enabled = true;
++      ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
++
++      ecc_cfg->steps = 4;
++      ecc_cfg->strength = 4;
++      ecc_cfg->step_size = 512;
++      ecc_cfg->cw_data = 516;
++      ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
++      bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
++
++      mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
++
++      ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                      FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
++                      FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
++                      FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
++                      FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
++                      FIELD_PREP(STATUS_BFR_READ, 0) |
++                      FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++                      FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
++
++      ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
++                      FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                      FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++                      FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++                      FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
++                      FIELD_PREP(WIDE_FLASH, 0) |
++                      FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
++
++      ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++                          FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
++                          FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
++                          FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++      ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
++                          FIELD_PREP(CS_ACTIVE_BSY, 0) |
++                          FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++                          FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++                          FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
++                          FIELD_PREP(WIDE_FLASH, 0) |
++                          FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++      ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
++                             FIELD_PREP(ECC_SW_RESET, 0) |
++                             FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
++                             FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++                             FIELD_PREP(ECC_MODE_MASK, 0) |
++                             FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
++
++      ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
++      ecc_cfg->clrflashstatus = FS_READY_BSY_N;
++      ecc_cfg->clrreadstatus = 0xc0;
++
++      conf->step_size = ecc_cfg->step_size;
++      conf->strength = ecc_cfg->strength;
++
++      snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
++      snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
++
++      dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
++              ecc_cfg->strength, ecc_cfg->step_size);
++
++      return 0;
++}
++
++static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
++{
++      struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
++
++      kfree(ecc_cfg);
++}
++
++static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
++                                               struct nand_page_io_req *req)
++{
++      struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++      struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
++
++      snandc->qspi->ecc = ecc_cfg;
++      snandc->qspi->raw_rw = false;
++      snandc->qspi->oob_rw = false;
++      snandc->qspi->page_rw = false;
++
++      if (req->datalen)
++              snandc->qspi->page_rw = true;
++
++      if (req->ooblen)
++              snandc->qspi->oob_rw = true;
++
++      if (req->mode == MTD_OPS_RAW)
++              snandc->qspi->raw_rw = true;
++
++      return 0;
++}
++
++static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
++                                              struct nand_page_io_req *req)
++{
++      struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++      struct mtd_info *mtd = nanddev_to_mtd(nand);
++
++      if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
++              return 0;
++
++      if (snandc->qspi->ecc_stats.failed)
++              mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
++      else
++              mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
++
++      if (snandc->qspi->ecc_stats.failed)
++              return -EBADMSG;
++      else
++              return snandc->qspi->ecc_stats.bitflips;
++}
++
++static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
++      .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
++      .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
++      .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
++      .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
++};
++
++/* helper to configure location register values */
++static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
++                                int cw_offset, int read_size, int is_last_read_loc)
++{
++      int reg_base = NAND_READ_LOCATION_0;
++      int num_cw = snandc->qspi->num_cw;
++
++      if (cw == (num_cw - 1))
++              reg_base = NAND_READ_LOCATION_LAST_CW_0;
++
++      reg_base += reg * 4;
++
++      if (cw == (num_cw - 1))
++              return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
++                                                read_size, is_last_read_loc);
++      else
++              return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
++                                                 read_size, is_last_read_loc);
++}
++
++static void
++qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
++{
++      __le32 *reg = &snandc->regs->read_location0;
++      int num_cw = snandc->qspi->num_cw;
++
++      qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++      if (cw == (num_cw - 1)) {
++              reg = &snandc->regs->read_location_last0;
++              qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
++                                 NAND_BAM_NEXT_SGL);
++      }
++
++      qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++      qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
++      qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++                        NAND_BAM_NEXT_SGL);
++}
++
++static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      int ret;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->addr0 = snandc->qspi->addr1;
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
++      snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to erase block\n");
++              return ret;
++      }
++
++      return 0;
++}
++
++static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
++                                              bool use_ecc, int cw)
++{
++      __le32 *reg = &snandc->regs->read_location0;
++      int num_cw = snandc->qspi->num_cw;
++
++      qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1,
++                         NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++      if (cw == (num_cw - 1)) {
++              reg = &snandc->regs->read_location_last0;
++              qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
++      }
++      qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++      qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
++}
++
++static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
++                               const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      struct mtd_info *mtd = snandc->qspi->mtd;
++      int size, ret = 0;
++      int col, bbpos;
++      u32 cfg0, cfg1, ecc_bch_cfg, flash;
++      u32 num_cw = snandc->qspi->num_cw;
++
++      qcom_clear_bam_transaction(snandc);
++      qcom_clear_read_regs(snandc);
++
++      size = ecc_cfg->cw_size;
++      col = ecc_cfg->cw_size * (num_cw - 1);
++
++      memset(snandc->data_buffer, 0xff, size);
++      snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++      snandc->regs->addr1 = snandc->qspi->addr2;
++
++      cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++              0 << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1_raw;
++      ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
++
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++      snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
++
++      qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
++
++      qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failed to read last cw\n");
++              return ret;
++      }
++
++      qcom_nandc_dev_to_mem(snandc, true);
++
++      flash = le32_to_cpu(snandc->reg_read_buf[0]);
++
++      if (flash & (FS_OP_ERR | FS_MPU_ERR))
++              return -EIO;
++
++      bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++
++      if (snandc->data_buffer[bbpos] == 0xff)
++              snandc->data_buffer[bbpos + 1] = 0xff;
++      else
++              snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
++
++      memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
++
++      return ret;
++}
++
++static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
++{
++      struct snandc_read_status *buf;
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      int i, num_cw = snandc->qspi->num_cw;
++      bool flash_op_err = false, erased;
++      unsigned int max_bitflips = 0;
++      unsigned int uncorrectable_cws = 0;
++
++      snandc->qspi->ecc_stats.failed = 0;
++      snandc->qspi->ecc_stats.corrected = 0;
++
++      qcom_nandc_dev_to_mem(snandc, true);
++      buf = (struct snandc_read_status *)snandc->reg_read_buf;
++
++      for (i = 0; i < num_cw; i++, buf++) {
++              u32 flash, buffer, erased_cw;
++              int data_len, oob_len;
++
++              if (i == (num_cw - 1)) {
++                      data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++                      oob_len = num_cw << 2;
++              } else {
++                      data_len = ecc_cfg->cw_data;
++                      oob_len = 0;
++              }
++
++              flash = le32_to_cpu(buf->snandc_flash);
++              buffer = le32_to_cpu(buf->snandc_buffer);
++              erased_cw = le32_to_cpu(buf->snandc_erased_cw);
++
++              if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
++                      if (ecc_cfg->bch_enabled)
++                              erased = (erased_cw & ERASED_CW) == ERASED_CW;
++                      else
++                              erased = false;
++
++                      if (!erased)
++                              uncorrectable_cws |= BIT(i);
++
++              } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
++                      flash_op_err = true;
++              } else {
++                      unsigned int stat;
++
++                      stat = buffer & BS_CORRECTABLE_ERR_MSK;
++                      snandc->qspi->ecc_stats.corrected += stat;
++                      max_bitflips = max(max_bitflips, stat);
++              }
++
++              if (data_buf)
++                      data_buf += data_len;
++              if (oob_buf)
++                      oob_buf += oob_len + ecc_cfg->bytes;
++      }
++
++      if (flash_op_err)
++              return -EIO;
++
++      if (!uncorrectable_cws)
++              snandc->qspi->ecc_stats.bitflips = max_bitflips;
++      else
++              snandc->qspi->ecc_stats.failed++;
++
++      return 0;
++}
++
++static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
++{
++      int i;
++
++      qcom_nandc_dev_to_mem(snandc, true);
++
++      for (i = 0; i < cw_cnt; i++) {
++              u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
++
++              if (flash & (FS_OP_ERR | FS_MPU_ERR))
++                      return -EIO;
++      }
++
++      return 0;
++}
++
++static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
++                              u8 *oob_buf, int cw)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      struct mtd_info *mtd = snandc->qspi->mtd;
++      int data_size1, data_size2, oob_size1, oob_size2;
++      int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
++      int raw_cw = cw;
++      u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++      int col;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++      raw_cw = num_cw - 1;
++
++      cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++                              0 << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1_raw;
++      ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
++
++      col = ecc_cfg->cw_size * cw;
++
++      snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++      snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1,
++                         NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++      data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++      oob_size1 = ecc_cfg->bbm_size;
++
++      if (cw == (num_cw - 1)) {
++              data_size2 = NANDC_STEP_SIZE - data_size1 -
++                           ((num_cw - 1) * 4);
++              oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
++                          ecc_cfg->spare_bytes;
++      } else {
++              data_size2 = ecc_cfg->cw_data - data_size1;
++              oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++      }
++
++      qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
++      read_loc += data_size1;
++
++      qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
++      read_loc += oob_size1;
++
++      qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
++      read_loc += data_size2;
++
++      qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
++
++      qcom_spi_config_cw_read(snandc, false, raw_cw);
++
++      qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
++      reg_off += data_size1;
++
++      qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
++      reg_off += oob_size1;
++
++      qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
++      reg_off += data_size2;
++
++      qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
++              return ret;
++      }
++
++      return qcom_spi_check_raw_flash_errors(snandc, 1);
++}
++
++static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
++                                const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      u8 *data_buf = NULL, *oob_buf = NULL;
++      int ret, cw;
++      u32 num_cw = snandc->qspi->num_cw;
++
++      if (snandc->qspi->page_rw)
++              data_buf = op->data.buf.in;
++
++      oob_buf = snandc->qspi->oob_buf;
++      memset(oob_buf, 0xff, OOB_BUF_SIZE);
++
++      for (cw = 0; cw < num_cw; cw++) {
++              ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
++              if (ret)
++                      return ret;
++
++              if (data_buf)
++                      data_buf += ecc_cfg->cw_data;
++              if (oob_buf)
++                      oob_buf += ecc_cfg->bytes;
++      }
++
++      return 0;
++}
++
++static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
++                                const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
++      int ret, i;
++      u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++
++      data_buf = op->data.buf.in;
++      data_buf_start = data_buf;
++
++      oob_buf = snandc->qspi->oob_buf;
++      oob_buf_start = oob_buf;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++
++      cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1;
++      ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++
++      snandc->regs->addr0 = snandc->qspi->addr1;
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++      snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
++
++      qcom_clear_bam_transaction(snandc);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1,
++                         NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++      for (i = 0; i < num_cw; i++) {
++              int data_size, oob_size;
++
++              if (i == (num_cw - 1)) {
++                      data_size = 512 - ((num_cw - 1) << 2);
++                      oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++                                  ecc_cfg->spare_bytes;
++              } else {
++                      data_size = ecc_cfg->cw_data;
++                      oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++              }
++
++              if (data_buf && oob_buf) {
++                      qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
++                      qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
++              } else if (data_buf) {
++                      qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
++              } else {
++                      qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
++              }
++
++              qcom_spi_config_cw_read(snandc, true, i);
++
++              if (data_buf)
++                      qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
++                                         data_size, 0);
++              if (oob_buf) {
++                      int j;
++
++                      for (j = 0; j < ecc_cfg->bbm_size; j++)
++                              *oob_buf++ = 0xff;
++
++                      qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
++                                         oob_buf, oob_size, 0);
++              }
++
++              if (data_buf)
++                      data_buf += data_size;
++              if (oob_buf)
++                      oob_buf += oob_size;
++      }
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to read page\n");
++              return ret;
++      }
++
++      return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
++}
++
++static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
++                                const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
++      int ret, i;
++      u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++
++      oob_buf = op->data.buf.in;
++      oob_buf_start = oob_buf;
++
++      data_buf_start = data_buf;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++
++      cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1;
++      ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++
++      snandc->regs->addr0 = snandc->qspi->addr1;
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++      snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++                         NAND_ERASED_CW_DETECT_CFG, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++                         NAND_ERASED_CW_DETECT_CFG, 1,
++                         NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++      for (i = 0; i < num_cw; i++) {
++              int data_size, oob_size;
++
++              if (i == (num_cw - 1)) {
++                      data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++                      oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++                                  ecc_cfg->spare_bytes;
++              } else {
++                      data_size = ecc_cfg->cw_data;
++                      oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++              }
++
++              qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
++
++              qcom_spi_config_cw_read(snandc, true, i);
++
++              if (oob_buf) {
++                      int j;
++
++                      for (j = 0; j < ecc_cfg->bbm_size; j++)
++                              *oob_buf++ = 0xff;
++
++                      qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
++                                         oob_buf, oob_size, 0);
++              }
++
++              if (oob_buf)
++                      oob_buf += oob_size;
++      }
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to read oob\n");
++              return ret;
++      }
++
++      return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
++}
++
++static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
++                            const struct spi_mem_op *op)
++{
++      if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
++              return qcom_spi_read_page_raw(snandc, op);
++
++      if (snandc->qspi->page_rw)
++              return qcom_spi_read_page_ecc(snandc, op);
++
++      if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
++              return qcom_spi_read_last_cw(snandc, op);
++
++      if (snandc->qspi->oob_rw)
++              return qcom_spi_read_page_oob(snandc, op);
++
++      return 0;
++}
++
++static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
++{
++      qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
++                         1, NAND_BAM_NEXT_SGL);
++}
++
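++/* Kick one queued codeword: write the command and EXEC registers, read
++ * back NAND_FLASH_STATUS, then queue the status-clear writes so the
++ * next codeword starts from a clean status.
++ */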
++static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
++{
++      qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++      qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++      qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++                         NAND_BAM_NEXT_SGL);
++}
++
++static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
++                              const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      struct mtd_info *mtd = snandc->qspi->mtd;
++      u8 *data_buf = NULL, *oob_buf = NULL;
++      int i, ret;
++      int num_cw = snandc->qspi->num_cw;
++      u32 cfg0, cfg1, ecc_bch_cfg;
++
++      cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++                      (num_cw - 1) << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1_raw;
++      ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
++
++      data_buf = snandc->qspi->data_buf;
++
++      oob_buf = snandc->qspi->oob_buf;
++      memset(oob_buf, 0xff, OOB_BUF_SIZE);
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++
++      snandc->regs->addr0 = snandc->qspi->addr1;
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++      snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_config_page_write(snandc);
++
++      for (i = 0; i < num_cw; i++) {
++              int data_size1, data_size2, oob_size1, oob_size2;
++              int reg_off = FLASH_BUF_ACC;
++
++              data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++              oob_size1 = ecc_cfg->bbm_size;
++
++              if (i == (num_cw - 1)) {
++                      data_size2 = NANDC_STEP_SIZE - data_size1 -
++                                   ((num_cw - 1) << 2);
++                      oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++                                  ecc_cfg->spare_bytes;
++              } else {
++                      data_size2 = ecc_cfg->cw_data - data_size1;
++                      oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++              }
++
++              qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
++                                  NAND_BAM_NO_EOT);
++              reg_off += data_size1;
++              data_buf += data_size1;
++
++              qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
++                                  NAND_BAM_NO_EOT);
++              oob_buf += oob_size1;
++              reg_off += oob_size1;
++
++              qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
++                                  NAND_BAM_NO_EOT);
++              reg_off += data_size2;
++              data_buf += data_size2;
++
++              qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
++              oob_buf += oob_size2;
++
++              qcom_spi_config_cw_write(snandc);
++      }
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to write raw page\n");
++              return ret;
++      }
++
++      return 0;
++}
++
++static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
++                              const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      u8 *data_buf = NULL, *oob_buf = NULL;
++      int i, ret;
++      int num_cw = snandc->qspi->num_cw;
++      u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
++
++      cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1;
++      ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++      ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
++
++      if (snandc->qspi->data_buf)
++              data_buf = snandc->qspi->data_buf;
++
++      oob_buf = snandc->qspi->oob_buf;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++
++      snandc->regs->addr0 = snandc->qspi->addr1;
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      qcom_spi_config_page_write(snandc);
++
++      for (i = 0; i < num_cw; i++) {
++              int data_size, oob_size;
++
++              if (i == (num_cw - 1)) {
++                      data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++                      oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++                                  ecc_cfg->spare_bytes;
++              } else {
++                      data_size = ecc_cfg->cw_data;
++                      oob_size = ecc_cfg->bytes;
++              }
++
++              if (data_buf)
++                      qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
++                                          i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
++
++              if (i == (num_cw - 1)) {
++                      if (oob_buf) {
++                              oob_buf += ecc_cfg->bbm_size;
++                              qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
++                                                  oob_buf, oob_size, 0);
++                      }
++              }
++
++              qcom_spi_config_cw_write(snandc);
++
++              if (data_buf)
++                      data_buf += data_size;
++              if (oob_buf)
++                      oob_buf += oob_size;
++      }
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to write page\n");
++              return ret;
++      }
++
++      return 0;
++}
++
++static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
++                              const struct spi_mem_op *op)
++{
++      struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++      u8 *oob_buf = NULL;
++      int ret, col, data_size, oob_size;
++      int num_cw = snandc->qspi->num_cw;
++      u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
++
++      cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++                              (num_cw - 1) << CW_PER_PAGE;
++      cfg1 = ecc_cfg->cfg1;
++      ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++      ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
++
++      col = ecc_cfg->cw_size * (num_cw - 1);
++
++      oob_buf = snandc->qspi->data_buf;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++      snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++      snandc->regs->addr1 = snandc->qspi->addr2;
++      snandc->regs->cmd = snandc->qspi->cmd;
++      snandc->regs->cfg0 = cpu_to_le32(cfg0);
++      snandc->regs->cfg1 = cpu_to_le32(cfg1);
++      snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++      snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
++      snandc->regs->exec = cpu_to_le32(1);
++
++      /* calculate the data and oob size for the last codeword/step */
++      data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++      oob_size = snandc->qspi->mtd->oobavail;
++
++      memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
++      /* override new oob content to last codeword */
++      mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
++                                  oob_buf, 0, snandc->qspi->mtd->oobavail);
++      qcom_spi_config_page_write(snandc);
++      qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
++      qcom_spi_config_cw_write(snandc);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret) {
++              dev_err(snandc->dev, "failure to write oob\n");
++              return ret;
++      }
++
++      return 0;
++}
++
++static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
++                                  const struct spi_mem_op *op)
++{
++      if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
++              return qcom_spi_program_raw(snandc, op);
++
++      if (snandc->qspi->page_rw)
++              return qcom_spi_program_ecc(snandc, op);
++
++      if (snandc->qspi->oob_rw)
++              return qcom_spi_program_oob(snandc, op);
++
++      return 0;
++}
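++
++/*
++ * Dispatch summary for qcom_spi_read_page() and qcom_spi_program_execute()
++ * above, derived from the qspi flag checks:
++ *
++ *   page_rw && raw_rw -> raw page access, ECC bypassed
++ *   page_rw           -> page access through the ECC engine
++ *   oob_rw && raw_rw  -> read of the last codeword only (read path)
++ *   oob_rw            -> OOB-only access
++ */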
++
++static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode)
++{
++      int cmd = 0x0;
++
++      switch (opcode) {
++      case SPINAND_RESET:
++              cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
++              break;
++      case SPINAND_READID:
++              cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
++              break;
++      case SPINAND_GET_FEATURE:
++              cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
++              break;
++      case SPINAND_SET_FEATURE:
++              cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
++                      QPIC_SET_FEATURE);
++              break;
++      case SPINAND_READ:
++              if (snandc->qspi->raw_rw) {
++                      cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++                                      SPI_WP | SPI_HOLD | OP_PAGE_READ);
++              } else {
++                      cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++                                      SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
++              }
++
++              break;
++      case SPINAND_ERASE:
++              cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
++                      SPI_HOLD | SPI_TRANSFER_MODE_x1;
++              break;
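++      /* WRITE_EN and PROGRAM_LOAD are not translated into QPIC command
++       * values; the callers handle them in software, so the raw SPI
++       * opcode is passed back unchanged.
++       */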
++      case SPINAND_WRITE_EN:
++              cmd = SPINAND_WRITE_EN;
++              break;
++      case SPINAND_PROGRAM_EXECUTE:
++              cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++                              SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
++              break;
++      case SPINAND_PROGRAM_LOAD:
++              cmd = SPINAND_PROGRAM_LOAD;
++              break;
++      default:
++              dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
++              return -EOPNOTSUPP;
++      }
++
++      return cmd;
++}
++
++static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
++                             const struct spi_mem_op *op)
++{
++      struct qpic_snand_op s_op = {};
++      int cmd;
++
++      cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
++      if (cmd < 0)
++              return cmd;
++
++      s_op.cmd_reg = cmd;
++
++      if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
++              snandc->qspi->data_buf = (u8 *)op->data.buf.out;
++
++      return 0;
++}
++
++static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
++                               const struct spi_mem_op *op)
++{
++      struct qpic_snand_op s_op = {};
++      int cmd;
++      int ret, opcode;
++
++      cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
++      if (cmd < 0)
++              return cmd;
++
++      s_op.cmd_reg = cmd;
++      s_op.addr1_reg = op->addr.val;
++      s_op.addr2_reg = 0;
++
++      opcode = op->cmd.opcode;
++
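++      /* For the page-addressed opcodes below, the page number from
++       * op->addr.val is packed with its low 16 bits in ADDR0[31:16]
++       * and the remaining bits in ADDR1; the column half of ADDR0
++       * stays zero.
++       */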
++      switch (opcode) {
++      case SPINAND_WRITE_EN:
++              return 0;
++      case SPINAND_PROGRAM_EXECUTE:
++              s_op.addr1_reg = op->addr.val << 16;
++              s_op.addr2_reg = op->addr.val >> 16 & 0xff;
++              snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
++              snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++              snandc->qspi->cmd = cpu_to_le32(cmd);
++              return qcom_spi_program_execute(snandc, op);
++      case SPINAND_READ:
++              s_op.addr1_reg = (op->addr.val << 16);
++              s_op.addr2_reg = op->addr.val >> 16 & 0xff;
++              snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
++              snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++              snandc->qspi->cmd = cpu_to_le32(cmd);
++              return 0;
++      case SPINAND_ERASE:
++              s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
++              s_op.addr1_reg = op->addr.val;
++              snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
++              snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++              snandc->qspi->cmd = cpu_to_le32(cmd);
++              qcom_spi_block_erase(snandc);
++              return 0;
++      default:
++              break;
++      }
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++
++      snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
++      snandc->regs->exec = cpu_to_le32(1);
++      snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
++      snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
++
++      qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
++      qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++      ret = qcom_submit_descs(snandc);
++      if (ret)
++              dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
++
++      return ret;
++}
++
++static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
++{
++      int ret, val, opcode;
++      bool copy = false, copy_ftr = false;
++
++      ret = qcom_spi_send_cmdaddr(snandc, op);
++      if (ret)
++              return ret;
++
++      snandc->buf_count = 0;
++      snandc->buf_start = 0;
++      qcom_clear_read_regs(snandc);
++      qcom_clear_bam_transaction(snandc);
++      opcode = op->cmd.opcode;
++
++      switch (opcode) {
++      case SPINAND_READID:
++              snandc->buf_count = 4;
++              qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++              copy = true;
++              break;
++      case SPINAND_GET_FEATURE:
++              snandc->buf_count = 4;
++              qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
++              copy_ftr = true;
++              break;
++      case SPINAND_SET_FEATURE:
++              snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
++              qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
++                                 NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
++              break;
++      case SPINAND_PROGRAM_EXECUTE:
++      case SPINAND_WRITE_EN:
++      case SPINAND_RESET:
++      case SPINAND_ERASE:
++      case SPINAND_READ:
++              return 0;
++      default:
++              return -EOPNOTSUPP;
++      }
++
++      ret = qcom_submit_descs(snandc);
++      if (ret)
++              dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
++
++      if (copy) {
++              qcom_nandc_dev_to_mem(snandc, true);
++              memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
++      }
++
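++      /* The GET_FEATURE status byte lands in bits 15:8 of the register
++       * read back from the controller, hence the shift below before
++       * copying it out.
++       */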
++      if (copy_ftr) {
++              qcom_nandc_dev_to_mem(snandc, true);
++              val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
++              val >>= 8;
++              memcpy(op->data.buf.in, &val, snandc->buf_count);
++      }
++
++      return ret;
++}
++
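++/* Page ops are the wide page-cache transfers routed to the read/program
++ * paths: quad address+data reads, x1 accesses with a 2-byte column
++ * address, and any quad-data write.
++ */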
++static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
++{
++      if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
++              return false;
++
++      if (op->data.dir == SPI_MEM_DATA_IN) {
++              if (op->addr.buswidth == 4 && op->data.buswidth == 4)
++                      return true;
++
++              if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
++                      return true;
++
++      } else if (op->data.dir == SPI_MEM_DATA_OUT) {
++              if (op->data.buswidth == 4)
++                      return true;
++              if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
++                      return true;
++      }
++
++      return false;
++}
++
++static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
++{
++      if (!spi_mem_default_supports_op(mem, op))
++              return false;
++
++      if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
++              return false;
++
++      if (qcom_spi_is_page_op(op))
++              return true;
++
++      return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
++              (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
++              (!op->data.nbytes || op->data.buswidth == 1));
++}
++
++static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
++{
++      struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
++
++      dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u\n", op->cmd.opcode,
++              op->addr.val, op->addr.buswidth, op->addr.nbytes,
++              op->data.buswidth, op->data.nbytes);
++
++      if (qcom_spi_is_page_op(op)) {
++              if (op->data.dir == SPI_MEM_DATA_IN)
++                      return qcom_spi_read_page(snandc, op);
++              if (op->data.dir == SPI_MEM_DATA_OUT)
++                      return qcom_spi_write_page(snandc, op);
++      } else {
++              return qcom_spi_io_op(snandc, op);
++      }
++
++      return 0;
++}
++
++static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
++      .supports_op = qcom_spi_supports_op,
++      .exec_op = qcom_spi_exec_op,
++};
++
++static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
++      .ecc = true,
++};
++
++static int qcom_spi_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct spi_controller *ctlr;
++      struct qcom_nand_controller *snandc;
++      struct qpic_spi_nand *qspi;
++      struct qpic_ecc *ecc;
++      struct resource *res;
++      const void *dev_data;
++      int ret;
++
++      ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
++      if (!ecc)
++              return -ENOMEM;
++
++      qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
++      if (!qspi)
++              return -ENOMEM;
++
++      ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
++      if (!ctlr)
++              return -ENOMEM;
++
++      platform_set_drvdata(pdev, ctlr);
++
++      snandc = spi_controller_get_devdata(ctlr);
++      qspi->snandc = snandc;
++
++      snandc->dev = dev;
++      snandc->qspi = qspi;
++      snandc->qspi->ctlr = ctlr;
++      snandc->qspi->ecc = ecc;
++
++      dev_data = of_device_get_match_data(dev);
++      if (!dev_data) {
++              dev_err(&pdev->dev, "failed to get device data\n");
++              return -ENODEV;
++      }
++
++      snandc->props = dev_data;
++      snandc->dev = &pdev->dev;
++
++      snandc->core_clk = devm_clk_get(dev, "core");
++      if (IS_ERR(snandc->core_clk))
++              return PTR_ERR(snandc->core_clk);
++
++      snandc->aon_clk = devm_clk_get(dev, "aon");
++      if (IS_ERR(snandc->aon_clk))
++              return PTR_ERR(snandc->aon_clk);
++
++      snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
++      if (IS_ERR(snandc->qspi->iomacro_clk))
++              return PTR_ERR(snandc->qspi->iomacro_clk);
++
++      snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
++      if (IS_ERR(snandc->base))
++              return PTR_ERR(snandc->base);
++
++      snandc->base_phys = res->start;
++      snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
++                                          DMA_BIDIRECTIONAL, 0);
++      if (dma_mapping_error(dev, snandc->base_dma))
++              return -ENXIO;
++
++      ret = clk_prepare_enable(snandc->core_clk);
++      if (ret)
++              goto err_dis_core_clk;
++
++      ret = clk_prepare_enable(snandc->aon_clk);
++      if (ret)
++              goto err_dis_aon_clk;
++
++      ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
++      if (ret)
++              goto err_dis_iom_clk;
++
++      ret = qcom_nandc_alloc(snandc);
++      if (ret)
++              goto err_snand_alloc;
++
++      ret = qcom_spi_init(snandc);
++      if (ret)
++              goto err_spi_init;
++
++      /* setup ECC engine */
++      snandc->qspi->ecc_eng.dev = &pdev->dev;
++      snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
++      snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
++      snandc->qspi->ecc_eng.priv = snandc;
++
++      ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
++      if (ret) {
++              dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
++              goto err_spi_init;
++      }
++
++      ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
++      ctlr->mem_ops = &qcom_spi_mem_ops;
++      ctlr->mem_caps = &qcom_spi_mem_caps;
++      ctlr->dev.of_node = pdev->dev.of_node;
++      ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
++                          SPI_TX_QUAD | SPI_RX_QUAD;
++
++      ret = spi_register_controller(ctlr);
++      if (ret) {
++              dev_err(&pdev->dev, "spi_register_controller failed.\n");
++              goto err_spi_init;
++      }
++
++      return 0;
++
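++/* The labels below are reached via goto when a probe step fails; each
++ * one unwinds only what had already been set up successfully.
++ */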
++err_spi_init:
++      qcom_nandc_unalloc(snandc);
++err_snand_alloc:
++      clk_disable_unprepare(snandc->qspi->iomacro_clk);
++err_dis_iom_clk:
++      clk_disable_unprepare(snandc->aon_clk);
++err_dis_aon_clk:
++      clk_disable_unprepare(snandc->core_clk);
++err_dis_core_clk:
++      dma_unmap_resource(dev, res->start, resource_size(res),
++                         DMA_BIDIRECTIONAL, 0);
++      return ret;
++}
++
++static void qcom_spi_remove(struct platform_device *pdev)
++{
++      struct spi_controller *ctlr = platform_get_drvdata(pdev);
++      struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
++      struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++      spi_unregister_controller(ctlr);
++
++      qcom_nandc_unalloc(snandc);
++
++      clk_disable_unprepare(snandc->aon_clk);
++      clk_disable_unprepare(snandc->core_clk);
++      clk_disable_unprepare(snandc->qspi->iomacro_clk);
++
++      dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
++                         DMA_BIDIRECTIONAL, 0);
++}
++
++static const struct qcom_nandc_props ipq9574_snandc_props = {
++      .dev_cmd_reg_start = 0x7000,
++      .supports_bam = true,
++};
++
++static const struct of_device_id qcom_snandc_of_match[] = {
++      {
++              .compatible = "qcom,spi-qpic-snand",
++              .data = &ipq9574_snandc_props,
++      },
++      {}
++};
++MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
++
++static struct platform_driver qcom_spi_driver = {
++      .driver = {
++              .name           = "qcom_snand",
++              .of_match_table = qcom_snandc_of_match,
++      },
++      .probe = qcom_spi_probe,
++      .remove_new = qcom_spi_remove,
++};
++module_platform_driver(qcom_spi_driver);
++
++MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
++MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
++MODULE_LICENSE("GPL");
++
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -322,6 +322,10 @@ struct nandc_regs {
+       __le32 read_location_last1;
+       __le32 read_location_last2;
+       __le32 read_location_last3;
++      __le32 spi_cfg;
++      __le32 num_addr_cycle;
++      __le32 busy_wait_cnt;
++      __le32 flash_feature;
+       __le32 erased_cw_detect_cfg_clr;
+       __le32 erased_cw_detect_cfg_set;
+@@ -336,6 +340,7 @@ struct nandc_regs {
+  *
+  * @core_clk:                 controller clock
+  * @aon_clk:                  another controller clock
++ * @iomacro_clk:              io macro clock
+  *
+  * @regs:                     a contiguous chunk of memory for DMA register
+  *                            writes. contains the register values to be
+@@ -345,6 +350,7 @@ struct nandc_regs {
+  *                            initialized via DT match data
+  *
+  * @controller:                       base controller structure
++ * @qspi:                     qpic spi structure
+  * @host_list:                        list containing all the chips attached to the
+  *                            controller
+  *
+@@ -389,6 +395,7 @@ struct qcom_nand_controller {
+       const struct qcom_nandc_props *props;
+       struct nand_controller *controller;
++      struct qpic_spi_nand *qspi;
+       struct list_head host_list;
+       union {
diff --git a/target/linux/qualcommbe/patches-6.6/101-arm64-dts-qcom-ipq9574-Add-SPI-nand-support.patch b/target/linux/qualcommbe/patches-6.6/101-arm64-dts-qcom-ipq9574-Add-SPI-nand-support.patch
new file mode 100644 (file)
index 0000000..aa211ee
--- /dev/null
@@ -0,0 +1,144 @@
+From 968c5e8220209eb2185654f01748c349515a3b8e Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Thu, 15 Feb 2024 12:26:40 +0530
+Subject: [PATCH v10 7/8] arm64: dts: qcom: ipq9574: Add SPI nand support
+
+Add SPI NAND support for ipq9574 SoC.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change 
+
+Change in [v9]
+
+* No change
+
+Change in [v8]
+
+* No change
+
+Change in [v7]
+
+* No change
+
+Change in [v6]
+
+* No change
+
+Change in [v5]
+
+* No change
+
+Change in [v4]
+
+* No change
+
+Change in [v3]
+
+* Updated gpio number as per pin control driver
+
+* Fixed alignment issue
+
+Change in [v2]
+
+* Added initial enablement for spi-nand 
+
+Change in [v1]
+
+* Posted as RFC patch for design review
+
+ .../boot/dts/qcom/ipq9574-rdp-common.dtsi     | 43 +++++++++++++++++++
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi         | 27 ++++++++++++
+ 2 files changed, 70 insertions(+)
+
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+@@ -121,6 +121,49 @@
+                       bias-pull-down;
+               };
+       };
++
++      qpic_snand_default_state: qpic-snand-default-state {
++              clock-pins {
++                      pins = "gpio5";
++                      function = "qspi_clk";
++                      drive-strength = <8>;
++                      bias-disable;
++              };
++
++              cs-pins {
++                      pins = "gpio4";
++                      function = "qspi_cs";
++                      drive-strength = <8>;
++                      bias-disable;
++              };
++
++              data-pins {
++                      pins = "gpio0", "gpio1", "gpio2", "gpio3";
++                      function = "qspi_data";
++                      drive-strength = <8>;
++                      bias-disable;
++              };
++      };
++};
++
++&qpic_bam {
++      status = "okay";
++};
++
++&qpic_nand {
++      pinctrl-0 = <&qpic_snand_default_state>;
++      pinctrl-names = "default";
++      status = "okay";
++
++      flash@0 {
++              compatible = "spi-nand";
++              reg = <0>;
++              #address-cells = <1>;
++              #size-cells = <1>;
++              nand-ecc-engine = <&qpic_nand>;
++              nand-ecc-strength = <4>;
++              nand-ecc-step-size = <512>;
++      };
+ };
+ &usb_0_dwc3 {
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -300,6 +300,33 @@
+                       reg = <0x01937000 0x21000>;
+               };
++              qpic_bam: dma-controller@7984000 {
++                      compatible = "qcom,bam-v1.7.0";
++                      reg = <0x7984000 0x1c000>;
++                      interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
++                      clocks = <&gcc GCC_QPIC_AHB_CLK>;
++                      clock-names = "bam_clk";
++                      #dma-cells = <1>;
++                      qcom,ee = <0>;
++                      status = "disabled";
++              };
++
++              qpic_nand: spi@79b0000 {
++                      compatible = "qcom,spi-qpic-snand", "qcom,ipq9574-nand";
++                      reg = <0x79b0000 0x10000>;
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      clocks = <&gcc GCC_QPIC_CLK>,
++                               <&gcc GCC_QPIC_AHB_CLK>,
++                               <&gcc GCC_QPIC_IO_MACRO_CLK>;
++                      clock-names = "core", "aon", "iom";
++                      dmas = <&qpic_bam 0>,
++                             <&qpic_bam 1>,
++                             <&qpic_bam 2>;
++                      dma-names = "tx", "rx", "cmd";
++                      status = "disabled";
++              };
++
+               sdhc_1: mmc@7804000 {
+                       compatible = "qcom,ipq9574-sdhci", "qcom,sdhci-msm-v5";
+                       reg = <0x07804000 0x1000>, <0x07805000 0x1000>;
diff --git a/target/linux/qualcommbe/patches-6.6/102-arm64-dts-qcom-ipq9574-Disable-eMMC-node.patch b/target/linux/qualcommbe/patches-6.6/102-arm64-dts-qcom-ipq9574-Disable-eMMC-node.patch
new file mode 100644 (file)
index 0000000..57a16f7
--- /dev/null
@@ -0,0 +1,65 @@
+From a28a71e2a4728ec4f1f4a6b28595b664a1a49e4b Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 7 Feb 2024 16:05:27 +0530
+Subject: [PATCH v10 8/8] arm64: dts: qcom: ipq9574: Disable eMMC node
+
+Disable the eMMC node for rdp433, since the default
+boot mode of rdp433 is norplusnand.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+---
+
+Change in [v10]
+
+* No change
+
+Change in [v9]
+
+* No change
+
+Change in [v8]
+
+* No change
+
+Change in [v7]
+
+* No Change
+
+Change in [v6]
+
+* Updated commit message
+
+Change in [v5]
+
+* No Change
+
+Change in [v4]
+
+* No change
+
+Change in [v3]
+
+* Removed Co-developed-by tag
+
+Change in [v2]
+
+* Posted as initial eMMC disable patch
+
+Change in [v1]
+
+* This patch was not included in v1
+
+ arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+@@ -82,7 +82,7 @@
+       mmc-hs400-enhanced-strobe;
+       max-frequency = <384000000>;
+       bus-width = <8>;
+-      status = "okay";
++      status = "disabled";
+ };
+ &sleep_clk {
diff --git a/target/linux/qualcommbe/patches-6.6/103-01-dt-bindings-net-Document-Qualcomm-QCA8084-PHY-packag.patch b/target/linux/qualcommbe/patches-6.6/103-01-dt-bindings-net-Document-Qualcomm-QCA8084-PHY-packag.patch
new file mode 100644 (file)
index 0000000..260df88
--- /dev/null
@@ -0,0 +1,253 @@
+From 9e76817056937645205f23ee91e762d5cff5e848 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Mon, 29 Jan 2024 17:57:20 +0800
+Subject: [PATCH 01/50] dt-bindings: net: Document Qualcomm QCA8084 PHY package
+
+QCA8084 is a quad PHY chip that integrates 4 PHYs, 2 PCS
+interfaces (PCS0 and PCS1) and a clock controller; it can
+also be integrated into the switch chip named QCA8386.
+
+1. The MDIO addresses of the 4 PHYs, 2 PCSes and 1 XPCS (PCS1
+   includes PCS and XPCS, PCS0 includes only PCS) can be configured.
+2. The PHY package mode is optionally configured so that the
+   interface modes of the two PCSes work correctly.
+3. The package-level clock and reset need to be initialized.
+4. The clock and reset of each PHY device need to be initialized
+   so that the PHY registers can be accessed.
+
+Change-Id: Idb2338d2673152cbd3c57e95968faa59e9d4a80f
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../devicetree/bindings/net/qcom,qca8084.yaml | 198 ++++++++++++++++++
+ include/dt-bindings/net/qcom,qca808x.h        |  14 ++
+ 2 files changed, 212 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/net/qcom,qca8084.yaml
+ create mode 100644 include/dt-bindings/net/qcom,qca808x.h
+
+diff --git a/Documentation/devicetree/bindings/net/qcom,qca8084.yaml b/Documentation/devicetree/bindings/net/qcom,qca8084.yaml
+new file mode 100644
+index 000000000000..efa1fa4ebfdc
+--- /dev/null
++++ b/Documentation/devicetree/bindings/net/qcom,qca8084.yaml
+@@ -0,0 +1,198 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/net/qcom,qca8084.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm QCA8084 Ethernet Quad PHY
++
++maintainers:
++  - Luo Jie <quic_luoj@quicinc.com>
++
++description:
++  Qualcomm QCA8084 is a four-port Ethernet transceiver; each
++  Ethernet port supports link speeds of 10/100/1000/2500 Mbps.
++  Two PCSes (PCS0 and PCS1) are integrated in the PHY package:
++  PCS1 includes an XPCS and a PCS to support the interface modes
++  10G-QXGMII and SGMII, while PCS0 includes a PCS to support the
++  interface mode SGMII only. A clock controller is also
++  integrated in the PHY package. This four-port Ethernet
++  transceiver can also be integrated into the switch chip named
++  QCA8386. The PHY package mode needs to be set to the correct
++  value so that the interface modes of the two PCSes apply, as
++  described below.
++
++  QCA8084 expects a 50 MHz input reference clock as the clock
++  source of the integrated clock controller; the integrated
++  clock controller supplies the clocks and resets to the
++  integrated PHY, PCS and PHY package.
++
++  - |
++                 +--| |--+-------------------+--| |--+
++                 | PCS1  |<------------+---->| PCS0  |
++                 +-------+             |     +-------+
++                 |                     |             |
++    Ref 50M clk  +--------+            |             |
++    ------------>|        | clk & rst  |             |
++    GPIO Reset   |QCA8K_CC+------------+             |
++    ------------>|        |            |             |
++                 +--------+            |             |
++                 |                     V             |
++                 +--------+--------+--------+--------+
++                 |  PHY0  |  PHY1  |  PHY2  |  PHY3  |
++                 +--------+--------+--------+--------+
++
++$ref: ethernet-phy-package.yaml#
++
++properties:
++  compatible:
++    const: qcom,qca8084-package
++
++  clocks:
++    description: PHY package level initial common clocks, which
++      need to be enabled after GPIO reset on the PHY package. These
++      clocks are supplied by the PHY integrated clock controller
++      (QCA8K-CC).
++    items:
++      - description: APB bridge clock
++      - description: AHB clock
++      - description: Security control clock
++      - description: TLMM clock
++      - description: TLMM AHB clock
++      - description: CNOC AHB clock
++      - description: MDIO AHB clock
++
++  clock-names:
++    items:
++      - const: apb_bridge
++      - const: ahb
++      - const: sec_ctrl_ahb
++      - const: tlmm
++      - const: tlmm_ahb
++      - const: cnoc_ahb
++      - const: mdio_ahb
++
++  resets:
++    description: PHY package level initial common reset, which
++      needs to be deasserted after GPIO reset on the PHY package.
++      This reset is provided by the PHY integrated clock controller
++      to perform the PHY DSP reset.
++    maxItems: 1
++
++  qcom,package-mode:
++    description: |
++      The PHY package mode can be configured as one of 3 modes to
++      apply the combination of the interface modes of the two PCSes
++      correctly. This value should be one of the values defined in
++      dt-bindings/net/qcom,qca808x.h. The package mode 10G-QXGMII of
++      the Quad PHY is used by default.
++
++      package mode             PCS1             PCS0
++      phy mode (0)             10G-QXGMII for   not used
++                               PHY0-PHY3
++
++      switch mode (1)          SGMII for        SGMII for
++                               switch MAC0      switch MAC5 (optional)
++
++      switch bypass MAC5 (2)   SGMII for        SGMII for
++                               switch MAC0      PHY3
++    $ref: /schemas/types.yaml#/definitions/uint32
++    enum: [0, 1, 2]
++    default: 0
++
++  qcom,phy-addr-fixup:
++    description: MDIO addresses for PHY0-PHY3, PCS0 and PCS1 (including
++      PCS and XPCS), which can be optionally customized by programming
++      the security control register of the PHY package. The hardware
++      default MDIO addresses of PHY0-PHY3, PCS0 and PCS1 including PCS
++      and XPCS are 0-6.
++    $ref: /schemas/types.yaml#/definitions/uint32-array
++    minItems: 7
++    maxItems: 7
++
++patternProperties:
++  ^ethernet-phy(@[a-f0-9]+)?$:
++    $ref: ethernet-phy.yaml#
++
++    properties:
++      compatible:
++        const: ethernet-phy-id004d.d180
++
++    required:
++      - compatible
++      - reg
++      - clocks
++      - resets
++
++    unevaluatedProperties: false
++
++required:
++  - compatible
++  - clocks
++  - clock-names
++  - resets
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    #include <dt-bindings/clock/qcom,qca8k-nsscc.h>
++    #include <dt-bindings/net/qcom,qca808x.h>
++    #include <dt-bindings/reset/qcom,qca8k-nsscc.h>
++
++    mdio {
++        #address-cells = <1>;
++        #size-cells = <0>;
++
++        ethernet-phy-package@1 {
++            #address-cells = <1>;
++            #size-cells = <0>;
++            compatible = "qcom,qca8084-package";
++            reg = <1>;
++            clocks = <&qca8k_nsscc NSS_CC_APB_BRIDGE_CLK>,
++                     <&qca8k_nsscc NSS_CC_AHB_CLK>,
++                     <&qca8k_nsscc NSS_CC_SEC_CTRL_AHB_CLK>,
++                     <&qca8k_nsscc NSS_CC_TLMM_CLK>,
++                     <&qca8k_nsscc NSS_CC_TLMM_AHB_CLK>,
++                     <&qca8k_nsscc NSS_CC_CNOC_AHB_CLK>,
++                     <&qca8k_nsscc NSS_CC_MDIO_AHB_CLK>;
++            clock-names = "apb_bridge",
++                          "ahb",
++                          "sec_ctrl_ahb",
++                          "tlmm",
++                          "tlmm_ahb",
++                          "cnoc_ahb",
++                          "mdio_ahb";
++            resets = <&qca8k_nsscc NSS_CC_GEPHY_FULL_ARES>;
++            qcom,package-mode = <QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC>;
++            qcom,phy-addr-fixup = <1 2 3 4 5 6 7>;
++
++            ethernet-phy@1 {
++                compatible = "ethernet-phy-id004d.d180";
++                reg = <1>;
++                clocks = <&qca8k_nsscc NSS_CC_GEPHY0_SYS_CLK>;
++                resets = <&qca8k_nsscc NSS_CC_GEPHY0_SYS_ARES>;
++            };
++
++            ethernet-phy@2 {
++                compatible = "ethernet-phy-id004d.d180";
++                reg = <2>;
++                clocks = <&qca8k_nsscc NSS_CC_GEPHY1_SYS_CLK>;
++                resets = <&qca8k_nsscc NSS_CC_GEPHY1_SYS_ARES>;
++            };
++
++            ethernet-phy@3 {
++                compatible = "ethernet-phy-id004d.d180";
++                reg = <3>;
++                clocks = <&qca8k_nsscc NSS_CC_GEPHY2_SYS_CLK>;
++                resets = <&qca8k_nsscc NSS_CC_GEPHY2_SYS_ARES>;
++            };
++
++            ethernet-phy@4 {
++                compatible = "ethernet-phy-id004d.d180";
++                reg = <4>;
++                clocks = <&qca8k_nsscc NSS_CC_GEPHY3_SYS_CLK>;
++                resets = <&qca8k_nsscc NSS_CC_GEPHY3_SYS_ARES>;
++            };
++        };
++    };
+diff --git a/include/dt-bindings/net/qcom,qca808x.h b/include/dt-bindings/net/qcom,qca808x.h
+new file mode 100644
+index 000000000000..c3a2830445ea
+--- /dev/null
++++ b/include/dt-bindings/net/qcom,qca808x.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree constants for the Qualcomm QCA808X PHYs
++ */
++
++#ifndef _DT_BINDINGS_QCOM_QCA808X_H
++#define _DT_BINDINGS_QCOM_QCA808X_H
++
++/* PHY package modes of QCA8084 to apply the interface modes of two PCSes. */
++#define QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED          0
++#define QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC         1
++#define QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_PHY         2
++
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-02-net-phy-qca808x-Add-QCA8084-ethernet-phy-support.patch b/target/linux/qualcommbe/patches-6.6/103-02-net-phy-qca808x-Add-QCA8084-ethernet-phy-support.patch
new file mode 100644 (file)
index 0000000..0e84685
--- /dev/null
@@ -0,0 +1,138 @@
+From 9dec04efa81322029e210281b1753a2eb5279e27 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 6 Apr 2023 18:09:07 +0800
+Subject: [PATCH 02/50] net: phy: qca808x: Add QCA8084 ethernet phy support
+
+Add support for the QCA8084 Quad-PHY, a four-port PHY with a
+maximum link speed of 2.5 Gbps. The features of each port are
+almost the same as the QCA8081's. The slave seed and fast retrain
+configs are not needed for QCA8084. It includes two PCSes.
+
+PCS0 of QCA8084 supports the interface modes:
+PHY_INTERFACE_MODE_2500BASEX and PHY_INTERFACE_MODE_SGMII.
+
+PCS1 of QCA8084 supports the interface modes:
+PHY_INTERFACE_MODE_10G_QXGMII, PHY_INTERFACE_MODE_2500BASEX and
+PHY_INTERFACE_MODE_SGMII.
+
+Additional CDT configurations are needed for QCA8084 compared
+with QCA8081.
+
+Change-Id: I12555fa70662682474ab4432204405b5e752fef6
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 62 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 60 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 5048304ccc9e..be46d16ca09f 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -86,9 +86,16 @@
+ #define QCA8081_PHY_FIFO_RSTN                 BIT(11)
+ #define QCA8081_PHY_ID                                0x004dd101
++#define QCA8084_PHY_ID                                0x004dd180
++
++#define QCA8084_MMD3_CDT_PULSE_CTRL           0x8075
++#define QCA8084_CDT_PULSE_THRESH_VAL          0xa060
++
++#define QCA8084_MMD3_CDT_NEAR_CTRL            0x807f
++#define QCA8084_CDT_NEAR_BYPASS                       BIT(15)
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+-MODULE_AUTHOR("Matus Ujhelyi");
++MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+ struct qca808x_priv {
+@@ -153,7 +160,9 @@ static bool qca808x_is_prefer_master(struct phy_device *phydev)
+ static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+ {
+-      return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
++      return phydev_id_compare(phydev, QCA8081_PHY_ID) &&
++              linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++                                phydev->supported);
+ }
+ static bool qca808x_is_1g_only(struct phy_device *phydev)
+@@ -273,6 +282,23 @@ static int qca808x_read_status(struct phy_device *phydev)
+               return ret;
+       if (phydev->link) {
++              /* There are two PCSes available for QCA8084, which support
++               * the following interface modes.
++               *
++               * 1. PHY_INTERFACE_MODE_10G_QXGMII utilizes PCS1 for all
++               * available 4 ports, which is for all link speeds.
++               *
++               * 2. PHY_INTERFACE_MODE_2500BASEX utilizes PCS0 for the
++               * fourth port, which is only for the link speed 2500M, the
++               * same as QCA8081.
++               *
++               * 3. PHY_INTERFACE_MODE_SGMII utilizes PCS0 for the fourth
++               * port, which is for the link speeds 10M, 100M and 1000M,
++               * the same as QCA8081.
++               */
++              if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
++                      return 0;
++
+               if (phydev->speed == SPEED_2500)
+                       phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+               else
+@@ -352,6 +378,18 @@ static int qca808x_cable_test_start(struct phy_device *phydev)
+       phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+       phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
++      if (phydev_id_compare(phydev, QCA8084_PHY_ID)) {
++              /* Adjust the positive and negative pulse threshold of CDT. */
++              phy_write_mmd(phydev, MDIO_MMD_PCS,
++                            QCA8084_MMD3_CDT_PULSE_CTRL,
++                            QCA8084_CDT_PULSE_THRESH_VAL);
++
++              /* Disable the near bypass of CDT. */
++              phy_modify_mmd(phydev, MDIO_MMD_PCS,
++                             QCA8084_MMD3_CDT_NEAR_CTRL,
++                             QCA8084_CDT_NEAR_BYPASS, 0);
++      }
++
+       return 0;
+ }
+@@ -651,12 +689,32 @@ static struct phy_driver qca808x_driver[] = {
+       .led_hw_control_set     = qca808x_led_hw_control_set,
+       .led_hw_control_get     = qca808x_led_hw_control_get,
+       .led_polarity_set       = qca808x_led_polarity_set,
++}, {
++      /* Qualcomm QCA8084 */
++      PHY_ID_MATCH_MODEL(QCA8084_PHY_ID),
++      .name                   = "Qualcomm QCA8084",
++      .flags                  = PHY_POLL_CABLE_TEST,
++      .config_intr            = at803x_config_intr,
++      .handle_interrupt       = at803x_handle_interrupt,
++      .get_tunable            = at803x_get_tunable,
++      .set_tunable            = at803x_set_tunable,
++      .set_wol                = at803x_set_wol,
++      .get_wol                = at803x_get_wol,
++      .get_features           = qca808x_get_features,
++      .config_aneg            = qca808x_config_aneg,
++      .suspend                = genphy_suspend,
++      .resume                 = genphy_resume,
++      .read_status            = qca808x_read_status,
++      .soft_reset             = qca808x_soft_reset,
++      .cable_test_start       = qca808x_cable_test_start,
++      .cable_test_get_status  = qca808x_cable_test_get_status,
+ }, };
+ module_phy_driver(qca808x_driver);
+ static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+       { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
++      { PHY_ID_MATCH_MODEL(QCA8084_PHY_ID) },
+       { }
+ };
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-03-net-phy-qca808x-Add-config_init-function-for-QCA8084.patch b/target/linux/qualcommbe/patches-6.6/103-03-net-phy-qca808x-Add-config_init-function-for-QCA8084.patch
new file mode 100644 (file)
index 0000000..f55e08d
--- /dev/null
@@ -0,0 +1,90 @@
+From fd5ec7c0a9f7167baf377a4bbae72eda391df996 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 8 Nov 2023 16:18:02 +0800
+Subject: [PATCH 03/50] net: phy: qca808x: Add config_init function for QCA8084
+
+1. The ADC of the QCA8084 PHY must be configured as edge inverted
+and falling whenever it is initialized or reset. In addition,
+the default MSE (mean square error) threshold value is adjusted;
+it comes into play during link partner detection to detect a
+valid link signal.
+
+2. Add the possible interface modes.
+   When QCA8084 works on the interface mode SGMII or 2500BASE-X, the
+   interface mode can be switched according to the PHY link speed.
+
+   When QCA8084 works on the 10G-QXGMII mode, which will be the only
+   possible interface mode.
+
+Change-Id: I832c0d0b069e95cc411a8a7b680a5f60e1d6041a
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 38 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index be46d16ca09f..c88fa59d4029 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -94,6 +94,15 @@
+ #define QCA8084_MMD3_CDT_NEAR_CTRL            0x807f
+ #define QCA8084_CDT_NEAR_BYPASS                       BIT(15)
++/* QCA8084 ADC clock edge */
++#define QCA8084_ADC_CLK_SEL                   0x8b80
++#define QCA8084_ADC_CLK_SEL_ACLK              GENMASK(7, 4)
++#define QCA8084_ADC_CLK_SEL_ACLK_FALL         0xf
++#define QCA8084_ADC_CLK_SEL_ACLK_RISE         0x0
++
++#define QCA8084_MSE_THRESHOLD                 0x800a
++#define QCA8084_MSE_THRESHOLD_2P5G_VAL                0x51c6
++
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+@@ -660,6 +669,34 @@ static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+                             active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+ }
++static int qca8084_config_init(struct phy_device *phydev)
++{
++      int ret;
++
++      if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
++              __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
++                        phydev->possible_interfaces);
++      else
++              qca808x_fill_possible_interfaces(phydev);
++
++      /* Configure the ADC to convert the signal using falling edge
++       * instead of the default rising edge.
++       */
++      ret = at803x_debug_reg_mask(phydev, QCA8084_ADC_CLK_SEL,
++                                  QCA8084_ADC_CLK_SEL_ACLK,
++                                  FIELD_PREP(QCA8084_ADC_CLK_SEL_ACLK,
++                                             QCA8084_ADC_CLK_SEL_ACLK_FALL));
++      if (ret < 0)
++              return ret;
++
++      /* Adjust MSE threshold value to avoid link issues with
++       * some link partners.
++       */
++      return phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
++                           QCA8084_MSE_THRESHOLD,
++                           QCA8084_MSE_THRESHOLD_2P5G_VAL);
++}
++
+ static struct phy_driver qca808x_driver[] = {
+ {
+       /* Qualcomm QCA8081 */
+@@ -708,6 +745,7 @@ static struct phy_driver qca808x_driver[] = {
+       .soft_reset             = qca808x_soft_reset,
+       .cable_test_start       = qca808x_cable_test_start,
+       .cable_test_get_status  = qca808x_cable_test_get_status,
++      .config_init            = qca8084_config_init,
+ }, };
+ module_phy_driver(qca808x_driver);
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-04-net-phy-qca808x-Add-link_change_notify-function-for-.patch b/target/linux/qualcommbe/patches-6.6/103-04-net-phy-qca808x-Add-link_change_notify-function-for-.patch
new file mode 100644 (file)
index 0000000..2510ec3
--- /dev/null
@@ -0,0 +1,95 @@
+From d9b391e7b695b7de04c4363b5ec9ffaaed387353 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 8 Nov 2023 18:01:14 +0800
+Subject: [PATCH 04/50] net: phy: qca808x: Add link_change_notify function for
+ QCA8084
+
+When the link changes, QCA8084 needs to reset the FIFO and, for a
+10G-QXGMII link at 1000M speed, adjust the IPG level.
+
+Change-Id: I21de802c78496fb95f1c5119fe3894c9fdebbd65
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 52 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 52 insertions(+)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index c88fa59d4029..029d5f9de6b8 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -103,6 +103,14 @@
+ #define QCA8084_MSE_THRESHOLD                 0x800a
+ #define QCA8084_MSE_THRESHOLD_2P5G_VAL                0x51c6
++/* QCA8084 FIFO reset control */
++#define QCA8084_FIFO_CONTROL                  0x19
++#define QCA8084_FIFO_MAC_2_PHY                        BIT(1)
++#define QCA8084_FIFO_PHY_2_MAC                        BIT(0)
++
++#define QCA8084_MMD7_IPG_OP                   0x901d
++#define QCA8084_IPG_10_TO_11_EN                       BIT(0)
++
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+@@ -697,6 +705,49 @@ static int qca8084_config_init(struct phy_device *phydev)
+                            QCA8084_MSE_THRESHOLD_2P5G_VAL);
+ }
++static void qca8084_link_change_notify(struct phy_device *phydev)
++{
++      int ret;
++
++      /* Assert the FIFO between PHY and MAC. */
++      ret = phy_modify(phydev, QCA8084_FIFO_CONTROL,
++                       QCA8084_FIFO_MAC_2_PHY | QCA8084_FIFO_PHY_2_MAC,
++                       0);
++      if (ret) {
++              phydev_err(phydev, "Asserting PHY FIFO failed\n");
++              return;
++      }
++
++      /* If the PHY is in 10G_QXGMII mode, the FIFO needs to be kept in
++       * reset state when link is down, otherwise the FIFO needs to be
++       * de-asserted after waiting 50 ms for the assert to complete.
++       */
++      if (phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII ||
++          phydev->link) {
++              msleep(50);
++
++              /* Deassert the FIFO between PHY and MAC. */
++              ret = phy_modify(phydev, QCA8084_FIFO_CONTROL,
++                               QCA8084_FIFO_MAC_2_PHY |
++                               QCA8084_FIFO_PHY_2_MAC,
++                               QCA8084_FIFO_MAC_2_PHY |
++                               QCA8084_FIFO_PHY_2_MAC);
++              if (ret) {
++                      phydev_err(phydev, "De-asserting PHY FIFO failed\n");
++                      return;
++              }
++      }
++
++      /* Enable IPG level 10 to 11 tuning for link speed 1000M in the
++       * 10G_QXGMII mode.
++       */
++      if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
++              phy_modify_mmd(phydev, MDIO_MMD_AN, QCA8084_MMD7_IPG_OP,
++                             QCA8084_IPG_10_TO_11_EN,
++                             phydev->speed == SPEED_1000 ?
++                             QCA8084_IPG_10_TO_11_EN : 0);
++}
++
+ static struct phy_driver qca808x_driver[] = {
+ {
+       /* Qualcomm QCA8081 */
+@@ -746,6 +797,7 @@ static struct phy_driver qca808x_driver[] = {
+       .cable_test_start       = qca808x_cable_test_start,
+       .cable_test_get_status  = qca808x_cable_test_get_status,
+       .config_init            = qca8084_config_init,
++      .link_change_notify     = qca8084_link_change_notify,
+ }, };
+ module_phy_driver(qca808x_driver);
+-- 
+2.45.2
+
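The guard around the FIFO release above is easy to misread. Here is a
minimal standalone sketch (user-space C, illustrative only, not part of
the patch) restating the decision in qca8084_link_change_notify() as a
plain predicate and exercising the interesting cases:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the condition "interface != PHY_INTERFACE_MODE_10G_QXGMII ||
 * phydev->link": the FIFO is released again in every case except a
 * 10G-QXGMII link that is currently down.
 */
static bool fifo_should_release(bool is_10g_qxgmii, bool link_up)
{
	return !is_10g_qxgmii || link_up;
}

int main(void)
{
	printf("10G-QXGMII, link down: %d\n", fifo_should_release(true, false));  /* 0 */
	printf("10G-QXGMII, link up:   %d\n", fifo_should_release(true, true));   /* 1 */
	printf("other mode, any link:  %d\n", fifo_should_release(false, false)); /* 1 */
	return 0;
}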
diff --git a/target/linux/qualcommbe/patches-6.6/103-05-net-phy-qca808x-Add-register-access-support-routines.patch b/target/linux/qualcommbe/patches-6.6/103-05-net-phy-qca808x-Add-register-access-support-routines.patch
new file mode 100644 (file)
index 0000000..7986a6c
--- /dev/null
@@ -0,0 +1,130 @@
+From 9443d85d8f3e397b025700251516e248fc4e37c0 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 29 Nov 2023 15:21:22 +0800
+Subject: [PATCH 05/50] net: phy: qca808x: Add register access support routines
+ for QCA8084
+
+Besides the PHY and PCS, the QCA8084 integrates clock controller
+and security control modules. The 32-bit registers in these
+modules are read and written using special MDIO sequences rather
+than plain MDIO register accesses.
+
+The MDIO addresses of the PHY and PCS are configured by writing to
+the security control register. The package mode of the QCA8084 is
+configured in a similar manner.
+
+Change-Id: I9317307ef9bbc738a6adcbc3ea1be8e6528d711e
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 88 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 88 insertions(+)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 029d5f9de6b8..8873474146e8 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -111,6 +111,22 @@
+ #define QCA8084_MMD7_IPG_OP                   0x901d
+ #define QCA8084_IPG_10_TO_11_EN                       BIT(0)
++/* The QCA8084 includes a secure control module, which supports customizing
++ * the MDIO addresses of the PHY and PCS devices and configuring the package
++ * mode that selects the interface mode of the PCS. The secure control
++ * registers are accessed over the MDIO bus with special MDIO sequences: the
++ * 32-bit register address is split into 3 MDIO operations of 16 bits each.
++ */
++#define QCA8084_HIGH_ADDR_PREFIX              0x18
++#define QCA8084_LOW_ADDR_PREFIX                       0x10
++
++/* Bottom two bits of REG must be zero */
++#define QCA8084_MII_REG_MASK                  GENMASK(4, 0)
++#define QCA8084_MII_PHY_ADDR_MASK             GENMASK(7, 5)
++#define QCA8084_MII_PAGE_MASK                 GENMASK(23, 8)
++#define QCA8084_MII_SW_ADDR_MASK              GENMASK(31, 24)
++#define QCA8084_MII_REG_DATA_UPPER_16_BITS    BIT(1)
++
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+@@ -119,6 +135,78 @@ struct qca808x_priv {
+       int led_polarity_mode;
+ };
++static int __qca8084_set_page(struct mii_bus *bus, u16 sw_addr, u16 page)
++{
++      return __mdiobus_write(bus, QCA8084_HIGH_ADDR_PREFIX | (sw_addr >> 5),
++                             sw_addr & 0x1f, page);
++}
++
++static int __qca8084_mii_read(struct mii_bus *bus, u16 addr, u16 reg, u32 *val)
++{
++      int ret, data;
++
++      ret = __mdiobus_read(bus, addr, reg);
++      if (ret < 0)
++              return ret;
++
++      data = ret;
++      ret = __mdiobus_read(bus, addr,
++                           reg | QCA8084_MII_REG_DATA_UPPER_16_BITS);
++      if (ret < 0)
++              return ret;
++
++      *val = data | ret << 16;
++
++      return 0;
++}
++
++static int __qca8084_mii_write(struct mii_bus *bus, u16 addr, u16 reg, u32 val)
++{
++      int ret;
++
++      ret = __mdiobus_write(bus, addr, reg, lower_16_bits(val));
++      if (!ret)
++              ret = __mdiobus_write(bus, addr,
++                                    reg | QCA8084_MII_REG_DATA_UPPER_16_BITS,
++                                    upper_16_bits(val));
++
++      return ret;
++}
++
++static int qca8084_mii_modify(struct phy_device *phydev, u32 regaddr,
++                            u32 clear, u32 set)
++{
++      u16 reg, addr, page, sw_addr;
++      struct mii_bus *bus;
++      u32 val;
++      int ret;
++
++      bus = phydev->mdio.bus;
++      mutex_lock(&bus->mdio_lock);
++
++      reg = FIELD_GET(QCA8084_MII_REG_MASK, regaddr);
++      addr = FIELD_GET(QCA8084_MII_PHY_ADDR_MASK, regaddr);
++      page = FIELD_GET(QCA8084_MII_PAGE_MASK, regaddr);
++      sw_addr = FIELD_GET(QCA8084_MII_SW_ADDR_MASK, regaddr);
++
++      ret = __qca8084_set_page(bus, sw_addr, page);
++      if (ret < 0)
++              goto qca8084_mii_modify_exit;
++
++      ret = __qca8084_mii_read(bus, QCA8084_LOW_ADDR_PREFIX | addr,
++                               reg, &val);
++      if (ret < 0)
++              goto qca8084_mii_modify_exit;
++
++      val &= ~clear;
++      val |= set;
++      ret = __qca8084_mii_write(bus, QCA8084_LOW_ADDR_PREFIX | addr,
++                                reg, val);
++qca8084_mii_modify_exit:
++      mutex_unlock(&bus->mdio_lock);
++      return ret;
++};
++
+ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+ {
+       int ret;
+-- 
+2.45.2
+
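To make the three-operation address split concrete, here is a minimal
standalone sketch (user-space C, not kernel code; the masks are
transcribed from the QCA8084_MII_* definitions in this patch) that
decodes 0x0c90f018 — the QCA8084_EPHY_CFG address used later in this
series — into the page-select write and the two 16-bit data accesses
performed by qca8084_mii_modify():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regaddr = 0x0c90f018;	/* QCA8084_EPHY_CFG */

	uint16_t reg     = regaddr & 0x1f;          /* QCA8084_MII_REG_MASK, bits 4:0 */
	uint16_t phyaddr = (regaddr >> 5) & 0x7;    /* QCA8084_MII_PHY_ADDR_MASK, bits 7:5 */
	uint16_t page    = (regaddr >> 8) & 0xffff; /* QCA8084_MII_PAGE_MASK, bits 23:8 */
	uint16_t sw_addr = (regaddr >> 24) & 0xff;  /* QCA8084_MII_SW_ADDR_MASK, bits 31:24 */

	/* Op 1: select the page via the 0x18-prefixed switch address. */
	printf("page write:  mdio addr 0x%02x reg 0x%02x val 0x%04x\n",
	       0x18 | (sw_addr >> 5), sw_addr & 0x1f, page);

	/* Ops 2 and 3: the two 16-bit halves via the 0x10-prefixed PHY
	 * address; the upper half lives at reg | BIT(1).
	 */
	printf("data access: mdio addr 0x%02x regs 0x%02x/0x%02x\n",
	       0x10 | phyaddr, reg, reg | 2);
	return 0;
}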
diff --git a/target/linux/qualcommbe/patches-6.6/103-06-net-phy-qca808x-Add-QCA8084-probe-function.patch b/target/linux/qualcommbe/patches-6.6/103-06-net-phy-qca808x-Add-QCA8084-probe-function.patch
new file mode 100644 (file)
index 0000000..ad8bc02
--- /dev/null
@@ -0,0 +1,145 @@
+From 9d0e22124d6f3ca901626dd5537b36c7c0c97812 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Mon, 29 Jan 2024 10:51:38 +0800
+Subject: [PATCH 06/50] net: phy: qca808x: Add QCA8084 probe function
+
+Add the PHY package probe function. The MDIO slave addresses of the
+PHY, PCS and XPCS can optionally be customized by configuring the
+PHY package level register.
+
+In addition, enable the system clock of the PHY and de-assert the PHY
+in the probe function so that the registers of the PHY device can be
+accessed and the features of the PHY can be acquired.
+
+Change-Id: I2251b9c5c398a21a4ef547a727189a934ad3a44c
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 91 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 91 insertions(+)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 8873474146e8..85bb299fe0a3 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -2,6 +2,8 @@
+ #include <linux/phy.h>
+ #include <linux/module.h>
++#include <linux/of.h>
++#include <linux/clk.h>
+ #include "qcom.h"
+@@ -127,6 +129,21 @@
+ #define QCA8084_MII_SW_ADDR_MASK              GENMASK(31, 24)
+ #define QCA8084_MII_REG_DATA_UPPER_16_BITS    BIT(1)
++/* QCA8084 integrates 4 PHYs, PCS0 and PCS1 (which includes PCS and XPCS). */
++#define QCA8084_MDIO_DEVICE_NUM                       7
++
++#define QCA8084_PCS_CFG                               0xc90f014
++#define QCA8084_PCS_ADDR0_MASK                        GENMASK(4, 0)
++#define QCA8084_PCS_ADDR1_MASK                        GENMASK(9, 5)
++#define QCA8084_PCS_ADDR2_MASK                        GENMASK(14, 10)
++
++#define QCA8084_EPHY_CFG                      0xc90f018
++#define QCA8084_EPHY_ADDR0_MASK                       GENMASK(4, 0)
++#define QCA8084_EPHY_ADDR1_MASK                       GENMASK(9, 5)
++#define QCA8084_EPHY_ADDR2_MASK                       GENMASK(14, 10)
++#define QCA8084_EPHY_ADDR3_MASK                       GENMASK(19, 15)
++#define QCA8084_EPHY_LDO_EN                   GENMASK(21, 20)
++
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+@@ -836,6 +853,79 @@ static void qca8084_link_change_notify(struct phy_device *phydev)
+                              QCA8084_IPG_10_TO_11_EN : 0);
+ }
++static int qca8084_phy_package_probe_once(struct phy_device *phydev)
++{
++      int addr[QCA8084_MDIO_DEVICE_NUM] = {0, 1, 2, 3, 4, 5, 6};
++      struct phy_package_shared *shared = phydev->shared;
++      int ret, clear, set;
++
++      /* Optionally program the MDIO addresses of the PHY and PCS; the
++       * MDIO addresses 0-6 are used for these devices by default.
++       */
++      ret = of_property_read_u32_array(shared->np,
++                                       "qcom,phy-addr-fixup",
++                                       addr, ARRAY_SIZE(addr));
++      if (ret && ret != -EINVAL)
++              return ret;
++
++      /* Configure the MDIO addresses for the four PHY devices. */
++      clear = QCA8084_EPHY_ADDR0_MASK | QCA8084_EPHY_ADDR1_MASK |
++              QCA8084_EPHY_ADDR2_MASK | QCA8084_EPHY_ADDR3_MASK;
++      set = FIELD_PREP(QCA8084_EPHY_ADDR0_MASK, addr[0]);
++      set |= FIELD_PREP(QCA8084_EPHY_ADDR1_MASK, addr[1]);
++      set |= FIELD_PREP(QCA8084_EPHY_ADDR2_MASK, addr[2]);
++      set |= FIELD_PREP(QCA8084_EPHY_ADDR3_MASK, addr[3]);
++
++      ret = qca8084_mii_modify(phydev, QCA8084_EPHY_CFG, clear, set);
++      if (ret)
++              return ret;
++
++      /* Configure the MDIO addresses for PCS0 and PCS1 including
++       * PCS and XPCS.
++       */
++      clear = QCA8084_PCS_ADDR0_MASK | QCA8084_PCS_ADDR1_MASK |
++              QCA8084_PCS_ADDR2_MASK;
++      set = FIELD_PREP(QCA8084_PCS_ADDR0_MASK, addr[4]);
++      set |= FIELD_PREP(QCA8084_PCS_ADDR1_MASK, addr[5]);
++      set |= FIELD_PREP(QCA8084_PCS_ADDR2_MASK, addr[6]);
++
++      return qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
++}
++
++static int qca8084_probe(struct phy_device *phydev)
++{
++      struct device *dev = &phydev->mdio.dev;
++      struct reset_control *rstc;
++      struct clk *clk;
++      int ret;
++
++      ret = devm_of_phy_package_join(dev, phydev, 0);
++      if (ret)
++              return ret;
++
++      if (phy_package_probe_once(phydev)) {
++              ret = qca8084_phy_package_probe_once(phydev);
++              if (ret)
++                      return ret;
++      }
++
++      /* Enable clock of PHY device, so that the PHY register
++       * can be accessed to get PHY features.
++       */
++      clk = devm_clk_get_enabled(dev, NULL);
++      if (IS_ERR(clk))
++              return dev_err_probe(dev, PTR_ERR(clk),
++                                   "Enable PHY clock failed\n");
++
++      /* De-assert PHY reset after the clock of PHY enabled. */
++      rstc = devm_reset_control_get_exclusive(dev, NULL);
++      if (IS_ERR(rstc))
++              return dev_err_probe(dev, PTR_ERR(rstc),
++                                   "Get PHY reset failed\n");
++
++      return reset_control_deassert(rstc);
++}
++
+ static struct phy_driver qca808x_driver[] = {
+ {
+       /* Qualcomm QCA8081 */
+@@ -886,6 +976,7 @@ static struct phy_driver qca808x_driver[] = {
+       .cable_test_get_status  = qca808x_cable_test_get_status,
+       .config_init            = qca8084_config_init,
+       .link_change_notify     = qca8084_link_change_notify,
++      .probe                  = qca8084_probe,
+ }, };
+ module_phy_driver(qca808x_driver);
+-- 
+2.45.2
+
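As a cross-check on the field layout programmed by
qca8084_phy_package_probe_once(), this standalone sketch (illustrative
only, not part of the patch) packs the default MDIO addresses {0..6}
into the QCA8084_EPHY_CFG and QCA8084_PCS_CFG register fields defined
above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Default MDIO addresses for the 7 devices (4 PHYs + PCS/XPCS). */
	int addr[7] = {0, 1, 2, 3, 4, 5, 6};
	uint32_t ephy_cfg, pcs_cfg;

	/* QCA8084_EPHY_ADDR0..3: bits 4:0, 9:5, 14:10 and 19:15 */
	ephy_cfg = (uint32_t)addr[0] |
		   (uint32_t)addr[1] << 5 |
		   (uint32_t)addr[2] << 10 |
		   (uint32_t)addr[3] << 15;

	/* QCA8084_PCS_ADDR0..2: bits 4:0, 9:5 and 14:10 */
	pcs_cfg = (uint32_t)addr[4] |
		  (uint32_t)addr[5] << 5 |
		  (uint32_t)addr[6] << 10;

	printf("EPHY_CFG field value: 0x%05x\n", ephy_cfg);	/* 0x18820 */
	printf("PCS_CFG field value:  0x%04x\n", pcs_cfg);	/* 0x18a4  */
	return 0;
}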
diff --git a/target/linux/qualcommbe/patches-6.6/103-07-net-phy-qca808x-Add-package-clocks-and-resets-for-QC.patch b/target/linux/qualcommbe/patches-6.6/103-07-net-phy-qca808x-Add-package-clocks-and-resets-for-QC.patch
new file mode 100644 (file)
index 0000000..78111f7
--- /dev/null
@@ -0,0 +1,138 @@
+From 324c5b908a5294390ed9659a6439758cb20ecd61 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 9 Apr 2024 16:30:55 +0800
+Subject: [PATCH 07/50] net: phy: qca808x: Add package clocks and resets for
+ QCA8084
+
+Parse the PHY package clocks from the PHY package DTS node.
+These package level clocks are enabled in the PHY package
+init function.
+
+Deassert the PHY package reset, which is necessary for accessing
+the PHY registers.
+
+Change-Id: I254d0aa0a1155d3618c6f1fc7d7a5b6ecadccbaa
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 67 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 64 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 85bb299fe0a3..632cad1ad190 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -4,6 +4,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/clk.h>
++#include <linux/reset.h>
+ #include "qcom.h"
+@@ -148,10 +149,35 @@ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
++enum {
++      APB_BRIDGE_CLK,
++      AHB_CLK,
++      SEC_CTRL_AHB_CLK,
++      TLMM_CLK,
++      TLMM_AHB_CLK,
++      CNOC_AHB_CLK,
++      MDIO_AHB_CLK,
++      PACKAGE_CLK_MAX
++};
++
+ struct qca808x_priv {
+       int led_polarity_mode;
+ };
++struct qca808x_shared_priv {
++      struct clk *clk[PACKAGE_CLK_MAX];
++};
++
++static const char *const qca8084_package_clk_name[PACKAGE_CLK_MAX] = {
++      [APB_BRIDGE_CLK] =      "apb_bridge",
++      [AHB_CLK] =             "ahb",
++      [SEC_CTRL_AHB_CLK] =    "sec_ctrl_ahb",
++      [TLMM_CLK] =            "tlmm",
++      [TLMM_AHB_CLK] =        "tlmm_ahb",
++      [CNOC_AHB_CLK] =        "cnoc_ahb",
++      [MDIO_AHB_CLK] =        "mdio_ahb",
++};
++
+ static int __qca8084_set_page(struct mii_bus *bus, u16 sw_addr, u16 page)
+ {
+       return __mdiobus_write(bus, QCA8084_HIGH_ADDR_PREFIX | (sw_addr >> 5),
+@@ -853,11 +879,24 @@ static void qca8084_link_change_notify(struct phy_device *phydev)
+                              QCA8084_IPG_10_TO_11_EN : 0);
+ }
++/* QCA8084 is a four-port PHY, which integrates the clock controller,
++ * 4 PHY devices and 2 PCS interfaces (PCS0 and PCS1). PCS1 includes
++ * XPCS and PCS to support 10G-QXGMII and SGMII. PCS0 includes one PCS
++ * to support SGMII.
++ *
++ * The clocks and resets are sourced from the integrated clock controller
++ * of the PHY package. This integrated clock controller is driven by a
++ * QCA8K clock provider that supplies the clocks and resets to the four
++ * PHYs, PCS and PHY package.
++ */
+ static int qca8084_phy_package_probe_once(struct phy_device *phydev)
+ {
+       int addr[QCA8084_MDIO_DEVICE_NUM] = {0, 1, 2, 3, 4, 5, 6};
+       struct phy_package_shared *shared = phydev->shared;
+-      int ret, clear, set;
++      struct qca808x_shared_priv *shared_priv;
++      struct reset_control *rstc;
++      int i, ret, clear, set;
++      struct clk *clk;
+       /* Program the MDIO address of PHY and PCS optionally, the MDIO
+        * address 0-6 is used for PHY and PCS MDIO devices by default.
+@@ -889,17 +928,39 @@ static int qca8084_phy_package_probe_once(struct phy_device *phydev)
+       set |= FIELD_PREP(QCA8084_PCS_ADDR1_MASK, addr[5]);
+       set |= FIELD_PREP(QCA8084_PCS_ADDR2_MASK, addr[6]);
+-      return qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
++      ret = qca8084_mii_modify(phydev, QCA8084_PCS_CFG, clear, set);
++      if (ret)
++              return ret;
++
++      shared_priv = shared->priv;
++      for (i = 0; i < ARRAY_SIZE(qca8084_package_clk_name); i++) {
++              clk = of_clk_get_by_name(shared->np,
++                                       qca8084_package_clk_name[i]);
++              if (IS_ERR(clk))
++                      return dev_err_probe(&phydev->mdio.dev, PTR_ERR(clk),
++                                           "package clock %s not ready\n",
++                                           qca8084_package_clk_name[i]);
++              shared_priv->clk[i] = clk;
++      }
++
++      rstc = of_reset_control_get_exclusive(shared->np, NULL);
++      if (IS_ERR(rstc))
++              return dev_err_probe(&phydev->mdio.dev, PTR_ERR(rstc),
++                                   "package reset not ready\n");
++
++      /* Deassert PHY package. */
++      return reset_control_deassert(rstc);
+ }
+ static int qca8084_probe(struct phy_device *phydev)
+ {
++      struct qca808x_shared_priv *shared_priv;
+       struct device *dev = &phydev->mdio.dev;
+       struct reset_control *rstc;
+       struct clk *clk;
+       int ret;
+-      ret = devm_of_phy_package_join(dev, phydev, 0);
++      ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
+       if (ret)
+               return ret;
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-08-net-phy-qca808x-Add-QCA8084-package-init-function.patch b/target/linux/qualcommbe/patches-6.6/103-08-net-phy-qca808x-Add-QCA8084-package-init-function.patch
new file mode 100644 (file)
index 0000000..fd951ce
--- /dev/null
@@ -0,0 +1,176 @@
+From 392a648b7b0324d03e6f6a7b326e33136d79b134 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 25 Jan 2024 17:13:24 +0800
+Subject: [PATCH 08/50] net: phy: qca808x: Add QCA8084 package init function
+
+The package mode of the PHY is configured so that the interface
+modes of the two PCSes work correctly.
+
+The PHY package level clocks are enabled and their rates configured.
+
+Change-Id: I63d4b22d2a70ee713cc6a6818b0f3c7aa098a5f5
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/phy/qcom/qca808x.c | 115 +++++++++++++++++++++++++++++++++
+ 1 file changed, 115 insertions(+)
+
+diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
+index 632cad1ad190..459f8e8a9749 100644
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0+
++#include <dt-bindings/net/qcom,qca808x.h>
+ #include <linux/phy.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+@@ -145,6 +146,13 @@
+ #define QCA8084_EPHY_ADDR3_MASK                       GENMASK(19, 15)
+ #define QCA8084_EPHY_LDO_EN                   GENMASK(21, 20)
++#define QCA8084_WORK_MODE_CFG                 0xc90f030
++#define QCA8084_WORK_MODE_MASK                        GENMASK(5, 0)
++#define QCA8084_WORK_MODE_QXGMII              (BIT(5) | GENMASK(3, 0))
++#define QCA8084_WORK_MODE_QXGMII_PORT4_SGMII  (BIT(5) | GENMASK(2, 0))
++#define QCA8084_WORK_MODE_SWITCH              BIT(4)
++#define QCA8084_WORK_MODE_SWITCH_PORT4_SGMII  BIT(5)
++
+ MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi, Luo Jie");
+ MODULE_LICENSE("GPL");
+@@ -165,6 +173,7 @@ struct qca808x_priv {
+ };
+ struct qca808x_shared_priv {
++      int package_mode;
+       struct clk *clk[PACKAGE_CLK_MAX];
+ };
+@@ -808,10 +817,107 @@ static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+                             active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+ }
++static int qca8084_package_clock_init(struct qca808x_shared_priv *shared_priv)
++{
++      int ret;
++
++      /* Configure a clock rate of 312.5 MHz for the PHY package
++       * APB bridge clock tree.
++       */
++      ret = clk_set_rate(shared_priv->clk[APB_BRIDGE_CLK], 312500000);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[APB_BRIDGE_CLK]);
++      if (ret)
++              return ret;
++
++      /* Configure a clock rate of 104.17 MHz for the PHY package
++       * AHB clock tree.
++       */
++      ret = clk_set_rate(shared_priv->clk[AHB_CLK], 104170000);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[AHB_CLK]);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[SEC_CTRL_AHB_CLK]);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[TLMM_CLK]);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[TLMM_AHB_CLK]);
++      if (ret)
++              return ret;
++
++      ret = clk_prepare_enable(shared_priv->clk[CNOC_AHB_CLK]);
++      if (ret)
++              return ret;
++
++      return clk_prepare_enable(shared_priv->clk[MDIO_AHB_CLK]);
++}
++
++static int qca8084_phy_package_config_init_once(struct phy_device *phydev)
++{
++      struct phy_package_shared *shared = phydev->shared;
++      struct qca808x_shared_priv *shared_priv;
++      int ret, mode;
++
++      shared_priv = shared->priv;
++      switch (shared_priv->package_mode) {
++      case QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED:
++              mode = QCA8084_WORK_MODE_QXGMII;
++              break;
++      case QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_MAC:
++              mode = QCA8084_WORK_MODE_SWITCH;
++              break;
++      case QCA808X_PCS1_SGMII_MAC_PCS0_SGMII_PHY:
++              mode = QCA8084_WORK_MODE_SWITCH_PORT4_SGMII;
++              break;
++      default:
++              phydev_err(phydev, "Invalid qcom,package-mode %d\n",
++                         shared_priv->package_mode);
++              return -EINVAL;
++      }
++
++      ret = qca8084_mii_modify(phydev, QCA8084_WORK_MODE_CFG,
++                               QCA8084_WORK_MODE_MASK,
++                               FIELD_PREP(QCA8084_WORK_MODE_MASK, mode));
++      if (ret)
++              return ret;
++
++      /* Initialize the PHY package clock and reset, which is the
++       * necessary config sequence after GPIO reset on the PHY package.
++       */
++      ret = qca8084_package_clock_init(shared_priv);
++      if (ret)
++              return ret;
++
++      /* Enable efuse loading into analog circuit */
++      ret = qca8084_mii_modify(phydev, QCA8084_EPHY_CFG,
++                               QCA8084_EPHY_LDO_EN, 0);
++      if (ret)
++              return ret;
++
++      usleep_range(10000, 11000);
++      return ret;
++}
++
+ static int qca8084_config_init(struct phy_device *phydev)
+ {
+       int ret;
++      if (phy_package_init_once(phydev)) {
++              ret = qca8084_phy_package_config_init_once(phydev);
++              if (ret)
++                      return ret;
++      }
++
+       if (phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII)
+               __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+                         phydev->possible_interfaces);
+@@ -948,6 +1054,15 @@ static int qca8084_phy_package_probe_once(struct phy_device *phydev)
+               return dev_err_probe(&phydev->mdio.dev, PTR_ERR(rstc),
+                                    "package reset not ready\n");
++      /* By default the package mode is 10G-QXGMII on PCS1 for the
++       * quad PHY, with PCS0 unused.
++       */
++      shared_priv->package_mode = QCA808X_PCS1_10G_QXGMII_PCS0_UNUNSED;
++      ret = of_property_read_u32(shared->np, "qcom,package-mode",
++                                 &shared_priv->package_mode);
++      if (ret && ret != -EINVAL)
++              return ret;
++
+       /* Deassert PHY package. */
+       return reset_control_deassert(rstc);
+ }
+-- 
+2.45.2
+
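For reference, the QCA8084_WORK_MODE_CFG values written by
qca8084_phy_package_config_init_once() follow directly from the
#defines in this patch. A standalone sketch (illustrative only) that
prints the three supported mode values:

#include <stdio.h>

int main(void)
{
	/* QCA8084_WORK_MODE_QXGMII = BIT(5) | GENMASK(3, 0) */
	printf("QXGMII (quad PHY, 10G-QXGMII on PCS1): 0x%02x\n", 0x20 | 0x0f);
	/* QCA8084_WORK_MODE_SWITCH = BIT(4) */
	printf("SWITCH (both PCSes as SGMII MAC):      0x%02x\n", 0x10);
	/* QCA8084_WORK_MODE_SWITCH_PORT4_SGMII = BIT(5) */
	printf("SWITCH with port 4 as SGMII:           0x%02x\n", 0x20);
	return 0;
}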
diff --git a/target/linux/qualcommbe/patches-6.6/103-11-net-pcs-Add-driver-for-Qualcomm-IPQ-UNIPHY-PCS.patch b/target/linux/qualcommbe/patches-6.6/103-11-net-pcs-Add-driver-for-Qualcomm-IPQ-UNIPHY-PCS.patch
new file mode 100644 (file)
index 0000000..e463e69
--- /dev/null
@@ -0,0 +1,1043 @@
+From 5e4192952cfb2110aaba1b03a3c66c84d74a27db Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Mon, 29 Jan 2024 11:39:36 +0800
+Subject: [PATCH 11/50] net: pcs: Add driver for Qualcomm IPQ UNIPHY PCS
+
+The UNIPHY hardware block in Qualcomm's IPQ SoC based boards provides
+PCS and XPCS functions and interfaces the Ethernet MAC in the IPQ
+SoC to external PHYs.
+
+This patch adds PCS driver support for the UNIPHY hardware used in
+IPQ SoC based boards. Support for the SGMII/QSGMII/PSGMII and
+USXGMII interface modes is added to the driver.
+
+Change-Id: Id2c8f993f121098f7b02186b53770b75bb539a93
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ MAINTAINERS                             |   8 +
+ drivers/net/pcs/Kconfig                 |  10 +
+ drivers/net/pcs/Makefile                |   1 +
+ drivers/net/pcs/pcs-qcom-ipq-uniphy.c   | 943 ++++++++++++++++++++++++
+ include/linux/pcs/pcs-qcom-ipq-uniphy.h |  13 +
+ 5 files changed, 975 insertions(+)
+ create mode 100644 drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+ create mode 100644 include/linux/pcs/pcs-qcom-ipq-uniphy.h
+
+# diff --git a/MAINTAINERS b/MAINTAINERS
+# index 8836b2200acf..1940990ae342 100644
+# --- a/MAINTAINERS
+# +++ b/MAINTAINERS
+# @@ -18900,6 +18900,14 @@ S:  Maintained
+#  F:  Documentation/devicetree/bindings/regulator/vqmmc-ipq4019-regulator.yaml
+#  F:  drivers/regulator/vqmmc-ipq4019-regulator.c
+# +QUALCOMM IPQ Ethernet UNIPHY PCS DRIVER
+# +M:  Lei Wei <quic_leiwei@quicinc.com>
+# +L:  netdev@vger.kernel.org
+# +S:  Supported
+# +F:  Documentation/devicetree/bindings/net/pcs/qcom,ipq-uniphy.yaml
+# +F:  drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+# +F:  include/linux/pcs/pcs-qcom-ipq-uniphy.h
+# +
+#  QUALCOMM NAND CONTROLLER DRIVER
+#  M:  Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+#  L:  linux-mtd@lists.infradead.org
+diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
+index f6aa437473de..796004de6a31 100644
+--- a/drivers/net/pcs/Kconfig
++++ b/drivers/net/pcs/Kconfig
+@@ -33,4 +33,14 @@ config PCS_RZN1_MIIC
+         on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
+         pass-through mode for MII.
++config PCS_QCOM_IPQ_UNIPHY
++      tristate "Qualcomm IPQ UNIPHY PCS driver"
++      depends on OF && (ARCH_QCOM || COMPILE_TEST)
++      depends on HAS_IOMEM
++      help
++        This module provides the PCS driver for the Qualcomm IPQ UNIPHY
++        available on Qualcomm IPQ SoCs. The UNIPHY provides both PCS and XPCS
++        functions to support different interface modes for MAC to PHY
++        connections, covering the various combinations of Ethernet switch and
++        PHY found on IPQ SoC based boards.
+ endmenu
+diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
+index 4f7920618b90..f95cdff03c7f 100644
+--- a/drivers/net/pcs/Makefile
++++ b/drivers/net/pcs/Makefile
+@@ -8,3 +8,4 @@ obj-$(CONFIG_PCS_XPCS)         += pcs_xpcs.o
+ obj-$(CONFIG_PCS_MTK_LYNXI)   += pcs-mtk-lynxi.o
+ obj-$(CONFIG_PCS_RZN1_MIIC)   += pcs-rzn1-miic.o
+ obj-$(CONFIG_PCS_MTK_USXGMII) += pcs-mtk-usxgmii.o
++obj-$(CONFIG_PCS_QCOM_IPQ_UNIPHY)     += pcs-qcom-ipq-uniphy.o
+diff --git a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+new file mode 100644
+index 000000000000..837de629d0b2
+--- /dev/null
++++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+@@ -0,0 +1,943 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ *
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/device.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/pcs/pcs-qcom-ipq-uniphy.h>
++#include <linux/phylink.h>
++#include <linux/platform_device.h>
++#include <linux/reset.h>
++
++/* Maximum number of PCS channels; PSGMII has 5 channels */
++#define PCS_MAX_CHANNELS              5
++
++#define PCS_CALIBRATION                       0x1e0
++#define PCS_CALIBRATION_DONE          BIT(7)
++
++#define PCS_MODE_CTRL                 0x46c
++#define PCS_MODE_SEL_MASK             GENMASK(12, 8)
++#define PCS_MODE_SGMII                        FIELD_PREP(PCS_MODE_SEL_MASK, 0x4)
++#define PCS_MODE_QSGMII                       FIELD_PREP(PCS_MODE_SEL_MASK, 0x1)
++#define PCS_MODE_PSGMII                       FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
++#define PCS_MODE_XPCS                 FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
++#define PCS_MODE_AN_MODE              BIT(0)
++
++#define PCS_CHANNEL_CTRL(x)           (0x480 + 0x18 * (x))
++#define PCS_CHANNEL_ADPT_RESET                BIT(11)
++#define PCS_CHANNEL_FORCE_MODE                BIT(3)
++#define PCS_CHANNEL_SPEED_MASK                GENMASK(2, 1)
++#define PCS_CHANNEL_SPEED_1000                FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x2)
++#define PCS_CHANNEL_SPEED_100         FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x1)
++#define PCS_CHANNEL_SPEED_10          FIELD_PREP(PCS_CHANNEL_SPEED_MASK, 0x0)
++
++#define PCS_CHANNEL_STS(x)            (0x488 + 0x18 * (x))
++#define PCS_CHANNEL_LINK_STS          BIT(7)
++#define PCS_CHANNEL_STS_DUPLEX_FULL   BIT(6)
++#define PCS_CHANNEL_STS_SPEED_MASK    GENMASK(5, 4)
++#define PCS_CHANNEL_STS_SPEED_10      0
++#define PCS_CHANNEL_STS_SPEED_100     1
++#define PCS_CHANNEL_STS_SPEED_1000    2
++#define PCS_CHANNEL_STS_PAUSE_TX_EN   BIT(1)
++#define PCS_CHANNEL_STS_PAUSE_RX_EN   BIT(0)
++
++#define PCS_PLL_RESET                 0x780
++#define PCS_ANA_SW_RESET              BIT(6)
++
++#define XPCS_INDIRECT_ADDR            0x8000
++#define XPCS_INDIRECT_AHB_ADDR                0x83fc
++#define XPCS_INDIRECT_ADDR_H          GENMASK(20, 8)
++#define XPCS_INDIRECT_ADDR_L          GENMASK(7, 0)
++#define XPCS_INDIRECT_DATA_ADDR(reg)  (FIELD_PREP(GENMASK(15, 10), 0x20) | \
++                                       FIELD_PREP(GENMASK(9, 2), \
++                                       FIELD_GET(XPCS_INDIRECT_ADDR_L, reg)))
++
++#define XPCS_DIG_CTRL                 0x38000
++#define XPCS_USXG_ADPT_RESET          BIT(10)
++#define XPCS_USXG_EN                  BIT(9)
++
++#define XPCS_MII_CTRL                 0x1f0000
++#define XPCS_MII_AN_EN                        BIT(12)
++#define XPCS_DUPLEX_FULL              BIT(8)
++#define XPCS_SPEED_MASK                       (BIT(13) | BIT(6) | BIT(5))
++#define XPCS_SPEED_10000              (BIT(13) | BIT(6))
++#define XPCS_SPEED_5000                       (BIT(13) | BIT(5))
++#define XPCS_SPEED_2500                       BIT(5)
++#define XPCS_SPEED_1000                       BIT(6)
++#define XPCS_SPEED_100                        BIT(13)
++#define XPCS_SPEED_10                 0
++
++#define XPCS_MII_AN_CTRL              0x1f8001
++#define XPCS_MII_AN_8BIT              BIT(8)
++
++#define XPCS_MII_AN_INTR_STS          0x1f8002
++#define XPCS_USXG_AN_LINK_STS         BIT(14)
++#define XPCS_USXG_AN_DUPLEX_FULL      BIT(13)
++#define XPCS_USXG_AN_SPEED_MASK               GENMASK(12, 10)
++#define XPCS_USXG_AN_SPEED_10         0
++#define XPCS_USXG_AN_SPEED_100                1
++#define XPCS_USXG_AN_SPEED_1000               2
++#define XPCS_USXG_AN_SPEED_2500               4
++#define XPCS_USXG_AN_SPEED_5000               5
++#define XPCS_USXG_AN_SPEED_10000      3
++
++/* UNIPHY PCS RAW clock ID */
++enum {
++      PCS_RAW_RX_CLK = 0,
++      PCS_RAW_TX_CLK,
++      PCS_RAW_CLK_MAX
++};
++
++/* UNIPHY PCS raw clock */
++struct ipq_unipcs_raw_clk {
++      struct clk_hw hw;
++      unsigned long rate;
++};
++
++/* UNIPHY PCS clock ID */
++enum {
++      PCS_SYS_CLK,
++      PCS_AHB_CLK,
++      PCS_CLK_MAX
++};
++
++/* UNIPHY PCS reset ID */
++enum {
++      PCS_SYS_RESET,
++      PCS_AHB_RESET,
++      XPCS_RESET,
++      PCS_RESET_MAX
++};
++
++/* UNIPHY PCS clock name */
++static const char *const pcs_clock_name[PCS_CLK_MAX] = {
++      "sys",
++      "ahb",
++};
++
++/* UNIPHY PCS reset name */
++static const char *const pcs_reset_name[PCS_RESET_MAX] = {
++      "sys",
++      "ahb",
++      "xpcs",
++};
++
++/* UNIPHY PCS channel clock ID */
++enum {
++      PCS_CH_RX_CLK,
++      PCS_CH_TX_CLK,
++      PCS_CH_CLK_MAX
++};
++
++/* UNIPHY PCS channel clock name */
++static const char *const pcs_ch_clock_name[PCS_CH_CLK_MAX] = {
++      "ch_rx",
++      "ch_tx",
++};
++
++/* UNIPHY PCS private data instance */
++struct ipq_uniphy_pcs {
++      void __iomem *base;
++      struct device *dev;
++      phy_interface_t interface;
++      struct mutex shared_lock; /* Lock to protect shared config */
++      struct clk *clk[PCS_CLK_MAX];
++      struct reset_control *reset[PCS_RESET_MAX];
++      struct ipq_unipcs_raw_clk raw_clk[PCS_RAW_CLK_MAX];
++};
++
++/* UNIPHY PCS channel private data instance */
++struct ipq_uniphy_pcs_ch {
++      struct ipq_uniphy_pcs *qunipcs;
++      struct phylink_pcs pcs;
++      int channel;
++      struct clk *clk[PCS_CH_CLK_MAX];
++};
++
++#define to_unipcs_raw_clk(_hw)                \
++      container_of(_hw, struct ipq_unipcs_raw_clk, hw)
++#define phylink_pcs_to_unipcs(_pcs)   \
++      container_of(_pcs, struct ipq_uniphy_pcs_ch, pcs)
++
++static unsigned long ipq_unipcs_raw_clk_recalc_rate(struct clk_hw *hw,
++                                                  unsigned long parent_rate)
++{
++      struct ipq_unipcs_raw_clk *raw_clk = to_unipcs_raw_clk(hw);
++
++      return raw_clk->rate;
++}
++
++static int ipq_unipcs_raw_clk_determine_rate(struct clk_hw *hw,
++                                           struct clk_rate_request *req)
++{
++      switch (req->rate) {
++      case 125000000:
++      case 312500000:
++              return 0;
++      default:
++              return -EINVAL;
++      }
++}
++
++static int ipq_unipcs_raw_clk_set_rate(struct clk_hw *hw,
++                                     unsigned long rate,
++                                     unsigned long parent_rate)
++{
++      struct ipq_unipcs_raw_clk *raw_clk = to_unipcs_raw_clk(hw);
++
++      switch (rate) {
++      case 125000000:
++      case 312500000:
++              raw_clk->rate = rate;
++              return 0;
++      default:
++              return -EINVAL;
++      }
++}
++
++static const struct clk_ops ipq_unipcs_raw_clk_ops = {
++      .recalc_rate = ipq_unipcs_raw_clk_recalc_rate,
++      .determine_rate = ipq_unipcs_raw_clk_determine_rate,
++      .set_rate = ipq_unipcs_raw_clk_set_rate,
++};
++
++static u32 ipq_unipcs_reg_read32(struct ipq_uniphy_pcs *qunipcs, u32 reg)
++{
++      /* The PCS uses direct AHB access while the XPCS uses indirect access */
++      if (reg >= XPCS_INDIRECT_ADDR) {
++              writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
++                     qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
++              return readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++      } else {
++              return readl(qunipcs->base + reg);
++      }
++}
++
++static void ipq_unipcs_reg_write32(struct ipq_uniphy_pcs *qunipcs,
++                                 u32 reg, u32 val)
++{
++      if (reg >= XPCS_INDIRECT_ADDR) {
++              writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
++                     qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
++              writel(val, qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++      } else {
++              writel(val, qunipcs->base + reg);
++      }
++}
++
++static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
++                                  u32 reg, u32 mask, u32 set)
++{
++      u32 val;
++
++      val = ipq_unipcs_reg_read32(qunipcs, reg);
++      val &= ~mask;
++      val |= set;
++      ipq_unipcs_reg_write32(qunipcs, reg, val);
++}
++
++static void ipq_unipcs_get_state_sgmii(struct ipq_uniphy_pcs *qunipcs,
++                                     int channel,
++                                     struct phylink_link_state *state)
++{
++      u32 val;
++
++      val = ipq_unipcs_reg_read32(qunipcs, PCS_CHANNEL_STS(channel));
++
++      state->link = !!(val & PCS_CHANNEL_LINK_STS);
++
++      if (!state->link)
++              return;
++
++      switch (FIELD_GET(PCS_CHANNEL_STS_SPEED_MASK, val)) {
++      case PCS_CHANNEL_STS_SPEED_1000:
++              state->speed = SPEED_1000;
++              break;
++      case PCS_CHANNEL_STS_SPEED_100:
++              state->speed = SPEED_100;
++              break;
++      case PCS_CHANNEL_STS_SPEED_10:
++              state->speed = SPEED_10;
++              break;
++      default:
++              return;
++      }
++
++      if (val & PCS_CHANNEL_STS_DUPLEX_FULL)
++              state->duplex = DUPLEX_FULL;
++      else
++              state->duplex = DUPLEX_HALF;
++
++      if (val & PCS_CHANNEL_STS_PAUSE_TX_EN)
++              state->pause |= MLO_PAUSE_TX;
++      if (val & PCS_CHANNEL_STS_PAUSE_RX_EN)
++              state->pause |= MLO_PAUSE_RX;
++}
++
++static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                       struct phylink_link_state *state)
++{
++      u32 val;
++
++      val = ipq_unipcs_reg_read32(qunipcs, XPCS_MII_AN_INTR_STS);
++
++      state->link = !!(val & XPCS_USXG_AN_LINK_STS);
++
++      if (!state->link)
++              return;
++
++      switch (FIELD_GET(XPCS_USXG_AN_SPEED_MASK, val)) {
++      case XPCS_USXG_AN_SPEED_10000:
++              state->speed = SPEED_10000;
++              break;
++      case XPCS_USXG_AN_SPEED_5000:
++              state->speed = SPEED_5000;
++              break;
++      case XPCS_USXG_AN_SPEED_2500:
++              state->speed = SPEED_2500;
++              break;
++      case XPCS_USXG_AN_SPEED_1000:
++              state->speed = SPEED_1000;
++              break;
++      case XPCS_USXG_AN_SPEED_100:
++              state->speed = SPEED_100;
++              break;
++      case XPCS_USXG_AN_SPEED_10:
++              state->speed = SPEED_10;
++              break;
++      default:
++              return;
++      }
++
++      if (val & XPCS_USXG_AN_DUPLEX_FULL)
++              state->duplex = DUPLEX_FULL;
++      else
++              state->duplex = DUPLEX_HALF;
++}
++
++static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
++                                phy_interface_t interface)
++{
++      unsigned long rate = 0;
++      u32 val;
++      int ret;
++
++      /* Assert XPCS reset */
++      reset_control_assert(qunipcs->reset[XPCS_RESET]);
++
++      /* Config PCS interface mode */
++      switch (interface) {
++      case PHY_INTERFACE_MODE_SGMII:
++              rate = 125000000;
++              /* Select Qualcomm SGMII AN mode */
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
++                                      PCS_MODE_SGMII);
++              break;
++      case PHY_INTERFACE_MODE_QSGMII:
++              rate = 125000000;
++              /* Select Qualcomm SGMII AN mode */
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
++                                      PCS_MODE_QSGMII);
++              break;
++      case PHY_INTERFACE_MODE_PSGMII:
++              rate = 125000000;
++              /* Select Qualcomm SGMII AN mode */
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
++                                      PCS_MODE_PSGMII);
++              break;
++      case PHY_INTERFACE_MODE_USXGMII:
++              rate = 312500000;
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK,
++                                      PCS_MODE_XPCS);
++              break;
++      default:
++              dev_err(qunipcs->dev,
++                      "interface %s not supported\n", phy_modes(interface));
++              return -EOPNOTSUPP;
++      }
++
++      /* PCS PLL reset */
++      ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET, PCS_ANA_SW_RESET, 0);
++      fsleep(10000);
++      ipq_unipcs_reg_modify32(qunipcs, PCS_PLL_RESET,
++                              PCS_ANA_SW_RESET, PCS_ANA_SW_RESET);
++
++      /* Wait for calibration completion */
++      ret = read_poll_timeout(ipq_unipcs_reg_read32, val,
++                              val & PCS_CALIBRATION_DONE,
++                              1000, 100000, true,
++                              qunipcs, PCS_CALIBRATION);
++      if (ret) {
++              dev_err(qunipcs->dev, "UNIPHY PCS calibration timed out\n");
++              return ret;
++      }
++
++      /* Configure raw clock rate */
++      clk_set_rate(qunipcs->raw_clk[PCS_RAW_RX_CLK].hw.clk, rate);
++      clk_set_rate(qunipcs->raw_clk[PCS_RAW_TX_CLK].hw.clk, rate);
++
++      return 0;
++}
++
++static int ipq_unipcs_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
++                                 int channel,
++                                 unsigned int neg_mode,
++                                 phy_interface_t interface)
++{
++      int ret;
++
++      /* PCS configuration that is shared by multiple channels
++       * should be applied only once.
++       */
++      if (phy_interface_num_ports(interface) > 1)
++              mutex_lock(&qunipcs->shared_lock);
++
++      if (qunipcs->interface != interface) {
++              ret = ipq_unipcs_config_mode(qunipcs, interface);
++              if (ret)
++                      goto err;
++
++              qunipcs->interface = interface;
++      }
++
++      if (phy_interface_num_ports(interface) > 1)
++              mutex_unlock(&qunipcs->shared_lock);
++
++      /* In-band autoneg mode is enabled by default for each PCS channel */
++      if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
++              return 0;
++
++      /* Force speed mode */
++      ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                              PCS_CHANNEL_FORCE_MODE, PCS_CHANNEL_FORCE_MODE);
++
++      return 0;
++
++err:
++      if (phy_interface_num_ports(interface) > 1)
++              mutex_unlock(&qunipcs->shared_lock);
++
++      return ret;
++}
++
++static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                   unsigned int neg_mode,
++                                   phy_interface_t interface)
++{
++      int ret;
++
++      if (qunipcs->interface != interface) {
++              ret = ipq_unipcs_config_mode(qunipcs, interface);
++              if (ret)
++                      return ret;
++
++              /* Deassert XPCS and configure XPCS USXGMII */
++              reset_control_deassert(qunipcs->reset[XPCS_RESET]);
++
++              ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
++                                      XPCS_USXG_EN, XPCS_USXG_EN);
++
++              if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
++                      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_AN_CTRL,
++                                              XPCS_MII_AN_8BIT,
++                                              XPCS_MII_AN_8BIT);
++
++                      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
++                                              XPCS_MII_AN_EN, XPCS_MII_AN_EN);
++              }
++
++              qunipcs->interface = interface;
++      }
++
++      return 0;
++}
++
++static unsigned long ipq_unipcs_clock_rate_get_gmii(int speed)
++{
++      unsigned long rate = 0;
++
++      switch (speed) {
++      case SPEED_1000:
++              rate = 125000000;
++              break;
++      case SPEED_100:
++              rate = 25000000;
++              break;
++      case SPEED_10:
++              rate = 2500000;
++              break;
++      default:
++              break;
++      }
++
++      return rate;
++}
++
++static unsigned long ipq_unipcs_clock_rate_get_xgmii(int speed)
++{
++      unsigned long rate = 0;
++
++      switch (speed) {
++      case SPEED_10000:
++              rate = 312500000;
++              break;
++      case SPEED_5000:
++              rate = 156250000;
++              break;
++      case SPEED_2500:
++              rate = 78125000;
++              break;
++      case SPEED_1000:
++              rate = 125000000;
++              break;
++      case SPEED_100:
++              rate = 12500000;
++              break;
++      case SPEED_10:
++              rate = 1250000;
++              break;
++      default:
++              break;
++      }
++
++      return rate;
++}
++
++static void
++ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
++                                phy_interface_t interface,
++                                int speed)
++{
++      struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
++      unsigned long rate = 0;
++
++      switch (interface) {
++      case PHY_INTERFACE_MODE_SGMII:
++      case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_PSGMII:
++              rate = ipq_unipcs_clock_rate_get_gmii(speed);
++              break;
++      case PHY_INTERFACE_MODE_USXGMII:
++              rate = ipq_unipcs_clock_rate_get_xgmii(speed);
++              break;
++      default:
++              dev_err(qunipcs->dev,
++                      "interface %s not supported\n", phy_modes(interface));
++              return;
++      }
++
++      if (rate == 0) {
++              dev_err(qunipcs->dev, "Invalid PCS clock rate\n");
++              return;
++      }
++
++      clk_set_rate(qunipcs_ch->clk[PCS_CH_RX_CLK], rate);
++      clk_set_rate(qunipcs_ch->clk[PCS_CH_TX_CLK], rate);
++      fsleep(10000);
++}
++
++static void ipq_unipcs_link_up_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
++                                          int channel,
++                                          unsigned int neg_mode,
++                                          int speed)
++{
++      /* No need to configure the PCS speed if in-band autoneg is enabled */
++      if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
++              goto pcs_adapter_reset;
++
++      /* PCS speed set for force mode */
++      switch (speed) {
++      case SPEED_1000:
++              ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                                      PCS_CHANNEL_SPEED_MASK,
++                                      PCS_CHANNEL_SPEED_1000);
++              break;
++      case SPEED_100:
++              ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                                      PCS_CHANNEL_SPEED_MASK,
++                                      PCS_CHANNEL_SPEED_100);
++              break;
++      case SPEED_10:
++              ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                                      PCS_CHANNEL_SPEED_MASK,
++                                      PCS_CHANNEL_SPEED_10);
++              break;
++      default:
++              dev_err(qunipcs->dev, "Force speed %d not supported\n", speed);
++              return;
++      }
++
++pcs_adapter_reset:
++      /* PCS channel adapter reset */
++      ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                              PCS_CHANNEL_ADPT_RESET,
++                              0);
++      ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                              PCS_CHANNEL_ADPT_RESET,
++                              PCS_CHANNEL_ADPT_RESET);
++}
++
++static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                            int speed)
++{
++      u32 val;
++
++      switch (speed) {
++      case SPEED_10000:
++              val = XPCS_SPEED_10000;
++              break;
++      case SPEED_5000:
++              val = XPCS_SPEED_5000;
++              break;
++      case SPEED_2500:
++              val = XPCS_SPEED_2500;
++              break;
++      case SPEED_1000:
++              val = XPCS_SPEED_1000;
++              break;
++      case SPEED_100:
++              val = XPCS_SPEED_100;
++              break;
++      case SPEED_10:
++              val = XPCS_SPEED_10;
++              break;
++      default:
++              return;
++      }
++
++      /* USXGMII only supports full duplex mode */
++      val |= XPCS_DUPLEX_FULL;
++
++      /* Config XPCS speed */
++      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
++                              XPCS_SPEED_MASK | XPCS_DUPLEX_FULL,
++                              val);
++
++      /* XPCS adapter reset */
++      ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
++                              XPCS_USXG_ADPT_RESET,
++                              XPCS_USXG_ADPT_RESET);
++}
++
++static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
++                               struct phylink_link_state *state)
++{
++      struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
++      struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
++      int channel = qunipcs_ch->channel;
++
++      switch (state->interface) {
++      case PHY_INTERFACE_MODE_SGMII:
++      case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_PSGMII:
++              ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
++              break;
++      case PHY_INTERFACE_MODE_USXGMII:
++              ipq_unipcs_get_state_usxgmii(qunipcs, state);
++              break;
++      default:
++              break;
++      }
++
++      dev_dbg(qunipcs->dev,
++              "mode=%s/%s/%s link=%u\n",
++              phy_modes(state->interface),
++              phy_speed_to_str(state->speed),
++              phy_duplex_to_str(state->duplex),
++              state->link);
++}
++
++static int ipq_unipcs_config(struct phylink_pcs *pcs,
++                           unsigned int neg_mode,
++                           phy_interface_t interface,
++                           const unsigned long *advertising,
++                           bool permit)
++{
++      struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
++      struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
++      int channel = qunipcs_ch->channel;
++
++      switch (interface) {
++      case PHY_INTERFACE_MODE_SGMII:
++      case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_PSGMII:
++              return ipq_unipcs_config_sgmii(qunipcs, channel,
++                                             neg_mode, interface);
++      case PHY_INTERFACE_MODE_USXGMII:
++              return ipq_unipcs_config_usxgmii(qunipcs,
++                                               neg_mode, interface);
++      default:
++              dev_err(qunipcs->dev,
++                      "interface %s not supported\n", phy_modes(interface));
++              return -EOPNOTSUPP;
++      }
++}
++
++static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
++                             unsigned int neg_mode,
++                             phy_interface_t interface,
++                             int speed, int duplex)
++{
++      struct ipq_uniphy_pcs_ch *qunipcs_ch = phylink_pcs_to_unipcs(pcs);
++      struct ipq_uniphy_pcs *qunipcs = qunipcs_ch->qunipcs;
++      int channel = qunipcs_ch->channel;
++
++      /* Configure PCS channel interface clock rate */
++      ipq_unipcs_link_up_clock_rate_set(qunipcs_ch, interface, speed);
++
++      /* Configure PCS speed and reset PCS adapter */
++      switch (interface) {
++      case PHY_INTERFACE_MODE_SGMII:
++      case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_PSGMII:
++              ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
++                                              neg_mode, speed);
++              break;
++      case PHY_INTERFACE_MODE_USXGMII:
++              ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
++              break;
++      default:
++              dev_err(qunipcs->dev,
++                      "interface %s not supported\n", phy_modes(interface));
++              break;
++      }
++}
++
++static const struct phylink_pcs_ops ipq_unipcs_phylink_ops = {
++      .pcs_get_state = ipq_unipcs_get_state,
++      .pcs_config = ipq_unipcs_config,
++      .pcs_link_up = ipq_unipcs_link_up,
++};
++
++/**
++ * ipq_unipcs_create() - Create Qualcomm IPQ UNIPHY PCS
++ * @np: Device tree node to the PCS
++ *
++ * Description: Create a phylink PCS instance for a PCS node @np.
++ *
++ * Return: A pointer to the phylink PCS instance or an error-pointer value.
++ */
++struct phylink_pcs *ipq_unipcs_create(struct device_node *np)
++{
++      struct ipq_uniphy_pcs_ch *qunipcs_ch;
++      struct ipq_uniphy_pcs *qunipcs;
++      struct device_node *uniphy_np;
++      struct platform_device *pdev;
++      u32 channel;
++      int i, j;
++
++      if (!of_device_is_available(np))
++              return ERR_PTR(-ENODEV);
++
++      if (of_property_read_u32(np, "reg", &channel))
++              return ERR_PTR(-EINVAL);
++
++      if (channel >= PCS_MAX_CHANNELS)
++              return ERR_PTR(-EINVAL);
++
++      uniphy_np = of_get_parent(np);
++      if (!uniphy_np)
++              return ERR_PTR(-ENODEV);
++
++      if (!of_device_is_available(uniphy_np)) {
++              of_node_put(uniphy_np);
++              return ERR_PTR(-ENODEV);
++      }
++
++      pdev = of_find_device_by_node(uniphy_np);
++      of_node_put(uniphy_np);
++      if (!pdev)
++              return ERR_PTR(-ENODEV);
++
++      qunipcs = platform_get_drvdata(pdev);
++      platform_device_put(pdev);
++
++      /* If the UNIPHY probe has not completed yet, return
++       * -EPROBE_DEFER to the dependent driver.
++       */
++      if (!qunipcs)
++              return ERR_PTR(-EPROBE_DEFER);
++
++      qunipcs_ch = kzalloc(sizeof(*qunipcs_ch), GFP_KERNEL);
++      if (!qunipcs_ch)
++              return ERR_PTR(-ENOMEM);
++
++      qunipcs_ch->qunipcs = qunipcs;
++      qunipcs_ch->channel = channel;
++      qunipcs_ch->pcs.ops = &ipq_unipcs_phylink_ops;
++      qunipcs_ch->pcs.neg_mode = true;
++      qunipcs_ch->pcs.poll = true;
++
++      for (i = 0; i < PCS_CH_CLK_MAX; i++) {
++              qunipcs_ch->clk[i] = of_clk_get_by_name(np,
++                                                      pcs_ch_clock_name[i]);
++              if (IS_ERR(qunipcs_ch->clk[i])) {
++                      dev_err(qunipcs->dev,
++                              "Failed to get PCS channel %d clock ID %s\n",
++                              channel, pcs_ch_clock_name[i]);
++                      goto free_pcs;
++              }
++
++              clk_prepare_enable(qunipcs_ch->clk[i]);
++      }
++
++      return &qunipcs_ch->pcs;
++
++free_pcs:
++      for (j = 0; j < i; j++) {
++              clk_disable_unprepare(qunipcs_ch->clk[j]);
++              clk_put(qunipcs_ch->clk[j]);
++      }
++
++      kfree(qunipcs_ch);
++      return ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL(ipq_unipcs_create);
++
++/**
++ * ipq_unipcs_destroy() - Destroy Qualcomm IPQ UNIPHY PCS
++ * @pcs: PCS instance
++ *
++ * Description: Destroy a phylink PCS instance.
++ */
++void ipq_unipcs_destroy(struct phylink_pcs *pcs)
++{
++      struct ipq_uniphy_pcs_ch *qunipcs_ch;
++      int i;
++
++      if (!pcs)
++              return;
++
++      qunipcs_ch = phylink_pcs_to_unipcs(pcs);
++
++      for (i = 0; i < PCS_CH_CLK_MAX; i++) {
++              clk_disable_unprepare(qunipcs_ch->clk[i]);
++              clk_put(qunipcs_ch->clk[i]);
++      }
++
++      kfree(qunipcs_ch);
++}
++EXPORT_SYMBOL(ipq_unipcs_destroy);
++
++static int ipq_uniphy_clk_register(struct ipq_uniphy_pcs *qunipcs)
++{
++      struct ipq_unipcs_raw_clk *raw_clk;
++      struct device *dev = qunipcs->dev;
++      struct clk_hw_onecell_data *data;
++
++      struct clk_init_data init = { };
++      int i, ret;
++
++      data = devm_kzalloc(dev,
++                          struct_size(data, hws, PCS_RAW_CLK_MAX),
++                          GFP_KERNEL);
++      if (!data)
++              return -ENOMEM;
++
++      data->num = PCS_RAW_CLK_MAX;
++      for (i = 0; i < PCS_RAW_CLK_MAX; i++) {
++              ret = of_property_read_string_index(dev->of_node,
++                                                  "clock-output-names",
++                                                  i, &init.name);
++              if (ret) {
++                      dev_err(dev,
++                              "%pOFn: No clock-output-names\n", dev->of_node);
++                      return ret;
++              }
++
++              init.ops = &ipq_unipcs_raw_clk_ops;
++              raw_clk = &qunipcs->raw_clk[i];
++
++              raw_clk->rate = 125000000;
++              raw_clk->hw.init = &init;
++
++              ret = devm_clk_hw_register(dev, &raw_clk->hw);
++              if (ret) {
++                      dev_err(dev, "Failed to register UNIPHY PCS raw clock\n");
++                      return ret;
++              }
++
++              data->hws[i] = &raw_clk->hw;
++      }
++
++      return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
++}
++
++static int ipq_uniphy_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct ipq_uniphy_pcs *priv;
++      int i, ret;
++
++      priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++      if (!priv)
++              return -ENOMEM;
++
++      priv->dev = dev;
++
++      priv->base = devm_platform_ioremap_resource(pdev, 0);
++      if (IS_ERR(priv->base))
++              return PTR_ERR(priv->base);
++
++      for (i = 0; i < PCS_CLK_MAX; i++) {
++              priv->clk[i] = devm_clk_get_optional_enabled(dev,
++                                                           pcs_clock_name[i]);
++
++              if (IS_ERR(priv->clk[i]))
++                      dev_err(dev, "Failed to get the clock ID %s\n",
++                              pcs_clock_name[i]);
++      }
++
++      for (i = 0; i < PCS_RESET_MAX; i++) {
++              priv->reset[i] =
++                      devm_reset_control_get_optional_exclusive(dev,
++                                                                pcs_reset_name[i]);
++
++              if (IS_ERR(priv->reset[i]))
++                      dev_err(dev, "Failed to get the reset ID %s\n",
++                              pcs_reset_name[i]);
++      }
++
++      /* Set UNIPHY PCS system and AHB clock rate */
++      clk_set_rate(priv->clk[PCS_SYS_CLK], 24000000);
++      clk_set_rate(priv->clk[PCS_AHB_CLK], 100000000);
++
++      ret = ipq_uniphy_clk_register(priv);
++      if (ret)
++              return ret;
++
++      mutex_init(&priv->shared_lock);
++
++      platform_set_drvdata(pdev, priv);
++
++      return 0;
++}
++
++static const struct of_device_id ipq_uniphy_of_mtable[] = {
++      { .compatible = "qcom,ipq5332-uniphy" },
++      { .compatible = "qcom,ipq9574-uniphy" },
++      { /* sentinel */ },
++};
++MODULE_DEVICE_TABLE(of, ipq_uniphy_of_mtable);
++
++static struct platform_driver ipq_uniphy_driver = {
++      .driver = {
++              .name = "ipq_uniphy",
++              .of_match_table = ipq_uniphy_of_mtable,
++      },
++      .probe = ipq_uniphy_probe,
++};
++module_platform_driver(ipq_uniphy_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Qualcomm IPQ UNIPHY PCS driver");
++MODULE_AUTHOR("Lei Wei <quic_leiwei@quicinc.com>");
+diff --git a/include/linux/pcs/pcs-qcom-ipq-uniphy.h b/include/linux/pcs/pcs-qcom-ipq-uniphy.h
+new file mode 100644
+index 000000000000..4a617bcb32f4
+--- /dev/null
++++ b/include/linux/pcs/pcs-qcom-ipq-uniphy.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ *
++ */
++
++#ifndef __LINUX_PCS_QCOM_IPQ_UNIPHY_H
++#define __LINUX_PCS_QCOM_IPQ_UNIPHY_H
++
++struct phylink_pcs *ipq_unipcs_create(struct device_node *np);
++void ipq_unipcs_destroy(struct phylink_pcs *pcs);
++
++#endif /* __LINUX_PCS_QCOM_IPQ_UNIPHY_H */
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-12-net-pcs-Add-10GBASER-interface-mode-support-to-IPQ-U.patch b/target/linux/qualcommbe/patches-6.6/103-12-net-pcs-Add-10GBASER-interface-mode-support-to-IPQ-U.patch
new file mode 100644 (file)
index 0000000..ea15675
--- /dev/null
@@ -0,0 +1,125 @@
+From f23eb497c891985126a065f950bc61e9c404bb12 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Wed, 6 Mar 2024 17:40:52 +0800
+Subject: [PATCH 12/50] net: pcs: Add 10GBASER interface mode support to IPQ
+ UNIPHY PCS driver
+
+10GBASER mode is used when the PCS connects to a 10G SFP module.
+
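+As a usage sketch (illustrative only; the driver and field names below
+are assumptions, not part of this series), a MAC driver would hand this
+PCS to phylink from its mac_select_pcs() callback:
+
+    static struct phylink_pcs *
+    mymac_select_pcs(struct phylink_config *config,
+                     phy_interface_t interface)
+    {
+            struct mymac_port *port = netdev_priv(to_net_dev(config->dev));
+
+            if (interface == PHY_INTERFACE_MODE_10GBASER)
+                    return port->pcs;       /* from ipq_unipcs_create() */
+
+            return NULL;
+    }
+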
+Change-Id: Ifc3c3bb23811807a9b34e88771aab2c830c2327c
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 48 +++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+
+diff --git a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+index 837de629d0b2..68a1715531ef 100644
+--- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
++++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+@@ -57,6 +57,9 @@
+                                        FIELD_PREP(GENMASK(9, 2), \
+                                        FIELD_GET(XPCS_INDIRECT_ADDR_L, reg)))
++#define XPCS_10GBASER_STS             0x30020
++#define XPCS_10GBASER_LINK_STS                BIT(12)
++
+ #define XPCS_DIG_CTRL                 0x38000
+ #define XPCS_USXG_ADPT_RESET          BIT(10)
+ #define XPCS_USXG_EN                  BIT(9)
+@@ -320,6 +323,23 @@ static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+               state->duplex = DUPLEX_HALF;
+ }
++static void ipq_unipcs_get_state_10gbaser(struct ipq_uniphy_pcs *qunipcs,
++                                        struct phylink_link_state *state)
++{
++      u32 val;
++
++      val = ipq_unipcs_reg_read32(qunipcs, XPCS_10GBASER_STS);
++
++      state->link = !!(val & XPCS_10GBASER_LINK_STS);
++
++      if (!state->link)
++              return;
++
++      state->speed = SPEED_10000;
++      state->duplex = DUPLEX_FULL;
++      state->pause |= MLO_PAUSE_TXRX_MASK;
++}
++
+ static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
+                                 phy_interface_t interface)
+ {
+@@ -354,6 +374,7 @@ static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
+                                       PCS_MODE_PSGMII);
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
++      case PHY_INTERFACE_MODE_10GBASER:
+               rate = 312500000;
+               ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
+                                       PCS_MODE_SEL_MASK,
+@@ -461,6 +482,25 @@ static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+       return 0;
+ }
++static int ipq_unipcs_config_10gbaser(struct ipq_uniphy_pcs *qunipcs,
++                                    phy_interface_t interface)
++{
++      int ret;
++
++      if (qunipcs->interface != interface) {
++              ret = ipq_unipcs_config_mode(qunipcs, interface);
++              if (ret)
++                      return ret;
++
++              /* Deassert XPCS */
++              reset_control_deassert(qunipcs->reset[XPCS_RESET]);
++
++              qunipcs->interface = interface;
++      }
++
++      return 0;
++}
++
+ static unsigned long ipq_unipcs_clock_rate_get_gmii(int speed)
+ {
+       unsigned long rate = 0;
+@@ -527,6 +567,7 @@ ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
+               rate = ipq_unipcs_clock_rate_get_gmii(speed);
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
++      case PHY_INTERFACE_MODE_10GBASER:
+               rate = ipq_unipcs_clock_rate_get_xgmii(speed);
+               break;
+       default:
+@@ -644,6 +685,9 @@ static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_USXGMII:
+               ipq_unipcs_get_state_usxgmii(qunipcs, state);
+               break;
++      case PHY_INTERFACE_MODE_10GBASER:
++              ipq_unipcs_get_state_10gbaser(qunipcs, state);
++              break;
+       default:
+               break;
+       }
+@@ -675,6 +719,8 @@ static int ipq_unipcs_config(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_USXGMII:
+               return ipq_unipcs_config_usxgmii(qunipcs,
+                                                neg_mode, interface);
++      case PHY_INTERFACE_MODE_10GBASER:
++              return ipq_unipcs_config_10gbaser(qunipcs, interface);
+       default:
+               dev_err(qunipcs->dev,
+                       "interface %s not supported\n", phy_modes(interface));
+@@ -705,6 +751,8 @@ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_USXGMII:
+               ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
+               break;
++      case PHY_INTERFACE_MODE_10GBASER:
++              break;
+       default:
+               dev_err(qunipcs->dev,
+                       "interface %s not supported\n", phy_modes(interface));
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-13-net-pcs-Add-2500BASEX-interface-mode-support-to-IPQ-.patch b/target/linux/qualcommbe/patches-6.6/103-13-net-pcs-Add-2500BASEX-interface-mode-support-to-IPQ-.patch
new file mode 100644 (file)
index 0000000..ed58d92
--- /dev/null
@@ -0,0 +1,203 @@
+From fcd1c53b460aa39cfd15f842126af62b27a4fad5 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Tue, 2 Apr 2024 18:28:42 +0800
+Subject: [PATCH 13/50] net: pcs: Add 2500BASEX interface mode support to IPQ
+ UNIPHY PCS driver
+
+2500BASEX mode is used when the PCS connects to a QCA8386 switch over a
+fixed 2500M link. It is also used when the PCS connects to a QCA8081 PHY
+operating at 2500M link speed. In addition, it can also be used when the
+PCS connects to a 2.5G SFP module.
+
+Change-Id: I3fe61113c1b3685debc20659736a9488216a029d
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 95 +++++++++++++++++++++++++++
+ 1 file changed, 95 insertions(+)
+
+diff --git a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+index 68a1715531ef..ed9c55a6c0fa 100644
+--- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
++++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+@@ -25,6 +25,7 @@
+ #define PCS_MODE_SGMII                        FIELD_PREP(PCS_MODE_SEL_MASK, 0x4)
+ #define PCS_MODE_QSGMII                       FIELD_PREP(PCS_MODE_SEL_MASK, 0x1)
+ #define PCS_MODE_PSGMII                       FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
++#define PCS_MODE_SGMII_PLUS           FIELD_PREP(PCS_MODE_SEL_MASK, 0x8)
+ #define PCS_MODE_XPCS                 FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
+ #define PCS_MODE_AN_MODE              BIT(0)
+@@ -282,6 +283,24 @@ static void ipq_unipcs_get_state_sgmii(struct ipq_uniphy_pcs *qunipcs,
+               state->pause |= MLO_PAUSE_RX;
+ }
++static void ipq_unipcs_get_state_2500basex(struct ipq_uniphy_pcs *qunipcs,
++                                         int channel,
++                                         struct phylink_link_state *state)
++{
++      u32 val;
++
++      val = ipq_unipcs_reg_read32(qunipcs, PCS_CHANNEL_STS(channel));
++
++      state->link = !!(val & PCS_CHANNEL_LINK_STS);
++
++      if (!state->link)
++              return;
++
++      state->speed = SPEED_2500;
++      state->duplex = DUPLEX_FULL;
++      state->pause |= MLO_PAUSE_TXRX_MASK;
++}
++
+ static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+                                        struct phylink_link_state *state)
+ {
+@@ -373,6 +392,12 @@ static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
+                                       PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
+                                       PCS_MODE_PSGMII);
+               break;
++      case PHY_INTERFACE_MODE_2500BASEX:
++              rate = 312500000;
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK,
++                                      PCS_MODE_SGMII_PLUS);
++              break;
+       case PHY_INTERFACE_MODE_USXGMII:
+       case PHY_INTERFACE_MODE_10GBASER:
+               rate = 312500000;
+@@ -450,6 +475,22 @@ static int ipq_unipcs_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
+       return ret;
+ }
++static int ipq_unipcs_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
++                                     phy_interface_t interface)
++{
++      int ret;
++
++      if (qunipcs->interface != interface) {
++              ret = ipq_unipcs_config_mode(qunipcs, interface);
++              if (ret)
++                      return ret;
++
++              qunipcs->interface = interface;
++      }
++
++      return 0;
++}
++
+ static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+                                    unsigned int neg_mode,
+                                    phy_interface_t interface)
+@@ -522,6 +563,21 @@ static unsigned long ipq_unipcs_clock_rate_get_gmii(int speed)
+       return rate;
+ }
++static unsigned long ipq_unipcs_clock_rate_get_gmiiplus(int speed)
++{
++      unsigned long rate = 0;
++
++      switch (speed) {
++      case SPEED_2500:
++              rate = 312500000;
++              break;
++      default:
++              break;
++      }
++
++      return rate;
++}
++
+ static unsigned long ipq_unipcs_clock_rate_get_xgmii(int speed)
+ {
+       unsigned long rate = 0;
+@@ -566,6 +622,9 @@ ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
+       case PHY_INTERFACE_MODE_PSGMII:
+               rate = ipq_unipcs_clock_rate_get_gmii(speed);
+               break;
++      case PHY_INTERFACE_MODE_2500BASEX:
++              rate = ipq_unipcs_clock_rate_get_gmiiplus(speed);
++              break;
+       case PHY_INTERFACE_MODE_USXGMII:
+       case PHY_INTERFACE_MODE_10GBASER:
+               rate = ipq_unipcs_clock_rate_get_xgmii(speed);
+@@ -627,6 +686,21 @@ static void ipq_unipcs_link_up_config_sgmii(struct ipq_uniphy_pcs *qunipcs,
+                               PCS_CHANNEL_ADPT_RESET);
+ }
++static void ipq_unipcs_link_up_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
++                                              int channel,
++                                              int speed)
++{
++      /* 2500BASEX does not support autoneg and needs no PCS speed
++       * configuration; only reset the PCS adapter here.
++       */
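++      /* Toggle the bit low then high to pulse the adapter reset. */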
++      ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                              PCS_CHANNEL_ADPT_RESET,
++                              0);
++      ipq_unipcs_reg_modify32(qunipcs, PCS_CHANNEL_CTRL(channel),
++                              PCS_CHANNEL_ADPT_RESET,
++                              PCS_CHANNEL_ADPT_RESET);
++}
++
+ static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+                                             int speed)
+ {
+@@ -669,6 +743,17 @@ static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+                               XPCS_USXG_ADPT_RESET);
+ }
++static int ipq_unipcs_validate(struct phylink_pcs *pcs,
++                             unsigned long *supported,
++                             const struct phylink_link_state *state)
++{
++      /* In-band autoneg is not supported for 2500BASEX */
++      if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
++              phylink_clear(supported, Autoneg);
++
++      return 0;
++}
++
+ static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
+                                struct phylink_link_state *state)
+ {
+@@ -682,6 +767,9 @@ static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_PSGMII:
+               ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
+               break;
++      case PHY_INTERFACE_MODE_2500BASEX:
++              ipq_unipcs_get_state_2500basex(qunipcs, channel, state);
++              break;
+       case PHY_INTERFACE_MODE_USXGMII:
+               ipq_unipcs_get_state_usxgmii(qunipcs, state);
+               break;
+@@ -716,6 +804,8 @@ static int ipq_unipcs_config(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_PSGMII:
+               return ipq_unipcs_config_sgmii(qunipcs, channel,
+                                              neg_mode, interface);
++      case PHY_INTERFACE_MODE_2500BASEX:
++              return ipq_unipcs_config_2500basex(qunipcs, interface);
+       case PHY_INTERFACE_MODE_USXGMII:
+               return ipq_unipcs_config_usxgmii(qunipcs,
+                                                neg_mode, interface);
+@@ -748,6 +838,10 @@ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+               ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
+                                               neg_mode, speed);
+               break;
++      case PHY_INTERFACE_MODE_2500BASEX:
++              ipq_unipcs_link_up_config_2500basex(qunipcs,
++                                                  channel, speed);
++              break;
+       case PHY_INTERFACE_MODE_USXGMII:
+               ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
+               break;
+@@ -761,6 +855,7 @@ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+ }
+ static const struct phylink_pcs_ops ipq_unipcs_phylink_ops = {
++      .pcs_validate = ipq_unipcs_validate,
+       .pcs_get_state = ipq_unipcs_get_state,
+       .pcs_config = ipq_unipcs_config,
+       .pcs_link_up = ipq_unipcs_link_up,
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-14-net-pcs-Add-1000BASEX-interface-mode-support-to-IPQ-.patch b/target/linux/qualcommbe/patches-6.6/103-14-net-pcs-Add-1000BASEX-interface-mode-support-to-IPQ-.patch
new file mode 100644 (file)
index 0000000..4c0c9c3
--- /dev/null
@@ -0,0 +1,100 @@
+From 23f3550c387246025ed2971989b747a5936bf080 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Tue, 9 Apr 2024 01:07:22 +0800
+Subject: [PATCH 14/50] net: pcs: Add 1000BASEX interface mode support to IPQ
+ UNIPHY PCS driver
+
+1000BASEX is used when the PCS connects to a 1G SFP module.
+
+Change-Id: Ied7298de3c1ecba74e6457a07fdd6b3ceab79728
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+index ed9c55a6c0fa..820d197744e8 100644
+--- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
++++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+@@ -27,6 +27,9 @@
+ #define PCS_MODE_PSGMII                       FIELD_PREP(PCS_MODE_SEL_MASK, 0x2)
+ #define PCS_MODE_SGMII_PLUS           FIELD_PREP(PCS_MODE_SEL_MASK, 0x8)
+ #define PCS_MODE_XPCS                 FIELD_PREP(PCS_MODE_SEL_MASK, 0x10)
++#define PCS_MODE_SGMII_CTRL_MASK      GENMASK(6, 4)
++#define PCS_MODE_SGMII_CTRL_1000BASEX FIELD_PREP(PCS_MODE_SGMII_CTRL_MASK, \
++                                                 0x0)
+ #define PCS_MODE_AN_MODE              BIT(0)
+ #define PCS_CHANNEL_CTRL(x)           (0x480 + 0x18 * (x))
+@@ -392,6 +395,13 @@ static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
+                                       PCS_MODE_SEL_MASK | PCS_MODE_AN_MODE,
+                                       PCS_MODE_PSGMII);
+               break;
++      case PHY_INTERFACE_MODE_1000BASEX:
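++              /* 1000BASEX reuses the SGMII mode select; the SGMII_CTRL
++               * field switches the channel to 1000BASEX operation.
++               */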
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK |
++                                      PCS_MODE_SGMII_CTRL_MASK,
++                                      PCS_MODE_SGMII |
++                                      PCS_MODE_SGMII_CTRL_1000BASEX);
++              break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               rate = 312500000;
+               ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
+@@ -620,6 +630,7 @@ ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_PSGMII:
++      case PHY_INTERFACE_MODE_1000BASEX:
+               rate = ipq_unipcs_clock_rate_get_gmii(speed);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+@@ -765,6 +776,10 @@ static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_PSGMII:
++      case PHY_INTERFACE_MODE_1000BASEX:
++              /* The SGMII and 1000BASEX in-band autoneg word formats are
++               * decoded by the PCS hardware and both are placed in the
++               * same status register.
++               */
+               ipq_unipcs_get_state_sgmii(qunipcs, channel, state);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+@@ -802,6 +817,7 @@ static int ipq_unipcs_config(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_PSGMII:
++      case PHY_INTERFACE_MODE_1000BASEX:
+               return ipq_unipcs_config_sgmii(qunipcs, channel,
+                                              neg_mode, interface);
+       case PHY_INTERFACE_MODE_2500BASEX:
+@@ -818,6 +834,11 @@ static int ipq_unipcs_config(struct phylink_pcs *pcs,
+       };
+ }
++static void qcom_ipq_unipcs_an_restart(struct phylink_pcs *pcs)
++{
++      /* Currently not used */
++}
++
+ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+                              unsigned int neg_mode,
+                              phy_interface_t interface,
+@@ -835,6 +856,7 @@ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+       case PHY_INTERFACE_MODE_PSGMII:
++      case PHY_INTERFACE_MODE_1000BASEX:
+               ipq_unipcs_link_up_config_sgmii(qunipcs, channel,
+                                               neg_mode, speed);
+               break;
+@@ -858,6 +880,7 @@ static const struct phylink_pcs_ops ipq_unipcs_phylink_ops = {
+       .pcs_validate = ipq_unipcs_validate,
+       .pcs_get_state = ipq_unipcs_get_state,
+       .pcs_config = ipq_unipcs_config,
++      .pcs_an_restart = qcom_ipq_unipcs_an_restart,
+       .pcs_link_up = ipq_unipcs_link_up,
+ };
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-15-net-pcs-Add-10G_QXGMII-interface-mode-support-to-IPQ.patch b/target/linux/qualcommbe/patches-6.6/103-15-net-pcs-Add-10G_QXGMII-interface-mode-support-to-IPQ.patch
new file mode 100644 (file)
index 0000000..7da55c0
--- /dev/null
@@ -0,0 +1,364 @@
+From d96ec0527b0f5618b3a0757b47606705555ee996 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Mon, 15 Apr 2024 11:06:02 +0800
+Subject: [PATCH 15/50] net: pcs: Add 10G_QXGMII interface mode support to IPQ
+ UNIPHY PCS driver
+
+10G_QXGMII is used when the PCS connects to the QCA8084 quad-port
+2.5G PHY.
+
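+The per-channel XPCS registers added by this patch follow a fixed
+layout: channel 0 uses the base register, while channels 1-3 live in
+per-channel blocks spaced 0x10000 apart. For the MII control register,
+for example:
+
+    /* ch 0: 0x1f0000 (XPCS_MII_CTRL)
+     * ch 1: 0x1a0000 (XPCS_CHANNEL_MII_CTRL(1))
+     * ch 2: 0x1b0000 (XPCS_CHANNEL_MII_CTRL(2))
+     */
+    reg = (channel == 0) ? XPCS_MII_CTRL : XPCS_CHANNEL_MII_CTRL(channel);
+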
+Change-Id: If3dc92a07ac3e51f7c9473fb05fa0668617916fb
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/pcs/pcs-qcom-ipq-uniphy.c | 174 +++++++++++++++++++++-----
+ 1 file changed, 142 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+index 820d197744e8..a98180c91632 100644
+--- a/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
++++ b/drivers/net/pcs/pcs-qcom-ipq-uniphy.c
+@@ -50,6 +50,9 @@
+ #define PCS_CHANNEL_STS_PAUSE_TX_EN   BIT(1)
+ #define PCS_CHANNEL_STS_PAUSE_RX_EN   BIT(0)
++#define PCS_QP_USXG_OPTION            0x584
++#define PCS_QP_USXG_GMII_SRC_XPCS     BIT(0)
++
+ #define PCS_PLL_RESET                 0x780
+ #define PCS_ANA_SW_RESET              BIT(6)
+@@ -65,10 +68,22 @@
+ #define XPCS_10GBASER_LINK_STS                BIT(12)
+ #define XPCS_DIG_CTRL                 0x38000
++#define XPCS_SOFT_RESET                       BIT(15)
+ #define XPCS_USXG_ADPT_RESET          BIT(10)
+ #define XPCS_USXG_EN                  BIT(9)
++#define XPCS_KR_CTRL                  0x38007
++#define XPCS_USXG_MODE_MASK           GENMASK(12, 10)
++#define XPCS_10G_QXGMII_MODE          FIELD_PREP(XPCS_USXG_MODE_MASK, 0x5)
++
++#define XPCS_DIG_STS                  0x3800a
++#define XPCS_DIG_STS_AM_COUNT         GENMASK(14, 0)
++
++#define XPCS_CHANNEL_DIG_CTRL(x)      (0x1a8000 + 0x10000 * ((x) - 1))
++#define XPCS_CHANNEL_USXG_ADPT_RESET  BIT(5)
++
+ #define XPCS_MII_CTRL                 0x1f0000
++#define XPCS_CHANNEL_MII_CTRL(x)      (0x1a0000 + 0x10000 * ((x) - 1))
+ #define XPCS_MII_AN_EN                        BIT(12)
+ #define XPCS_DUPLEX_FULL              BIT(8)
+ #define XPCS_SPEED_MASK                       (BIT(13) | BIT(6) | BIT(5))
+@@ -80,9 +95,11 @@
+ #define XPCS_SPEED_10                 0
+ #define XPCS_MII_AN_CTRL              0x1f8001
++#define XPCS_CHANNEL_MII_AN_CTRL(x)   (0x1a8001 + 0x10000 * ((x) - 1))
+ #define XPCS_MII_AN_8BIT              BIT(8)
+ #define XPCS_MII_AN_INTR_STS          0x1f8002
++#define XPCS_CHANNEL_MII_AN_INTR_STS(x)       (0x1a8002 + 0x10000 * ((x) - 1))
+ #define XPCS_USXG_AN_LINK_STS         BIT(14)
+ #define XPCS_USXG_AN_DUPLEX_FULL      BIT(13)
+ #define XPCS_USXG_AN_SPEED_MASK               GENMASK(12, 10)
+@@ -93,6 +110,10 @@
+ #define XPCS_USXG_AN_SPEED_5000               5
+ #define XPCS_USXG_AN_SPEED_10000      3
++#define XPCS_XAUI_MODE_CTRL           0x1f8004
++#define XPCS_CHANNEL_XAUI_MODE_CTRL(x)        (0x1a8004 + 0x10000 * ((x) - 1))
++#define XPCS_TX_IPG_CHECK_DIS         BIT(0)
++
+ /* UNIPHY PCS RAW clock ID */
+ enum {
+       PCS_RAW_RX_CLK = 0,
+@@ -153,6 +174,7 @@ struct ipq_uniphy_pcs {
+       struct device *dev;
+       phy_interface_t interface;
+       struct mutex shared_lock; /* Lock to protect shared config */
++      spinlock_t reg_lock; /* Lock for register access */
+       struct clk *clk[PCS_CLK_MAX];
+       struct reset_control *reset[PCS_RESET_MAX];
+       struct ipq_unipcs_raw_clk raw_clk[PCS_RAW_CLK_MAX];
+@@ -215,39 +237,55 @@ static const struct clk_ops ipq_unipcs_raw_clk_ops = {
+ static u32 ipq_unipcs_reg_read32(struct ipq_uniphy_pcs *qunipcs, u32 reg)
+ {
++      u32 val;
++
+      /* PCS uses direct AHB access while XPCS uses indirect AHB access */
+       if (reg >= XPCS_INDIRECT_ADDR) {
++              /* For XPCS, although the registers differ between channels,
++               * they are accessed through the same indirect AHB address,
++               * so protect the access sequence with a lock here.
++               */
++              spin_lock(&qunipcs->reg_lock);
++
+               writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
+                      qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
+-              return readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++              val = readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++
++              spin_unlock(&qunipcs->reg_lock);
++              return val;
+       } else {
+               return readl(qunipcs->base + reg);
+       }
+ }
+-static void ipq_unipcs_reg_write32(struct ipq_uniphy_pcs *qunipcs,
+-                                 u32 reg, u32 val)
++static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
++                                  u32 reg, u32 mask, u32 set)
+ {
++      u32 val;
++
+       if (reg >= XPCS_INDIRECT_ADDR) {
++              spin_lock(&qunipcs->reg_lock);
++
++              /* XPCS read */
+               writel(FIELD_GET(XPCS_INDIRECT_ADDR_H, reg),
+                      qunipcs->base + XPCS_INDIRECT_AHB_ADDR);
++              val = readl(qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++
++              val &= ~mask;
++              val |= set;
++
++              /* XPCS write */
+               writel(val, qunipcs->base + XPCS_INDIRECT_DATA_ADDR(reg));
++
++              spin_unlock(&qunipcs->reg_lock);
+       } else {
++              val = readl(qunipcs->base + reg);
++              val &= ~mask;
++              val |= set;
+               writel(val, qunipcs->base + reg);
+       }
+ }
+-static void ipq_unipcs_reg_modify32(struct ipq_uniphy_pcs *qunipcs,
+-                                  u32 reg, u32 mask, u32 set)
+-{
+-      u32 val;
+-
+-      val = ipq_unipcs_reg_read32(qunipcs, reg);
+-      val &= ~mask;
+-      val |= set;
+-      ipq_unipcs_reg_write32(qunipcs, reg, val);
+-}
+-
+ static void ipq_unipcs_get_state_sgmii(struct ipq_uniphy_pcs *qunipcs,
+                                      int channel,
+                                      struct phylink_link_state *state)
+@@ -305,11 +343,15 @@ static void ipq_unipcs_get_state_2500basex(struct ipq_uniphy_pcs *qunipcs,
+ }
+ static void ipq_unipcs_get_state_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                       int channel,
+                                        struct phylink_link_state *state)
+ {
+-      u32 val;
++      u32 val, reg;
++
++      reg = (channel == 0) ? XPCS_MII_AN_INTR_STS :
++              XPCS_CHANNEL_MII_AN_INTR_STS(channel);
+-      val = ipq_unipcs_reg_read32(qunipcs, XPCS_MII_AN_INTR_STS);
++      val = ipq_unipcs_reg_read32(qunipcs, reg);
+       state->link = !!(val & XPCS_USXG_AN_LINK_STS);
+@@ -415,6 +457,15 @@ static int ipq_unipcs_config_mode(struct ipq_uniphy_pcs *qunipcs,
+                                       PCS_MODE_SEL_MASK,
+                                       PCS_MODE_XPCS);
+               break;
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              rate = 312500000;
++              ipq_unipcs_reg_modify32(qunipcs, PCS_MODE_CTRL,
++                                      PCS_MODE_SEL_MASK,
++                                      PCS_MODE_XPCS);
++              ipq_unipcs_reg_modify32(qunipcs, PCS_QP_USXG_OPTION,
++                                      PCS_QP_USXG_GMII_SRC_XPCS,
++                                      PCS_QP_USXG_GMII_SRC_XPCS);
++              break;
+       default:
+               dev_err(qunipcs->dev,
+                       "interface %s not supported\n", phy_modes(interface));
+@@ -502,35 +553,82 @@ static int ipq_unipcs_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
+ }
+ static int ipq_unipcs_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                   int channel,
+                                    unsigned int neg_mode,
+                                    phy_interface_t interface)
+ {
+       int ret;
++      u32 reg;
++
++      /* Only in-band autoneg mode is supported currently */
++      if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED)
++              return -EOPNOTSUPP;
++
++      if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
++              mutex_lock(&qunipcs->shared_lock);
+       if (qunipcs->interface != interface) {
+               ret = ipq_unipcs_config_mode(qunipcs, interface);
+               if (ret)
+-                      return ret;
++                      goto err;
+-              /* Deassert XPCS and configure XPCS USXGMII */
++              /* Deassert XPCS and configure XPCS USXGMII or 10G_QXGMII */
+               reset_control_deassert(qunipcs->reset[XPCS_RESET]);
+               ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
+                                       XPCS_USXG_EN, XPCS_USXG_EN);
+-              if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+-                      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_AN_CTRL,
+-                                              XPCS_MII_AN_8BIT,
+-                                              XPCS_MII_AN_8BIT);
++              if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
++                      ipq_unipcs_reg_modify32(qunipcs, XPCS_KR_CTRL,
++                                              XPCS_USXG_MODE_MASK,
++                                              XPCS_10G_QXGMII_MODE);
++
++                      /* Set Alignment Marker Interval */
++                      ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_STS,
++                                              XPCS_DIG_STS_AM_COUNT,
++                                              0x6018);
+-                      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
+-                                              XPCS_MII_AN_EN, XPCS_MII_AN_EN);
++                      ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
++                                              XPCS_SOFT_RESET,
++                                              XPCS_SOFT_RESET);
+               }
+               qunipcs->interface = interface;
+       }
++      if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
++              mutex_unlock(&qunipcs->shared_lock);
++
++      /* Disable Tx IPG check for 10G_QXGMII */
++      if (interface == PHY_INTERFACE_MODE_10G_QXGMII) {
++              reg = (channel == 0) ? XPCS_XAUI_MODE_CTRL :
++                      XPCS_CHANNEL_XAUI_MODE_CTRL(channel);
++
++              ipq_unipcs_reg_modify32(qunipcs, reg,
++                                      XPCS_TX_IPG_CHECK_DIS,
++                                      XPCS_TX_IPG_CHECK_DIS);
++      }
++
++      /* Enable autoneg */
++      reg = (channel == 0) ? XPCS_MII_AN_CTRL :
++              XPCS_CHANNEL_MII_AN_CTRL(channel);
++
++      ipq_unipcs_reg_modify32(qunipcs, reg,
++                              XPCS_MII_AN_8BIT, XPCS_MII_AN_8BIT);
++
++      reg = (channel == 0) ? XPCS_MII_CTRL :
++              XPCS_CHANNEL_MII_CTRL(channel);
++
++      ipq_unipcs_reg_modify32(qunipcs, reg,
++                              XPCS_MII_AN_EN, XPCS_MII_AN_EN);
++
+       return 0;
++
++err:
++      if (interface == PHY_INTERFACE_MODE_10G_QXGMII)
++              mutex_unlock(&qunipcs->shared_lock);
++
++      return ret;
+ }
+ static int ipq_unipcs_config_10gbaser(struct ipq_uniphy_pcs *qunipcs,
+@@ -638,6 +736,7 @@ ipq_unipcs_link_up_clock_rate_set(struct ipq_uniphy_pcs_ch *qunipcs_ch,
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
+       case PHY_INTERFACE_MODE_10GBASER:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
+               rate = ipq_unipcs_clock_rate_get_xgmii(speed);
+               break;
+       default:
+@@ -713,9 +812,10 @@ static void ipq_unipcs_link_up_config_2500basex(struct ipq_uniphy_pcs *qunipcs,
+ }
+ static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
++                                            int channel,
+                                             int speed)
+ {
+-      u32 val;
++      u32 val, reg;
+       switch (speed) {
+       case SPEED_10000:
+@@ -744,14 +844,20 @@ static void ipq_unipcs_link_up_config_usxgmii(struct ipq_uniphy_pcs *qunipcs,
+       val |= XPCS_DUPLEX_FULL;
+       /* Config XPCS speed */
+-      ipq_unipcs_reg_modify32(qunipcs, XPCS_MII_CTRL,
++      reg = (channel == 0) ? XPCS_MII_CTRL : XPCS_CHANNEL_MII_CTRL(channel);
++      ipq_unipcs_reg_modify32(qunipcs, reg,
+                               XPCS_SPEED_MASK | XPCS_DUPLEX_FULL,
+                               val);
+       /* XPCS adapter reset */
+-      ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
+-                              XPCS_USXG_ADPT_RESET,
+-                              XPCS_USXG_ADPT_RESET);
++      if (channel == 0)
++              ipq_unipcs_reg_modify32(qunipcs, XPCS_DIG_CTRL,
++                                      XPCS_USXG_ADPT_RESET,
++                                      XPCS_USXG_ADPT_RESET);
++      else
++              ipq_unipcs_reg_modify32(qunipcs, XPCS_CHANNEL_DIG_CTRL(channel),
++                                      XPCS_CHANNEL_USXG_ADPT_RESET,
++                                      XPCS_CHANNEL_USXG_ADPT_RESET);
+ }
+ static int ipq_unipcs_validate(struct phylink_pcs *pcs,
+@@ -786,7 +892,8 @@ static void ipq_unipcs_get_state(struct phylink_pcs *pcs,
+               ipq_unipcs_get_state_2500basex(qunipcs, channel, state);
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
+-              ipq_unipcs_get_state_usxgmii(qunipcs, state);
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              ipq_unipcs_get_state_usxgmii(qunipcs, channel, state);
+               break;
+       case PHY_INTERFACE_MODE_10GBASER:
+               ipq_unipcs_get_state_10gbaser(qunipcs, state);
+@@ -823,7 +930,8 @@ static int ipq_unipcs_config(struct phylink_pcs *pcs,
+       case PHY_INTERFACE_MODE_2500BASEX:
+               return ipq_unipcs_config_2500basex(qunipcs, interface);
+       case PHY_INTERFACE_MODE_USXGMII:
+-              return ipq_unipcs_config_usxgmii(qunipcs,
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              return ipq_unipcs_config_usxgmii(qunipcs, channel,
+                                                neg_mode, interface);
+       case PHY_INTERFACE_MODE_10GBASER:
+               return ipq_unipcs_config_10gbaser(qunipcs, interface);
+@@ -865,7 +973,8 @@ static void ipq_unipcs_link_up(struct phylink_pcs *pcs,
+                                                   channel, speed);
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
+-              ipq_unipcs_link_up_config_usxgmii(qunipcs, speed);
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              ipq_unipcs_link_up_config_usxgmii(qunipcs, channel, speed);
+               break;
+       case PHY_INTERFACE_MODE_10GBASER:
+               break;
+@@ -1082,6 +1191,7 @@ static int ipq_uniphy_probe(struct platform_device *pdev)
+               return ret;
+       mutex_init(&priv->shared_lock);
++      spin_lock_init(&priv->reg_lock);
+       platform_set_drvdata(pdev, priv);
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-18-net-ethernet-qualcomm-Add-PPE-driver-for-IPQ9574-SoC.patch b/target/linux/qualcommbe/patches-6.6/103-18-net-ethernet-qualcomm-Add-PPE-driver-for-IPQ9574-SoC.patch
new file mode 100644 (file)
index 0000000..8872005
--- /dev/null
@@ -0,0 +1,363 @@
+From a29ee27a42fc208ef1cd99f5014d57dbfe1af3dd Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 26 Dec 2023 17:11:35 +0800
+Subject: [PATCH 18/50] net: ethernet: qualcomm: Add PPE driver for IPQ9574 SoC
+
+The PPE (Packet Process Engine) hardware block is available
+on Qualcomm IPQ SoCs that support the PPE architecture, such
+as the IPQ9574.
+
+The PPE in the IPQ9574 includes six integrated ethernet MACs
+(for 6 PPE ports), buffer management, queue management and
+scheduler functions. The MACs can connect to external PHY or
+switch devices using the UNIPHY PCS block available in the SoC.
+
+The PPE also includes various packet processing offload
+capabilities such as L3 routing, L2 bridging, and VLAN and
+tunnel processing offload. It also includes an Ethernet DMA
+(EDMA) function for transferring packets between the ARM cores
+and the PPE ethernet ports.
+
+This patch adds the base source files and Makefiles for the PPE
+driver, covering platform driver registration, clock initialization
+and PPE reset routines.
+
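+For reference, a later consumer of this device (for example the EDMA
+or PPE port support added further in this series) would look up the
+probed instance roughly as follows (a sketch; the device_node handle
+ppe_np is an assumption, not part of this patch):
+
+    struct platform_device *ppe_pdev = of_find_device_by_node(ppe_np);
+    struct ppe_device *ppe_dev = NULL;
+
+    if (ppe_pdev)
+            ppe_dev = platform_get_drvdata(ppe_pdev);
+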
+Change-Id: I73166b5d4bb7e3c42ec6e0ac178a75528a25ef30
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig      |  15 ++
+ drivers/net/ethernet/qualcomm/Makefile     |   1 +
+ drivers/net/ethernet/qualcomm/ppe/Makefile |   7 +
+ drivers/net/ethernet/qualcomm/ppe/ppe.c    | 225 +++++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe.h    |  36 ++++
+ 5 files changed, 284 insertions(+)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/Makefile
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe.h
+
+diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
+index 9210ff360fdc..8cc24da48777 100644
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -61,6 +61,21 @@ config QCOM_EMAC
+         low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+         Precision Clock Synchronization Protocol.
++config QCOM_PPE
++      tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
++      depends on HAS_IOMEM && OF
++      depends on COMMON_CLK
++      select REGMAP_MMIO
++      help
++        This driver supports the Qualcomm Technologies, Inc. packet
++        process engine (PPE) available on IPQ SoCs. The PPE houses
++        the ethernet MACs, the Ethernet DMA (EDMA) and the switch
++        core, which support L3 flow offload, L2 switch function, RSS
++        and tunnel offload.
++
++        To compile this driver as a module, choose M here. The module
++        will be called qcom-ppe.
++
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+ endif # NET_VENDOR_QUALCOMM
+diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
+index 9250976dd884..166a59aea363 100644
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
+ obj-y += emac/
++obj-$(CONFIG_QCOM_PPE) += ppe/
+ obj-$(CONFIG_RMNET) += rmnet/
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+new file mode 100644
+index 000000000000..63d50d3b4f2e
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# Makefile for the device driver of PPE (Packet Process Engine) in IPQ SoC
++#
++
++obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
++qcom-ppe-objs := ppe.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+new file mode 100644
+index 000000000000..14998ac771c7
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -0,0 +1,225 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE platform device probe, DT parsing and PPE clock initialization. */
++
++#include <linux/clk.h>
++#include <linux/interconnect.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++
++#include "ppe.h"
++
++#define PPE_PORT_MAX          8
++#define PPE_CLK_RATE          353000000
++
++/* ICC paths for enabling the PPE device. Entries whose avg and peak
++ * bandwidth are 0 are derived from the PPE clock rate.
++ */
++static const struct icc_bulk_data ppe_icc_data[] = {
++      {
++              .name = "ppe",
++              .avg_bw = 0,
++              .peak_bw = 0,
++      },
++      {
++              .name = "ppe_cfg",
++              .avg_bw = 0,
++              .peak_bw = 0,
++      },
++      {
++              .name = "qos_gen",
++              .avg_bw = 6000,
++              .peak_bw = 6000,
++      },
++      {
++              .name = "timeout_ref",
++              .avg_bw = 6000,
++              .peak_bw = 6000,
++      },
++      {
++              .name = "nssnoc_memnoc",
++              .avg_bw = 533333,
++              .peak_bw = 533333,
++      },
++      {
++              .name = "memnoc_nssnoc",
++              .avg_bw = 533333,
++              .peak_bw = 533333,
++      },
++      {
++              .name = "memnoc_nssnoc_1",
++              .avg_bw = 533333,
++              .peak_bw = 533333,
++      },
++};
++
++static const struct regmap_range ppe_readable_ranges[] = {
++      regmap_reg_range(0x0, 0x1ff),           /* Global */
++      regmap_reg_range(0x400, 0x5ff),         /* LPI CSR */
++      regmap_reg_range(0x1000, 0x11ff),       /* GMAC0 */
++      regmap_reg_range(0x1200, 0x13ff),       /* GMAC1 */
++      regmap_reg_range(0x1400, 0x15ff),       /* GMAC2 */
++      regmap_reg_range(0x1600, 0x17ff),       /* GMAC3 */
++      regmap_reg_range(0x1800, 0x19ff),       /* GMAC4 */
++      regmap_reg_range(0x1a00, 0x1bff),       /* GMAC5 */
++      regmap_reg_range(0xb000, 0xefff),       /* PRX CSR */
++      regmap_reg_range(0xf000, 0x1efff),      /* IPE */
++      regmap_reg_range(0x20000, 0x5ffff),     /* PTX CSR */
++      regmap_reg_range(0x60000, 0x9ffff),     /* IPE L2 CSR */
++      regmap_reg_range(0xb0000, 0xeffff),     /* IPO CSR */
++      regmap_reg_range(0x100000, 0x17ffff),   /* IPE PC */
++      regmap_reg_range(0x180000, 0x1bffff),   /* PRE IPO CSR */
++      regmap_reg_range(0x1d0000, 0x1dffff),   /* Tunnel parser */
++      regmap_reg_range(0x1e0000, 0x1effff),   /* Ingress parse */
++      regmap_reg_range(0x200000, 0x2fffff),   /* IPE L3 */
++      regmap_reg_range(0x300000, 0x3fffff),   /* IPE tunnel */
++      regmap_reg_range(0x400000, 0x4fffff),   /* Scheduler */
++      regmap_reg_range(0x500000, 0x503fff),   /* XGMAC0 */
++      regmap_reg_range(0x504000, 0x507fff),   /* XGMAC1 */
++      regmap_reg_range(0x508000, 0x50bfff),   /* XGMAC2 */
++      regmap_reg_range(0x50c000, 0x50ffff),   /* XGMAC3 */
++      regmap_reg_range(0x510000, 0x513fff),   /* XGMAC4 */
++      regmap_reg_range(0x514000, 0x517fff),   /* XGMAC5 */
++      regmap_reg_range(0x600000, 0x6fffff),   /* BM */
++      regmap_reg_range(0x800000, 0x9fffff),   /* QM */
++      regmap_reg_range(0xb00000, 0xbef800),   /* EDMA */
++};
++
++static const struct regmap_access_table ppe_reg_table = {
++      .yes_ranges = ppe_readable_ranges,
++      .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
++};
++
++static const struct regmap_config regmap_config_ipq9574 = {
++      .reg_bits = 32,
++      .reg_stride = 4,
++      .val_bits = 32,
++      .rd_table = &ppe_reg_table,
++      .wr_table = &ppe_reg_table,
++      .max_register = 0xbef800,
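++      /* fast_io selects a spinlock-protected regmap, so these
++       * registers can also be accessed from atomic context.
++       */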
++      .fast_io = true,
++};
++
++static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
++{
++      unsigned long ppe_rate = ppe_dev->clk_rate;
++      struct device *dev = ppe_dev->dev;
++      struct reset_control *rstc;
++      struct clk_bulk_data *clks;
++      struct clk *clk;
++      int ret, i;
++
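++      /* Entries with zero bandwidth fall back to the PPE clock rate
++       * via the "?:" below; e.g. with PPE_CLK_RATE = 353000000, the
++       * "ppe" and "ppe_cfg" paths get Bps_to_icc(353000000) = 353000
++       * kBps for both avg and peak.
++       */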
++      for (i = 0; i < ppe_dev->num_icc_paths; i++) {
++              ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
++              ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
++                                             Bps_to_icc(ppe_rate);
++              ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
++                                              Bps_to_icc(ppe_rate);
++      }
++
++      ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
++                                 ppe_dev->icc_paths);
++      if (ret)
++              return ret;
++
++      ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
++      if (ret)
++              return ret;
++
++      /* All PPE clocks share the same clock tree and run at the same
++       * rate. Setting the rate of the "ppe" clock therefore configures
++       * every PPE clock to that rate.
++       */
++      clk = devm_clk_get(dev, "ppe");
++      if (IS_ERR(clk))
++              return PTR_ERR(clk);
++
++      ret = clk_set_rate(clk, ppe_rate);
++      if (ret)
++              return ret;
++
++      ret = devm_clk_bulk_get_all_enabled(dev, &clks);
++      if (ret < 0)
++              return ret;
++
++      rstc = devm_reset_control_get_exclusive(dev, NULL);
++      if (IS_ERR(rstc))
++              return PTR_ERR(rstc);
++
++      /* Reset the PPE. The 100ms delay after both assert and deassert
++       * is required for the reset to take effect.
++       */
++      ret = reset_control_assert(rstc);
++      if (ret)
++              return ret;
++
++      msleep(100);
++      ret = reset_control_deassert(rstc);
++      if (ret)
++              return ret;
++
++      msleep(100);
++
++      return 0;
++}
++
++static int qcom_ppe_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      struct ppe_device *ppe_dev;
++      void __iomem *base;
++      int ret, num_icc;
++
++      num_icc = ARRAY_SIZE(ppe_icc_data);
++      ppe_dev = devm_kzalloc(dev,
++                             struct_size(ppe_dev, icc_paths, num_icc),
++                             GFP_KERNEL);
++      if (!ppe_dev)
++              return dev_err_probe(dev, -ENOMEM, "PPE alloc memory failed\n");
++
++      base = devm_platform_ioremap_resource(pdev, 0);
++      if (IS_ERR(base))
++              return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
++
++      ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
++      if (IS_ERR(ppe_dev->regmap))
++              return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
++                                   "PPE initialize regmap failed\n");
++      ppe_dev->dev = dev;
++      ppe_dev->clk_rate = PPE_CLK_RATE;
++      ppe_dev->num_ports = PPE_PORT_MAX;
++      ppe_dev->num_icc_paths = num_icc;
++
++      ret = ppe_clock_init_and_reset(ppe_dev);
++      if (ret)
++              return dev_err_probe(dev, ret, "PPE clock config failed\n");
++
++      platform_set_drvdata(pdev, ppe_dev);
++
++      return 0;
++}
++
++static const struct of_device_id qcom_ppe_of_match[] = {
++      { .compatible = "qcom,ipq9574-ppe" },
++      {},
++};
++MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
++
++static struct platform_driver qcom_ppe_driver = {
++      .driver = {
++              .name = "qcom_ppe",
++              .of_match_table = qcom_ppe_of_match,
++      },
++      .probe  = qcom_ppe_probe,
++};
++module_platform_driver(qcom_ppe_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Qualcomm IPQ PPE driver");
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+new file mode 100644
+index 000000000000..733d77f4063d
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __PPE_H__
++#define __PPE_H__
++
++#include <linux/compiler.h>
++#include <linux/interconnect.h>
++
++struct device;
++struct regmap;
++
++/**
++ * struct ppe_device - PPE device private data.
++ * @dev: PPE device structure.
++ * @regmap: PPE register map.
++ * @clk_rate: PPE clock rate.
++ * @num_ports: Number of PPE ports.
++ * @num_icc_paths: Number of interconnect paths.
++ * @icc_paths: Interconnect path array.
++ *
++ * A PPE device is an instance of the PPE hardware and is used to
++ * configure PPE packet processing modules such as BM (buffer management),
++ * QM (queue management), and the scheduler.
++ */
++struct ppe_device {
++      struct device *dev;
++      struct regmap *regmap;
++      unsigned long clk_rate;
++      unsigned int num_ports;
++      unsigned int num_icc_paths;
++      struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
++};
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-19-net-ethernet-qualcomm-Add-PPE-buffer-manager-configu.patch b/target/linux/qualcommbe/patches-6.6/103-19-net-ethernet-qualcomm-Add-PPE-buffer-manager-configu.patch
new file mode 100644 (file)
index 0000000..cc470b4
--- /dev/null
@@ -0,0 +1,324 @@
+From 049820d8a0c918cedd4524eda9abf750819ac901 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 26 Dec 2023 18:19:30 +0800
+Subject: [PATCH 19/50] net: ethernet: qualcomm: Add PPE buffer manager
+ configuration
+
+The BM (Buffer Management) config controls the pause frames generated
+on the PPE ports. A maximum of 15 BM ports and 4 groups are supported;
+all BM ports are assigned to group 0 by default. The number of hardware
+buffers configured for a port influences the flow control threshold
+for that port.
+
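+As a quick sanity check of the programmed values (a debug sketch, not
+part of this patch; assumes the ppe_dev handle from probe):
+
+    u32 val;
+
+    /* BM port 8 maps to PPE physical port 1 */
+    regmap_read(ppe_dev->regmap,
+                PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * 8,
+                &val);
+    pr_info("BM port 8 group: %lu\n",
+            FIELD_GET(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, val));
+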
+Change-Id: Ifb1b69c89966cf5cab19f8e2661c64a4dc6230fe
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe.c       |   5 +
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 181 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    |  10 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  54 ++++++
+ 5 files changed, 251 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 63d50d3b4f2e..410a7bb54cfe 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o
++qcom-ppe-objs := ppe.o ppe_config.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+index 14998ac771c7..443706291ce0 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -15,6 +15,7 @@
+ #include <linux/reset.h>
+ #include "ppe.h"
++#include "ppe_config.h"
+ #define PPE_PORT_MAX          8
+ #define PPE_CLK_RATE          353000000
+@@ -201,6 +202,10 @@ static int qcom_ppe_probe(struct platform_device *pdev)
+       if (ret)
+               return dev_err_probe(dev, ret, "PPE clock config failed\n");
++      ret = ppe_hw_config(ppe_dev);
++      if (ret)
++              return dev_err_probe(dev, ret, "PPE HW config failed\n");
++
+       platform_set_drvdata(pdev, ppe_dev);
+       return 0;
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+new file mode 100644
+index 000000000000..0ba4efdfd509
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -0,0 +1,181 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE HW initialization configs such as BM (buffer management),
++ * QM (queue management) and scheduler configs.
++ */
++
++#include <linux/bitfield.h>
++#include <linux/bits.h>
++#include <linux/device.h>
++#include <linux/regmap.h>
++
++#include "ppe.h"
++#include "ppe_config.h"
++#include "ppe_regs.h"
++
++/**
++ * struct ppe_bm_port_config - PPE BM port configuration.
++ * @port_id_start: The first BM port ID to configure.
++ * @port_id_end: The last BM port ID to configure.
++ * @pre_alloc: Number of buffers dedicated to the BM port.
++ * @in_fly_buf: Number of buffers for packets received after a pause
++ *             frame is sent.
++ * @ceil: Ceiling at which back pressure is generated.
++ * @weight: Weight value.
++ * @resume_offset: Resume offset from the threshold value.
++ * @resume_ceil: Ceiling for resuming from the back pressure state.
++ * @dynamic: Whether the dynamic threshold is used.
++ *
++ * This structure configures the thresholds that affect the port
++ * flow control.
++ */
++struct ppe_bm_port_config {
++      unsigned int port_id_start;
++      unsigned int port_id_end;
++      unsigned int pre_alloc;
++      unsigned int in_fly_buf;
++      unsigned int ceil;
++      unsigned int weight;
++      unsigned int resume_offset;
++      unsigned int resume_ceil;
++      bool dynamic;
++};
++
++static int ipq9574_ppe_bm_group_config = 1550;
++static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
++      {
++              .port_id_start  = 0,
++              .port_id_end    = 0,
++              .pre_alloc      = 0,
++              .in_fly_buf     = 100,
++              .ceil           = 1146,
++              .weight         = 7,
++              .resume_offset  = 8,
++              .resume_ceil    = 0,
++              .dynamic        = true,
++      },
++      {
++              .port_id_start  = 1,
++              .port_id_end    = 7,
++              .pre_alloc      = 0,
++              .in_fly_buf     = 100,
++              .ceil           = 250,
++              .weight         = 4,
++              .resume_offset  = 36,
++              .resume_ceil    = 0,
++              .dynamic        = true,
++      },
++      {
++              .port_id_start  = 8,
++              .port_id_end    = 13,
++              .pre_alloc      = 0,
++              .in_fly_buf     = 128,
++              .ceil           = 250,
++              .weight         = 4,
++              .resume_offset  = 36,
++              .resume_ceil    = 0,
++              .dynamic        = true,
++      },
++      {
++              .port_id_start  = 14,
++              .port_id_end    = 14,
++              .pre_alloc      = 0,
++              .in_fly_buf     = 40,
++              .ceil           = 250,
++              .weight         = 4,
++              .resume_offset  = 36,
++              .resume_ceil    = 0,
++              .dynamic        = true,
++      },
++};
++
++static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
++                                 struct ppe_bm_port_config port_cfg)
++{
++      u32 reg, val, bm_fc_val[2];
++      int ret;
++
++      /* Configure BM flow control related threshold */
++      PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
++      PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
++      PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
++      PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
++      PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
++      PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
++
++      /* The ceiling value is split across two register words. */
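++      /* e.g. ceil = 1146 (0b100_0111_1010): bits [2:0] = 2 go to the
++       * low word and bits [10:3] = 143 to the high word.
++       */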
++      val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
++      PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
++      val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
++      PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
++
++      reg = PPE_BM_PORT_FC_CFG_ADDR + PPE_BM_PORT_FC_CFG_INC * bm_port_id;
++      ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                              bm_fc_val, ARRAY_SIZE(bm_fc_val));
++      if (ret)
++              return ret;
++
++      /* Assign the default group ID 0 to the BM port */
++      val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
++      reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
++                               val);
++      if (ret)
++              return ret;
++
++      /* Enable BM port flow control */
++      val = FIELD_PREP(PPE_BM_PORT_FC_MODE_EN, true);
++      reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
++
++      return regmap_update_bits(ppe_dev->regmap, reg,
++                                PPE_BM_PORT_FC_MODE_EN,
++                                val);
++}
++
++/* Configure the buffer threshold for the port flow control function. */
++static int ppe_config_bm(struct ppe_device *ppe_dev)
++{
++      unsigned int i, bm_port_id, port_cfg_cnt;
++      struct ppe_bm_port_config *port_cfg;
++      u32 reg, val;
++      int ret;
++
++      /* Configure the buffer number of group 0 by default. The buffer
++       * numbers of groups 1-3 are cleared to 0 by the PPE reset performed
++       * during driver probe.
++       */
++      reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
++      val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
++                       ipq9574_ppe_bm_group_config);
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
++                               val);
++      if (ret)
++              goto bm_config_fail;
++
++      port_cfg = ipq9574_ppe_bm_port_config;
++      port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
++      for (i = 0; i < port_cfg_cnt; i++) {
++              for (bm_port_id = port_cfg[i].port_id_start;
++                   bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
++                      ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
++                                                    port_cfg[i]);
++                      if (ret)
++                              goto bm_config_fail;
++              }
++      }
++
++      return 0;
++
++bm_config_fail:
++      dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
++      return ret;
++}
++
++int ppe_hw_config(struct ppe_device *ppe_dev)
++{
++      return ppe_config_bm(ppe_dev);
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+new file mode 100644
+index 000000000000..7e66019de799
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -0,0 +1,10 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __PPE_CONFIG_H__
++#define __PPE_CONFIG_H__
++
++int ppe_hw_config(struct ppe_device *ppe_dev);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+new file mode 100644
+index 000000000000..bf25e0acc0f6
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -0,0 +1,54 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE hardware register and table declarations. */
++#ifndef __PPE_REGS_H__
++#define __PPE_REGS_H__
++
++/* The PPE supports 15 BM ports and 4 BM groups.
++ * BM ports 0-7 map to EDMA port 0, BM ports 8-13 map
++ * to PPE physical ports 1-6, and BM port 14 maps to EIP.
++ */
++#define PPE_BM_PORT_FC_MODE_ADDR              0x600100
++#define PPE_BM_PORT_FC_MODE_INC                       0x4
++#define PPE_BM_PORT_FC_MODE_EN                        BIT(0)
++
++#define PPE_BM_PORT_GROUP_ID_ADDR             0x600180
++#define PPE_BM_PORT_GROUP_ID_INC              0x4
++#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID  GENMASK(1, 0)
++
++#define PPE_BM_SHARED_GROUP_CFG_ADDR          0x600290
++#define PPE_BM_SHARED_GROUP_CFG_INC           0x4
++#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT  GENMASK(10, 0)
++
++#define PPE_BM_PORT_FC_CFG_ADDR                       0x601000
++#define PPE_BM_PORT_FC_CFG_INC                        0x10
++#define PPE_BM_PORT_FC_W0_REACT_LIMIT         GENMASK(8, 0)
++#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD    GENMASK(17, 9)
++#define PPE_BM_PORT_FC_W0_RESUME_OFFSET               GENMASK(28, 18)
++#define PPE_BM_PORT_FC_W0_CEILING_LOW         GENMASK(31, 29)
++#define PPE_BM_PORT_FC_W1_CEILING_HIGH                GENMASK(7, 0)
++#define PPE_BM_PORT_FC_W1_WEIGHT              GENMASK(10, 8)
++#define PPE_BM_PORT_FC_W1_DYNAMIC             BIT(11)
++#define PPE_BM_PORT_FC_W1_PRE_ALLOC           GENMASK(22, 12)
++
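++/* Each port flow-control entry spans two 32-bit words; the W1 setters
++ * below advance the word pointer by one before updating the field.
++ */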
++#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_REACT_LIMIT)
++#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value)   \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_THRESHOLD)
++#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_RESUME_OFFSET)
++#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_BM_PORT_FC_W0_CEILING_LOW)
++#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_CEILING_HIGH)
++#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value)     \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_WEIGHT)
++#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value)    \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_DYNAMIC)
++#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value)  \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
++
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-20-net-ethernet-qualcomm-Add-PPE-queue-management-confi.patch b/target/linux/qualcommbe/patches-6.6/103-20-net-ethernet-qualcomm-Add-PPE-queue-management-confi.patch
new file mode 100644
index 0000000..e24fb69
--- /dev/null
@@ -0,0 +1,322 @@
+From 12a50075552d0e2ada65c039e5a09ca50421f152 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 26 Dec 2023 19:34:49 +0800
+Subject: [PATCH 20/50] net: ethernet: qualcomm: Add PPE queue management
+ config
+
+QM (queue management) config decides the length of the PPE port queues
+and the threshold at which packets are dropped.
+
+There are two types of PPE queue: unicast queues (0-255) and multicast
+queues (256-299). They are configured with different lengths and are
+used to forward different types of traffic.
+
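+As an illustration, programming the drop threshold of one unicast queue
+follows a read-modify-write of its multi-word table entry (a minimal
+sketch using the accessors added by this patch; "regmap" and "queue_id"
+are assumed to be in scope, and error handling is elided):
+
+    u32 cfg[4];
+    u32 reg = PPE_AC_UNI_QUEUE_CFG_TBL_ADDR +
+              PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue_id;
+
+    regmap_bulk_read(regmap, reg, cfg, ARRAY_SIZE(cfg));
+    PPE_AC_UNI_QUEUE_SET_THRESHOLD(cfg, 400);  /* drop ceiling */
+    regmap_bulk_write(regmap, reg, cfg, ARRAY_SIZE(cfg));
+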
+Change-Id: I74ffcb6a39618ca8f585b5204d483fb45edecba8
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 176 +++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  82 ++++++++
+ 2 files changed, 257 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index 0ba4efdfd509..4192fdc8d3a3 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -43,6 +43,27 @@ struct ppe_bm_port_config {
+       bool dynamic;
+ };
++/**
++ * struct ppe_qm_queue_config - PPE queue config.
++ * @queue_start: First queue ID in the range.
++ * @queue_end: Last queue ID in the range.
++ * @prealloc_buf: Dedicated buffer number for the queue.
++ * @ceil: Ceiling at which packets start to be dropped from the queue.
++ * @weight: Weight value.
++ * @resume_offset: Resume offset from the threshold.
++ * @dynamic: Threshold value is decided dynamically or statically.
++ *
++ */
++struct ppe_qm_queue_config {
++      unsigned int queue_start;
++      unsigned int queue_end;
++      unsigned int prealloc_buf;
++      unsigned int ceil;
++      unsigned int weight;
++      unsigned int resume_offset;
++      bool dynamic;
++};
++
+ static int ipq9574_ppe_bm_group_config = 1550;
+ static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+       {
+@@ -91,6 +112,31 @@ static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+       },
+ };
++/* Default QM group settings for IPQ9574. */
++static int ipq9574_ppe_qm_group_config = 2000;
++
++/* Default QM settings for unicast and multicast queues for IPQ9574. */
++static struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
++      {
++              .queue_start    = 0,
++              .queue_end      = 255,
++              .prealloc_buf   = 0,
++              .ceil           = 400,
++              .weight         = 4,
++              .resume_offset  = 36,
++              .dynamic        = true,
++      },
++      {
++              .queue_start    = 256,
++              .queue_end      = 299,
++              .prealloc_buf   = 0,
++              .ceil           = 250,
++              .weight         = 0,
++              .resume_offset  = 36,
++              .dynamic        = false,
++      },
++};
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -175,7 +221,135 @@ static int ppe_config_bm(struct ppe_device *ppe_dev)
+       return ret;
+ }
++/* Configure the PPE hardware queue depth, which is decided by the
++ * queue threshold.
++ */
++static int ppe_config_qm(struct ppe_device *ppe_dev)
++{
++      struct ppe_qm_queue_config *queue_cfg;
++      int ret, i, queue_id, queue_cfg_count;
++      u32 reg, multicast_queue_cfg[5];
++      u32 unicast_queue_cfg[4];
++      u32 group_cfg[3];
++
++      /* Assign the buffer number to group 0 by default. */
++      reg = PPE_AC_GRP_CFG_TBL_ADDR;
++      ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                             group_cfg, ARRAY_SIZE(group_cfg));
++      if (ret)
++              goto qm_config_fail;
++
++      PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
++
++      ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                              group_cfg, ARRAY_SIZE(group_cfg));
++      if (ret)
++              goto qm_config_fail;
++
++      queue_cfg = ipq9574_ppe_qm_queue_config;
++      queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
++      for (i = 0; i < queue_cfg_count; i++) {
++              queue_id = queue_cfg[i].queue_start;
++
++                      /* Configure the drop threshold for the unicast and
++                       * multicast queues, which occupy different queue ID
++                       * ranges.
++                       */
++              while (queue_id <= queue_cfg[i].queue_end) {
++                      if (queue_id < PPE_AC_UNI_QUEUE_CFG_TBL_NUM) {
++                              reg = PPE_AC_UNI_QUEUE_CFG_TBL_ADDR +
++                                    PPE_AC_UNI_QUEUE_CFG_TBL_INC * queue_id;
++
++                              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                                     unicast_queue_cfg,
++                                                     ARRAY_SIZE(unicast_queue_cfg));
++                              if (ret)
++                                      goto qm_config_fail;
++
++                              PPE_AC_UNI_QUEUE_SET_EN(unicast_queue_cfg, true);
++                              PPE_AC_UNI_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
++                              PPE_AC_UNI_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
++                                                             queue_cfg[i].prealloc_buf);
++                              PPE_AC_UNI_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
++                                                           queue_cfg[i].dynamic);
++                              PPE_AC_UNI_QUEUE_SET_WEIGHT(unicast_queue_cfg,
++                                                          queue_cfg[i].weight);
++                              PPE_AC_UNI_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
++                                                             queue_cfg[i].ceil);
++                              PPE_AC_UNI_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
++                                                              queue_cfg[i].resume_offset);
++
++                              ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                                                      unicast_queue_cfg,
++                                                      ARRAY_SIZE(unicast_queue_cfg));
++                              if (ret)
++                                      goto qm_config_fail;
++                      } else {
++                              /* The multicast table is indexed from the
++                               * first multicast queue ID (256).
++                               */
++                              reg = PPE_AC_MUL_QUEUE_CFG_TBL_ADDR +
++                                    PPE_AC_MUL_QUEUE_CFG_TBL_INC *
++                                    (queue_id - PPE_AC_UNI_QUEUE_CFG_TBL_NUM);
++
++                              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                                     multicast_queue_cfg,
++                                                     ARRAY_SIZE(multicast_queue_cfg));
++                              if (ret)
++                                      goto qm_config_fail;
++
++                              PPE_AC_MUL_QUEUE_SET_EN(multicast_queue_cfg, true);
++                              PPE_AC_MUL_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
++                              PPE_AC_MUL_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
++                                                                 queue_cfg[i].prealloc_buf);
++                              PPE_AC_MUL_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
++                                                                 queue_cfg[i].ceil);
++                              PPE_AC_MUL_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
++                                                              queue_cfg[i].resume_offset);
++
++                              ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                                                      multicast_queue_cfg,
++                                                      ARRAY_SIZE(multicast_queue_cfg));
++                              if (ret)
++                                      goto qm_config_fail;
++                      }
++
++                      /* Enable enqueue */
++                      reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
++                      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                                               PPE_ENQ_OPR_TBL_ENQ_DISABLE,
++                                               FIELD_PREP(PPE_ENQ_OPR_TBL_ENQ_DISABLE, false));
++                      if (ret)
++                              goto qm_config_fail;
++
++                      /* Enable dequeue */
++                      reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
++                      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                                               PPE_DEQ_OPR_TBL_DEQ_DISABLE,
++                                               FIELD_PREP(PPE_DEQ_OPR_TBL_DEQ_DISABLE, false));
++                      if (ret)
++                              goto qm_config_fail;
++
++                      queue_id++;
++              }
++      }
++
++      /* Enable queue counter for all PPE hardware queues. */
++      ret = regmap_update_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
++                               PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN,
++                               PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
++      if (ret)
++              goto qm_config_fail;
++
++      return 0;
++
++qm_config_fail:
++      dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
++      return ret;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+-      return ppe_config_bm(ppe_dev);
++      int ret;
++
++      ret = ppe_config_bm(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_config_qm(ppe_dev);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index bf25e0acc0f6..0bc13979e5e2 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -11,6 +11,14 @@
+  * BM ports 0-7 map to EDMA port 0, BM ports 8-13 map
+  * to PPE physical ports 1-6, and BM port 14 maps to EIP.
+  */
++#define PPE_EG_BRIDGE_CONFIG_ADDR             0x20044
++#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN     BIT(2)
++
++#define PPE_DEQ_OPR_TBL_ADDR                  0x430000
++#define PPE_DEQ_OPR_TBL_NUM                   300
++#define PPE_DEQ_OPR_TBL_INC                   0x10
++#define PPE_DEQ_OPR_TBL_DEQ_DISABLE           BIT(0)
++
+ #define PPE_BM_PORT_FC_MODE_ADDR              0x600100
+ #define PPE_BM_PORT_FC_MODE_INC                       0x4
+ #define PPE_BM_PORT_FC_MODE_EN                        BIT(0)
+@@ -51,4 +59,78 @@
+ #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value)  \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
++/* PPE unicast queue (0-255) configurations. */
++#define PPE_AC_UNI_QUEUE_CFG_TBL_ADDR         0x848000
++#define PPE_AC_UNI_QUEUE_CFG_TBL_NUM          256
++#define PPE_AC_UNI_QUEUE_CFG_TBL_INC          0x10
++#define PPE_AC_UNI_QUEUE_CFG_W0_EN            BIT(0)
++#define PPE_AC_UNI_QUEUE_CFG_W0_WRED_EN               BIT(1)
++#define PPE_AC_UNI_QUEUE_CFG_W0_FC_EN         BIT(2)
++#define PPE_AC_UNI_QUEUE_CFG_W0_COLOR_AWARE   BIT(3)
++#define PPE_AC_UNI_QUEUE_CFG_W0_GRP_ID                GENMASK(5, 4)
++#define PPE_AC_UNI_QUEUE_CFG_W0_PRE_LIMIT     GENMASK(16, 6)
++#define PPE_AC_UNI_QUEUE_CFG_W0_DYNAMIC               BIT(17)
++#define PPE_AC_UNI_QUEUE_CFG_W0_WEIGHT                GENMASK(20, 18)
++#define PPE_AC_UNI_QUEUE_CFG_W0_THRESHOLD     GENMASK(31, 21)
++#define PPE_AC_UNI_QUEUE_CFG_W3_GRN_RESUME    GENMASK(23, 13)
++
++#define PPE_AC_UNI_QUEUE_SET_EN(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_EN)
++#define PPE_AC_UNI_QUEUE_SET_GRP_ID(tbl_cfg, value)   \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_GRP_ID)
++#define PPE_AC_UNI_QUEUE_SET_PRE_LIMIT(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_PRE_LIMIT)
++#define PPE_AC_UNI_QUEUE_SET_DYNAMIC(tbl_cfg, value)  \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_DYNAMIC)
++#define PPE_AC_UNI_QUEUE_SET_WEIGHT(tbl_cfg, value)   \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_WEIGHT)
++#define PPE_AC_UNI_QUEUE_SET_THRESHOLD(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_UNI_QUEUE_CFG_W0_THRESHOLD)
++#define PPE_AC_UNI_QUEUE_SET_GRN_RESUME(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x3, value, PPE_AC_UNI_QUEUE_CFG_W3_GRN_RESUME)
++
++/* PPE multicast queue (256-299) configurations. */
++#define PPE_AC_MUL_QUEUE_CFG_TBL_ADDR         0x84a000
++#define PPE_AC_MUL_QUEUE_CFG_TBL_NUM          44
++#define PPE_AC_MUL_QUEUE_CFG_TBL_INC          0x10
++#define PPE_AC_MUL_QUEUE_CFG_W0_EN            BIT(0)
++#define PPE_AC_MUL_QUEUE_CFG_W0_FC_EN         BIT(1)
++#define PPE_AC_MUL_QUEUE_CFG_W0_COLOR_AWARE   BIT(2)
++#define PPE_AC_MUL_QUEUE_CFG_W0_GRP_ID                GENMASK(4, 3)
++#define PPE_AC_MUL_QUEUE_CFG_W0_PRE_LIMIT     GENMASK(15, 5)
++#define PPE_AC_MUL_QUEUE_CFG_W0_THRESHOLD     GENMASK(26, 16)
++#define PPE_AC_MUL_QUEUE_CFG_W2_RESUME                GENMASK(17, 7)
++
++#define PPE_AC_MUL_QUEUE_SET_EN(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_EN)
++#define PPE_AC_MUL_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_GRP_ID)
++#define PPE_AC_MUL_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value)    \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_PRE_LIMIT)
++#define PPE_AC_MUL_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value)    \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_AC_MUL_QUEUE_CFG_W0_THRESHOLD)
++#define PPE_AC_MUL_QUEUE_SET_GRN_RESUME(tbl_cfg, value)       \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x2, value, PPE_AC_MUL_QUEUE_CFG_W2_RESUME)
++
++/* PPE admission control group (0-3) configurations */
++#define PPE_AC_GRP_CFG_TBL_ADDR                       0x84c000
++#define PPE_AC_GRP_CFG_TBL_NUM                        0x4
++#define PPE_AC_GRP_CFG_TBL_INC                        0x10
++#define PPE_AC_GRP_W0_AC_EN                   BIT(0)
++#define PPE_AC_GRP_W0_AC_FC_EN                        BIT(1)
++#define PPE_AC_GRP_W0_COLOR_AWARE             BIT(2)
++#define PPE_AC_GRP_W0_THRESHOLD_LOW           GENMASK(31, 25)
++#define PPE_AC_GRP_W1_THRESHOLD_HIGH          GENMASK(3, 0)
++#define PPE_AC_GRP_W1_BUF_LIMIT                       GENMASK(14, 4)
++#define PPE_AC_GRP_W2_RESUME_GRN              GENMASK(15, 5)
++#define PPE_AC_GRP_W2_PRE_ALLOC                       GENMASK(26, 16)
++
++#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
++
++#define PPE_ENQ_OPR_TBL_ADDR                  0x85c000
++#define PPE_ENQ_OPR_TBL_NUM                   300
++#define PPE_ENQ_OPR_TBL_INC                   0x10
++#define PPE_ENQ_OPR_TBL_ENQ_DISABLE           BIT(0)
++
+ #endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-21-net-ethernet-qualcomm-Add-PPE-scheduler-config.patch b/target/linux/qualcommbe/patches-6.6/103-21-net-ethernet-qualcomm-Add-PPE-scheduler-config.patch
new file mode 100644
index 0000000..01716e3
--- /dev/null
@@ -0,0 +1,1185 @@
+From 8ae6ba538521693c4df0675a2f6a45f92daedb80 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 26 Dec 2023 20:18:09 +0800
+Subject: [PATCH 21/50] net: ethernet: qualcomm: Add PPE scheduler config
+
+PPE scheduler config determines the priority with which packets
+are scheduled. The scheduler config is used to support QoS
+offload in the PPE hardware.
+
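+As an illustration, a caller can adjust the scheduler priority of a
+single node through the new API (a minimal sketch; "ppe_dev" is assumed
+to be in scope; node IDs below PPE_QUEUE_ID_NUM address level 0 queues,
+higher IDs address level 1 flows):
+
+    /* Raise unicast queue 16 to the highest scheduler priority. */
+    int err = ppe_queue_priority_set(ppe_dev, 16, PPE_QUEUE_SCH_PRI_NUM - 1);
+
+    if (err)
+        dev_err(ppe_dev->dev, "failed to set queue priority\n");
+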
+Change-Id: I4811bd133074757371775a6a69a1cc3cfaa8d0d0
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.c   |  40 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.h   |  21 +
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 884 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    |  26 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  94 ++
+ 6 files changed, 1064 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 410a7bb54cfe..e4e5c94fde3e 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o ppe_config.o
++qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+new file mode 100644
+index 000000000000..ba35fc151e2c
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+@@ -0,0 +1,40 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#include "ppe.h"
++#include "ppe_api.h"
++#include "ppe_config.h"
++
++/**
++ * ppe_queue_priority_set - set scheduler priority of PPE hardware queue
++ * @ppe_dev: PPE device
++ * @node_id: PPE hardware node ID, which is either queue ID or flow ID
++ * @priority: QoS scheduler priority
++ *
++ * Configure the scheduler priority of a PPE hardware queue. The maximum
++ * node ID supported is PPE_QUEUE_ID_NUM plus PPE_FLOW_ID_NUM; queue IDs
++ * belong to level 0 and flow IDs belong to level 1 of the packet pipeline.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_priority_set(struct ppe_device *ppe_dev,
++                         int node_id, int priority)
++{
++      struct ppe_qos_scheduler_cfg sch_cfg;
++      int ret, port, level = 0;
++
++      if (node_id >= PPE_QUEUE_ID_NUM) {
++              level = 1;
++              node_id -= PPE_QUEUE_ID_NUM;
++      }
++
++      ret = ppe_queue_scheduler_get(ppe_dev, node_id, level, &port, &sch_cfg);
++      if (ret)
++              return ret;
++
++      sch_cfg.pri = priority;
++
++      return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+new file mode 100644
+index 000000000000..ee5b47d06991
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* These APIs may also be used by higher-level network drivers, such as
++ * Ethernet or QoS drivers.
++ */
++
++#ifndef __PPE_API_H__
++#define __PPE_API_H__
++
++#include "ppe.h"
++
++#define PPE_QUEUE_ID_NUM                      300
++#define PPE_FLOW_ID_NUM                               64
++#define PPE_QUEUE_SCH_PRI_NUM                 8
++
++int ppe_queue_priority_set(struct ppe_device *ppe_dev,
++                         int node_id, int priority);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index 4192fdc8d3a3..bdef26da6fd3 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -13,6 +13,7 @@
+ #include <linux/regmap.h>
+ #include "ppe.h"
++#include "ppe_api.h"
+ #include "ppe_config.h"
+ #include "ppe_regs.h"
+@@ -52,7 +53,6 @@ struct ppe_bm_port_config {
+  * @weight: Weight value.
+  * @resume_offset: Resume offset from the threshold.
+  * @dynamic: Threshold value is decided dynamically or statically.
+- *
+  */
+ struct ppe_qm_queue_config {
+       unsigned int queue_start;
+@@ -64,6 +64,61 @@ struct ppe_qm_queue_config {
+       bool dynamic;
+ };
++/**
++ * struct ppe_sch_bm_config - PPE arbitration for buffer config.
++ * @valid: Arbitration entry valid or not.
++ * @is_egress: Arbitration entry for egress or not.
++ * @port: Port ID to use arbitration entry.
++ * @second_valid: Second port valid or not.
++ * @second_port: Second port to use.
++ */
++struct ppe_sch_bm_config {
++      bool valid;
++      bool is_egress;
++      unsigned int port;
++      bool second_valid;
++      unsigned int second_port;
++};
++
++/**
++ * struct ppe_sch_schedule_config - PPE arbitration for scheduler config.
++ * @ensch_port_bmp: Port bit map for enqueue scheduler.
++ * @ensch_port: Port ID to enqueue scheduler.
++ * @desch_port: Port ID to dequeue scheduler.
++ * @desch_second_valid: Dequeue for the second port valid or not.
++ * @desch_second_port: Second port ID to dequeue scheduler.
++ */
++struct ppe_sch_schedule_config {
++      unsigned int ensch_port_bmp;
++      unsigned int ensch_port;
++      unsigned int desch_port;
++      bool desch_second_valid;
++      unsigned int desch_second_port;
++};
++
++/**
++ * struct ppe_port_schedule_config - PPE port scheduler config.
++ * @port: Port ID to be scheduled.
++ * @flow_level: Scheduler flow level or not.
++ * @node_id: Node ID; for level 0, the queue ID is used.
++ * @loop_num: Loop number of scheduler config.
++ * @pri_max: Max priority configured.
++ * @flow_id: Strict priority ID.
++ * @drr_node_id: Node ID for scheduler.
++ *
++ * PPE scheduler config, which decides the packet scheduling priority
++ * on the egress port.
++ */
++struct ppe_port_schedule_config {
++      unsigned int port;
++      bool flow_level;
++      unsigned int node_id;
++      unsigned int loop_num;
++      unsigned int pri_max;
++      unsigned int flow_id;
++      unsigned int drr_node_id;
++};
++
+ static int ipq9574_ppe_bm_group_config = 1550;
+ static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+       {
+@@ -137,6 +192,707 @@ static struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
+       },
+ };
++static struct ppe_sch_bm_config ipq9574_ppe_sch_bm_config[] = {
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 7, 0, 0},
++      {1, 1, 7, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 2, 0, 0},
++      {1, 1, 2, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 3, 0, 0},
++      {1, 1, 3, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 7, 0, 0},
++      {1, 1, 7, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 4, 0, 0},
++      {1, 1, 4, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 2, 0, 0},
++      {1, 1, 2, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 7, 0, 0},
++      {1, 1, 7, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 3, 0, 0},
++      {1, 1, 3, 0, 0},
++      {1, 0, 1, 0, 0},
++      {1, 1, 1, 0, 0},
++      {1, 0, 0, 0, 0},
++      {1, 1, 0, 0, 0},
++      {1, 0, 5, 0, 0},
++      {1, 1, 5, 0, 0},
++      {1, 0, 6, 0, 0},
++      {1, 1, 6, 0, 0},
++      {1, 0, 4, 0, 0},
++      {1, 1, 4, 0, 0},
++      {1, 0, 7, 0, 0},
++      {1, 1, 7, 0, 0},
++};
++
++static struct ppe_sch_schedule_config ipq9574_ppe_sch_schedule_config[] = {
++      {0x98, 6, 0, 1, 1},
++      {0x94, 5, 6, 1, 3},
++      {0x86, 0, 5, 1, 4},
++      {0x8C, 1, 6, 1, 0},
++      {0x1C, 7, 5, 1, 1},
++      {0x98, 2, 6, 1, 0},
++      {0x1C, 5, 7, 1, 1},
++      {0x34, 3, 6, 1, 0},
++      {0x8C, 4, 5, 1, 1},
++      {0x98, 2, 6, 1, 0},
++      {0x8C, 5, 4, 1, 1},
++      {0xA8, 0, 6, 1, 2},
++      {0x98, 5, 1, 1, 0},
++      {0x98, 6, 5, 1, 2},
++      {0x89, 1, 6, 1, 4},
++      {0xA4, 3, 0, 1, 1},
++      {0x8C, 5, 6, 1, 4},
++      {0xA8, 0, 2, 1, 1},
++      {0x98, 6, 5, 1, 0},
++      {0xC4, 4, 3, 1, 1},
++      {0x94, 6, 5, 1, 0},
++      {0x1C, 7, 6, 1, 1},
++      {0x98, 2, 5, 1, 0},
++      {0x1C, 6, 7, 1, 1},
++      {0x1C, 5, 6, 1, 0},
++      {0x94, 3, 5, 1, 1},
++      {0x8C, 4, 6, 1, 0},
++      {0x94, 1, 5, 1, 3},
++      {0x94, 6, 1, 1, 0},
++      {0xD0, 3, 5, 1, 2},
++      {0x98, 6, 0, 1, 1},
++      {0x94, 5, 6, 1, 3},
++      {0x94, 1, 5, 1, 0},
++      {0x98, 2, 6, 1, 1},
++      {0x8C, 4, 5, 1, 0},
++      {0x1C, 7, 6, 1, 1},
++      {0x8C, 0, 5, 1, 4},
++      {0x89, 1, 6, 1, 2},
++      {0x98, 5, 0, 1, 1},
++      {0x94, 6, 5, 1, 3},
++      {0x92, 0, 6, 1, 2},
++      {0x98, 1, 5, 1, 0},
++      {0x98, 6, 2, 1, 1},
++      {0xD0, 0, 5, 1, 3},
++      {0x94, 6, 0, 1, 1},
++      {0x8C, 5, 6, 1, 4},
++      {0x8C, 1, 5, 1, 0},
++      {0x1C, 6, 7, 1, 1},
++      {0x1C, 5, 6, 1, 0},
++      {0xB0, 2, 3, 1, 1},
++      {0xC4, 4, 5, 1, 0},
++      {0x8C, 6, 4, 1, 1},
++      {0xA4, 3, 6, 1, 0},
++      {0x1C, 5, 7, 1, 1},
++      {0x4C, 0, 5, 1, 4},
++      {0x8C, 6, 0, 1, 1},
++      {0x34, 7, 6, 1, 3},
++      {0x94, 5, 0, 1, 1},
++      {0x98, 6, 5, 1, 2},
++};
++
++static struct ppe_port_schedule_config ppe_qos_schedule_config[] = {
++      {
++              .port           = 0,
++              .flow_level     = true,
++              .node_id        = 0,
++              .loop_num       = 1,
++              .pri_max        = 1,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 0,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 8,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 16,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 24,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 32,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 40,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 48,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 56,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 256,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 0,
++              .flow_level     = false,
++              .node_id        = 264,
++              .loop_num       = 8,
++              .pri_max        = 8,
++              .flow_id        = 0,
++              .drr_node_id    = 0,
++      },
++      {
++              .port           = 1,
++              .flow_level     = true,
++              .node_id        = 36,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 1,
++              .drr_node_id    = 8,
++      },
++      {
++              .port           = 1,
++              .flow_level     = false,
++              .node_id        = 144,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 36,
++              .drr_node_id    = 48,
++      },
++      {
++              .port           = 1,
++              .flow_level     = false,
++              .node_id        = 272,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 36,
++              .drr_node_id    = 48,
++      },
++      {
++              .port           = 2,
++              .flow_level     = true,
++              .node_id        = 40,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 2,
++              .drr_node_id    = 12,
++      },
++      {
++              .port           = 2,
++              .flow_level     = false,
++              .node_id        = 160,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 40,
++              .drr_node_id    = 64,
++      },
++      {
++              .port           = 2,
++              .flow_level     = false,
++              .node_id        = 276,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 40,
++              .drr_node_id    = 64,
++      },
++      {
++              .port           = 3,
++              .flow_level     = true,
++              .node_id        = 44,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 3,
++              .drr_node_id    = 16,
++      },
++      {
++              .port           = 3,
++              .flow_level     = false,
++              .node_id        = 176,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 44,
++              .drr_node_id    = 80,
++      },
++      {
++              .port           = 3,
++              .flow_level     = false,
++              .node_id        = 280,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 44,
++              .drr_node_id    = 80,
++      },
++      {
++              .port           = 4,
++              .flow_level     = true,
++              .node_id        = 48,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 4,
++              .drr_node_id    = 20,
++      },
++      {
++              .port           = 4,
++              .flow_level     = false,
++              .node_id        = 192,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 48,
++              .drr_node_id    = 96,
++      },
++      {
++              .port           = 4,
++              .flow_level     = false,
++              .node_id        = 284,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 48,
++              .drr_node_id    = 96,
++      },
++      {
++              .port           = 5,
++              .flow_level     = true,
++              .node_id        = 52,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 5,
++              .drr_node_id    = 24,
++      },
++      {
++              .port           = 5,
++              .flow_level     = false,
++              .node_id        = 208,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 52,
++              .drr_node_id    = 112,
++      },
++      {
++              .port           = 5,
++              .flow_level     = false,
++              .node_id        = 288,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 52,
++              .drr_node_id    = 112,
++      },
++      {
++              .port           = 6,
++              .flow_level     = true,
++              .node_id        = 56,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 6,
++              .drr_node_id    = 28,
++      },
++      {
++              .port           = 6,
++              .flow_level     = false,
++              .node_id        = 224,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 56,
++              .drr_node_id    = 128,
++      },
++      {
++              .port           = 6,
++              .flow_level     = false,
++              .node_id        = 292,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 56,
++              .drr_node_id    = 128,
++      },
++      {
++              .port           = 7,
++              .flow_level     = true,
++              .node_id        = 60,
++              .loop_num       = 2,
++              .pri_max        = 0,
++              .flow_id        = 7,
++              .drr_node_id    = 32,
++      },
++      {
++              .port           = 7,
++              .flow_level     = false,
++              .node_id        = 240,
++              .loop_num       = 16,
++              .pri_max        = 8,
++              .flow_id        = 60,
++              .drr_node_id    = 144,
++      },
++      {
++              .port           = 7,
++              .flow_level     = false,
++              .node_id        = 296,
++              .loop_num       = 4,
++              .pri_max        = 4,
++              .flow_id        = 60,
++              .drr_node_id    = 144,
++      },
++};
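++
++/* As an example, the port 1 entries above attach flow nodes 36-37 at
++ * level 1, and unicast queue nodes 144-159 plus multicast queue nodes
++ * 272-275 at level 0, to PPE port 1.
++ */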
++
++/* Set the first level scheduler configuration. */
++static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
++                                        int node_id, int port,
++                                        struct ppe_qos_scheduler_cfg scheduler_cfg)
++{
++      u32 val, reg;
++      int ret;
++
++      reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
++      val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
++      val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
++      val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
++      val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
++      val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
++            (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++            PPE_L0_C_FLOW_CFG_TBL_INC;
++      val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++      val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
++            (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++            PPE_L0_E_FLOW_CFG_TBL_INC;
++      val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++      val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
++      val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
++      val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.node_frame_mode);
++
++      return regmap_update_bits(ppe_dev->regmap, reg,
++                                PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
++                                val);
++}
++
++/* Get the first level scheduler configuration. */
++static int ppe_scheduler_l0_queue_map_get(struct ppe_device *ppe_dev,
++                                        int node_id, int *port,
++                                        struct ppe_qos_scheduler_cfg *scheduler_cfg)
++{
++      u32 val, reg;
++      int ret;
++
++      reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->flow_id = FIELD_GET(PPE_L0_FLOW_MAP_TBL_FLOW_ID, val);
++      scheduler_cfg->pri = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_PRI, val);
++      scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, val);
++
++      reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
++            (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
++            PPE_L0_C_FLOW_CFG_TBL_INC;
++
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->drr_node_id = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, val);
++      scheduler_cfg->node_unit = FIELD_GET(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
++
++      reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      *port = FIELD_GET(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, val);
++
++      reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->node_frame_mode = FIELD_GET(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, val);
++
++      return 0;
++}
++
++/* Set the second level scheduler configuration. */
++static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
++                                        int node_id, int port,
++                                        struct ppe_qos_scheduler_cfg scheduler_cfg)
++{
++      u32 val, reg;
++      int ret;
++
++      val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
++      val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
++      val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
++      val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
++      val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
++      reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++      val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
++      reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
++            (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++            PPE_L1_C_FLOW_CFG_TBL_INC;
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
++      val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.node_unit);
++      reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
++              (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
++              PPE_L1_E_FLOW_CFG_TBL_INC;
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
++      reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
++      val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.node_frame_mode);
++
++      return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
++}
++
++/* Get the second level scheduler configuration. */
++static int ppe_scheduler_l1_queue_map_get(struct ppe_device *ppe_dev,
++                                        int node_id, int *port,
++                                        struct ppe_qos_scheduler_cfg *scheduler_cfg)
++{
++      u32 val, reg;
++      int ret;
++
++      reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->flow_id = FIELD_GET(PPE_L1_FLOW_MAP_TBL_FLOW_ID, val);
++      scheduler_cfg->pri = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_PRI, val);
++      scheduler_cfg->drr_node_wt = FIELD_GET(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, val);
++
++      reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
++            (scheduler_cfg->flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg->pri) *
++            PPE_L1_C_FLOW_CFG_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->drr_node_id = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, val);
++      scheduler_cfg->node_unit = FIELD_GET(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, val);
++
++      reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      *port = FIELD_GET(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, val);
++
++      reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret)
++              return ret;
++
++      scheduler_cfg->node_frame_mode = FIELD_GET(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
++
++      return 0;
++}
++
++/**
++ * ppe_queue_scheduler_set - set QoS scheduler of PPE hardware queue
++ * @ppe_dev: PPE device
++ * @node_id: PPE node ID
++ * @flow_level: Flow level scheduler or queue level scheduler
++ * @port: PPE port ID on which to set the scheduler configuration
++ * @scheduler_cfg: QoS scheduler configuration
++ *
++ * The PPE hardware supports the QoS function, which is based on the
++ * hardware queue scheduler of the PPE port.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
++                          int node_id, bool flow_level, int port,
++                          struct ppe_qos_scheduler_cfg scheduler_cfg)
++{
++      if (flow_level)
++              return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
++                                                    port, scheduler_cfg);
++
++      return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
++                                            port, scheduler_cfg);
++}
++
++/**
++ * ppe_queue_scheduler_get - get QoS scheduler of PPE hardware queue
++ * @ppe_dev: PPE device
++ * @node_id: PPE node ID
++ * @flow_level: Flow level scheduler or queue level scheduler
++ * @port: PPE port ID to get scheduler config
++ * @scheduler_cfg: QoS scheduler configuration
++ *
++ * The PPE hardware supports the QoS function; the current scheduler
++ * configuration can be acquired based on the queue ID of the PPE port.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
++                          int node_id, bool flow_level, int *port,
++                          struct ppe_qos_scheduler_cfg *scheduler_cfg)
++{
++      if (flow_level)
++              return ppe_scheduler_l1_queue_map_get(ppe_dev, node_id,
++                                                    port, scheduler_cfg);
++
++      return ppe_scheduler_l0_queue_map_get(ppe_dev, node_id,
++                                            port, scheduler_cfg);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -343,6 +1099,126 @@ static int ppe_config_qm(struct ppe_device *ppe_dev)
+       return ret;
+ }
++static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
++                                   struct ppe_port_schedule_config config)
++{
++      struct ppe_qos_scheduler_cfg qos_cfg;
++      int ret, i;
++
++      for (i = 0; i < config.loop_num; i++) {
++              if (!config.pri_max) {
++                      /* Round robin scheduler without priority. */
++                      qos_cfg.flow_id = config.flow_id;
++                      qos_cfg.pri = 0;
++                      qos_cfg.drr_node_id = config.drr_node_id;
++              } else {
++                      qos_cfg.flow_id = config.flow_id + (i / config.pri_max);
++                      qos_cfg.pri = i % config.pri_max;
++                      qos_cfg.drr_node_id = config.drr_node_id + i;
++              }
++
++              /* Scheduler weight, must be greater than 0. */
++              qos_cfg.drr_node_wt = 1;
++              /* Byte-based scheduling. */
++              qos_cfg.node_unit = 0;
++              /* Frame + CRC calculated. */
++              qos_cfg.node_frame_mode = 1;
++
++              ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
++                                            config.flow_level,
++                                            config.port,
++                                            qos_cfg);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "PPE scheduler config error %d\n", ret);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++/* Configure PPE offloaded QoS scheduler. */
++static int ppe_config_qos(struct ppe_device *ppe_dev)
++{
++      int ret, i;
++
++      for (i = 0; i < ARRAY_SIZE(ppe_qos_schedule_config); i++) {
++              if (ppe_qos_schedule_config[i].port >= ppe_dev->num_ports)
++                      break;
++
++              ret = ppe_node_scheduler_config(ppe_dev, ppe_qos_schedule_config[i]);
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
++/* Configure scheduling management of PPE ports. */
++static int ppe_config_scheduler(struct ppe_device *ppe_dev)
++{
++      struct ppe_sch_schedule_config *schedule_cfg;
++      int ret, i, bm_count, schedule_count;
++      struct ppe_sch_bm_config *bm_cfg;
++      u32 val, reg;
++
++      bm_cfg = ipq9574_ppe_sch_bm_config;
++      bm_count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
++
++      schedule_cfg = ipq9574_ppe_sch_schedule_config;
++      schedule_count = ARRAY_SIZE(ipq9574_ppe_sch_schedule_config);
++
++      val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, bm_count);
++      val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
++      val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
++
++      ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
++      if (ret)
++              goto sch_config_fail;
++
++      for (i = 0; i < bm_count; i++) {
++              val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
++              val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].is_egress);
++              val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
++              val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID, bm_cfg[i].second_valid);
++              val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT, bm_cfg[i].second_port);
++
++              reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
++              ret = regmap_write(ppe_dev->regmap, reg, val);
++              if (ret)
++                      goto sch_config_fail;
++      }
++
++      val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, schedule_count);
++      ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
++      if (ret)
++              goto sch_config_fail;
++
++      for (i = 0; i < schedule_count; i++) {
++              val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
++                               schedule_cfg[i].ensch_port_bmp);
++              val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
++                                schedule_cfg[i].ensch_port);
++              val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
++                                schedule_cfg[i].desch_port);
++              val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
++                                schedule_cfg[i].desch_second_valid);
++              val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
++                                schedule_cfg[i].desch_second_port);
++              reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
++
++              ret = regmap_write(ppe_dev->regmap, reg, val);
++              if (ret)
++                      goto sch_config_fail;
++      }
++
++      return ppe_config_qos(ppe_dev);
++
++sch_config_fail:
++      dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
++      return ret;
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+       int ret;
+@@ -351,5 +1227,9 @@ int ppe_hw_config(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_config_qm(ppe_dev);
++      ret = ppe_config_qm(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_config_scheduler(ppe_dev);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index 7e66019de799..167a114031fd 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -6,5 +6,31 @@
+ #ifndef __PPE_CONFIG_H__
+ #define __PPE_CONFIG_H__
++/**
++ * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
++ * @flow_id: PPE flow ID.
++ * @pri: Scheduler priority.
++ * @drr_node_id: Node ID for scheduled traffic.
++ * @drr_node_wt: Weight for scheduled traffic.
++ * @node_unit: Unit for scheduled traffic.
++ * @node_frame_mode: Packet mode to be scheduled.
++ *
++ * The PPE QoS feature supports commit and exceed traffic.
++ */
++struct ppe_qos_scheduler_cfg {
++      int flow_id;
++      int pri;
++      int drr_node_id;
++      int drr_node_wt;
++      int node_unit;
++      int node_frame_mode;
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
++int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
++                          int node_id, bool flow_level, int port,
++                          struct ppe_qos_scheduler_cfg scheduler_cfg);
++int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
++                          int node_id, bool flow_level, int *port,
++                          struct ppe_qos_scheduler_cfg *scheduler_cfg);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 0bc13979e5e2..0279f1023bcf 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -11,14 +11,108 @@
+  * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
+  * to PPE physical port 1-6, BM port 14 is matched to EIP.
+  */
++#define PPE_BM_SCH_CTRL_ADDR                  0xb000
++#define PPE_BM_SCH_CTRL_NUM                   1
++#define PPE_BM_SCH_CTRL_INC                   4
++#define PPE_BM_SCH_CTRL_SCH_DEPTH             GENMASK(7, 0)
++#define PPE_BM_SCH_CTRL_SCH_OFFSET            GENMASK(14, 8)
++#define PPE_BM_SCH_CTRL_SCH_EN                        BIT(31)
++
++#define PPE_BM_SCH_CFG_TBL_ADDR                       0xc000
++#define PPE_BM_SCH_CFG_TBL_NUM                        128
++#define PPE_BM_SCH_CFG_TBL_INC                        0x10
++#define PPE_BM_SCH_CFG_TBL_PORT_NUM           GENMASK(3, 0)
++#define PPE_BM_SCH_CFG_TBL_DIR                        BIT(4)
++#define PPE_BM_SCH_CFG_TBL_VALID              BIT(5)
++#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID  BIT(6)
++#define PPE_BM_SCH_CFG_TBL_SECOND_PORT                GENMASK(11, 8)
++
+ #define PPE_EG_BRIDGE_CONFIG_ADDR             0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN     BIT(2)
++#define PPE_PSCH_SCH_DEPTH_CFG_ADDR           0x400000
++#define PPE_PSCH_SCH_DEPTH_CFG_NUM            1
++#define PPE_PSCH_SCH_DEPTH_CFG_INC            4
++#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH      GENMASK(7, 0)
++
++#define PPE_L0_FLOW_MAP_TBL_ADDR              0x402000
++#define PPE_L0_FLOW_MAP_TBL_NUM                       300
++#define PPE_L0_FLOW_MAP_TBL_INC                       0x10
++#define PPE_L0_FLOW_MAP_TBL_FLOW_ID           GENMASK(5, 0)
++#define PPE_L0_FLOW_MAP_TBL_C_PRI             GENMASK(8, 6)
++#define PPE_L0_FLOW_MAP_TBL_E_PRI             GENMASK(11, 9)
++#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT         GENMASK(21, 12)
++#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT         GENMASK(31, 22)
++
++#define PPE_L0_C_FLOW_CFG_TBL_ADDR            0x404000
++#define PPE_L0_C_FLOW_CFG_TBL_NUM             512
++#define PPE_L0_C_FLOW_CFG_TBL_INC             0x10
++#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID         GENMASK(7, 0)
++#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT        BIT(8)
++
++#define PPE_L0_E_FLOW_CFG_TBL_ADDR            0x406000
++#define PPE_L0_E_FLOW_CFG_TBL_NUM             512
++#define PPE_L0_E_FLOW_CFG_TBL_INC             0x10
++#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID         GENMASK(7, 0)
++#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT        BIT(8)
++
++#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR         0x408000
++#define PPE_L0_FLOW_PORT_MAP_TBL_NUM          300
++#define PPE_L0_FLOW_PORT_MAP_TBL_INC          0x10
++#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM     GENMASK(3, 0)
++
++#define PPE_L0_COMP_CFG_TBL_ADDR              0x428000
++#define PPE_L0_COMP_CFG_TBL_NUM                       300
++#define PPE_L0_COMP_CFG_TBL_INC                       0x10
++#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN  GENMASK(1, 0)
++#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN    GENMASK(3, 2)
++
+ #define PPE_DEQ_OPR_TBL_ADDR                  0x430000
+ #define PPE_DEQ_OPR_TBL_NUM                   300
+ #define PPE_DEQ_OPR_TBL_INC                   0x10
+ #define PPE_DEQ_OPR_TBL_DEQ_DISABLE           BIT(0)
++#define PPE_L1_FLOW_MAP_TBL_ADDR              0x440000
++#define PPE_L1_FLOW_MAP_TBL_NUM                       64
++#define PPE_L1_FLOW_MAP_TBL_INC                       0x10
++#define PPE_L1_FLOW_MAP_TBL_FLOW_ID           GENMASK(3, 0)
++#define PPE_L1_FLOW_MAP_TBL_C_PRI             GENMASK(6, 4)
++#define PPE_L1_FLOW_MAP_TBL_E_PRI             GENMASK(9, 7)
++#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT         GENMASK(19, 10)
++#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT         GENMASK(29, 20)
++
++#define PPE_L1_C_FLOW_CFG_TBL_ADDR            0x442000
++#define PPE_L1_C_FLOW_CFG_TBL_NUM             64
++#define PPE_L1_C_FLOW_CFG_TBL_INC             0x10
++#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID         GENMASK(5, 0)
++#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT        BIT(6)
++
++#define PPE_L1_E_FLOW_CFG_TBL_ADDR            0x444000
++#define PPE_L1_E_FLOW_CFG_TBL_NUM             64
++#define PPE_L1_E_FLOW_CFG_TBL_INC             0x10
++#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID         GENMASK(5, 0)
++#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT        BIT(6)
++
++#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR         0x446000
++#define PPE_L1_FLOW_PORT_MAP_TBL_NUM          64
++#define PPE_L1_FLOW_PORT_MAP_TBL_INC          0x10
++#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM     GENMASK(3, 0)
++
++#define PPE_L1_COMP_CFG_TBL_ADDR              0x46a000
++#define PPE_L1_COMP_CFG_TBL_NUM                       64
++#define PPE_L1_COMP_CFG_TBL_INC                       0x10
++#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN  GENMASK(1, 0)
++#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN    GENMASK(3, 2)
++
++#define PPE_PSCH_SCH_CFG_TBL_ADDR             0x47a000
++#define PPE_PSCH_SCH_CFG_TBL_NUM              128
++#define PPE_PSCH_SCH_CFG_TBL_INC              0x10
++#define PPE_PSCH_SCH_CFG_TBL_DES_PORT         GENMASK(3, 0)
++#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT         GENMASK(7, 4)
++#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP  GENMASK(15, 8)
++#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN       BIT(16)
++#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT  GENMASK(20, 17)
++
+ #define PPE_BM_PORT_FC_MODE_ADDR              0x600100
+ #define PPE_BM_PORT_FC_MODE_INC                       0x4
+ #define PPE_BM_PORT_FC_MODE_EN                        BIT(0)
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-22-net-ethernet-qualcomm-Initialize-PPE-queue-settings.patch b/target/linux/qualcommbe/patches-6.6/103-22-net-ethernet-qualcomm-Initialize-PPE-queue-settings.patch
new file mode 100644 (file)
index 0000000..5b6bf5a
--- /dev/null
@@ -0,0 +1,613 @@
+From 8dd72bdbb1e3f0061f2e4a9bb4f6fce0966585a6 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 13:13:46 +0800
+Subject: [PATCH 22/50] net: ethernet: qualcomm: Initialize PPE queue settings
+
+Configure unicast and multicast hardware queues to forward
+the traffic between PPE ports.
+
+Each PPE port is assigned a specific queue resource, and the
+egress queue ID is decided by the priority and the RSS hash
+value of the packet.
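+
+For example (illustrative): if port 1 owns unicast queues 144-159
+and a packet's internal priority maps to class offset 3, the final
+egress queue ID is 144 + 3 = 147.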
+
+Change-Id: I3e4d4e12548a12b11f129106678375cc3b58828d
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.c   |  44 +++
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.h   |  34 ++
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 362 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    |  41 ++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  16 +
+ 5 files changed, 496 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+index ba35fc151e2c..72d416e0ca44 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+@@ -38,3 +38,47 @@ int ppe_queue_priority_set(struct ppe_device *ppe_dev,
+       return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
+ }
++
++/**
++ * ppe_edma_queue_offset_config - Configure queue offset for EDMA interface
++ * @ppe_dev: PPE device
++ * @class: The class to configure queue offset
++ * @index: Class index, internal priority or hash value
++ * @queue_offset: Queue offset value
++ *
++ * The PPE EDMA queue offset is configured based on the PPE internal
++ * priority or the RSS hash value; the profile ID is fixed to 0 for the
++ * EDMA interface.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
++                               enum ppe_queue_class_type class,
++                               int index, int queue_offset)
++{
++      if (class == PPE_QUEUE_CLASS_PRIORITY)
++              return ppe_queue_ucast_pri_class_set(ppe_dev, 0,
++                                                   index, queue_offset);
++
++      return ppe_queue_ucast_hash_class_set(ppe_dev, 0,
++                                            index, queue_offset);
++}
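++
++/* Illustrative usage (hypothetical values): map internal priority 3 of
++ * the EDMA interface to queue offset 3:
++ *
++ *     ret = ppe_edma_queue_offset_config(ppe_dev, PPE_QUEUE_CLASS_PRIORITY,
++ *                                        3, 3);
++ */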
++
++/**
++ * ppe_edma_queue_resource_get - Get EDMA queue resource
++ * @ppe_dev: PPE device
++ * @type: Resource type
++ * @res_start: Resource start ID returned
++ * @res_end: Resource end ID returned
++ *
++ * PPE EDMA queue resource includes unicast queue and multicast queue.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
++                              int *res_start, int *res_end)
++{
++      if (type != PPE_RES_UCAST && type != PPE_RES_MCAST)
++              return -EINVAL;
++
++      return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+index ee5b47d06991..c8aa8945f90f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+@@ -15,7 +15,41 @@
+ #define PPE_QUEUE_ID_NUM                      300
+ #define PPE_FLOW_ID_NUM                               64
+ #define PPE_QUEUE_SCH_PRI_NUM                 8
++#define PPE_QUEUE_INTER_PRI_NUM                       16
++#define PPE_QUEUE_HASH_NUM                    256
++
++/**
++ * enum ppe_queue_class_type - PPE queue class type
++ * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
++ * @PPE_QUEUE_CLASS_HASH: Queue offset configured from RSS hash.
++ */
++enum ppe_queue_class_type {
++      PPE_QUEUE_CLASS_PRIORITY,
++      PPE_QUEUE_CLASS_HASH,
++};
++
++/**
++ * enum ppe_resource_type - PPE resource type
++ * @PPE_RES_UCAST: Unicast queue resource
++ * @PPE_RES_MCAST: Multicast queue resource
++ * @PPE_RES_FLOW_ID: Flow resource
++ * @PPE_RES_L0_NODE: Level 0 QoS node resource
++ * @PPE_RES_L1_NODE: Level 1 QoS node resource
++ */
++enum ppe_resource_type {
++      PPE_RES_UCAST,
++      PPE_RES_MCAST,
++      PPE_RES_FLOW_ID,
++      PPE_RES_L0_NODE,
++      PPE_RES_L1_NODE,
++};
+ int ppe_queue_priority_set(struct ppe_device *ppe_dev,
+                          int queue_id, int priority);
++
++int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
++                               enum ppe_queue_class_type class,
++                               int index, int queue_offset);
++int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
++                              int *res_start, int *res_end);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index bdef26da6fd3..ac90d33aecba 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -119,6 +119,34 @@ struct ppe_port_schedule_config {
+       unsigned int drr_node_id;
+ };
++/**
++ * struct ppe_port_schedule_resource - PPE port scheduler resource.
++ * @ucastq_start: Unicast queue start ID.
++ * @ucastq_end: Unicast queue end ID.
++ * @mcastq_start: Multicast queue start ID.
++ * @mcastq_end: Multicast queue end ID.
++ * @flow_id_start: Flow start ID.
++ * @flow_id_end: Flow end ID.
++ * @l0node_start: Scheduler node start ID for queue level.
++ * @l0node_end: Scheduler node end ID for queue level.
++ * @l1node_start: Scheduler node start ID for flow level.
++ * @l1node_end: Scheduler node end ID for flow level.
++ *
++ * PPE scheduler resource allocated among the PPE ports.
++ */
++struct ppe_port_schedule_resource {
++      unsigned int ucastq_start;
++      unsigned int ucastq_end;
++      unsigned int mcastq_start;
++      unsigned int mcastq_end;
++      unsigned int flow_id_start;
++      unsigned int flow_id_end;
++      unsigned int l0node_start;
++      unsigned int l0node_end;
++      unsigned int l1node_start;
++      unsigned int l1node_end;
++};
++
+ static int ipq9574_ppe_bm_group_config = 1550;
+ static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+       {
+@@ -648,6 +676,111 @@ static struct ppe_port_schedule_config ppe_qos_schedule_config[] = {
+       },
+ };
++/* The QoS resource is assigned to each PPE port; the last entry
++ * holds the reserved resource.
++ */
++static struct ppe_port_schedule_resource ppe_scheduler_res[] = {
++      {       .ucastq_start   = 0,
++              .ucastq_end     = 63,
++              .mcastq_start   = 256,
++              .mcastq_end     = 271,
++              .flow_id_start  = 0,
++              .flow_id_end    = 0,
++              .l0node_start   = 0,
++              .l0node_end     = 7,
++              .l1node_start   = 0,
++              .l1node_end     = 0,
++      },
++      {       .ucastq_start   = 144,
++              .ucastq_end     = 159,
++              .mcastq_start   = 272,
++              .mcastq_end     = 275,
++              .flow_id_start  = 36,
++              .flow_id_end    = 39,
++              .l0node_start   = 48,
++              .l0node_end     = 63,
++              .l1node_start   = 8,
++              .l1node_end     = 11,
++      },
++      {       .ucastq_start   = 160,
++              .ucastq_end     = 175,
++              .mcastq_start   = 276,
++              .mcastq_end     = 279,
++              .flow_id_start  = 40,
++              .flow_id_end    = 43,
++              .l0node_start   = 64,
++              .l0node_end     = 79,
++              .l1node_start   = 12,
++              .l1node_end     = 15,
++      },
++      {       .ucastq_start   = 176,
++              .ucastq_end     = 191,
++              .mcastq_start   = 280,
++              .mcastq_end     = 283,
++              .flow_id_start  = 44,
++              .flow_id_end    = 47,
++              .l0node_start   = 80,
++              .l0node_end     = 95,
++              .l1node_start   = 16,
++              .l1node_end     = 19,
++      },
++      {       .ucastq_start   = 192,
++              .ucastq_end     = 207,
++              .mcastq_start   = 284,
++              .mcastq_end     = 287,
++              .flow_id_start  = 48,
++              .flow_id_end    = 51,
++              .l0node_start   = 96,
++              .l0node_end     = 111,
++              .l1node_start   = 20,
++              .l1node_end     = 23,
++      },
++      {       .ucastq_start   = 208,
++              .ucastq_end     = 223,
++              .mcastq_start   = 288,
++              .mcastq_end     = 291,
++              .flow_id_start  = 52,
++              .flow_id_end    = 55,
++              .l0node_start   = 112,
++              .l0node_end     = 127,
++              .l1node_start   = 24,
++              .l1node_end     = 27,
++      },
++      {       .ucastq_start   = 224,
++              .ucastq_end     = 239,
++              .mcastq_start   = 292,
++              .mcastq_end     = 295,
++              .flow_id_start  = 56,
++              .flow_id_end    = 59,
++              .l0node_start   = 128,
++              .l0node_end     = 143,
++              .l1node_start   = 28,
++              .l1node_end     = 31,
++      },
++      {       .ucastq_start   = 240,
++              .ucastq_end     = 255,
++              .mcastq_start   = 296,
++              .mcastq_end     = 299,
++              .flow_id_start  = 60,
++              .flow_id_end    = 63,
++              .l0node_start   = 144,
++              .l0node_end     = 159,
++              .l1node_start   = 32,
++              .l1node_end     = 35,
++      },
++      {       .ucastq_start   = 64,
++              .ucastq_end     = 143,
++              .mcastq_start   = 0,
++              .mcastq_end     = 0,
++              .flow_id_start  = 1,
++              .flow_id_end    = 35,
++              .l0node_start   = 8,
++              .l0node_end     = 47,
++              .l1node_start   = 1,
++              .l1node_end     = 7,
++      },
++};
++
+ /* Set the first level scheduler configuration. */
+ static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+                                         int node_id, int port,
+@@ -893,6 +1026,147 @@ int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
+                                             port, scheduler_cfg);
+ }
++/**
++ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID
++ * @ppe_dev: PPE device
++ * @queue_dst: PPE queue destination configuration
++ * @queue_base: PPE queue base ID
++ * @profile_id: Profile ID
++ *
++ * The PPE unicast queue base ID is configured based on the destination
++ * port information per profile ID.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
++                           struct ppe_queue_ucast_dest queue_dst,
++                           int queue_base, int profile_id)
++{
++      int index, profile_size;
++      u32 val, reg;
++
++      profile_size = queue_dst.src_profile << 8;
++      if (queue_dst.service_code_en)
++              index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
++                      queue_dst.service_code;
++      else if (queue_dst.cpu_code_en)
++              index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
++                      queue_dst.cpu_code;
++      else
++              index = profile_size + queue_dst.dest_port;
++
++      val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
++      val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
++      reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
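++
++/* Illustrative example (hypothetical values): with src_profile 1 and
++ * service_code_en set for service code 5, the table index becomes
++ * PPE_QUEUE_BASE_SERVICE_CODE + (1 << 8) + 5 = 2048 + 256 + 5 = 2309.
++ */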
++
++/**
++ * ppe_queue_ucast_pri_class_set - Set PPE unicast queue class of priority
++ * @ppe_dev: PPE device
++ * @profile_id: Profile ID
++ * @priority: Priority to be used to set class
++ * @class_offset: Class value for the destination queue ID
++ *
++ * The PPE unicast queue class is configured based on the PPE
++ * internal priority.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
++                                int profile_id,
++                                int priority,
++                                int class_offset)
++{
++      u32 val, reg;
++      int index;
++
++      index = (profile_id << 4) + priority;
++      val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, class_offset);
++      reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_queue_ucast_hash_class_set - Set PPE unicast queue class of hash value
++ * @ppe_dev: PPE device
++ * @profile_id: Profile ID
++ * @rss_hash: Hash value to be used to set clas
++ * @class_offset: Class value for the destination queue ID
++ *
++ * The PPE unicast queue class is configured based on the RSS hash value.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
++                                 int profile_id,
++                                 int rss_hash,
++                                 int class_offset)
++{
++      u32 val, reg;
++      int index;
++
++      index = (profile_id << 8) + rss_hash;
++      val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, class_offset);
++      reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_port_resource_get - Get PPE resource per port
++ * @ppe_dev: PPE device
++ * @port: PPE port
++ * @type: Resource type
++ * @res_start: Resource start ID
++ * @res_end: Resource end ID
++ *
++ * PPE resources are assigned per PPE port and are acquired by the QoS
++ * scheduler.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
++                        int *res_start, int *res_end)
++{
++      struct ppe_port_schedule_resource res;
++
++      /* The entry just past the maximum PPE port ID holds the reserved
++       * resource, which is also allowed to be acquired.
++       */
++      if (port > ppe_dev->num_ports)
++              return -EINVAL;
++
++      res = ppe_scheduler_res[port];
++      switch (type) {
++      case PPE_RES_UCAST:
++              *res_start = res.ucastq_start;
++              *res_end = res.ucastq_end;
++              break;
++      case PPE_RES_MCAST:
++              *res_start = res.mcastq_start;
++              *res_end = res.mcastq_end;
++              break;
++      case PPE_RES_FLOW_ID:
++              *res_start = res.flow_id_start;
++              *res_end = res.flow_id_end;
++              break;
++      case PPE_RES_L0_NODE:
++              *res_start = res.l0node_start;
++              *res_end = res.l0node_end;
++              break;
++      case PPE_RES_L1_NODE:
++              *res_start = res.l1node_start;
++              *res_end = res.l1node_end;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
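++
++/* Illustrative usage: fetch the unicast queue range of the CPU port
++ * (port 0), which resolves to queues 0 to 63 per ppe_scheduler_res:
++ *
++ *     ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
++ *                                 &res_start, &res_end);
++ */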
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -1219,6 +1493,88 @@ static int ppe_config_scheduler(struct ppe_device *ppe_dev)
+       return ret;
+ }
++/* Configure PPE queue destination of each PPE port. */
++static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
++{
++      int ret, port_id, index, class, res_start, res_end, queue_base, pri_max;
++      struct ppe_queue_ucast_dest queue_dst;
++
++      for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
++              memset(&queue_dst, 0, sizeof(queue_dst));
++
++              ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
++                                          &res_start, &res_end);
++              if (ret)
++                      return ret;
++
++              queue_base = res_start;
++              queue_dst.dest_port = port_id;
++
++              /* Configure queue base ID and profile ID that is same as
++               * physical port ID.
++               */
++              ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
++                                             queue_base, port_id);
++              if (ret)
++                      return ret;
++
++              /* Queue priority range supported by each PPE port */
++              ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
++                                          &res_start, &res_end);
++              if (ret)
++                      return ret;
++
++              pri_max = res_end - res_start;
++
++              /* Redirect ARP reply packets to the CPU port with the
++               * maximum priority, so that ARP replies (CPU code 101)
++               * are still received by EDMA with the highest priority
++               * under heavy traffic load.
++               */
++              if (port_id == 0) {
++                      memset(&queue_dst, 0, sizeof(queue_dst));
++
++                      queue_dst.cpu_code_en = true;
++                      queue_dst.cpu_code = 101;
++                      ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
++                                                     queue_base + pri_max,
++                                                     0);
++                      if (ret)
++                              return ret;
++              }
++
++              /* Initialize the class offset of internal priority. */
++              for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
++                      class = index > pri_max ? pri_max : index;
++
++                      ret = ppe_queue_ucast_pri_class_set(ppe_dev, port_id,
++                                                          index, class);
++                      if (ret)
++                              return ret;
++              }
++
++              /* Initialize the class offset of each RSS hash value to 0,
++               * so that a random hardware table value cannot select an
++               * unexpected destination queue.
++               */
++              for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
++                      ret = ppe_queue_ucast_hash_class_set(ppe_dev, port_id,
++                                                           index, 0);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
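++
++/* Illustrative result (per the ppe_scheduler_res table): port 1 owns
++ * unicast queues 144-159 and L0 nodes 48-63, so queue_base is 144 and
++ * pri_max is 15; internal priorities 0-15 then map one to one onto
++ * queues 144-159.
++ */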
++
++/* Initialize PPE device to handle traffic correctly. */
++static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
++{
++      return ppe_queue_dest_init(ppe_dev);
++}
++
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+ {
+       int ret;
+@@ -1231,5 +1587,9 @@ int ppe_hw_config(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_config_scheduler(ppe_dev);
++      ret = ppe_config_scheduler(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_dev_hw_init(ppe_dev);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index 167a114031fd..676c4ec45f6f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -6,6 +6,13 @@
+ #ifndef __PPE_CONFIG_H__
+ #define __PPE_CONFIG_H__
++/* There are different queue config ranges for the destination port,
++ * CPU code and service code.
++ */
++#define PPE_QUEUE_BASE_DEST_PORT              0
++#define PPE_QUEUE_BASE_CPU_CODE                       1024
++#define PPE_QUEUE_BASE_SERVICE_CODE           2048
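++
++/* For example, the queue base entry for CPU code 101 in profile 0 is
++ * looked up at index PPE_QUEUE_BASE_CPU_CODE + 101 = 1125.
++ */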
++
+ /**
+  * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
+  * @flow_id: PPE flow ID.
+@@ -26,6 +33,26 @@ struct ppe_qos_scheduler_cfg {
+       int node_frame_mode;
+ };
++/**
++ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
++ * @src_profile: Source profile.
++ * @service_code_en: Enable service code.
++ * @service_code: Service code.
++ * @cpu_code_en: Enable CPU code.
++ * @cpu_code: CPU code.
++ * @dest_port: Destination port.
++ *
++ * The PPE unicast egress queue base is decided by this destination
++ * information.
++ */
++struct ppe_queue_ucast_dest {
++      int src_profile;
++      bool service_code_en;
++      int service_code;
++      bool cpu_code_en;
++      int cpu_code;
++      int dest_port;
++};
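++
++/* Illustrative initialization: steer packets destined to port 2 using
++ * the default profile:
++ *
++ *     struct ppe_queue_ucast_dest dst = { .dest_port = 2 };
++ */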
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+                           int node_id, bool flow_level, int port,
+@@ -33,4 +60,18 @@ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
+                           int node_id, bool flow_level, int *port,
+                           struct ppe_qos_scheduler_cfg *scheduler_cfg);
++int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
++                           struct ppe_queue_ucast_dest queue_dst,
++                           int queue_base,
++                           int profile_id);
++int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
++                                int profile_id,
++                                int priority,
++                                int class_offset);
++int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
++                                 int profile_id,
++                                 int rss_hash,
++                                 int class_offset);
++int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
++                        int *res_start, int *res_end);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 0279f1023bcf..1f6828237f94 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -153,6 +153,22 @@
+ #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value)  \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
++#define PPE_UCAST_QUEUE_MAP_TBL_ADDR          0x810000
++#define PPE_UCAST_QUEUE_MAP_TBL_NUM           3072
++#define PPE_UCAST_QUEUE_MAP_TBL_INC           0x10
++#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID    GENMASK(3, 0)
++#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID      GENMASK(11, 4)
++
++#define PPE_UCAST_HASH_MAP_TBL_ADDR           0x830000
++#define PPE_UCAST_HASH_MAP_TBL_NUM            4096
++#define PPE_UCAST_HASH_MAP_TBL_INC            0x10
++#define PPE_UCAST_HASH_MAP_TBL_HASH           GENMASK(7, 0)
++
++#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR               0x842000
++#define PPE_UCAST_PRIORITY_MAP_TBL_NUM                256
++#define PPE_UCAST_PRIORITY_MAP_TBL_INC                0x10
++#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS      GENMASK(3, 0)
++
+ /* PPE unicast queue (0-255) configurations. */
+ #define PPE_AC_UNI_QUEUE_CFG_TBL_ADDR         0x848000
+ #define PPE_AC_UNI_QUEUE_CFG_TBL_NUM          256
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-23-net-ethernet-qualcomm-Add-PPE-service-code-config.patch b/target/linux/qualcommbe/patches-6.6/103-23-net-ethernet-qualcomm-Add-PPE-service-code-config.patch
new file mode 100644 (file)
index 0000000..3a390ef
--- /dev/null
@@ -0,0 +1,392 @@
+From 278b9f94b1dd344e88739044dd20d407b7f0651f Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 13:51:20 +0800
+Subject: [PATCH 23/50] net: ethernet: qualcomm: Add PPE service code config
+
+Configure the service code used for marking traffic passed through
+the PPE. The service code is generated according to the features of
+the packet when it is processed by the PPE.
+
+The bypass features of service code 1 are configured by default;
+this service code is used when the packet is transmitted from the
+host to the CPU port of the PPE.
+
+Change-Id: I9fd2d26ba4c40e9ca182c20f5e02bd2f6f3e5e05
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.h   |   3 +
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    |  98 +++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    | 142 ++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  48 ++++++
+ 4 files changed, 290 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+index c8aa8945f90f..ecdae4b95667 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+@@ -18,6 +18,9 @@
+ #define PPE_QUEUE_INTER_PRI_NUM                       16
+ #define PPE_QUEUE_HASH_NUM                    256
++/* The service code used by the EDMA driver to transmit packets to the PPE. */
++#define PPE_EDMA_SC_BYPASS_ID                 1
++
+ /**
+  * enum ppe_queue_class_type - PPE queue class type
+  * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index ac90d33aecba..a8e7a536a6e0 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/bitfield.h>
++#include <linux/bitmap.h>
+ #include <linux/bits.h>
+ #include <linux/device.h>
+ #include <linux/regmap.h>
+@@ -1167,6 +1168,76 @@ int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
+       return 0;
+ }
++/**
++ * ppe_servcode_config_set - Set PPE service code configuration
++ * @ppe_dev: PPE device
++ * @servcode: Service ID, 0-255 supported by PPE
++ * @cfg: Service code configuration
++ *
++ * The service code configuration selects which PPE functions are
++ * applied to, or bypassed for, the matching traffic.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_servcode_config_set(struct ppe_device *ppe_dev, int servcode,
++                          struct ppe_servcode_cfg cfg)
++{
++      u32 val, reg, servcode_val[2] = {};
++      unsigned long bitmap_value;
++      int ret;
++
++      val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
++      val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
++      val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
++
++      bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
++      val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
++      val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
++                        test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
++      val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
++                        test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
++      reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * servcode;
++
++      ret = regmap_write(ppe_dev->regmap, reg, val);
++      if (ret)
++              return ret;
++
++      bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
++      PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
++      PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
++                                test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
++      reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * servcode;
++
++      ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                              servcode_val, ARRAY_SIZE(servcode_val));
++      if (ret)
++              return ret;
++
++      reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * servcode;
++      ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                             servcode_val, ARRAY_SIZE(servcode_val));
++      if (ret)
++              return ret;
++
++      PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.field_update_bitmap);
++      PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
++      PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.hw_service);
++      PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.offset_sel);
++      PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
++                                   test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
++
++      ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                              servcode_val, ARRAY_SIZE(servcode_val));
++      if (ret)
++              return ret;
++
++      bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
++      val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
++      reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * servcode;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -1569,10 +1640,35 @@ static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
+       return 0;
+ }
++/* Initialize the service code 1 used by CPU port. */
++static int ppe_servcode_init(struct ppe_device *ppe_dev)
++{
++      struct ppe_servcode_cfg servcode_cfg = {};
++
++      bitmap_zero(servcode_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
++      bitmap_zero(servcode_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
++
++      bitmap_fill(servcode_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
++      clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, servcode_cfg.bitmaps.ingress);
++      clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, servcode_cfg.bitmaps.ingress);
++      clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, servcode_cfg.bitmaps.ingress);
++
++      bitmap_fill(servcode_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
++      clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, servcode_cfg.bitmaps.egress);
++
++      return ppe_servcode_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, servcode_cfg);
++}
++
+ /* Initialize PPE device to handle traffic correctly. */
+ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+ {
+-      return ppe_queue_dest_init(ppe_dev);
++      int ret;
++
++      ret = ppe_queue_dest_init(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_servcode_init(ppe_dev);
+ }
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index 676c4ec45f6f..dcb557ed843c 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -6,6 +6,8 @@
+ #ifndef __PPE_CONFIG_H__
+ #define __PPE_CONFIG_H__
++#include <linux/types.h>
++
+ /* There are different queue config ranges for the destination port,
+  * CPU code and service code.
+  */
+@@ -53,6 +55,143 @@ struct ppe_queue_ucast_dest {
+       int dest_port;
+ };
++/* Hardware bitmaps for bypassing features of the ingress packet. */
++enum ppe_sc_ingress_type {
++      PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
++      PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
++      PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
++      PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
++      PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
++      PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
++      PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
++      PPE_SC_BYPASS_INGRESS_ACL = 7,
++      PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
++      PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
++      PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
++      PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
++      PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
++      PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
++      PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
++      PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
++      PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
++      PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
++      PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
++      PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
++      PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
++      /* Values 21-23 are not specified by hardware. */
++      PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
++      PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
++      PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
++      PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
++      PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
++      /* This must be last as it determines the size of the BITMAP. */
++      PPE_SC_BYPASS_INGRESS_SIZE,
++};
++
++/* Hardware bitmaps for bypassing features of the egress packet. */
++enum ppe_sc_egress_type {
++      PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
++      PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
++      PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
++      PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
++      PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
++      PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
++      PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
++      PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
++      PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
++      PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
++      PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
++      PPE_SC_BYPASS_EGRESS_POLICER = 11,
++      PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
++      PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
++      PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
++      PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
++      PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
++      PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
++      PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
++      PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
++      PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
++      PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
++      PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
++      PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
++      /* This must be last as it determines the size of the BITMAP. */
++      PPE_SC_BYPASS_EGRESS_SIZE,
++};
++
++/* Hardware bitmaps for bypassing counter of packet. */
++enum ppe_sc_counter_type {
++      PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
++      PPE_SC_BYPASS_COUNTER_RX = 1,
++      PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
++      PPE_SC_BYPASS_COUNTER_TX = 3,
++      /* This must be last as it determines the size of the BITMAP. */
++      PPE_SC_BYPASS_COUNTER_SIZE,
++};
++
++/* Hardware bitmaps for bypassing features of tunnel packet. */
++enum ppe_sc_tunnel_type {
++      PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
++      PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
++      PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
++      PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
++      PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
++      PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
++      PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
++      PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
++      PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
++      PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
++      PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
++      PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
++      PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
++      PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
++      PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
++      PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
++      PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
++      PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
++      /* Values 18-19 are not specified by hardware. */
++      PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
++      /* This must be last as it determines the size of the BITMAP. */
++      PPE_SC_BYPASS_TUNNEL_SIZE,
++};
++
++/**
++ * struct ppe_sc_bypass - PPE service bypass bitmaps
++ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
++ * @egress: Bitmap of features that can be bypassed on the egress packet.
++ * @counter: Bitmap of features that can be bypassed on the counter type.
++ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
++ */
++struct ppe_sc_bypass {
++      DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
++      DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
++      DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
++      DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
++};
++
++/**
++ * struct ppe_servcode_cfg - PPE service code configuration.
++ * @dest_port_valid: Generate destination port or not.
++ * @dest_port: Destination port ID.
++ * @bitmaps: Bitmap of bypass features.
++ * @is_src: Destination port acts as the source port for packets sent to the CPU.
++ * @field_update_bitmap: Fields updated to the EDMA preheader.
++ * @next_service_code: New service code.
++ * @hw_service: Hardware functions selected.
++ * @offset_sel: Packet offset selection.
++ *
++ * The service code is generated while the packet passes through the PPE.
++ */
++struct ppe_servcode_cfg {
++      bool dest_port_valid;
++      int dest_port;
++      struct ppe_sc_bypass bitmaps;
++      bool is_src;
++      int field_update_bitmap;
++      int next_service_code;
++      int hw_service;
++      int offset_sel;
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+                           int node_id, bool flow_level, int port,
+@@ -74,4 +213,7 @@ int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
+                                  int class_offset);
+ int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
+                         int *res_start, int *res_end);
++int ppe_servcode_config_set(struct ppe_device *ppe_dev,
++                          int servcode,
++                          struct ppe_servcode_cfg cfg);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 1f6828237f94..3122743af98d 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -27,9 +27,57 @@
+ #define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID  BIT(6)
+ #define PPE_BM_SCH_CFG_TBL_SECOND_PORT                GENMASK(11, 8)
++/* PPE service code configuration on the ingress direction. */
++#define PPE_SERVICE_TBL_ADDR                  0x15000
++#define PPE_SERVICE_TBL_NUM                   256
++#define PPE_SERVICE_TBL_INC                   0x10
++#define PPE_SERVICE_W0_BYPASS_BITMAP          GENMASK(31, 0)
++#define PPE_SERVICE_W1_RX_CNT_EN              BIT(0)
++
++#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_SERVICE_W0_BYPASS_BITMAP)
++#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value)     \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
++
+ #define PPE_EG_BRIDGE_CONFIG_ADDR             0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN     BIT(2)
++/* PPE service code configuration on the egress direction. */
++#define PPE_EG_SERVICE_TBL_ADDR                       0x43000
++#define PPE_EG_SERVICE_TBL_NUM                        256
++#define PPE_EG_SERVICE_TBL_INC                        0x10
++#define PPE_EG_SERVICE_W0_UPDATE_ACTION               GENMASK(31, 0)
++#define PPE_EG_SERVICE_W1_NEXT_SERVCODE               GENMASK(7, 0)
++#define PPE_EG_SERVICE_W1_HW_SERVICE          GENMASK(13, 8)
++#define PPE_EG_SERVICE_W1_OFFSET_SEL          BIT(14)
++#define PPE_EG_SERVICE_W1_TX_CNT_EN           BIT(15)
++
++#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_EG_SERVICE_W0_UPDATE_ACTION)
++#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_NEXT_SERVCODE)
++#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_HW_SERVICE)
++#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_OFFSET_SEL)
++#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value)  \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
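++
++/* These helpers update single fields of a multi-word table entry read
++ * with regmap_bulk_read(), e.g. (illustrative):
++ *
++ *     u32 svc[2];
++ *
++ *     regmap_bulk_read(regmap, reg, svc, ARRAY_SIZE(svc));
++ *     PPE_EG_SERVICE_SET_NEXT_SERVCODE(svc, 2);
++ *     regmap_bulk_write(regmap, reg, svc, ARRAY_SIZE(svc));
++ */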
++
++#define PPE_IN_L2_SERVICE_TBL_ADDR            0x66000
++#define PPE_IN_L2_SERVICE_TBL_NUM             256
++#define PPE_IN_L2_SERVICE_TBL_INC             0x10
++#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID       BIT(0)
++#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID     GENMASK(4, 1)
++#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION   BIT(5)
++#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP       GENMASK(29, 6)
++#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN               BIT(30)
++#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN               BIT(31)
++
++#define PPE_TL_SERVICE_TBL_ADDR                       0x306000
++#define PPE_TL_SERVICE_TBL_NUM                        256
++#define PPE_TL_SERVICE_TBL_INC                        4
++#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP      GENMASK(31, 0)
++
+ #define PPE_PSCH_SCH_DEPTH_CFG_ADDR           0x400000
+ #define PPE_PSCH_SCH_DEPTH_CFG_NUM            1
+ #define PPE_PSCH_SCH_DEPTH_CFG_INC            4
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-24-net-ethernet-qualcomm-Add-PPE-port-control-config.patch b/target/linux/qualcommbe/patches-6.6/103-24-net-ethernet-qualcomm-Add-PPE-port-control-config.patch
new file mode 100644 (file)
index 0000000..a55b09b
--- /dev/null
@@ -0,0 +1,238 @@
+From 61881bae3ad9d961139e970f1aae75070cd45b5c Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 14:11:40 +0800
+Subject: [PATCH 24/50] net: ethernet: qualcomm: Add PPE port control config
+
+1. Initialize and set up the physical port.
+2. Configure the default action as drop when the packet size
+   is larger than the configured MTU of the physical port.
+
+Change-Id: Id98aea7b17556f85021905978b3403ca6d427557
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 91 ++++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    | 11 +++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  | 50 ++++++++++
+ 3 files changed, 151 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index a8e7a536a6e0..18296a449d4e 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1238,6 +1238,50 @@ int ppe_servcode_config_set(struct ppe_device *ppe_dev, int servcode,
+       return regmap_write(ppe_dev->regmap, reg, val);
+ }
++/**
++ * ppe_counter_set - Set PPE port counter enabled or not
++ * @ppe_dev: PPE device
++ * @port: PPE port ID
++ * @enable: Counter status
++ *
++ * The PPE port counter can be enabled or disabled per port.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable)
++{
++      u32 reg, val, mru_mtu_val[3];
++      int ret;
++
++      reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
++      ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                             mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++      if (ret)
++              return ret;
++
++      PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, enable);
++      PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, enable);
++      ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                              mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++      if (ret)
++              return ret;
++
++      reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
++      val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_TX_CNT_EN, enable);
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_MC_MTU_CTRL_TBL_TX_CNT_EN,
++                               val);
++      if (ret)
++              return ret;
++
++      reg = PPE_PORT_EG_VLAN_ADDR + PPE_PORT_EG_VLAN_INC * port;
++      val = FIELD_PREP(PPE_PORT_EG_VLAN_TX_COUNTING_EN, enable);
++
++      return regmap_update_bits(ppe_dev->regmap, reg,
++                                PPE_PORT_EG_VLAN_TX_COUNTING_EN,
++                                val);
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -1659,6 +1703,47 @@ static int ppe_servcode_init(struct ppe_device *ppe_dev)
+       return ppe_servcode_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, servcode_cfg);
+ }
++/* Initialize PPE port configurations. */
++static int ppe_port_ctrl_init(struct ppe_device *ppe_dev)
++{
++      u32 reg, val, mru_mtu_val[3];
++      int i, ret;
++
++      for (i = 1; i < ppe_dev->num_ports; i++) {
++              /* Enable PPE port counter */
++              ret = ppe_counter_set(ppe_dev, i, true);
++              if (ret)
++                      return ret;
++
++              reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
++              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                     mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++              if (ret)
++                      return ret;
++
++              /* Drop the packet when the packet size is larger than
++               * the MTU or MRU of the physical PPE port.
++               */
++              PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_DROP);
++              PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
++              ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                                      mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++              if (ret)
++                      return ret;
++
++              reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
++              val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
++              ret = regmap_update_bits(ppe_dev->regmap, reg,
++                                       PPE_MC_MTU_CTRL_TBL_MTU_CMD,
++                                       val);
++              if (ret)
++                      return ret;
++      }
++
++      /* Enable CPU port counter. */
++      return ppe_counter_set(ppe_dev, 0, true);
++}
++
+ /* Initialize PPE device to handle traffic correctly. */
+ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+ {
+@@ -1668,7 +1753,11 @@ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_servcode_init(ppe_dev);
++      ret = ppe_servcode_init(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_port_ctrl_init(ppe_dev);
+ }
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index dcb557ed843c..7f5d92c39dd3 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -192,6 +192,16 @@ struct ppe_servcode_cfg {
+       int offset_sel;
+ };
++/* A packet received by the PPE can be forwarded, dropped, copied to
++ * the CPU (entering the multicast queue), or redirected to the CPU
++ * (entering the unicast queue).
++ */
++enum ppe_action_type {
++      PPE_ACTION_FORWARD = 0,
++      PPE_ACTION_DROP = 1,
++      PPE_ACTION_COPY_TO_CPU = 2,
++      PPE_ACTION_REDIRECT_TO_CPU = 3,
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+                           int node_id, bool flow_level, int port,
+@@ -216,4 +226,5 @@ int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
+ int ppe_servcode_config_set(struct ppe_device *ppe_dev,
+                           int servcode,
+                           struct ppe_servcode_cfg cfg);
++int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 3122743af98d..e981a1c0e670 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -18,6 +18,11 @@
+ #define PPE_BM_SCH_CTRL_SCH_OFFSET            GENMASK(14, 8)
+ #define PPE_BM_SCH_CTRL_SCH_EN                        BIT(31)
++#define PPE_RX_FIFO_CFG_ADDR                  0xb004
++#define PPE_RX_FIFO_CFG_NUM                   8
++#define PPE_RX_FIFO_CFG_INC                   4
++#define PPE_RX_FIFO_CFG_THRSH                 GENMASK(2, 0)
++
+ #define PPE_BM_SCH_CFG_TBL_ADDR                       0xc000
+ #define PPE_BM_SCH_CFG_TBL_NUM                        128
+ #define PPE_BM_SCH_CFG_TBL_INC                        0x10
+@@ -39,6 +44,17 @@
+ #define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value)     \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_SERVICE_W1_RX_CNT_EN)
++#define PPE_PORT_EG_VLAN_ADDR                 0x20020
++#define PPE_PORT_EG_VLAN_NUM                  8
++#define PPE_PORT_EG_VLAN_INC                  4
++#define PPE_PORT_EG_VLAN_VLAN_TYPE            BIT(0)
++#define PPE_PORT_EG_VLAN_CTAG_MODE            GENMASK(2, 1)
++#define PPE_PORT_EG_VLAN_STAG_MODE            GENMASK(4, 3)
++#define PPE_PORT_EG_VLAN_VSI_TAG_MODE_EN      BIT(5)
++#define PPE_PORT_EG_VLAN_PCP_PROP_CMD         BIT(6)
++#define PPE_PORT_EG_VLAN_DEI_PROP_CMD         BIT(7)
++#define PPE_PORT_EG_VLAN_TX_COUNTING_EN               BIT(8)
++
+ #define PPE_EG_BRIDGE_CONFIG_ADDR             0x20044
+ #define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN     BIT(2)
+@@ -63,6 +79,40 @@
+ #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value)  \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
++#define PPE_MC_MTU_CTRL_TBL_ADDR              0x60a00
++#define PPE_MC_MTU_CTRL_TBL_NUM                       8
++#define PPE_MC_MTU_CTRL_TBL_INC                       4
++#define PPE_MC_MTU_CTRL_TBL_MTU                       GENMASK(13, 0)
++#define PPE_MC_MTU_CTRL_TBL_MTU_CMD           GENMASK(15, 14)
++#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN         BIT(16)
++
++/* PPE port control configuration, the MTU and MRU configs. */
++#define PPE_MRU_MTU_CTRL_TBL_ADDR             0x65000
++#define PPE_MRU_MTU_CTRL_TBL_NUM              256
++#define PPE_MRU_MTU_CTRL_TBL_INC              0x10
++#define PPE_MRU_MTU_CTRL_W0_MRU                       GENMASK(13, 0)
++#define PPE_MRU_MTU_CTRL_W0_MRU_CMD           GENMASK(15, 14)
++#define PPE_MRU_MTU_CTRL_W0_MTU                       GENMASK(29, 16)
++#define PPE_MRU_MTU_CTRL_W0_MTU_CMD           GENMASK(31, 30)
++#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN         BIT(0)
++#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN         BIT(1)
++#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE               GENMASK(3, 2)
++#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW    BIT(31)
++#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH   GENMASK(1, 0)
++
++#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU)
++#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value)  \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MRU_CMD)
++#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU)
++#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value)  \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_MRU_MTU_CTRL_W0_MTU_CMD)
++#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_RX_CNT_EN)
++#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value)        \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_MRU_MTU_CTRL_W1_TX_CNT_EN)
++
+ #define PPE_IN_L2_SERVICE_TBL_ADDR            0x66000
+ #define PPE_IN_L2_SERVICE_TBL_NUM             256
+ #define PPE_IN_L2_SERVICE_TBL_INC             0x10
+-- 
+2.45.2
+
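The MRU/MTU control table added by the patch above packs both lengths and
their oversize actions into word 0 of a 16-byte entry
(PPE_MRU_MTU_CTRL_TBL_INC is 0x10), with the counter enables in word 1.
A rough sketch of programming a port with the accessor macros, where the
port number and the 1500-byte size are illustrative only:

	u32 mtu_cfg[4] = {};
	u32 reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * 1;
	int ret;

	/* Word 0: MRU/MTU lengths and the action for oversized frames. */
	PPE_MRU_MTU_CTRL_SET_MRU(mtu_cfg, 1500);
	PPE_MRU_MTU_CTRL_SET_MRU_CMD(mtu_cfg, PPE_ACTION_FORWARD);
	PPE_MRU_MTU_CTRL_SET_MTU(mtu_cfg, 1500);
	PPE_MRU_MTU_CTRL_SET_MTU_CMD(mtu_cfg, PPE_ACTION_FORWARD);

	/* Word 1: enable the per-port RX/TX packet counters. */
	PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mtu_cfg, true);
	PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mtu_cfg, true);

	ret = regmap_bulk_write(ppe_dev->regmap, reg, mtu_cfg,
				ARRAY_SIZE(mtu_cfg));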
diff --git a/target/linux/qualcommbe/patches-6.6/103-25-net-ethernet-qualcomm-Add-PPE-RSS-hash-config.patch b/target/linux/qualcommbe/patches-6.6/103-25-net-ethernet-qualcomm-Add-PPE-RSS-hash-config.patch
new file mode 100644 (file)
index 0000000..6ee4e34
--- /dev/null
@@ -0,0 +1,351 @@
+From b052daae2f22a7a7fcfe981598444c3f5fb370b4 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 14:52:13 +0800
+Subject: [PATCH 25/50] net: ethernet: qualcomm: Add PPE RSS hash config
+
+The PPE RSS hash is generated from the configured seed based on the
+packet content; it is used to select the queue and can also be
+passed to the EDMA RX descriptor.
+
+Change-Id: If02cb25aa81a3afb0f3d68b2a5a354bd6cee28b8
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 182 +++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    |  36 ++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  47 +++++
+ 3 files changed, 263 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index 18296a449d4e..4363ea3cfb90 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1282,6 +1282,143 @@ int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable)
+                                 val);
+ }
++static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
++                                  struct ppe_rss_hash_cfg cfg)
++{
++      u32 reg, val;
++
++      switch (index) {
++      case 0:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sip_mix[0]);
++              break;
++      case 1:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dip_mix[0]);
++              break;
++      case 2:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_protocol_mix);
++              break;
++      case 3:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_dport_mix);
++              break;
++      case 4:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, cfg.hash_sport_mix);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
++                                  struct ppe_rss_hash_cfg cfg)
++{
++      u32 reg, val;
++
++      switch (index) {
++      case 0 ... 3:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sip_mix[index]);
++              break;
++      case 4 ... 7:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dip_mix[index - 4]);
++              break;
++      case 8:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_protocol_mix);
++              break;
++      case 9:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_dport_mix);
++              break;
++      case 10:
++              val = FIELD_PREP(PPE_RSS_HASH_MIX_VAL, cfg.hash_sport_mix);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
++
++      return regmap_write(ppe_dev->regmap, reg, val);
++}
++
++/**
++ * ppe_rss_hash_config_set - Set PPE RSS hash configuration
++ * @ppe_dev: PPE device
++ * @mode: Packet format mode
++ * @cfg: RSS hash configuration
++ *
++ * The PPE RSS hash is configured based on the packet format mode.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
++                          struct ppe_rss_hash_cfg cfg)
++{
++      u32 val, reg;
++      int i, ret;
++
++      if (mode & PPE_RSS_HASH_MODE_IPV4) {
++              val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
++              val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
++              ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
++              if (ret)
++                      return ret;
++
++              val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
++              ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
++              if (ret)
++                      return ret;
++
++              for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_NUM; i++) {
++                      ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
++                      if (ret)
++                              return ret;
++              }
++
++              for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_NUM; i++) {
++                      val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
++                      val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
++                      reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
++
++                      ret = regmap_write(ppe_dev->regmap, reg, val);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      if (mode & PPE_RSS_HASH_MODE_IPV6) {
++              val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
++              val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
++              ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
++              if (ret)
++                      return ret;
++
++              val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
++              ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
++              if (ret)
++                      return ret;
++
++              for (i = 0; i < PPE_RSS_HASH_MIX_NUM; i++) {
++                      ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
++                      if (ret)
++                              return ret;
++              }
++
++              for (i = 0; i < PPE_RSS_HASH_FIN_NUM; i++) {
++                      val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
++                      val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
++                      reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
++
++                      ret = regmap_write(ppe_dev->regmap, reg, val);
++                      if (ret)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -1324,7 +1461,7 @@ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+       return regmap_update_bits(ppe_dev->regmap, reg,
+                                 PPE_BM_PORT_FC_MODE_EN,
+                                 val);
+ }
+ /* Configure the buffer threshold for the port flow control function. */
+ static int ppe_config_bm(struct ppe_device *ppe_dev)
+@@ -1744,6 +1881,43 @@ static int ppe_port_ctrl_init(struct ppe_device *ppe_dev)
+       return ppe_counter_set(ppe_dev, 0, true);
+ }
++/* Initialize the PPE RSS hash configuration; the RSS hash configs decide
++ * the hash value generated, which is used to derive the queue offset.
++ */
++static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
++{
++      u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
++      u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
++      struct ppe_rss_hash_cfg hash_cfg;
++      int i, ret;
++
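++      /* A per-boot random seed varies the RSS spreading across reboots. */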
++      hash_cfg.hash_seed = get_random_u32();
++      hash_cfg.hash_mask = 0xfff;
++      hash_cfg.hash_fragment_mode = false;
++
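++      /* Each 10-bit fin tuple is split into a 5-bit inner selector (low
++       * bits) and a 5-bit outer selector (high bits); the first tuple
++       * 0x205 gives inner 0x05 and outer 0x10.
++       */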
++      for (i = 0; i < ARRAY_SIZE(fins); i++) {
++              hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
++              hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
++      }
++
++      hash_cfg.hash_protocol_mix = 0x13;
++      hash_cfg.hash_dport_mix = 0xb;
++      hash_cfg.hash_sport_mix = 0x13;
++      hash_cfg.hash_sip_mix[0] = 0x13;
++      hash_cfg.hash_dip_mix[0] = 0xb;
++
++      ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < ARRAY_SIZE(ips); i++) {
++              hash_cfg.hash_sip_mix[i] = ips[i];
++              hash_cfg.hash_dip_mix[i] = ips[i];
++      }
++
++      return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
++}
++
+ /* Initialize PPE device to handle traffic correctly. */
+ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+ {
+@@ -1757,7 +1931,11 @@ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_port_ctrl_init(ppe_dev);
++      ret = ppe_port_ctrl_init(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_rss_hash_init(ppe_dev);
+ }
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index 7f5d92c39dd3..6dd91bc45908 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -15,6 +15,11 @@
+ #define PPE_QUEUE_BASE_CPU_CODE                       1024
+ #define PPE_QUEUE_BASE_SERVICE_CODE           2048
++#define PPE_RSS_HASH_MODE_IPV4                        BIT(0)
++#define PPE_RSS_HASH_MODE_IPV6                        BIT(1)
++#define PPE_RSS_HASH_IP_LENGTH                        4
++#define PPE_RSS_HASH_TUPLES                   5
++
+ /**
+  * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
+  * @flow_id: PPE flow ID.
+@@ -202,6 +207,35 @@ enum ppe_action_type {
+       PPE_ACTION_REDIRECT_TO_CPU = 3,
+ };
++/**
++ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
++ * @hash_mask: Mask of the generated hash value.
++ * @hash_fragment_mode: Mode of the fragment packet for 3 tuples.
++ * @hash_seed: Seed to generate RSS hash.
++ * @hash_sip_mix: Source IP selection.
++ * @hash_dip_mix: Destination IP selection.
++ * @hash_protocol_mix: Protocol selection.
++ * @hash_sport_mix: Source L4 port selection.
++ * @hash_dport_mix: Destination L4 port selection.
++ * @hash_fin_inner: RSS hash value first selection.
++ * @hash_fin_outer: RSS hash value second selection.
++ *
++ * The PPE RSS hash value is generated from the received packet according
++ * to this RSS hash configuration.
++ */
++struct ppe_rss_hash_cfg {
++      u32 hash_mask;
++      bool hash_fragment_mode;
++      u32 hash_seed;
++      u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
++      u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
++      u8 hash_protocol_mix;
++      u8 hash_sport_mix;
++      u8 hash_dport_mix;
++      u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
++      u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
++};
++
+ int ppe_hw_config(struct ppe_device *ppe_dev);
+ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+                           int node_id, bool flow_level, int port,
+@@ -227,4 +261,6 @@ int ppe_servcode_config_set(struct ppe_device *ppe_dev,
+                           int servcode,
+                           struct ppe_servcode_cfg cfg);
+ int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
++int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
++                          struct ppe_rss_hash_cfg hash_cfg);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index e981a1c0e670..29001a2599d8 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -23,6 +23,53 @@
+ #define PPE_RX_FIFO_CFG_INC                   4
+ #define PPE_RX_FIFO_CFG_THRSH                 GENMASK(2, 0)
++/* The RSS configs contribute to the RSS hash value generated, which
++ * is used to configure the queue offset.
++ */
++#define PPE_RSS_HASH_MASK_ADDR                        0xb4318
++#define PPE_RSS_HASH_MASK_NUM                 1
++#define PPE_RSS_HASH_MASK_INC                 4
++#define PPE_RSS_HASH_MASK_HASH_MASK           GENMASK(20, 0)
++#define PPE_RSS_HASH_MASK_FRAGMENT            BIT(28)
++
++#define PPE_RSS_HASH_SEED_ADDR                        0xb431c
++#define PPE_RSS_HASH_SEED_NUM                 1
++#define PPE_RSS_HASH_SEED_INC                 4
++#define PPE_RSS_HASH_SEED_VAL                 GENMASK(31, 0)
++
++#define PPE_RSS_HASH_MIX_ADDR                 0xb4320
++#define PPE_RSS_HASH_MIX_NUM                  11
++#define PPE_RSS_HASH_MIX_INC                  4
++#define PPE_RSS_HASH_MIX_VAL                  GENMASK(4, 0)
++
++#define PPE_RSS_HASH_FIN_ADDR                 0xb4350
++#define PPE_RSS_HASH_FIN_NUM                  5
++#define PPE_RSS_HASH_FIN_INC                  4
++#define PPE_RSS_HASH_FIN_INNER                        GENMASK(4, 0)
++#define PPE_RSS_HASH_FIN_OUTER                        GENMASK(9, 5)
++
++#define PPE_RSS_HASH_MASK_IPV4_ADDR           0xb4380
++#define PPE_RSS_HASH_MASK_IPV4_NUM            1
++#define PPE_RSS_HASH_MASK_IPV4_INC            4
++#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK      GENMASK(20, 0)
++#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT               BIT(28)
++
++#define PPE_RSS_HASH_SEED_IPV4_ADDR           0xb4384
++#define PPE_RSS_HASH_SEED_IPV4_NUM            1
++#define PPE_RSS_HASH_SEED_IPV4_INC            4
++#define PPE_RSS_HASH_SEED_IPV4_VAL            GENMASK(31, 0)
++
++#define PPE_RSS_HASH_MIX_IPV4_ADDR            0xb4390
++#define PPE_RSS_HASH_MIX_IPV4_NUM             5
++#define PPE_RSS_HASH_MIX_IPV4_INC             4
++#define PPE_RSS_HASH_MIX_IPV4_VAL             GENMASK(4, 0)
++
++#define PPE_RSS_HASH_FIN_IPV4_ADDR            0xb43b0
++#define PPE_RSS_HASH_FIN_IPV4_NUM             5
++#define PPE_RSS_HASH_FIN_IPV4_INC             4
++#define PPE_RSS_HASH_FIN_IPV4_INNER           GENMASK(4, 0)
++#define PPE_RSS_HASH_FIN_IPV4_OUTER           GENMASK(9, 5)
++
+ #define PPE_BM_SCH_CFG_TBL_ADDR                       0xc000
+ #define PPE_BM_SCH_CFG_TBL_NUM                        128
+ #define PPE_BM_SCH_CFG_TBL_INC                        0x10
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-26-net-ethernet-qualcomm-Add-PPE-queue-map-function.patch b/target/linux/qualcommbe/patches-6.6/103-26-net-ethernet-qualcomm-Add-PPE-queue-map-function.patch
new file mode 100644 (file)
index 0000000..c390be4
--- /dev/null
@@ -0,0 +1,172 @@
+From 809513a92e3aef6ae852b35e118408059929d6d3 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 15:44:37 +0800
+Subject: [PATCH 26/50] net: ethernet: qualcomm: Add PPE queue map function
+
+Configure the mapping between the queues of the CPU port and the EDMA ring.
+
+All queues of the CPU port are mapped to EDMA ring 0 by default;
+this mapping can be updated by the EDMA driver.
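+
+As a sketch (not part of this patch), an EDMA driver could remap its
+ring with the new helper; the ring ID and queue IDs below are purely
+illustrative:
+
+       int queues[] = { 0, 8, 16, 24 };
+       int ret;
+
+       ret = ppe_edma_ring_to_queues_config(ppe_dev, 1, ARRAY_SIZE(queues),
+                                            queues);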
+
+Change-Id: I87ab4117af86e4b3fe7a4b41490ba8ac71ce29ef
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.c   | 23 ++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_api.h   |  2 +
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 45 ++++++++++++++++++-
+ .../net/ethernet/qualcomm/ppe/ppe_config.h    |  5 +++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  5 +++
+ 5 files changed, 79 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+index 72d416e0ca44..6199c7025f66 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
+@@ -82,3 +82,26 @@ int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
+       return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
+ };
++
++/**
++ * ppe_edma_ring_to_queues_config - Map EDMA ring to PPE queues
++ * @ppe_dev: PPE device
++ * @ring_id: EDMA ring ID
++ * @num: Number of queues mapped to EDMA ring
++ * @queues: PPE queue IDs
++ *
++ * The PPE queues are configured to map to the specified EDMA ring ID.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
++                                 int num, int queues[] __counted_by(num))
++{
++      u32 queue_bmap[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT] = {};
++      int index;
++
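++      /* Set one bit per PPE queue ID; each u32 word covers 32 queues. */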
++      for (index = 0; index < num; index++)
++              queue_bmap[queues[index] / 32] |= BIT_MASK(queues[index] % 32);
++
++      return ppe_ring_queue_map_set(ppe_dev, ring_id, queue_bmap);
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+index ecdae4b95667..2135b5383bcd 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
+@@ -55,4 +55,6 @@ int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
+                                int index, int queue_offset);
+ int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
+                               int *res_start, int *res_end);
++int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
++                                 int num, int queues[] __counted_by(num));
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index 4363ea3cfb90..a19a6472e4ed 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1419,6 +1419,28 @@ int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+       return 0;
+ }
++/**
++ * ppe_ring_queue_map_set - Map PPE queues to an EDMA ring
++ * @ppe_dev: PPE device
++ * @ring_id: EDMA ring ID
++ * @queue_map: Queue bit map
++ *
++ * The PPE queues set in the bitmap are configured to use the specified ring.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
++{
++      u32 reg, queue_bitmap_val[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT];
++
++      memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
++      reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
++
++      return regmap_bulk_write(ppe_dev->regmap, reg,
++                               queue_bitmap_val,
++                               ARRAY_SIZE(queue_bitmap_val));
++}
++
+ static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+                                  struct ppe_bm_port_config port_cfg)
+ {
+@@ -1918,6 +1940,23 @@ static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+       return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+ }
++/* Initialize the queues of the CPU port to map to EDMA ring 0. */
++static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
++{
++      u32 queue_bmap[PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT] = {};
++      int ret, queue_id, queue_max;
++
++      ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
++                                  &queue_id, &queue_max);
++      if (ret)
++              return ret;
++
++      for (; queue_id <= queue_max; queue_id++)
++              queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
++
++      return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
++}
++
+ /* Initialize PPE device to handle traffic correctly. */
+ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+ {
+@@ -1935,7 +1974,11 @@ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_rss_hash_init(ppe_dev);
++      ret = ppe_rss_hash_init(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_queues_to_ring_init(ppe_dev);
+ }
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+index 6dd91bc45908..9be749800f14 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
+@@ -20,6 +20,8 @@
+ #define PPE_RSS_HASH_IP_LENGTH                        4
+ #define PPE_RSS_HASH_TUPLES                   5
++#define PPE_RING_MAPPED_BP_QUEUE_WORD_COUNT   10
++
+ /**
+  * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
+  * @flow_id: PPE flow ID.
+@@ -263,4 +265,7 @@ int ppe_servcode_config_set(struct ppe_device *ppe_dev,
+ int ppe_counter_set(struct ppe_device *ppe_dev, int port, bool enable);
+ int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+                           struct ppe_rss_hash_cfg hash_cfg);
++int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
++                         int ring_id,
++                         u32 *queue_map);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 29001a2599d8..8c6cd6b52b0f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -212,6 +212,11 @@
+ #define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN  GENMASK(1, 0)
+ #define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN    GENMASK(3, 2)
++/* PPE queue bitmap. */
++#define PPE_RING_Q_MAP_TBL_ADDR                       0x42a000
++#define PPE_RING_Q_MAP_TBL_NUM                        24
++#define PPE_RING_Q_MAP_TBL_INC                        0x40
++
+ #define PPE_DEQ_OPR_TBL_ADDR                  0x430000
+ #define PPE_DEQ_OPR_TBL_NUM                   300
+ #define PPE_DEQ_OPR_TBL_INC                   0x10
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-27-net-ethernet-qualcomm-Add-PPE-L2-bridge-initializati.patch b/target/linux/qualcommbe/patches-6.6/103-27-net-ethernet-qualcomm-Add-PPE-L2-bridge-initializati.patch
new file mode 100644 (file)
index 0000000..99845d3
--- /dev/null
@@ -0,0 +1,187 @@
+From 244012f3f879d4709be68e7ddabc064268bbd69e Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Thu, 28 Dec 2023 00:38:08 +0800
+Subject: [PATCH 27/50] net: ethernet: qualcomm: Add PPE L2 bridge
+ initialization
+
+The per-port L2 bridge settings are initialized as follows:
+for the PPE CPU port, bridge Tx is enabled and FDB learning is
+disabled; for the PPE physical ports, bridge Tx is disabled, FDB
+learning is enabled by default, and the L2 forward action is
+initialized to forward to the CPU port.
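+
+For illustration only (not part of this patch), the new VSI accessor
+macros could later be used to also flood a hypothetical physical port 3
+besides the CPU port:
+
+       PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0) | BIT(3));
+       PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0) | BIT(3));
+       PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0) | BIT(3));
+       PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0) | BIT(3));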
+
+Change-Id: Ida42464f1d5e53583a434a11b19e6501c649d44e
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ .../net/ethernet/qualcomm/ppe/ppe_config.c    | 68 ++++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  | 54 +++++++++++++++
+ 2 files changed, 121 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+index a19a6472e4ed..621f4f0ba9e2 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
+@@ -1957,6 +1957,68 @@ static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
+       return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+ }
++/* Initialize PPE bridge configuration. */
++static int ppe_bridge_init(struct ppe_device *ppe_dev)
++{
++      u32 reg, mask, port_cfg[4], vsi_cfg[2];
++      int ret, i;
++
++      /* For CPU port0, enable bridge Tx and disable FDB new-address
++       * learning and station-move address learning.
++       */
++      mask = PPE_PORT_BRIDGE_TXMAC_EN;
++      mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
++      mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
++      ret = regmap_update_bits(ppe_dev->regmap,
++                               PPE_PORT_BRIDGE_CTRL_ADDR,
++                               mask,
++                               PPE_PORT_BRIDGE_TXMAC_EN);
++      if (ret)
++              return ret;
++
++      for (i = 1; i < ppe_dev->num_ports; i++) {
++              /* Set Invalid VSI forwarding to CPU port0 if no VSI
++               * is assigned to the port.
++               */
++              reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
++              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                     port_cfg, ARRAY_SIZE(port_cfg));
++
++              if (ret)
++                      return ret;
++
++              PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
++              PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
++
++              ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                                      port_cfg, ARRAY_SIZE(port_cfg));
++              if (ret)
++                      return ret;
++      }
++
++      for (i = 0; i < PPE_VSI_TBL_NUM; i++) {
++              /* Enable VSI bridge forwarding address learning and include
++               * CPU port0 in the VSI forward members.
++               */
++              PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
++              PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
++              PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
++              PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
++              PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
++              PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
++              PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
++              PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
++
++              reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
++              ret = regmap_bulk_write(ppe_dev->regmap, reg,
++                                      vsi_cfg, ARRAY_SIZE(vsi_cfg));
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
+ /* Initialize PPE device to handle traffic correctly. */
+ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+ {
+@@ -1978,7 +2040,11 @@ static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
+       if (ret)
+               return ret;
+-      return ppe_queues_to_ring_init(ppe_dev);
++      ret = ppe_queues_to_ring_init(ppe_dev);
++      if (ret)
++              return ret;
++
++      return ppe_bridge_init(ppe_dev);
+ }
+ int ppe_hw_config(struct ppe_device *ppe_dev)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 8c6cd6b52b0f..7f06843e4151 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -126,6 +126,18 @@
+ #define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value)  \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_EG_SERVICE_W1_TX_CNT_EN)
++/* PPE port bridge configuration */
++#define PPE_PORT_BRIDGE_CTRL_ADDR             0x60300
++#define PPE_PORT_BRIDGE_CTRL_NUM              8
++#define PPE_PORT_BRIDGE_CTRL_INC              4
++#define PPE_PORT_BRIDGE_NEW_LRN_EN            BIT(0)
++#define PPE_PORT_BRIDGE_NEW_FWD_CMD           GENMASK(2, 1)
++#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN               BIT(3)
++#define PPE_PORT_BRIDGE_STA_MOVE_FWD_CMD      GENMASK(5, 4)
++#define PPE_PORT_BRIDGE_ISOLATION_BITMAP      GENMASK(15, 8)
++#define PPE_PORT_BRIDGE_TXMAC_EN              BIT(16)
++#define PPE_PORT_BRIDGE_PROMISC_EN            BIT(17)
++
+ #define PPE_MC_MTU_CTRL_TBL_ADDR              0x60a00
+ #define PPE_MC_MTU_CTRL_TBL_NUM                       8
+ #define PPE_MC_MTU_CTRL_TBL_INC                       4
+@@ -133,6 +145,36 @@
+ #define PPE_MC_MTU_CTRL_TBL_MTU_CMD           GENMASK(15, 14)
+ #define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN         BIT(16)
++/* PPE VSI configurations */
++#define PPE_VSI_TBL_ADDR                      0x63800
++#define PPE_VSI_TBL_NUM                               64
++#define PPE_VSI_TBL_INC                               0x10
++#define PPE_VSI_W0_MEMBER_PORT_BITMAP         GENMASK(7, 0)
++#define PPE_VSI_W0_UUC_BITMAP                 GENMASK(15, 8)
++#define PPE_VSI_W0_UMC_BITMAP                 GENMASK(23, 16)
++#define PPE_VSI_W0_BC_BITMAP                  GENMASK(31, 24)
++#define PPE_VSI_W1_NEW_ADDR_LRN_EN            BIT(0)
++#define PPE_VSI_W1_NEW_ADDR_FWD_CMD           GENMASK(2, 1)
++#define PPE_VSI_W1_STATION_MOVE_LRN_EN                BIT(3)
++#define PPE_VSI_W1_STATION_MOVE_FWD_CMD               GENMASK(5, 4)
++
++#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value)                \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_MEMBER_PORT_BITMAP)
++#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value)                        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UUC_BITMAP)
++#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value)                        \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_UMC_BITMAP)
++#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value)                 \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_VSI_W0_BC_BITMAP)
++#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value)           \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_LRN_EN)
++#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value)          \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_NEW_ADDR_FWD_CMD)
++#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value)               \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_LRN_EN)
++#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value)      \
++      u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_VSI_W1_STATION_MOVE_FWD_CMD)
++
+ /* PPE port control configuration, the MTU and MRU configs. */
+ #define PPE_MRU_MTU_CTRL_TBL_ADDR             0x65000
+ #define PPE_MRU_MTU_CTRL_TBL_NUM              256
+@@ -170,6 +212,18 @@
+ #define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN               BIT(30)
+ #define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN               BIT(31)
++/* L2 Port configurations */
++#define PPE_L2_VP_PORT_TBL_ADDR                       0x98000
++#define PPE_L2_VP_PORT_TBL_NUM                        256
++#define PPE_L2_VP_PORT_TBL_INC                        0x10
++#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN  BIT(0)
++#define PPE_L2_VP_PORT_W0_DST_INFO            GENMASK(9, 2)
++
++#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value)    \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN)
++#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value)              \
++      u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
++
+ #define PPE_TL_SERVICE_TBL_ADDR                       0x306000
+ #define PPE_TL_SERVICE_TBL_NUM                        256
+ #define PPE_TL_SERVICE_TBL_INC                        4
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-28-net-ethernet-qualcomm-Add-PPE-debugfs-support.patch b/target/linux/qualcommbe/patches-6.6/103-28-net-ethernet-qualcomm-Add-PPE-debugfs-support.patch
new file mode 100644 (file)
index 0000000..3147717
--- /dev/null
@@ -0,0 +1,986 @@
+From 45fb5b1303af9b7341c9a9fd692248aa67f5dc63 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Wed, 27 Dec 2023 17:04:08 +0800
+Subject: [PATCH 28/50] net: ethernet: qualcomm: Add PPE debugfs support
+
+The PPE hardware counters are exposed through the debugfs
+entry "/sys/kernel/debug/ppe/packet_counter".
+
+Change-Id: I58251fe00a89f78ee6c410af1d2380270e55a176
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe.c       |  11 +
+ drivers/net/ethernet/qualcomm/ppe/ppe.h       |   3 +
+ .../net/ethernet/qualcomm/ppe/ppe_debugfs.c   | 725 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/ppe_debugfs.h   |  16 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h  |  98 +++
+ 6 files changed, 854 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index e4e5c94fde3e..227af2168224 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o
++qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+index 443706291ce0..8cf6c1161c4b 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -16,6 +16,7 @@
+ #include "ppe.h"
+ #include "ppe_config.h"
++#include "ppe_debugfs.h"
+ #define PPE_PORT_MAX          8
+ #define PPE_CLK_RATE          353000000
+@@ -206,11 +207,20 @@ static int qcom_ppe_probe(struct platform_device *pdev)
+       if (ret)
+               return dev_err_probe(dev, ret, "PPE HW config failed\n");
++      ppe_debugfs_setup(ppe_dev);
+       platform_set_drvdata(pdev, ppe_dev);
+       return 0;
+ }
++static void qcom_ppe_remove(struct platform_device *pdev)
++{
++      struct ppe_device *ppe_dev;
++
++      ppe_dev = platform_get_drvdata(pdev);
++      ppe_debugfs_teardown(ppe_dev);
++}
++
+ static const struct of_device_id qcom_ppe_of_match[] = {
+       { .compatible = "qcom,ipq9574-ppe" },
+       {},
+@@ -223,6 +233,7 @@ static struct platform_driver qcom_ppe_driver = {
+               .of_match_table = qcom_ppe_of_match,
+       },
+       .probe  = qcom_ppe_probe,
++      .remove_new = qcom_ppe_remove,
+ };
+ module_platform_driver(qcom_ppe_driver);
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+index 733d77f4063d..a2a5d1901547 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+@@ -11,6 +11,7 @@
+ struct device;
+ struct regmap;
++struct dentry;
+ /**
+  * struct ppe_device - PPE device private data.
+@@ -18,6 +19,7 @@ struct regmap;
+  * @regmap: PPE register map.
+  * @clk_rate: PPE clock rate.
+  * @num_ports: Number of PPE ports.
++ * @debugfs_root: PPE debug root entry.
+  * @num_icc_paths: Number of interconnect paths.
+  * @icc_paths: Interconnect path array.
+  *
+@@ -30,6 +32,7 @@ struct ppe_device {
+       struct regmap *regmap;
+       unsigned long clk_rate;
+       unsigned int num_ports;
++      struct dentry *debugfs_root;
+       unsigned int num_icc_paths;
+       struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+ };
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+new file mode 100644
+index 000000000000..1cd4c491e724
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+@@ -0,0 +1,725 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE debugfs routines for displaying the PPE counters useful for debugging. */
++
++#include <linux/debugfs.h>
++#include <linux/regmap.h>
++#include <linux/seq_file.h>
++
++#include "ppe.h"
++#include "ppe_config.h"
++#include "ppe_debugfs.h"
++#include "ppe_regs.h"
++
++#define PPE_PKT_CNT_TBL_SIZE          3
++#define PPE_DROP_PKT_CNT_TBL_SIZE     5
++
++#define PREFIX_S(desc, cnt_type) \
++      seq_printf(seq, "%-16s %16s", desc, cnt_type)
++#define CNT_ONE_TYPE(cnt, str, index) \
++      seq_printf(seq, "%10u(%s=%04d)", cnt, str, index)
++#define CNT_TWO_TYPE(cnt, cnt1, str, index) \
++      seq_printf(seq, "%10u/%u(%s=%04d)", cnt, cnt1, str, index)
++#define CNT_CPU_CODE(cnt, index) \
++      seq_printf(seq, "%10u(cpucode:%d)", cnt, index)
++#define CNT_DROP_CODE(cnt, port, index) \
++      seq_printf(seq, "%10u(port=%d),dropcode:%d", cnt, port, index)
++
++#define PPE_W0_PKT_CNT                                GENMASK(31, 0)
++#define PPE_W2_DROP_PKT_CNT_LOW                       GENMASK(31, 8)
++#define PPE_W3_DROP_PKT_CNT_HIGH              GENMASK(7, 0)
++
++#define PPE_GET_PKT_CNT(tbl_cfg)              \
++      u32_get_bits(*((u32 *)(tbl_cfg)), PPE_W0_PKT_CNT)
++#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cfg)     \
++      u32_get_bits(*((u32 *)(tbl_cfg) + 0x2), PPE_W2_DROP_PKT_CNT_LOW)
++#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cfg)    \
++      u32_get_bits(*((u32 *)(tbl_cfg) + 0x3), PPE_W3_DROP_PKT_CNT_HIGH)
++
++/**
++ * enum ppe_cnt_size_type - PPE counter size type
++ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with single register
++ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with table of 3 words
++ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with table of 5 words
++ *
++ * The PPE uses different register sizes to record the packet counters:
++ * either a single register, or a register table of 3 or 5 words. The
++ * counter with a 5-word table also records the drop counter. Some other
++ * counters occupy only a few bits of a 32-bit register and are not
++ * covered by this enumeration type.
++ */
++enum ppe_cnt_size_type {
++      PPE_PKT_CNT_SIZE_1WORD,
++      PPE_PKT_CNT_SIZE_3WORD,
++      PPE_PKT_CNT_SIZE_5WORD,
++};
++
++static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
++                         enum ppe_cnt_size_type cnt_type,
++                         u32 *cnt, u32 *drop_cnt)
++{
++      u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
++      u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
++      u32 value;
++      int ret;
++
++      switch (cnt_type) {
++      case PPE_PKT_CNT_SIZE_1WORD:
++              ret = regmap_read(ppe_dev->regmap, reg, &value);
++              if (ret)
++                      return ret;
++
++              *cnt = value;
++              break;
++      case PPE_PKT_CNT_SIZE_3WORD:
++              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                     pkt_cnt, ARRAY_SIZE(pkt_cnt));
++              if (ret)
++                      return ret;
++
++              *cnt = PPE_GET_PKT_CNT(pkt_cnt);
++              break;
++      case PPE_PKT_CNT_SIZE_5WORD:
++              ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                                     drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
++              if (ret)
++                      return ret;
++
++              *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
++
++              /* Drop counter with low 24 bits. */
++              value  = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
++              *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
++
++              /* Drop counter with high 8 bits. */
++              value  = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
++              *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
++              break;
++      }
++
++      return 0;
++}
++
++static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
++                                enum ppe_cnt_size_type cnt_type)
++{
++      u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
++      u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
++
++      switch (cnt_type) {
++      case PPE_PKT_CNT_SIZE_1WORD:
++              regmap_write(ppe_dev->regmap, reg, 0);
++              break;
++      case PPE_PKT_CNT_SIZE_3WORD:
++              regmap_bulk_write(ppe_dev->regmap, reg,
++                                pkt_cnt, ARRAY_SIZE(pkt_cnt));
++              break;
++      case PPE_PKT_CNT_SIZE_5WORD:
++              regmap_bulk_write(ppe_dev->regmap, reg,
++                                drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
++              break;
++      }
++}
++
++/* The number of packets dropped because no buffer was available. */
++static void ppe_prx_drop_counter_get(struct ppe_device *ppe_dev,
++                                   struct seq_file *seq)
++{
++      int ret, i, tag = 0;
++      u32 reg, drop_cnt;
++
++      PREFIX_S("PRX_DROP_CNT", "SILENT_DROP:");
++      for (i = 0; i < PPE_DROP_CNT_NUM; i++) {
++              reg = PPE_DROP_CNT_ADDR + i * PPE_DROP_CNT_INC;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++                                    &drop_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (drop_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_ONE_TYPE(drop_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets dropped because there was not enough buffer to
++ * cache the whole packet, after some buffer was already allocated.
++ */
++static void ppe_prx_bm_drop_counter_get(struct ppe_device *ppe_dev,
++                                      struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("PRX_BM_DROP_CNT", "OVERFLOW_DROP:");
++      for (i = 0; i < PPE_DROP_STAT_NUM; i++) {
++              reg = PPE_DROP_STAT_ADDR + PPE_DROP_STAT_INC * i;
++
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_ONE_TYPE(pkt_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of currently occupied buffers that can't be flushed. */
++static void ppe_prx_bm_port_counter_get(struct ppe_device *ppe_dev,
++                                      struct seq_file *seq)
++{
++      int used_cnt, react_cnt;
++      int ret, i, tag = 0;
++      u32 reg, val;
++
++      PREFIX_S("PRX_BM_PORT_CNT", "USED/REACT:");
++      for (i = 0; i < PPE_BM_USED_CNT_NUM; i++) {
++              reg = PPE_BM_USED_CNT_ADDR + i * PPE_BM_USED_CNT_INC;
++              ret = regmap_read(ppe_dev->regmap, reg, &val);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              used_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
++
++              reg = PPE_BM_REACT_CNT_ADDR + i * PPE_BM_REACT_CNT_INC;
++              ret = regmap_read(ppe_dev->regmap, reg, &val);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              react_cnt = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
++
++              if (used_cnt > 0 || react_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(used_cnt, react_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of ingress packets. */
++static void ppe_ipx_pkt_counter_get(struct ppe_device *ppe_dev,
++                                  struct seq_file *seq)
++{
++      u32 reg, cnt, tunnel_cnt;
++      int i, ret, tag = 0;
++
++      PREFIX_S("IPR_PKT_CNT", "TPRX/IPRX:");
++      for (i = 0; i < PPE_IPR_PKT_CNT_NUM; i++) {
++              reg = PPE_TPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++                                    &tunnel_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              reg = PPE_IPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
++                                    &cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (tunnel_cnt > 0 || cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(tunnel_cnt, cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped in the ingress direction. */
++static void ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
++                                  struct seq_file *seq)
++{
++      u32 reg, pkt_cnt, drop_cnt;
++      int ret, i, tag = 0;
++
++      PREFIX_S("PORT_RX_CNT", "RX/RX_DROP:");
++      for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_NUM; i++) {
++              reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++                                    &pkt_cnt, &drop_cnt);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped by the port. */
++static void ppe_vp_rx_counter_get(struct ppe_device *ppe_dev,
++                                struct seq_file *seq)
++{
++      u32 reg, pkt_cnt, drop_cnt;
++      int ret, i, tag = 0;
++
++      PREFIX_S("VPORT_RX_CNT", "RX/RX_DROP:");
++      for (i = 0; i < PPE_PORT_RX_CNT_TBL_NUM; i++) {
++              reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++                                    &pkt_cnt, &drop_cnt);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets received or dropped by layer 2 processing. */
++static void ppe_pre_l2_counter_get(struct ppe_device *ppe_dev,
++                                 struct seq_file *seq)
++{
++      u32 reg, pkt_cnt, drop_cnt;
++      int ret, i, tag = 0;
++
++      PREFIX_S("PRE_L2_CNT", "RX/RX_DROP:");
++      for (i = 0; i < PPE_PRE_L2_CNT_TBL_NUM; i++) {
++              reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
++                                    &pkt_cnt, &drop_cnt);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, drop_cnt, "vsi", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets received by the VLAN handler. */
++static void ppe_vlan_counter_get(struct ppe_device *ppe_dev,
++                               struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("VLAN_CNT", "RX:");
++      for (i = 0; i < PPE_VLAN_CNT_TBL_NUM; i++) {
++              reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
++
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_ONE_TYPE(pkt_cnt, "vsi", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets forwarded to the CPU handler. */
++static void ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
++                                   struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0;
++      int ret, i;
++
++      PREFIX_S("CPU_CODE_CNT", "CODE:");
++      for (i = 0; i < PPE_DROP_CPU_CNT_TBL_NUM; i++) {
++              reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
++
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (!pkt_cnt)
++                      continue;
++
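++              /* Entries 0..255 count packets per CPU code; higher entries
++               * each encode a (port, drop code) pair.
++               */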
++              if (i < 256)
++                      CNT_CPU_CODE(pkt_cnt, i);
++              else
++                      CNT_DROP_CODE(pkt_cnt, (i - 256) % 8, (i - 256) / 8);
++
++              seq_putc(seq, '\n');
++              PREFIX_S("", "");
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets forwarded by VLAN in the egress direction. */
++static void ppe_eg_vsi_counter_get(struct ppe_device *ppe_dev,
++                                 struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("EG_VSI_CNT", "TX:");
++      for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_NUM; i++) {
++              reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
++
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_ONE_TYPE(pkt_cnt, "vsi", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or dropped by the port. */
++static void ppe_vp_tx_counter_get(struct ppe_device *ppe_dev,
++                                struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0, drop_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("VPORT_TX_CNT", "TX/TX_DROP:");
++      for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &drop_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0 || drop_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or dropped in the egress direction. */
++static void ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
++                                  struct seq_file *seq)
++{
++      u32 reg, pkt_cnt = 0, drop_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("PORT_TX_CNT", "TX/TX_DROP:");
++      for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &drop_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (pkt_cnt > 0 || drop_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, drop_cnt, "port", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* The number of packets transmitted or pending on the PPE queue. */
++static void ppe_queue_tx_counter_get(struct ppe_device *ppe_dev,
++                                   struct seq_file *seq)
++{
++      u32 reg, val, pkt_cnt = 0, pend_cnt = 0;
++      int ret, i, tag = 0;
++
++      PREFIX_S("QUEUE_TX_CNT", "TX/PEND:");
++      for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
++              ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
++                                    &pkt_cnt, NULL);
++              if (ret) {
++                      seq_printf(seq, "ERROR %d\n", ret);
++                      return;
++              }
++
++              if (i < PPE_AC_UNI_QUEUE_CFG_TBL_NUM) {
++                      reg = PPE_AC_UNI_QUEUE_CNT_TBL_ADDR + PPE_AC_UNI_QUEUE_CNT_TBL_INC * i;
++                      ret = regmap_read(ppe_dev->regmap, reg, &val);
++                      if (ret) {
++                              seq_printf(seq, "ERROR %d\n", ret);
++                              return;
++                      }
++
++                      pend_cnt = FIELD_GET(PPE_AC_UNI_QUEUE_CNT_TBL_PEND_CNT, val);
++              } else {
++                      reg = PPE_AC_MUL_QUEUE_CNT_TBL_ADDR +
++                            PPE_AC_MUL_QUEUE_CNT_TBL_INC * (i - PPE_AC_UNI_QUEUE_CFG_TBL_NUM);
++                      ret = regmap_read(ppe_dev->regmap, reg, &val);
++                      if (ret) {
++                              seq_printf(seq, "ERROR %d\n", ret);
++                              return;
++                      }
++
++                      pend_cnt = FIELD_GET(PPE_AC_MUL_QUEUE_CNT_TBL_PEND_CNT, val);
++              }
++
++              if (pkt_cnt > 0 || pend_cnt > 0) {
++                      tag++;
++                      if (!(tag % 4)) {
++                              seq_putc(seq, '\n');
++                              PREFIX_S("", "");
++                      }
++
++                      CNT_TWO_TYPE(pkt_cnt, pend_cnt, "queue", i);
++              }
++      }
++
++      seq_putc(seq, '\n');
++}
++
++/* Display the packet counters of the PPE. */
++static int ppe_packet_counter_show(struct seq_file *seq, void *v)
++{
++      struct ppe_device *ppe_dev = seq->private;
++
++      ppe_prx_drop_counter_get(ppe_dev, seq);
++      ppe_prx_bm_drop_counter_get(ppe_dev, seq);
++      ppe_prx_bm_port_counter_get(ppe_dev, seq);
++      ppe_ipx_pkt_counter_get(ppe_dev, seq);
++      ppe_port_rx_counter_get(ppe_dev, seq);
++      ppe_vp_rx_counter_get(ppe_dev, seq);
++      ppe_pre_l2_counter_get(ppe_dev, seq);
++      ppe_vlan_counter_get(ppe_dev, seq);
++      ppe_cpu_code_counter_get(ppe_dev, seq);
++      ppe_eg_vsi_counter_get(ppe_dev, seq);
++      ppe_vp_tx_counter_get(ppe_dev, seq);
++      ppe_port_tx_counter_get(ppe_dev, seq);
++      ppe_queue_tx_counter_get(ppe_dev, seq);
++
++      return 0;
++}
++
++static int ppe_packet_counter_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, ppe_packet_counter_show, inode->i_private);
++}
++
++static ssize_t ppe_packet_counter_clear(struct file *file,
++                                      const char __user *buf,
++                                      size_t count, loff_t *pos)
++{
++      struct ppe_device *ppe_dev = file_inode(file)->i_private;
++      u32 reg;
++      int i;
++
++      for (i = 0; i < PPE_DROP_CNT_NUM; i++) {
++              reg = PPE_DROP_CNT_ADDR + i * PPE_DROP_CNT_INC;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++      }
++
++      for (i = 0; i < PPE_DROP_STAT_NUM; i++) {
++              reg = PPE_DROP_STAT_ADDR + PPE_DROP_STAT_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_IPR_PKT_CNT_NUM; i++) {
++              reg = PPE_IPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++
++              reg = PPE_TPR_PKT_CNT_ADDR + i * PPE_IPR_PKT_CNT_INC;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
++      }
++
++      for (i = 0; i < PPE_VLAN_CNT_TBL_NUM; i++) {
++              reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_PRE_L2_CNT_TBL_NUM; i++) {
++              reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++      }
++
++      for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++
++              reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_NUM; i++) {
++              reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++
++              reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_NUM; i++) {
++              reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_IN_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
++      ppe_tbl_pkt_cnt_clear(ppe_dev, PPE_EPE_DBG_OUT_CNT_ADDR, PPE_PKT_CNT_SIZE_1WORD);
++
++      for (i = 0; i < PPE_DROP_CPU_CNT_TBL_NUM; i++) {
++              reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
++      }
++
++      for (i = 0; i < PPE_PORT_RX_CNT_TBL_NUM; i++) {
++              reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++      }
++
++      for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_NUM; i++) {
++              reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
++              ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
++      }
++
++      return count;
++}
++
++static const struct file_operations ppe_debugfs_packet_counter_fops = {
++      .owner   = THIS_MODULE,
++      .open    = ppe_packet_counter_open,
++      .read    = seq_read,
++      .llseek  = seq_lseek,
++      .release = single_release,
++      .write   = ppe_packet_counter_clear,
++};
++
++void ppe_debugfs_setup(struct ppe_device *ppe_dev)
++{
++      ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
++      debugfs_create_file("packet_counter", 0444,
++                          ppe_dev->debugfs_root,
++                          ppe_dev,
++                          &ppe_debugfs_packet_counter_fops);
++}
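++
++/* Usage sketch (illustrative only, assuming debugfs is mounted at
++ * /sys/kernel/debug): reading the file dumps the PPE packet counters,
++ * and any write to it clears them:
++ *
++ *   cat /sys/kernel/debug/ppe/packet_counter
++ *   echo 1 > /sys/kernel/debug/ppe/packet_counter
++ */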
++
++void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
++{
++      debugfs_remove_recursive(ppe_dev->debugfs_root);
++      ppe_dev->debugfs_root = NULL;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
+new file mode 100644
+index 000000000000..a979fcf9d742
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE debugfs counters setup. */
++
++#ifndef __PPE_DEBUGFS_H__
++#define __PPE_DEBUGFS_H__
++
++#include "ppe.h"
++
++void ppe_debugfs_setup(struct ppe_device *ppe_dev);
++void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
++
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 7f06843e4151..e84633d0f572 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -23,6 +23,43 @@
+ #define PPE_RX_FIFO_CFG_INC                   4
+ #define PPE_RX_FIFO_CFG_THRSH                 GENMASK(2, 0)
++#define PPE_DROP_CNT_ADDR                     0xb024
++#define PPE_DROP_CNT_NUM                      8
++#define PPE_DROP_CNT_INC                      4
++
++/* BM port drop counter */
++#define PPE_DROP_STAT_ADDR                    0xe000
++#define PPE_DROP_STAT_NUM                     30
++#define PPE_DROP_STAT_INC                     0x10
++
++#define PPE_EPE_DBG_IN_CNT_ADDR                       0x26054
++#define PPE_EPE_DBG_IN_CNT_NUM                        1
++#define PPE_EPE_DBG_IN_CNT_INC                        0x4
++
++#define PPE_EPE_DBG_OUT_CNT_ADDR              0x26070
++#define PPE_EPE_DBG_OUT_CNT_NUM                       1
++#define PPE_EPE_DBG_OUT_CNT_INC                       0x4
++
++/* Egress VLAN counter */
++#define PPE_EG_VSI_COUNTER_TBL_ADDR           0x41000
++#define PPE_EG_VSI_COUNTER_TBL_NUM            64
++#define PPE_EG_VSI_COUNTER_TBL_INC            0x10
++
++/* Port TX counter */
++#define PPE_PORT_TX_COUNTER_TBL_ADDR          0x45000
++#define PPE_PORT_TX_COUNTER_TBL_NUM           8
++#define PPE_PORT_TX_COUNTER_TBL_INC           0x10
++
++/* Virtual port TX counter */
++#define PPE_VPORT_TX_COUNTER_TBL_ADDR         0x47000
++#define PPE_VPORT_TX_COUNTER_TBL_NUM          256
++#define PPE_VPORT_TX_COUNTER_TBL_INC          0x10
++
++/* Queue counter */
++#define PPE_QUEUE_TX_COUNTER_TBL_ADDR         0x4a000
++#define PPE_QUEUE_TX_COUNTER_TBL_NUM          300
++#define PPE_QUEUE_TX_COUNTER_TBL_INC          0x10
++
+ /* RSS configs contributes to the random RSS hash value generated, which
+  * is used to configure the queue offset.
+  */
+@@ -224,6 +261,47 @@
+ #define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value)              \
+       u32p_replace_bits((u32 *)tbl_cfg, value, PPE_L2_VP_PORT_W0_DST_INFO)
++/* Port RX and RX drop counter */
++#define PPE_PORT_RX_CNT_TBL_ADDR              0x150000
++#define PPE_PORT_RX_CNT_TBL_NUM                       256
++#define PPE_PORT_RX_CNT_TBL_INC                       0x20
++
++/* Physical port RX and RX drop counter */
++#define PPE_PHY_PORT_RX_CNT_TBL_ADDR          0x156000
++#define PPE_PHY_PORT_RX_CNT_TBL_NUM           8
++#define PPE_PHY_PORT_RX_CNT_TBL_INC           0x20
++
++/* Counter for packets to the CPU port */
++#define PPE_DROP_CPU_CNT_TBL_ADDR             0x160000
++#define PPE_DROP_CPU_CNT_TBL_NUM              1280
++#define PPE_DROP_CPU_CNT_TBL_INC              0x10
++
++/* VLAN counter */
++#define PPE_VLAN_CNT_TBL_ADDR                 0x178000
++#define PPE_VLAN_CNT_TBL_NUM                  64
++#define PPE_VLAN_CNT_TBL_INC                  0x10
++
++/* PPE L2 counter */
++#define PPE_PRE_L2_CNT_TBL_ADDR                       0x17c000
++#define PPE_PRE_L2_CNT_TBL_NUM                        64
++#define PPE_PRE_L2_CNT_TBL_INC                        0x20
++
++/* Port TX drop counter */
++#define PPE_PORT_TX_DROP_CNT_TBL_ADDR         0x17d000
++#define PPE_PORT_TX_DROP_CNT_TBL_NUM          8
++#define PPE_PORT_TX_DROP_CNT_TBL_INC          0x10
++
++/* Virtual port TX drop counter */
++#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR                0x17e000
++#define PPE_VPORT_TX_DROP_CNT_TBL_NUM         256
++#define PPE_VPORT_TX_DROP_CNT_TBL_INC         0x10
++
++#define PPE_TPR_PKT_CNT_ADDR                  0x1d0080
++
++#define PPE_IPR_PKT_CNT_ADDR                  0x1e0080
++#define PPE_IPR_PKT_CNT_NUM                   8
++#define PPE_IPR_PKT_CNT_INC                   4
++
+ #define PPE_TL_SERVICE_TBL_ADDR                       0x306000
+ #define PPE_TL_SERVICE_TBL_NUM                        256
+ #define PPE_TL_SERVICE_TBL_INC                        4
+@@ -325,6 +403,16 @@
+ #define PPE_BM_PORT_GROUP_ID_INC              0x4
+ #define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID  GENMASK(1, 0)
++#define PPE_BM_USED_CNT_ADDR                  0x6001c0
++#define PPE_BM_USED_CNT_NUM                   15
++#define PPE_BM_USED_CNT_INC                   0x4
++#define PPE_BM_USED_CNT_VAL                   GENMASK(10, 0)
++
++#define PPE_BM_REACT_CNT_ADDR                 0x600240
++#define PPE_BM_REACT_CNT_NUM                  15
++#define PPE_BM_REACT_CNT_INC                  0x4
++#define PPE_BM_REACT_CNT_VAL                  GENMASK(8, 0)
++
+ #define PPE_BM_SHARED_GROUP_CFG_ADDR          0x600290
+ #define PPE_BM_SHARED_GROUP_CFG_INC           0x4
+ #define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT  GENMASK(10, 0)
+@@ -442,6 +530,16 @@
+ #define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value)      \
+       u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_AC_GRP_W1_BUF_LIMIT)
++#define PPE_AC_UNI_QUEUE_CNT_TBL_ADDR         0x84e000
++#define PPE_AC_UNI_QUEUE_CNT_TBL_NUM          256
++#define PPE_AC_UNI_QUEUE_CNT_TBL_INC          0x10
++#define PPE_AC_UNI_QUEUE_CNT_TBL_PEND_CNT     GENMASK(12, 0)
++
++#define PPE_AC_MUL_QUEUE_CNT_TBL_ADDR         0x852000
++#define PPE_AC_MUL_QUEUE_CNT_TBL_NUM          44
++#define PPE_AC_MUL_QUEUE_CNT_TBL_INC          0x10
++#define PPE_AC_MUL_QUEUE_CNT_TBL_PEND_CNT     GENMASK(12, 0)
++
+ #define PPE_ENQ_OPR_TBL_ADDR                  0x85c000
+ #define PPE_ENQ_OPR_TBL_NUM                   300
+ #define PPE_ENQ_OPR_TBL_INC                   0x10
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-32-net-ethernet-qualcomm-Add-phylink-support-for-PPE-MA.patch b/target/linux/qualcommbe/patches-6.6/103-32-net-ethernet-qualcomm-Add-phylink-support-for-PPE-MA.patch
new file mode 100644 (file)
index 0000000..d89ae3a
--- /dev/null
@@ -0,0 +1,1058 @@
+From 028ed86f08a4fdf25213af5f5afd63b30fb7b029 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Thu, 29 Feb 2024 16:59:53 +0800
+Subject: [PATCH 32/50] net: ethernet: qualcomm: Add phylink support for PPE
+ MAC ports
+
+Add MAC initialization and phylink functions for PPE MAC ports.
+
+Change-Id: I39dcba671732392bcfa2e734473fd083989bfbec
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig        |   3 +
+ drivers/net/ethernet/qualcomm/ppe/Makefile   |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe.c      |   9 +
+ drivers/net/ethernet/qualcomm/ppe/ppe.h      |   2 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 728 +++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.h |  76 ++
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 123 ++++
+ 7 files changed, 942 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+
+diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
+index 8cc24da48777..a96f6acd4561 100644
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -66,6 +66,9 @@ config QCOM_PPE
+       depends on HAS_IOMEM && OF
+       depends on COMMON_CLK
+       select REGMAP_MMIO
++      select PHYLINK
++      select PCS_QCOM_IPQ_UNIPHY
++      select SFP
+       help
+         This driver supports the Qualcomm Technologies, Inc. packet
+         process engine (PPE) available with IPQ SoC. The PPE houses
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 227af2168224..76cdc423a8cc 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -4,4 +4,4 @@
+ #
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+-qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o
++qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+index 8cf6c1161c4b..bcf21c838e05 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -17,6 +17,7 @@
+ #include "ppe.h"
+ #include "ppe_config.h"
+ #include "ppe_debugfs.h"
++#include "ppe_port.h"
+ #define PPE_PORT_MAX          8
+ #define PPE_CLK_RATE          353000000
+@@ -207,6 +208,11 @@ static int qcom_ppe_probe(struct platform_device *pdev)
+       if (ret)
+               return dev_err_probe(dev, ret, "PPE HW config failed\n");
++      ret = ppe_port_mac_init(ppe_dev);
++      if (ret)
++              return dev_err_probe(dev, ret,
++                                   "PPE Port MAC initialization failed\n");
++
+       ppe_debugfs_setup(ppe_dev);
+       platform_set_drvdata(pdev, ppe_dev);
+@@ -219,6 +225,9 @@ static void qcom_ppe_remove(struct platform_device *pdev)
+       ppe_dev = platform_get_drvdata(pdev);
+       ppe_debugfs_teardown(ppe_dev);
++      ppe_port_mac_deinit(ppe_dev);
++
++      platform_set_drvdata(pdev, NULL);
+ }
+ static const struct of_device_id qcom_ppe_of_match[] = {
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+index a2a5d1901547..020d5df2c5e3 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
+@@ -20,6 +20,7 @@ struct dentry;
+  * @clk_rate: PPE clock rate.
+  * @num_ports: Number of PPE ports.
+  * @debugfs_root: PPE debug root entry.
++ * @ports: PPE MAC ports.
+  * @num_icc_paths: Number of interconnect paths.
+  * @icc_paths: Interconnect path array.
+  *
+@@ -33,6 +34,7 @@ struct ppe_device {
+       unsigned long clk_rate;
+       unsigned int num_ports;
+       struct dentry *debugfs_root;
++      struct ppe_ports *ports;
+       unsigned int num_icc_paths;
+       struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+ };
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+new file mode 100644
+index 000000000000..dcc13889089e
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+@@ -0,0 +1,728 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* PPE Port MAC initialization and PPE port MAC functions. */
++
++#include <linux/clk.h>
++#include <linux/of_net.h>
++#include <linux/pcs/pcs-qcom-ipq-uniphy.h>
++#include <linux/phylink.h>
++#include <linux/reset.h>
++#include <linux/regmap.h>
++#include <linux/rtnetlink.h>
++
++#include "ppe.h"
++#include "ppe_port.h"
++#include "ppe_regs.h"
++
++/* PPE MAC maximum frame size, which includes the 4-byte FCS */
++#define PPE_PORT_MAC_MAX_FRAME_SIZE           0x3000
++
++/* PPE BM port start for PPE MAC ports */
++#define PPE_BM_PORT_MAC_START                 7
++
++/* PPE port clock and reset name */
++static const char * const ppe_port_clk_rst_name[] = {
++      [PPE_PORT_CLK_RST_MAC] = "port_mac",
++      [PPE_PORT_CLK_RST_RX] = "port_rx",
++      [PPE_PORT_CLK_RST_TX] = "port_tx",
++};
++
++/* PPE port and MAC reset */
++static int ppe_port_mac_reset(struct ppe_port *ppe_port)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret;
++
++      ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_MAC]);
++      if (ret)
++              goto error;
++
++      ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_RX]);
++      if (ret)
++              goto error;
++
++      ret = reset_control_assert(ppe_port->rstcs[PPE_PORT_CLK_RST_TX]);
++      if (ret)
++              goto error;
++
++      /* 150ms delay is required by hardware to reset PPE port and MAC */
++      msleep(150);
++
++      ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_MAC]);
++      if (ret)
++              goto error;
++
++      ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_RX]);
++      if (ret)
++              goto error;
++
++      ret = reset_control_deassert(ppe_port->rstcs[PPE_PORT_CLK_RST_TX]);
++      if (ret)
++              goto error;
++
++      return ret;
++
++error:
++      dev_err(ppe_dev->dev, "%s: port %d reset fail %d\n",
++              __func__, ppe_port->port_id, ret);
++      return ret;
++}
++
++/* PPE port MAC configuration for phylink */
++static void ppe_port_mac_config(struct phylink_config *config,
++                              unsigned int mode,
++                              const struct phylink_link_state *state)
++{
++      struct ppe_port *ppe_port = container_of(config, struct ppe_port,
++                                               phylink_config);
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int port = ppe_port->port_id;
++      enum ppe_mac_type mac_type;
++      u32 val, mask;
++      int ret;
++
++      switch (state->interface) {
++      case PHY_INTERFACE_MODE_2500BASEX:
++      case PHY_INTERFACE_MODE_USXGMII:
++      case PHY_INTERFACE_MODE_10GBASER:
++      case PHY_INTERFACE_MODE_10G_QXGMII:
++              mac_type = PPE_MAC_TYPE_XGMAC;
++              break;
++      case PHY_INTERFACE_MODE_QSGMII:
++      case PHY_INTERFACE_MODE_PSGMII:
++      case PHY_INTERFACE_MODE_SGMII:
++      case PHY_INTERFACE_MODE_1000BASEX:
++              mac_type = PPE_MAC_TYPE_GMAC;
++              break;
++      default:
++              dev_err(ppe_dev->dev, "%s: Unsupported interface %s\n",
++                      __func__, phy_modes(state->interface));
++              return;
++      }
++
++      /* Reset Port MAC for GMAC */
++      if (mac_type == PPE_MAC_TYPE_GMAC) {
++              ret = ppe_port_mac_reset(ppe_port);
++              if (ret)
++                      goto err_mac_config;
++      }
++
++      /* Port mux to select GMAC or XGMAC */
++      mask = PPE_PORT_SEL_XGMAC(port);
++      val = mac_type == PPE_MAC_TYPE_GMAC ? 0 : mask;
++      ret = regmap_update_bits(ppe_dev->regmap,
++                               PPE_PORT_MUX_CTRL_ADDR,
++                               mask, val);
++      if (ret)
++              goto err_mac_config;
++
++      ppe_port->mac_type = mac_type;
++
++      return;
++
++err_mac_config:
++      dev_err(ppe_dev->dev, "%s: port %d MAC config fail %d\n",
++              __func__, port, ret);
++}
++
++/* PPE port GMAC link up configuration */
++static int ppe_port_gmac_link_up(struct ppe_port *ppe_port, int speed,
++                               int duplex, bool tx_pause, bool rx_pause)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 reg, val;
++
++      /* Set GMAC speed */
++      switch (speed) {
++      case SPEED_1000:
++              val = GMAC_SPEED_1000;
++              break;
++      case SPEED_100:
++              val = GMAC_SPEED_100;
++              break;
++      case SPEED_10:
++              val = GMAC_SPEED_10;
++              break;
++      default:
++              dev_err(ppe_dev->dev, "%s: Invalid GMAC speed %s\n",
++                      __func__, phy_speed_to_str(speed));
++              return -EINVAL;
++      }
++
++      reg = PPE_PORT_GMAC_ADDR(port);
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_SPEED_ADDR,
++                               GMAC_SPEED_M, val);
++      if (ret)
++              return ret;
++
++      /* Set duplex, flow control and enable GMAC */
++      val = GMAC_TRXEN;
++      if (duplex == DUPLEX_FULL)
++              val |= GMAC_DUPLEX_FULL;
++      if (tx_pause)
++              val |= GMAC_TXFCEN;
++      if (rx_pause)
++              val |= GMAC_RXFCEN;
++
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_ENABLE_ADDR,
++                               GMAC_ENABLE_ALL, val);
++
++      return ret;
++}
++
++/* PPE port XGMAC link up configuration */
++static int ppe_port_xgmac_link_up(struct ppe_port *ppe_port,
++                                phy_interface_t interface,
++                                int speed, int duplex,
++                                bool tx_pause, bool rx_pause)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 reg, val;
++
++      /* Set XGMAC TX speed and enable TX */
++      switch (speed) {
++      case SPEED_10000:
++              if (interface == PHY_INTERFACE_MODE_USXGMII)
++                      val = XGMAC_SPEED_10000_USXGMII;
++              else
++                      val = XGMAC_SPEED_10000;
++              break;
++      case SPEED_5000:
++              val = XGMAC_SPEED_5000;
++              break;
++      case SPEED_2500:
++              if (interface == PHY_INTERFACE_MODE_USXGMII ||
++                  interface == PHY_INTERFACE_MODE_10G_QXGMII)
++                      val = XGMAC_SPEED_2500_USXGMII;
++              else
++                      val = XGMAC_SPEED_2500;
++              break;
++      case SPEED_1000:
++              val = XGMAC_SPEED_1000;
++              break;
++      case SPEED_100:
++              val = XGMAC_SPEED_100;
++              break;
++      case SPEED_10:
++              val = XGMAC_SPEED_10;
++              break;
++      default:
++              dev_err(ppe_dev->dev, "%s: Invalid XGMAC speed %s\n",
++                      __func__, phy_speed_to_str(speed));
++              return -EINVAL;
++      }
++
++      reg = PPE_PORT_XGMAC_ADDR(port);
++      val |= XGMAC_TXEN;
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_CONFIG_ADDR,
++                               XGMAC_SPEED_M | XGMAC_TXEN, val);
++      if (ret)
++              return ret;
++
++      /* Set XGMAC TX flow control */
++      val = FIELD_PREP(XGMAC_PAUSE_TIME_M, FIELD_MAX(XGMAC_PAUSE_TIME_M));
++      val |= tx_pause ? XGMAC_TXFCEN : 0;
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_FLOW_CTRL_ADDR,
++                               XGMAC_PAUSE_TIME_M | XGMAC_TXFCEN, val);
++      if (ret)
++              return ret;
++
++      /* Set XGMAC RX flow control */
++      val = rx_pause ? XGMAC_RXFCEN : 0;
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_FLOW_CTRL_ADDR,
++                               XGMAC_RXFCEN, val);
++      if (ret)
++              return ret;
++
++      /* Enable XGMAC RX */
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_CONFIG_ADDR,
++                               XGMAC_RXEN, XGMAC_RXEN);
++
++      return ret;
++}
++
++/* PPE port MAC link up configuration for phylink */
++static void ppe_port_mac_link_up(struct phylink_config *config,
++                               struct phy_device *phy,
++                               unsigned int mode,
++                               phy_interface_t interface,
++                               int speed, int duplex,
++                               bool tx_pause, bool rx_pause)
++{
++      struct ppe_port *ppe_port = container_of(config, struct ppe_port,
++                                               phylink_config);
++      enum ppe_mac_type mac_type = ppe_port->mac_type;
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 reg, val;
++
++      if (mac_type == PPE_MAC_TYPE_GMAC)
++              ret = ppe_port_gmac_link_up(ppe_port,
++                                          speed, duplex, tx_pause, rx_pause);
++      else
++              ret = ppe_port_xgmac_link_up(ppe_port, interface,
++                                           speed, duplex, tx_pause, rx_pause);
++      if (ret)
++              goto err_port_mac_link_up;
++
++      /* Set PPE port BM flow control */
++      reg = PPE_BM_PORT_FC_MODE_ADDR +
++              PPE_BM_PORT_FC_MODE_INC * (port + PPE_BM_PORT_MAC_START);
++      val = tx_pause ? PPE_BM_PORT_FC_MODE_EN : 0;
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_BM_PORT_FC_MODE_EN, val);
++      if (ret)
++              goto err_port_mac_link_up;
++
++      /* Enable PPE port TX */
++      reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_PORT_BRIDGE_TXMAC_EN,
++                               PPE_PORT_BRIDGE_TXMAC_EN);
++      if (ret)
++              goto err_port_mac_link_up;
++
++      return;
++
++err_port_mac_link_up:
++      dev_err(ppe_dev->dev, "%s: port %d link up fail %d\n",
++              __func__, port, ret);
++}
++
++/* PPE port MAC link down configuration for phylink */
++static void ppe_port_mac_link_down(struct phylink_config *config,
++                                 unsigned int mode,
++                                 phy_interface_t interface)
++{
++      struct ppe_port *ppe_port = container_of(config, struct ppe_port,
++                                               phylink_config);
++      enum ppe_mac_type mac_type = ppe_port->mac_type;
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 reg;
++
++      /* Disable PPE port TX */
++      reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_PORT_BRIDGE_TXMAC_EN, 0);
++      if (ret)
++              goto err_port_mac_link_down;
++
++      /* Disable PPE MAC */
++      if (mac_type == PPE_MAC_TYPE_GMAC) {
++              reg = PPE_PORT_GMAC_ADDR(port) + GMAC_ENABLE_ADDR;
++              ret = regmap_update_bits(ppe_dev->regmap, reg, GMAC_TRXEN, 0);
++              if (ret)
++                      goto err_port_mac_link_down;
++      } else {
++              reg = PPE_PORT_XGMAC_ADDR(port);
++              ret = regmap_update_bits(ppe_dev->regmap,
++                                       reg + XGMAC_RX_CONFIG_ADDR,
++                                       XGMAC_RXEN, 0);
++              if (ret)
++                      goto err_port_mac_link_down;
++
++              ret = regmap_update_bits(ppe_dev->regmap,
++                                       reg + XGMAC_TX_CONFIG_ADDR,
++                                       XGMAC_TXEN, 0);
++              if (ret)
++                      goto err_port_mac_link_down;
++      }
++
++      return;
++
++err_port_mac_link_down:
++      dev_err(ppe_dev->dev, "%s: port %d link down fail %d\n",
++              __func__, port, ret);
++}
++
++/* PPE port MAC PCS selection for phylink */
++static
++struct phylink_pcs *ppe_port_mac_select_pcs(struct phylink_config *config,
++                                          phy_interface_t interface)
++{
++      struct ppe_port *ppe_port = container_of(config, struct ppe_port,
++                                               phylink_config);
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 val;
++
++      /* PPE port5 can connect to either PCS0 or PCS1. In PSGMII
++       * mode it selects PCS0; otherwise it selects PCS1.
++       */
++      if (port == 5) {
++              val = interface == PHY_INTERFACE_MODE_PSGMII ?
++                      0 : PPE_PORT5_SEL_PCS1;
++              ret = regmap_update_bits(ppe_dev->regmap,
++                                       PPE_PORT_MUX_CTRL_ADDR,
++                                       PPE_PORT5_SEL_PCS1, val);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "%s: port5 select PCS fail %d\n",
++                              __func__, ret);
++                      return NULL;
++              }
++      }
++
++      return ppe_port->pcs;
++}
++
++static const struct phylink_mac_ops ppe_phylink_ops = {
++      .mac_config = ppe_port_mac_config,
++      .mac_link_up = ppe_port_mac_link_up,
++      .mac_link_down = ppe_port_mac_link_down,
++      .mac_select_pcs = ppe_port_mac_select_pcs,
++};
++
++/**
++ * ppe_port_phylink_setup() - Set phylink instance for the given PPE port
++ * @ppe_port: PPE port
++ * @netdev: Netdevice
++ *
++ * Description: Wrapper function to help set up phylink for the PPE port
++ * specified by @ppe_port and associated with the net device @netdev.
++ *
++ * Return: 0 upon success or a negative error upon failure.
++ */
++int ppe_port_phylink_setup(struct ppe_port *ppe_port, struct net_device *netdev)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      struct device_node *pcs_node;
++      int ret;
++
++      /* Create PCS */
++      pcs_node = of_parse_phandle(ppe_port->np, "pcs-handle", 0);
++      if (!pcs_node)
++              return -ENODEV;
++
++      ppe_port->pcs = ipq_unipcs_create(pcs_node);
++      of_node_put(pcs_node);
++      if (IS_ERR(ppe_port->pcs)) {
++              dev_err(ppe_dev->dev, "%s: port %d failed to create PCS\n",
++                      __func__, ppe_port->port_id);
++              return PTR_ERR(ppe_port->pcs);
++      }
++
++      /* Port phylink capability */
++      ppe_port->phylink_config.dev = &netdev->dev;
++      ppe_port->phylink_config.type = PHYLINK_NETDEV;
++      ppe_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
++              MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 |
++              MAC_2500FD | MAC_5000FD | MAC_10000FD;
++      __set_bit(PHY_INTERFACE_MODE_QSGMII,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_PSGMII,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_SGMII,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_1000BASEX,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_2500BASEX,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_USXGMII,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_10GBASER,
++                ppe_port->phylink_config.supported_interfaces);
++      __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
++                ppe_port->phylink_config.supported_interfaces);
++
++      /* Create phylink */
++      ppe_port->phylink = phylink_create(&ppe_port->phylink_config,
++                                         of_fwnode_handle(ppe_port->np),
++                                         ppe_port->interface,
++                                         &ppe_phylink_ops);
++      if (IS_ERR(ppe_port->phylink)) {
++              dev_err(ppe_dev->dev, "%s: port %d failed to create phylink\n",
++                      __func__, ppe_port->port_id);
++              ret = PTR_ERR(ppe_port->phylink);
++              goto err_free_pcs;
++      }
++
++      /* Connect phylink */
++      ret = phylink_of_phy_connect(ppe_port->phylink, ppe_port->np, 0);
++      if (ret) {
++              dev_err(ppe_dev->dev, "%s: port %d failed to connect phylink\n",
++                      __func__, ppe_port->port_id);
++              goto err_free_phylink;
++      }
++
++      return 0;
++
++err_free_phylink:
++      phylink_destroy(ppe_port->phylink);
++      ppe_port->phylink = NULL;
++err_free_pcs:
++      ipq_unipcs_destroy(ppe_port->pcs);
++      ppe_port->pcs = NULL;
++      return ret;
++}
++
++/**
++ * ppe_port_phylink_destroy() - Destroy phylink instance for the given PPE port
++ * @ppe_port: PPE port
++ *
++ * Description: Wrapper function to help destroy phylink for the PPE port
++ * specified by @ppe_port.
++ */
++void ppe_port_phylink_destroy(struct ppe_port *ppe_port)
++{
++      /* Destroy phylink */
++      if (ppe_port->phylink) {
++              rtnl_lock();
++              phylink_disconnect_phy(ppe_port->phylink);
++              rtnl_unlock();
++              phylink_destroy(ppe_port->phylink);
++              ppe_port->phylink = NULL;
++      }
++
++      /* Destroy PCS */
++      if (ppe_port->pcs) {
++              ipq_unipcs_destroy(ppe_port->pcs);
++              ppe_port->pcs = NULL;
++      }
++}
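++
++/* Expected call pattern (a sketch; the consuming netdev driver is not
++ * part of this patch): ppe_port_phylink_setup() when the net device is
++ * created, phylink_start()/phylink_stop() from the usual ndo_open and
++ * ndo_stop paths, and ppe_port_phylink_destroy() on teardown.
++ */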
++
++/* PPE port clock initialization */
++static int ppe_port_clock_init(struct ppe_port *ppe_port)
++{
++      struct device_node *port_node = ppe_port->np;
++      struct reset_control *rstc;
++      struct clk *clk;
++      int i, j, ret;
++
++      for (i = 0; i < PPE_PORT_CLK_RST_MAX; i++) {
++              /* Get PPE port resets which will be used to reset PPE
++               * port and MAC.
++               */
++              rstc = of_reset_control_get_exclusive(port_node,
++                                                    ppe_port_clk_rst_name[i]);
++              if (IS_ERR(rstc)) {
++                      ret = PTR_ERR(rstc);
++                      goto err_rst;
++              }
++
++              clk = of_clk_get_by_name(port_node, ppe_port_clk_rst_name[i]);
++              if (IS_ERR(clk)) {
++                      ret = PTR_ERR(clk);
++                      goto err_clk_get;
++              }
++
++              ret = clk_prepare_enable(clk);
++              if (ret)
++                      goto err_clk_en;
++
++              ppe_port->clks[i] = clk;
++              ppe_port->rstcs[i] = rstc;
++      }
++
++      return 0;
++
++err_clk_en:
++      clk_put(clk);
++err_clk_get:
++      reset_control_put(rstc);
++err_rst:
++      for (j = 0; j < i; j++) {
++              clk_disable_unprepare(ppe_port->clks[j]);
++              clk_put(ppe_port->clks[j]);
++              reset_control_put(ppe_port->rstcs[j]);
++      }
++
++      return ret;
++}
++
++/* PPE port clock deinitialization */
++static void ppe_port_clock_deinit(struct ppe_port *ppe_port)
++{
++      int i;
++
++      for (i = 0; i < PPE_PORT_CLK_RST_MAX; i++) {
++              clk_disable_unprepare(ppe_port->clks[i]);
++              clk_put(ppe_port->clks[i]);
++              reset_control_put(ppe_port->rstcs[i]);
++      }
++}
++
++/* PPE port MAC hardware init configuration */
++static int ppe_port_mac_hw_init(struct ppe_port *ppe_port)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int ret, port = ppe_port->port_id;
++      u32 reg, val;
++
++      /* GMAC RX and TX are initialized as disabled */
++      reg = PPE_PORT_GMAC_ADDR(port);
++      ret = regmap_update_bits(ppe_dev->regmap,
++                               reg + GMAC_ENABLE_ADDR, GMAC_TRXEN, 0);
++      if (ret)
++              return ret;
++
++      /* GMAC max frame size configuration */
++      val = FIELD_PREP(GMAC_JUMBO_SIZE_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_JUMBO_SIZE_ADDR,
++                               GMAC_JUMBO_SIZE_M, val);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(GMAC_MAXFRAME_SIZE_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
++      val |= FIELD_PREP(GMAC_TX_THD_M, 0x1);
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_CTRL_ADDR,
++                               GMAC_CTRL_MASK, val);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(GMAC_HIGH_IPG_M, 0xc);
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_DBG_CTRL_ADDR,
++                               GMAC_HIGH_IPG_M, val);
++      if (ret)
++              return ret;
++
++      /* Enable and reset GMAC MIB counters and set as read clear
++       * mode, the GMAC MIB counters will be cleared after reading.
++       */
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_MIB_CTRL_ADDR,
++                               GMAC_MIB_CTRL_MASK, GMAC_MIB_CTRL_MASK);
++      if (ret)
++              return ret;
++
++      ret = regmap_update_bits(ppe_dev->regmap, reg + GMAC_MIB_CTRL_ADDR,
++                               GMAC_MIB_RST, 0);
++      if (ret)
++              return ret;
++
++      /* XGMAC RX and TX are initialized as disabled; set max frame size */
++      reg = PPE_PORT_XGMAC_ADDR(port);
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_TX_CONFIG_ADDR,
++                               XGMAC_TXEN | XGMAC_JD, XGMAC_JD);
++      if (ret)
++              return ret;
++
++      val = FIELD_PREP(XGMAC_GPSL_M, PPE_PORT_MAC_MAX_FRAME_SIZE);
++      val |= XGMAC_GPSLEN;
++      val |= XGMAC_CST;
++      val |= XGMAC_ACS;
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_RX_CONFIG_ADDR,
++                               XGMAC_RX_CONFIG_MASK, val);
++      if (ret)
++              return ret;
++
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_WD_TIMEOUT_ADDR,
++                               XGMAC_WD_TIMEOUT_MASK, XGMAC_WD_TIMEOUT_VAL);
++      if (ret)
++              return ret;
++
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_PKT_FILTER_ADDR,
++                               XGMAC_PKT_FILTER_MASK, XGMAC_PKT_FILTER_VAL);
++      if (ret)
++              return ret;
++
++      /* Enable and reset XGMAC MIB counters */
++      ret = regmap_update_bits(ppe_dev->regmap, reg + XGMAC_MMC_CTRL_ADDR,
++                               XGMAC_MCF | XGMAC_CNTRST, XGMAC_CNTRST);
++
++      return ret;
++}
++
++/**
++ * ppe_port_mac_init() - Initialization of PPE ports for the PPE device
++ * @ppe_dev: PPE device
++ *
++ * Description: Initialize the PPE MAC ports on the PPE device specified
++ * by @ppe_dev.
++ *
++ * Return: 0 upon success or a negative error upon failure.
++ */
++int ppe_port_mac_init(struct ppe_device *ppe_dev)
++{
++      struct device_node *ports_node, *port_node;
++      int num, ret, j, i = 0;
++      u32 port;
++      struct ppe_ports *ppe_ports;
++      phy_interface_t phy_mode;
++
++      ports_node = of_get_child_by_name(ppe_dev->dev->of_node,
++                                        "ethernet-ports");
++      if (!ports_node) {
++              dev_err(ppe_dev->dev, "Failed to get ports node\n");
++              return -ENODEV;
++      }
++
++      num = of_get_available_child_count(ports_node);
++
++      ppe_ports = devm_kzalloc(ppe_dev->dev,
++                               struct_size(ppe_ports, port, num),
++                               GFP_KERNEL);
++      if (!ppe_ports) {
++              ret = -ENOMEM;
++              goto err_ports_node;
++      }
++
++      ppe_dev->ports = ppe_ports;
++      ppe_ports->num = num;
++
++      for_each_available_child_of_node(ports_node, port_node) {
++              ret = of_property_read_u32(port_node, "reg", &port);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "Failed to get port id\n");
++                      goto err_port_node;
++              }
++
++              ret = of_get_phy_mode(port_node, &phy_mode);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "Failed to get phy mode\n");
++                      goto err_port_node;
++              }
++
++              ppe_ports->port[i].ppe_dev = ppe_dev;
++              ppe_ports->port[i].port_id = port;
++              ppe_ports->port[i].np = port_node;
++              ppe_ports->port[i].interface = phy_mode;
++
++              ret = ppe_port_clock_init(&ppe_ports->port[i]);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "Failed to initialize port clocks\n");
++                      goto err_port_clk;
++              }
++
++              ret = ppe_port_mac_hw_init(&ppe_ports->port[i]);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "Failed to initialize MAC hardware\n");
++                      /* Also release this port's own clocks */
++                      ppe_port_clock_deinit(&ppe_ports->port[i]);
++                      goto err_port_clk;
++              }
++
++              i++;
++      }
++
++      of_node_put(ports_node);
++      return 0;
++
++err_port_clk:
++      for (j = 0; j < i; j++)
++              ppe_port_clock_deinit(&ppe_ports->port[j]);
++err_port_node:
++      of_node_put(port_node);
++err_ports_node:
++      of_node_put(ports_node);
++      return ret;
++}
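++
++/* Illustrative device tree fragment matching what the port parsing code
++ * in this file expects (a sketch only; node names, the PCS phandle and
++ * the clock/reset providers are examples, not a binding definition):
++ *
++ *   ethernet-ports {
++ *           port@1 {
++ *                   reg = <1>;
++ *                   phy-mode = "sgmii";
++ *                   pcs-handle = <&pcs_uniphy0>;
++ *                   clock-names = "port_mac", "port_rx", "port_tx";
++ *                   reset-names = "port_mac", "port_rx", "port_tx";
++ *           };
++ *   };
++ */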
++
++/**
++ * ppe_port_mac_deinit() - Deinitialization of PPE ports for the PPE device
++ * @ppe_dev: PPE device
++ *
++ * Description: Deinitialize the PPE MAC ports on the PPE device specified
++ * by @ppe_dev.
++ */
++void ppe_port_mac_deinit(struct ppe_device *ppe_dev)
++{
++      struct ppe_port *ppe_port;
++      int i;
++
++      for (i = 0; i < ppe_dev->ports->num; i++) {
++              ppe_port = &ppe_dev->ports->port[i];
++              ppe_port_clock_deinit(ppe_port);
++      }
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+new file mode 100644
+index 000000000000..194f65815011
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+@@ -0,0 +1,76 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ *
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __PPE_PORT_H__
++#define __PPE_PORT_H__
++
++#include <linux/phylink.h>
++
++/**
++ * enum ppe_port_clk_rst_type - PPE port clock and reset ID type
++ * @PPE_PORT_CLK_RST_MAC: The clock and reset ID for port MAC
++ * @PPE_PORT_CLK_RST_RX: The clock and reset ID for port receive path
++ * @PPE_PORT_CLK_RST_TX: The clock and reset ID for port transmit path
++ * @PPE_PORT_CLK_RST_MAX: The number of port clock and reset IDs
++ */
++enum ppe_port_clk_rst_type {
++      PPE_PORT_CLK_RST_MAC,
++      PPE_PORT_CLK_RST_RX,
++      PPE_PORT_CLK_RST_TX,
++      PPE_PORT_CLK_RST_MAX,
++};
++
++/**
++ * enum ppe_mac_type - PPE MAC type
++ * @PPE_MAC_TYPE_GMAC: GMAC type
++ * @PPE_MAC_TYPE_XGMAC: XGMAC type
++ */
++enum ppe_mac_type {
++      PPE_MAC_TYPE_GMAC,
++      PPE_MAC_TYPE_XGMAC,
++};
++
++/**
++ * struct ppe_port - Private data for each PPE port
++ * @phylink: Linux phylink instance
++ * @phylink_config: Linux phylink configurations
++ * @pcs: Linux phylink PCS instance
++ * @np: Port device tree node
++ * @ppe_dev: Back pointer to PPE device private data
++ * @interface: Port interface mode
++ * @mac_type: Port MAC type, GMAC or XGMAC
++ * @port_id: Port ID
++ * @clks: Port clocks
++ * @rstcs: Port resets
++ */
++struct ppe_port {
++      struct phylink *phylink;
++      struct phylink_config phylink_config;
++      struct phylink_pcs *pcs;
++      struct device_node *np;
++      struct ppe_device *ppe_dev;
++      phy_interface_t interface;
++      enum ppe_mac_type mac_type;
++      int port_id;
++      struct clk *clks[PPE_PORT_CLK_RST_MAX];
++      struct reset_control *rstcs[PPE_PORT_CLK_RST_MAX];
++};
++
++/**
++ * struct ppe_ports - Array of PPE ports
++ * @num: Number of PPE ports
++ * @port: Each PPE port private data
++ */
++struct ppe_ports {
++      unsigned int num;
++      struct ppe_port port[] __counted_by(num);
++};
++
++int ppe_port_mac_init(struct ppe_device *ppe_dev);
++void ppe_port_mac_deinit(struct ppe_device *ppe_dev);
++int ppe_port_phylink_setup(struct ppe_port *ppe_port,
++                         struct net_device *netdev);
++void ppe_port_phylink_destroy(struct ppe_port *ppe_port);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index e84633d0f572..34b659ac0c37 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -7,6 +7,17 @@
+ #ifndef __PPE_REGS_H__
+ #define __PPE_REGS_H__
++/* PPE port mux select control register */
++#define PPE_PORT_MUX_CTRL_ADDR                        0x10
++#define PPE_PORT6_SEL_XGMAC                   BIT(13)
++#define PPE_PORT5_SEL_XGMAC                   BIT(12)
++#define PPE_PORT4_SEL_XGMAC                   BIT(11)
++#define PPE_PORT3_SEL_XGMAC                   BIT(10)
++#define PPE_PORT2_SEL_XGMAC                   BIT(9)
++#define PPE_PORT1_SEL_XGMAC                   BIT(8)
++#define PPE_PORT5_SEL_PCS1                    BIT(4)
++#define PPE_PORT_SEL_XGMAC(x)                 (BIT(8) << ((x) - 1))
++
+ /* There are 15 BM ports and 4 BM groups supported by PPE,
+  * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
+  * to PPE physical port 1-6, BM port 14 is matched to EIP.
+@@ -545,4 +556,116 @@
+ #define PPE_ENQ_OPR_TBL_INC                   0x10
+ #define PPE_ENQ_OPR_TBL_ENQ_DISABLE           BIT(0)
++/* PPE GMAC and XGMAC register base address */
++#define PPE_PORT_GMAC_ADDR(x)                 (0x001000 + ((x) - 1) * 0x200)
++#define PPE_PORT_XGMAC_ADDR(x)                        (0x500000 + ((x) - 1) * 0x4000)
++
++/* GMAC enable register */
++#define GMAC_ENABLE_ADDR                      0x0
++#define GMAC_TXFCEN                           BIT(6)
++#define GMAC_RXFCEN                           BIT(5)
++#define GMAC_DUPLEX_FULL                      BIT(4)
++#define GMAC_TXEN                             BIT(1)
++#define GMAC_RXEN                             BIT(0)
++
++#define GMAC_TRXEN                            \
++      (GMAC_TXEN | GMAC_RXEN)
++#define GMAC_ENABLE_ALL                               \
++      (GMAC_TXFCEN | GMAC_RXFCEN | GMAC_DUPLEX_FULL | GMAC_TXEN | GMAC_RXEN)
++
++/* GMAC speed register */
++#define GMAC_SPEED_ADDR                               0x4
++#define GMAC_SPEED_M                          GENMASK(1, 0)
++#define GMAC_SPEED_10                         0
++#define GMAC_SPEED_100                                1
++#define GMAC_SPEED_1000                               2
++
++/* GMAC control register */
++#define GMAC_CTRL_ADDR                                0x18
++#define GMAC_TX_THD_M                         GENMASK(27, 24)
++#define GMAC_MAXFRAME_SIZE_M                  GENMASK(21, 8)
++#define GMAC_CRS_SEL                          BIT(6)
++
++#define GMAC_CTRL_MASK                                \
++      (GMAC_TX_THD_M | GMAC_MAXFRAME_SIZE_M | GMAC_CRS_SEL)
++
++/* GMAC debug control register */
++#define GMAC_DBG_CTRL_ADDR                    0x1c
++#define GMAC_HIGH_IPG_M                               GENMASK(15, 8)
++
++/* GMAC jumbo size register */
++#define GMAC_JUMBO_SIZE_ADDR                  0x30
++#define GMAC_JUMBO_SIZE_M                     GENMASK(13, 0)
++
++/* GMAC MIB control register */
++#define GMAC_MIB_CTRL_ADDR                    0x34
++#define GMAC_MIB_RD_CLR                               BIT(2)
++#define GMAC_MIB_RST                          BIT(1)
++#define GMAC_MIB_EN                           BIT(0)
++
++#define GMAC_MIB_CTRL_MASK                    \
++      (GMAC_MIB_RD_CLR | GMAC_MIB_RST | GMAC_MIB_EN)
++
++/* XGMAC TX configuration register */
++#define XGMAC_TX_CONFIG_ADDR                  0x0
++#define XGMAC_SPEED_M                         GENMASK(31, 29)
++#define XGMAC_SPEED_10000_USXGMII             FIELD_PREP(XGMAC_SPEED_M, 4)
++#define XGMAC_SPEED_10000                     FIELD_PREP(XGMAC_SPEED_M, 0)
++#define XGMAC_SPEED_5000                      FIELD_PREP(XGMAC_SPEED_M, 5)
++#define XGMAC_SPEED_2500_USXGMII              FIELD_PREP(XGMAC_SPEED_M, 6)
++#define XGMAC_SPEED_2500                      FIELD_PREP(XGMAC_SPEED_M, 2)
++#define XGMAC_SPEED_1000                      FIELD_PREP(XGMAC_SPEED_M, 3)
++#define XGMAC_SPEED_100                               XGMAC_SPEED_1000
++#define XGMAC_SPEED_10                                XGMAC_SPEED_1000
++#define XGMAC_JD                              BIT(16)
++#define XGMAC_TXEN                            BIT(0)
++
++/* XGMAC RX configuration register */
++#define XGMAC_RX_CONFIG_ADDR                  0x4
++#define XGMAC_GPSL_M                          GENMASK(29, 16)
++#define XGMAC_WD                              BIT(7)
++#define XGMAC_GPSLEN                          BIT(6)
++#define XGMAC_CST                             BIT(2)
++#define XGMAC_ACS                             BIT(1)
++#define XGMAC_RXEN                            BIT(0)
++
++#define XGMAC_RX_CONFIG_MASK                  \
++      (XGMAC_GPSL_M | XGMAC_WD | XGMAC_GPSLEN | XGMAC_CST | \
++       XGMAC_ACS | XGMAC_RXEN)
++
++/* XGMAC packet filter register */
++#define XGMAC_PKT_FILTER_ADDR                 0x8
++#define XGMAC_RA                              BIT(31)
++#define XGMAC_PCF_M                           GENMASK(7, 6)
++#define XGMAC_PR                              BIT(0)
++
++#define XGMAC_PKT_FILTER_MASK                 \
++      (XGMAC_RA | XGMAC_PCF_M | XGMAC_PR)
++#define XGMAC_PKT_FILTER_VAL                  \
++      (XGMAC_RA | XGMAC_PR | FIELD_PREP(XGMAC_PCF_M, 0x2))
++
++/* XGMAC watchdog timeout register */
++#define XGMAC_WD_TIMEOUT_ADDR                 0xc
++#define XGMAC_PWE                             BIT(8)
++#define XGMAC_WTO_M                           GENMASK(3, 0)
++
++#define XGMAC_WD_TIMEOUT_MASK                 \
++      (XGMAC_PWE | XGMAC_WTO_M)
++#define XGMAC_WD_TIMEOUT_VAL                  \
++      (XGMAC_PWE | FIELD_PREP(XGMAC_WTO_M, 0xb))
++
++/* XGMAC TX flow control register */
++#define XGMAC_TX_FLOW_CTRL_ADDR                       0x70
++#define XGMAC_PAUSE_TIME_M                    GENMASK(31, 16)
++#define XGMAC_TXFCEN                          BIT(1)
++
++/* XGMAC RX flow control register */
++#define XGMAC_RX_FLOW_CTRL_ADDR                       0x90
++#define XGMAC_RXFCEN                          BIT(0)
++
++/* XGMAC management counters control register */
++#define XGMAC_MMC_CTRL_ADDR                   0x800
++#define XGMAC_MCF                             BIT(3)
++#define XGMAC_CNTRST                          BIT(0)
++
+ #endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-33-net-ethernet-qualcomm-Add-PPE-port-MAC-MIB-statistic.patch b/target/linux/qualcommbe/patches-6.6/103-33-net-ethernet-qualcomm-Add-PPE-port-MAC-MIB-statistic.patch
new file mode 100644 (file)
index 0000000..e3a57a9
--- /dev/null
@@ -0,0 +1,682 @@
+From 3e8cb061bff0bf74503cd2f206ed5c599a1e7ff7 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Thu, 29 Feb 2024 20:16:14 +0800
+Subject: [PATCH 33/50] net: ethernet: qualcomm: Add PPE port MAC MIB
+ statistics functions
+
+Add PPE port MAC MIB statistics functions which are used by netdev
+ops and ethtool. For GMAC, a polling task is scheduled to read the
+MIB counters periodically so that the 32-bit hardware counters do not
+overflow between reads.
+
+Change-Id: Ic20e240061278f77d703f652e1f7d959db8fac37
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 465 +++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.h |  13 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h |  91 ++++
+ 3 files changed, 569 insertions(+)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+index dcc13889089e..284ee14b8d03 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+@@ -23,6 +23,122 @@
+ /* PPE BM port start for PPE MAC ports */
+ #define PPE_BM_PORT_MAC_START                 7
++/* Interval at which the GMAC MIBs are polled for overflow protection.
++ * The interval must be short enough that the 32-bit GMAC packet
++ * counter registers cannot overflow within it, even at line rate
++ * with 64B packets.
++ */
++#define PPE_GMIB_POLL_INTERVAL_MS             120000
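++
++/* Rough sanity check for the interval above (a sketch, assuming the
++ * 10 Gbps worst case): a 64B frame plus 8B preamble and 12B inter-frame
++ * gap occupies 84B on the wire, i.e. about 14.9M packets/s, so a 32-bit
++ * counter wraps only after roughly 2^32 / 14.9e6 ~ 288 seconds, well
++ * above the 120 second interval used here.
++ */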
++
++#define PPE_MAC_MIB_DESC(_s, _o, _n)          \
++      {                                       \
++              .size = (_s),                   \
++              .offset = (_o),                 \
++              .name = (_n),                   \
++      }
++
++/* PPE MAC MIB description */
++struct ppe_mac_mib_info {
++      u32 size;
++      u32 offset;
++      const char *name;
++};
++
++/* PPE GMAC MIB statistics type */
++enum ppe_gmib_stats_type {
++      gmib_rx_broadcast,
++      gmib_rx_pause,
++      gmib_rx_multicast,
++      gmib_rx_fcserr,
++      gmib_rx_alignerr,
++      gmib_rx_runt,
++      gmib_rx_frag,
++      gmib_rx_jumbofcserr,
++      gmib_rx_jumboalignerr,
++      gmib_rx_pkt64,
++      gmib_rx_pkt65to127,
++      gmib_rx_pkt128to255,
++      gmib_rx_pkt256to511,
++      gmib_rx_pkt512to1023,
++      gmib_rx_pkt1024to1518,
++      gmib_rx_pkt1519tomax,
++      gmib_rx_toolong,
++      gmib_rx_bytes_g,
++      gmib_rx_bytes_b,
++      gmib_rx_unicast,
++      gmib_tx_broadcast,
++      gmib_tx_pause,
++      gmib_tx_multicast,
++      gmib_tx_underrun,
++      gmib_tx_pkt64,
++      gmib_tx_pkt65to127,
++      gmib_tx_pkt128to255,
++      gmib_tx_pkt256to511,
++      gmib_tx_pkt512to1023,
++      gmib_tx_pkt1024to1518,
++      gmib_tx_pkt1519tomax,
++      gmib_tx_bytes,
++      gmib_tx_collisions,
++      gmib_tx_abortcol,
++      gmib_tx_multicol,
++      gmib_tx_singlecol,
++      gmib_tx_excdeffer,
++      gmib_tx_deffer,
++      gmib_tx_latecol,
++      gmib_tx_unicast,
++};
++
++/* PPE XGMAC MIB statistics type */
++enum ppe_xgmib_stats_type {
++      xgmib_tx_bytes,
++      xgmib_tx_frames,
++      xgmib_tx_broadcast_g,
++      xgmib_tx_multicast_g,
++      xgmib_tx_pkt64,
++      xgmib_tx_pkt65to127,
++      xgmib_tx_pkt128to255,
++      xgmib_tx_pkt256to511,
++      xgmib_tx_pkt512to1023,
++      xgmib_tx_pkt1024tomax,
++      xgmib_tx_unicast,
++      xgmib_tx_multicast,
++      xgmib_tx_broadcast,
++      xgmib_tx_underflow_err,
++      xgmib_tx_bytes_g,
++      xgmib_tx_frames_g,
++      xgmib_tx_pause,
++      xgmib_tx_vlan_g,
++      xgmib_tx_lpi_usec,
++      xgmib_tx_lpi_tran,
++      xgmib_rx_frames,
++      xgmib_rx_bytes,
++      xgmib_rx_bytes_g,
++      xgmib_rx_broadcast_g,
++      xgmib_rx_multicast_g,
++      xgmib_rx_crc_err,
++      xgmib_rx_runt_err,
++      xgmib_rx_jabber_err,
++      xgmib_rx_undersize_g,
++      xgmib_rx_oversize_g,
++      xgmib_rx_pkt64,
++      xgmib_rx_pkt65to127,
++      xgmib_rx_pkt128to255,
++      xgmib_rx_pkt256to511,
++      xgmib_rx_pkt512to1023,
++      xgmib_rx_pkt1024tomax,
++      xgmib_rx_unicast_g,
++      xgmib_rx_len_err,
++      xgmib_rx_outofrange_err,
++      xgmib_rx_pause,
++      xgmib_rx_fifo_overflow,
++      xgmib_rx_vlan,
++      xgmib_rx_wdog_err,
++      xgmib_rx_lpi_usec,
++      xgmib_rx_lpi_tran,
++      xgmib_rx_drop_frames,
++      xgmib_rx_drop_bytes,
++};
++
+ /* PPE port clock and reset name */
+ static const char * const ppe_port_clk_rst_name[] = {
+       [PPE_PORT_CLK_RST_MAC] = "port_mac",
+@@ -30,6 +146,322 @@ static const char * const ppe_port_clk_rst_name[] = {
+       [PPE_PORT_CLK_RST_TX] = "port_tx",
+ };
++/* PPE GMAC MIB statistics description information */
++static const struct ppe_mac_mib_info gmib_info[] = {
++      PPE_MAC_MIB_DESC(4, GMAC_RXBROAD_ADDR, "rx_broadcast"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPAUSE_ADDR, "rx_pause"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXMULTI_ADDR, "rx_multicast"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXFCSERR_ADDR, "rx_fcserr"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXALIGNERR_ADDR, "rx_alignerr"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXRUNT_ADDR, "rx_runt"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXFRAG_ADDR, "rx_frag"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOFCSERR_ADDR, "rx_jumbofcserr"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXJUMBOALIGNERR_ADDR, "rx_jumboalignerr"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT64_ADDR, "rx_pkt64"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT65TO127_ADDR, "rx_pkt65to127"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT128TO255_ADDR, "rx_pkt128to255"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT256TO511_ADDR, "rx_pkt256to511"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT512TO1023_ADDR, "rx_pkt512to1023"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT1024TO1518_ADDR, "rx_pkt1024to1518"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXPKT1519TOX_ADDR, "rx_pkt1519tomax"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXTOOLONG_ADDR, "rx_toolong"),
++      PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
++      PPE_MAC_MIB_DESC(8, GMAC_RXBYTE_B_ADDR, "rx_bytes_b"),
++      PPE_MAC_MIB_DESC(4, GMAC_RXUNI_ADDR, "rx_unicast"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXBROAD_ADDR, "tx_broadcast"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPAUSE_ADDR, "tx_pause"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXMULTI_ADDR, "tx_multicast"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXUNDERRUN_ADDR, "tx_underrun"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT64_ADDR, "tx_pkt64"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT65TO127_ADDR, "tx_pkt65to127"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT128TO255_ADDR, "tx_pkt128to255"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT256TO511_ADDR, "tx_pkt256to511"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT512TO1023_ADDR, "tx_pkt512to1023"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT1024TO1518_ADDR, "tx_pkt1024to1518"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXPKT1519TOX_ADDR, "tx_pkt1519tomax"),
++      PPE_MAC_MIB_DESC(8, GMAC_TXBYTE_ADDR, "tx_bytes"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXCOLLISIONS_ADDR, "tx_collisions"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXABORTCOL_ADDR, "tx_abortcol"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXMULTICOL_ADDR, "tx_multicol"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXSINGLECOL_ADDR, "tx_singlecol"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXEXCESSIVEDEFER_ADDR, "tx_excdeffer"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXDEFER_ADDR, "tx_deffer"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXLATECOL_ADDR, "tx_latecol"),
++      PPE_MAC_MIB_DESC(4, GMAC_TXUNI_ADDR, "tx_unicast"),
++};
++
++/* PPE XGMAC MIB statistics description information */
++static const struct ppe_mac_mib_info xgmib_info[] = {
++      PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_GB_ADDR, "tx_bytes"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_GB_ADDR, "tx_frames"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_G_ADDR, "tx_broadcast_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_G_ADDR, "tx_multicast_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT64_GB_ADDR, "tx_pkt64"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT65TO127_GB_ADDR, "tx_pkt65to127"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT128TO255_GB_ADDR, "tx_pkt128to255"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT256TO511_GB_ADDR, "tx_pkt256to511"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT512TO1023_GB_ADDR, "tx_pkt512to1023"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT1024TOMAX_GB_ADDR, "tx_pkt1024tomax"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXUNI_GB_ADDR, "tx_unicast"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXMULTI_GB_ADDR, "tx_multicast"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXBROAD_GB_ADDR, "tx_broadcast"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXUNDERFLOW_ERR_ADDR, "tx_underflow_err"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXBYTE_G_ADDR, "tx_bytes_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPKT_G_ADDR, "tx_frames_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXPAUSE_ADDR, "tx_pause"),
++      PPE_MAC_MIB_DESC(8, XGMAC_TXVLAN_G_ADDR, "tx_vlan_g"),
++      PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_USEC_ADDR, "tx_lpi_usec"),
++      PPE_MAC_MIB_DESC(4, XGMAC_TXLPI_TRAN_ADDR, "tx_lpi_tran"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT_GB_ADDR, "rx_frames"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_GB_ADDR, "rx_bytes"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXBYTE_G_ADDR, "rx_bytes_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXBROAD_G_ADDR, "rx_broadcast_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXMULTI_G_ADDR, "rx_multicast_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXCRC_ERR_ADDR, "rx_crc_err"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXRUNT_ERR_ADDR, "rx_runt_err"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXJABBER_ERR_ADDR, "rx_jabber_err"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXUNDERSIZE_G_ADDR, "rx_undersize_g"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXOVERSIZE_G_ADDR, "rx_oversize_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT64_GB_ADDR, "rx_pkt64"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT65TO127_GB_ADDR, "rx_pkt65to127"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT128TO255_GB_ADDR, "rx_pkt128to255"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT256TO511_GB_ADDR, "rx_pkt256to511"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT512TO1023_GB_ADDR, "rx_pkt512to1023"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPKT1024TOMAX_GB_ADDR, "rx_pkt1024tomax"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXUNI_G_ADDR, "rx_unicast_g"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXLEN_ERR_ADDR, "rx_len_err"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXOUTOFRANGE_ADDR, "rx_outofrange_err"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXPAUSE_ADDR, "rx_pause"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXFIFOOVERFLOW_ADDR, "rx_fifo_overflow"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXVLAN_GB_ADDR, "rx_vlan"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXWATCHDOG_ERR_ADDR, "rx_wdog_err"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_USEC_ADDR, "rx_lpi_usec"),
++      PPE_MAC_MIB_DESC(4, XGMAC_RXLPI_TRAN_ADDR, "rx_lpi_tran"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARD_GB_ADDR, "rx_drop_frames"),
++      PPE_MAC_MIB_DESC(8, XGMAC_RXDISCARDBYTE_GB_ADDR, "rx_drop_bytes"),
++};
++
++/* Read the GMAC MIB counters and accumulate them into the PPE port GMIB stats array */
++static void ppe_port_gmib_update(struct ppe_port *ppe_port)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      const struct ppe_mac_mib_info *mib;
++      int port = ppe_port->port_id;
++      u32 reg, val;
++      int i, ret;
++
++      for (i = 0; i < ARRAY_SIZE(gmib_info); i++) {
++              mib = &gmib_info[i];
++              reg = PPE_PORT_GMAC_ADDR(port) + mib->offset;
++
++              ret = regmap_read(ppe_dev->regmap, reg, &val);
++              if (ret) {
++                      dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
++                      continue;
++              }
++
++              ppe_port->gmib_stats[i] += val;
++              if (mib->size == 8) {
++                      ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
++                      if (ret) {
++                              dev_warn(ppe_dev->dev, "%s: %d\n",
++                                       __func__, ret);
++                              continue;
++                      }
++
++                      ppe_port->gmib_stats[i] += (u64)val << 32;
++              }
++      }
++}
++
++/* Polling task that reads the GMIB statistics before the 32-bit GMIB registers can overflow */
++static void ppe_port_gmib_stats_poll(struct work_struct *work)
++{
++      struct ppe_port *ppe_port = container_of(work, struct ppe_port,
++                                               gmib_read.work);
++      spin_lock(&ppe_port->gmib_stats_lock);
++      ppe_port_gmib_update(ppe_port);
++      spin_unlock(&ppe_port->gmib_stats_lock);
++
++      schedule_delayed_work(&ppe_port->gmib_read,
++                            msecs_to_jiffies(PPE_GMIB_POLL_INTERVAL_MS));
++}
++
++/* Get the XGMAC MIB counter based on the specific MIB stats type */
++static u64 ppe_port_xgmib_get(struct ppe_port *ppe_port,
++                            enum ppe_xgmib_stats_type xgmib_type)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      const struct ppe_mac_mib_info *mib;
++      int port = ppe_port->port_id;
++      u32 reg, val;
++      u64 data = 0;
++      int ret;
++
++      mib = &xgmib_info[xgmib_type];
++      reg = PPE_PORT_XGMAC_ADDR(port) + mib->offset;
++
++      ret = regmap_read(ppe_dev->regmap, reg, &val);
++      if (ret) {
++              dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
++              goto data_return;
++      }
++
++      data = val;
++      if (mib->size == 8) {
++              ret = regmap_read(ppe_dev->regmap, reg + 4, &val);
++              if (ret) {
++                      dev_warn(ppe_dev->dev, "%s: %d\n", __func__, ret);
++                      goto data_return;
++              }
++
++              data |= (u64)val << 32;
++      }
++
++data_return:
++      return data;
++}
++
++/**
++ * ppe_port_get_sset_count() - Get PPE port statistics string count
++ * @ppe_port: PPE port
++ * @sset: string set ID
++ *
++ * Description: Get the MAC statistics string count for the PPE port
++ * specified by @ppe_port.
++ *
++ * Return: The number of statistics strings.
++ */
++int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset)
++{
++      if (sset != ETH_SS_STATS)
++              return 0;
++
++      if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC)
++              return ARRAY_SIZE(gmib_info);
++      else
++              return ARRAY_SIZE(xgmib_info);
++}
++
++/**
++ * ppe_port_get_strings() - Get PPE port statistics strings
++ * @ppe_port: PPE port
++ * @stringset: string set ID
++ * @data: pointer to statistics strings
++ *
++ * Description: Get the MAC statistics strings for the PPE port
++ * specified by @ppe_port. The strings are stored in the buffer
++ * indicated by @data, which is used in the ethtool ops.
++ */
++void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data)
++{
++      int i;
++
++      if (stringset != ETH_SS_STATS)
++              return;
++
++      if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
++              for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
++                      strscpy(data + i * ETH_GSTRING_LEN, gmib_info[i].name,
++                              ETH_GSTRING_LEN);
++      } else {
++              for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
++                      strscpy(data + i * ETH_GSTRING_LEN, xgmib_info[i].name,
++                              ETH_GSTRING_LEN);
++      }
++}
++
++/**
++ * ppe_port_get_ethtool_stats() - Get PPE port ethtool statistics
++ * @ppe_port: PPE port
++ * @data: pointer to statistics data
++ *
++ * Description: Get the MAC statistics for the PPE port specified
++ * by @ppe_port. The statistics are stored in the buffer indicated
++ * by @data, which is used in the ethtool ops.
++ */
++void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data)
++{
++      int i;
++
++      if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
++              spin_lock(&ppe_port->gmib_stats_lock);
++
++              ppe_port_gmib_update(ppe_port);
++              for (i = 0; i < ARRAY_SIZE(gmib_info); i++)
++                      data[i] = ppe_port->gmib_stats[i];
++
++              spin_unlock(&ppe_port->gmib_stats_lock);
++      } else {
++              for (i = 0; i < ARRAY_SIZE(xgmib_info); i++)
++                      data[i] = ppe_port_xgmib_get(ppe_port, i);
++      }
++}
++
++/**
++ * ppe_port_get_stats64() - Get PPE port statistics
++ * @ppe_port: PPE port
++ * @s: statistics pointer
++ *
++ * Description: Get the MAC statistics for the PPE port specified
++ * by @ppe_port.
++ */
++void ppe_port_get_stats64(struct ppe_port *ppe_port,
++                        struct rtnl_link_stats64 *s)
++{
++      if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
++              u64 *src = ppe_port->gmib_stats;
++
++              spin_lock(&ppe_port->gmib_stats_lock);
++
++              ppe_port_gmib_update(ppe_port);
++
++              s->rx_packets = src[gmib_rx_unicast] +
++                      src[gmib_rx_broadcast] + src[gmib_rx_multicast];
++
++              s->tx_packets = src[gmib_tx_unicast] +
++                      src[gmib_tx_broadcast] + src[gmib_tx_multicast];
++
++              s->rx_bytes = src[gmib_rx_bytes_g];
++              s->tx_bytes = src[gmib_tx_bytes];
++              s->multicast = src[gmib_rx_multicast];
++
++              s->rx_crc_errors = src[gmib_rx_fcserr] + src[gmib_rx_frag];
++              s->rx_frame_errors = src[gmib_rx_alignerr];
++              s->rx_errors = s->rx_crc_errors + s->rx_frame_errors;
++              s->rx_dropped = src[gmib_rx_toolong] + s->rx_errors;
++
++              s->tx_fifo_errors = src[gmib_tx_underrun];
++              s->tx_aborted_errors = src[gmib_tx_abortcol];
++              s->tx_errors = s->tx_fifo_errors + s->tx_aborted_errors;
++              s->collisions = src[gmib_tx_collisions];
++
++              spin_unlock(&ppe_port->gmib_stats_lock);
++      } else {
++              s->multicast = ppe_port_xgmib_get(ppe_port, xgmib_rx_multicast_g);
++
++              s->rx_packets = s->multicast;
++              s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_unicast_g);
++              s->rx_packets += ppe_port_xgmib_get(ppe_port, xgmib_rx_broadcast_g);
++
++              s->tx_packets = ppe_port_xgmib_get(ppe_port, xgmib_tx_frames);
++              s->rx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_rx_bytes);
++              s->tx_bytes = ppe_port_xgmib_get(ppe_port, xgmib_tx_bytes);
++
++              s->rx_crc_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_crc_err);
++              s->rx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_fifo_overflow);
++
++              s->rx_length_errors = ppe_port_xgmib_get(ppe_port, xgmib_rx_len_err);
++              s->rx_errors = s->rx_crc_errors +
++                      s->rx_fifo_errors + s->rx_length_errors;
++              s->rx_dropped = s->rx_errors;
++
++              s->tx_fifo_errors = ppe_port_xgmib_get(ppe_port, xgmib_tx_underflow_err);
++              s->tx_errors = s->tx_packets -
++                      ppe_port_xgmib_get(ppe_port, xgmib_tx_frames_g);
++      }
++}
++
+ /* PPE port and MAC reset */
+ static int ppe_port_mac_reset(struct ppe_port *ppe_port)
+ {
+@@ -261,6 +693,9 @@ static void ppe_port_mac_link_up(struct phylink_config *config,
+       int ret, port = ppe_port->port_id;
+       u32 reg, val;
++      /* Start GMIB statistics polling */
++      schedule_delayed_work(&ppe_port->gmib_read, 0);
++
+       if (mac_type == PPE_MAC_TYPE_GMAC)
+               ret = ppe_port_gmac_link_up(ppe_port,
+                                           speed, duplex, tx_pause, rx_pause);
+@@ -306,6 +741,9 @@ static void ppe_port_mac_link_down(struct phylink_config *config,
+       int ret, port = ppe_port->port_id;
+       u32 reg;
++      /* Stop GMIB statistics polling */
++      cancel_delayed_work_sync(&ppe_port->gmib_read);
++
+       /* Disable PPE port TX */
+       reg = PPE_PORT_BRIDGE_CTRL_ADDR + PPE_PORT_BRIDGE_CTRL_INC * port;
+       ret = regmap_update_bits(ppe_dev->regmap, reg,
+@@ -627,6 +1065,27 @@ static int ppe_port_mac_hw_init(struct ppe_port *ppe_port)
+       return ret;
+ }
++/* PPE port MAC MIB work task initialization */
++static int ppe_port_mac_mib_work_init(struct ppe_port *ppe_port)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      u64 *gstats;
++
++      gstats = devm_kzalloc(ppe_dev->dev,
++                            sizeof(*gstats) * ARRAY_SIZE(gmib_info),
++                            GFP_KERNEL);
++      if (!gstats)
++              return -ENOMEM;
++
++      ppe_port->gmib_stats = gstats;
++
++      spin_lock_init(&ppe_port->gmib_stats_lock);
++      INIT_DELAYED_WORK(&ppe_port->gmib_read,
++                        ppe_port_gmib_stats_poll);
++
++      return 0;
++}
++
+ /**
+  * ppe_port_mac_init() - Initialization of PPE ports for the PPE device
+  * @ppe_dev: PPE device
+@@ -693,6 +1152,12 @@ int ppe_port_mac_init(struct ppe_device *ppe_dev)
+                       goto err_port_node;
+               }
++              ret = ppe_port_mac_mib_work_init(&ppe_ports->port[i]);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "Failed to initialize MAC MIB work\n");
++                      goto err_port_node;
++              }
++
+               i++;
+       }
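
The 64-bit MIB counters above are read as two consecutive 32-bit
registers: low word first, then the high word shifted up by 32 bits,
as ppe_port_gmib_update() and ppe_port_xgmib_get() both do. A minimal
standalone sketch of that combining step, using made-up register
values rather than the driver's regmap reads:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine the two 32-bit halves the way the driver does. */
    static uint64_t mib_read64(uint32_t lo, uint32_t hi)
    {
            return (uint64_t)hi << 32 | lo;
    }

    int main(void)
    {
            uint64_t stats = 0;

            /* Hypothetical values from two successive polls. */
            stats += mib_read64(0xfffffff0u, 0x0);
            stats += mib_read64(0x00000020u, 0x0);

            printf("accumulated: %llu\n", (unsigned long long)stats);
            return 0;
    }

This is also why the GMAC path accumulates into the 64-bit
gmib_stats[] array on every poll, while the wider XGMAC counters are
simply read on demand in ppe_port_get_ethtool_stats().
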
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+index 194f65815011..a524d90e1446 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+@@ -8,6 +8,8 @@
+ #include <linux/phylink.h>
++struct rtnl_link_stats64;
++
+ /**
+  * enum ppe_port_clk_rst_type - PPE port clock and reset ID type
+  * @PPE_PORT_CLK_RST_MAC: The clock and reset ID for port MAC
+@@ -44,6 +46,9 @@ enum ppe_mac_type {
+  * @port_id: Port ID
+  * @clks: Port clocks
+  * @rstcs: Port resets
++ * @gmib_read: Delayed work task for the GMAC MIB statistics polling function
++ * @gmib_stats: GMAC MIB statistics array
++ * @gmib_stats_lock: Lock to protect GMAC MIB statistics
+  */
+ struct ppe_port {
+       struct phylink *phylink;
+@@ -56,6 +61,9 @@ struct ppe_port {
+       int port_id;
+       struct clk *clks[PPE_PORT_CLK_RST_MAX];
+       struct reset_control *rstcs[PPE_PORT_CLK_RST_MAX];
++      struct delayed_work gmib_read;
++      u64 *gmib_stats;
++      spinlock_t gmib_stats_lock; /* Protects GMIB stats */
+ };
+ /**
+@@ -73,4 +81,9 @@ void ppe_port_mac_deinit(struct ppe_device *ppe_dev);
+ int ppe_port_phylink_setup(struct ppe_port *ppe_port,
+                          struct net_device *netdev);
+ void ppe_port_phylink_destroy(struct ppe_port *ppe_port);
++int ppe_port_get_sset_count(struct ppe_port *ppe_port, int sset);
++void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data);
++void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
++void ppe_port_get_stats64(struct ppe_port *ppe_port,
++                        struct rtnl_link_stats64 *s);
+ #endif
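
These ppe_port_get_* helpers are shaped to slot straight into a
netdev's ethtool_ops. A hypothetical wiring, assuming an
edma_port_priv private struct holding a ppe_port pointer (the real
netdevice glue only arrives in later patches of this series):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    #include "ppe_port.h"

    /* Assumed netdev private layout; not defined by this patch. */
    struct edma_port_priv {
            struct ppe_port *port;
    };

    static int edma_get_sset_count(struct net_device *netdev, int sset)
    {
            struct edma_port_priv *priv = netdev_priv(netdev);

            return ppe_port_get_sset_count(priv->port, sset);
    }

    static void edma_get_strings(struct net_device *netdev, u32 sset, u8 *data)
    {
            struct edma_port_priv *priv = netdev_priv(netdev);

            ppe_port_get_strings(priv->port, sset, data);
    }

    static void edma_get_ethtool_stats(struct net_device *netdev,
                                       struct ethtool_stats *stats, u64 *data)
    {
            struct edma_port_priv *priv = netdev_priv(netdev);

            ppe_port_get_ethtool_stats(priv->port, data);
    }

    static const struct ethtool_ops edma_ethtool_ops = {
            .get_strings            = edma_get_strings,
            .get_sset_count         = edma_get_sset_count,
            .get_ethtool_stats      = edma_get_ethtool_stats,
    };
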
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 34b659ac0c37..2cd5bd9fa446 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -606,6 +606,48 @@
+ #define GMAC_MIB_CTRL_MASK                    \
+       (GMAC_MIB_RD_CLR | GMAC_MIB_RST | GMAC_MIB_EN)
++/* GMAC MIB counter registers */
++#define GMAC_RXBROAD_ADDR                     0x40
++#define GMAC_RXPAUSE_ADDR                     0x44
++#define GMAC_RXMULTI_ADDR                     0x48
++#define GMAC_RXFCSERR_ADDR                    0x4C
++#define GMAC_RXALIGNERR_ADDR                  0x50
++#define GMAC_RXRUNT_ADDR                      0x54
++#define GMAC_RXFRAG_ADDR                      0x58
++#define GMAC_RXJUMBOFCSERR_ADDR                       0x5C
++#define GMAC_RXJUMBOALIGNERR_ADDR             0x60
++#define GMAC_RXPKT64_ADDR                     0x64
++#define GMAC_RXPKT65TO127_ADDR                        0x68
++#define GMAC_RXPKT128TO255_ADDR                       0x6C
++#define GMAC_RXPKT256TO511_ADDR                       0x70
++#define GMAC_RXPKT512TO1023_ADDR              0x74
++#define GMAC_RXPKT1024TO1518_ADDR             0x78
++#define GMAC_RXPKT1519TOX_ADDR                        0x7C
++#define GMAC_RXTOOLONG_ADDR                   0x80
++#define GMAC_RXBYTE_G_ADDR                    0x84
++#define GMAC_RXBYTE_B_ADDR                    0x8C
++#define GMAC_RXUNI_ADDR                               0x94
++#define GMAC_TXBROAD_ADDR                     0xA0
++#define GMAC_TXPAUSE_ADDR                     0xA4
++#define GMAC_TXMULTI_ADDR                     0xA8
++#define GMAC_TXUNDERRUN_ADDR                  0xAC
++#define GMAC_TXPKT64_ADDR                     0xB0
++#define GMAC_TXPKT65TO127_ADDR                        0xB4
++#define GMAC_TXPKT128TO255_ADDR                       0xB8
++#define GMAC_TXPKT256TO511_ADDR                       0xBC
++#define GMAC_TXPKT512TO1023_ADDR              0xC0
++#define GMAC_TXPKT1024TO1518_ADDR             0xC4
++#define GMAC_TXPKT1519TOX_ADDR                        0xC8
++#define GMAC_TXBYTE_ADDR                      0xCC
++#define GMAC_TXCOLLISIONS_ADDR                        0xD4
++#define GMAC_TXABORTCOL_ADDR                  0xD8
++#define GMAC_TXMULTICOL_ADDR                  0xDC
++#define GMAC_TXSINGLECOL_ADDR                 0xE0
++#define GMAC_TXEXCESSIVEDEFER_ADDR            0xE4
++#define GMAC_TXDEFER_ADDR                     0xE8
++#define GMAC_TXLATECOL_ADDR                   0xEC
++#define GMAC_TXUNI_ADDR                               0xF0
++
+ /* XGMAC TX configuration register */
+ #define XGMAC_TX_CONFIG_ADDR                  0x0
+ #define XGMAC_SPEED_M                         GENMASK(31, 29)
+@@ -668,4 +710,53 @@
+ #define XGMAC_MCF                             BIT(3)
+ #define XGMAC_CNTRST                          BIT(0)
++/* XGMAC MIB counter registers */
++#define XGMAC_TXBYTE_GB_ADDR                  0x814
++#define XGMAC_TXPKT_GB_ADDR                   0x81C
++#define XGMAC_TXBROAD_G_ADDR                  0x824
++#define XGMAC_TXMULTI_G_ADDR                  0x82C
++#define XGMAC_TXPKT64_GB_ADDR                 0x834
++#define XGMAC_TXPKT65TO127_GB_ADDR            0x83C
++#define XGMAC_TXPKT128TO255_GB_ADDR           0x844
++#define XGMAC_TXPKT256TO511_GB_ADDR           0x84C
++#define XGMAC_TXPKT512TO1023_GB_ADDR          0x854
++#define XGMAC_TXPKT1024TOMAX_GB_ADDR          0x85C
++#define XGMAC_TXUNI_GB_ADDR                   0x864
++#define XGMAC_TXMULTI_GB_ADDR                 0x86C
++#define XGMAC_TXBROAD_GB_ADDR                 0x874
++#define XGMAC_TXUNDERFLOW_ERR_ADDR            0x87C
++#define XGMAC_TXBYTE_G_ADDR                   0x884
++#define XGMAC_TXPKT_G_ADDR                    0x88C
++#define XGMAC_TXPAUSE_ADDR                    0x894
++#define XGMAC_TXVLAN_G_ADDR                   0x89C
++#define XGMAC_TXLPI_USEC_ADDR                 0x8A4
++#define XGMAC_TXLPI_TRAN_ADDR                 0x8A8
++#define XGMAC_RXPKT_GB_ADDR                   0x900
++#define XGMAC_RXBYTE_GB_ADDR                  0x908
++#define XGMAC_RXBYTE_G_ADDR                   0x910
++#define XGMAC_RXBROAD_G_ADDR                  0x918
++#define XGMAC_RXMULTI_G_ADDR                  0x920
++#define XGMAC_RXCRC_ERR_ADDR                  0x928
++#define XGMAC_RXRUNT_ERR_ADDR                 0x930
++#define XGMAC_RXJABBER_ERR_ADDR                       0x934
++#define XGMAC_RXUNDERSIZE_G_ADDR              0x938
++#define XGMAC_RXOVERSIZE_G_ADDR                       0x93C
++#define XGMAC_RXPKT64_GB_ADDR                 0x940
++#define XGMAC_RXPKT65TO127_GB_ADDR            0x948
++#define XGMAC_RXPKT128TO255_GB_ADDR           0x950
++#define XGMAC_RXPKT256TO511_GB_ADDR           0x958
++#define XGMAC_RXPKT512TO1023_GB_ADDR          0x960
++#define XGMAC_RXPKT1024TOMAX_GB_ADDR          0x968
++#define XGMAC_RXUNI_G_ADDR                    0x970
++#define XGMAC_RXLEN_ERR_ADDR                  0x978
++#define XGMAC_RXOUTOFRANGE_ADDR                       0x980
++#define XGMAC_RXPAUSE_ADDR                    0x988
++#define XGMAC_RXFIFOOVERFLOW_ADDR             0x990
++#define XGMAC_RXVLAN_GB_ADDR                  0x998
++#define XGMAC_RXWATCHDOG_ERR_ADDR             0x9A0
++#define XGMAC_RXLPI_USEC_ADDR                 0x9A4
++#define XGMAC_RXLPI_TRAN_ADDR                 0x9A8
++#define XGMAC_RXDISCARD_GB_ADDR                       0x9AC
++#define XGMAC_RXDISCARDBYTE_GB_ADDR           0x9B4
++
+ #endif
+-- 
+2.45.2
+
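
A quick sanity check of the 120 s poll interval chosen in the patch
above: a 32-bit packet counter wraps after 2^32 minimum-size frames,
and each 64-byte frame occupies 84 bytes on the wire (frame plus
preamble plus inter-frame gap). The link speeds below are
illustrative, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const double wire_bits = 84 * 8;        /* 64B frame on the wire */
            const double rates[] = { 1e9, 2.5e9, 10e9 };

            for (int i = 0; i < 3; i++) {
                    double pps = rates[i] / wire_bits;
                    double wrap_s = 4294967296.0 / pps;     /* 2^32 packets */

                    printf("%4.1f Gbps: %8.0f pps, 32-bit wrap after %6.0f s\n",
                           rates[i] / 1e9, pps, wrap_s);
            }
            return 0;
    }

Even at 10 Gbit/s the counter needs roughly 289 seconds to wrap, so
polling every 120 seconds leaves better than a 2x safety margin.
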
diff --git a/target/linux/qualcommbe/patches-6.6/103-34-net-ethernet-qualcomm-Add-PPE-port-MAC-address-and-E.patch b/target/linux/qualcommbe/patches-6.6/103-34-net-ethernet-qualcomm-Add-PPE-port-MAC-address-and-E.patch
new file mode 100644 (file)
index 0000000..fc8f955
--- /dev/null
@@ -0,0 +1,179 @@
+From 172dc9a0d7704051c63407af6b39939c43801a99 Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Fri, 1 Mar 2024 13:36:26 +0800
+Subject: [PATCH 34/50] net: ethernet: qualcomm: Add PPE port MAC address and
+ EEE functions
+
+Add PPE port MAC address set and EEE set API functions which
+will be used by netdev ops and ethtool.
+
+Change-Id: Id2b3b06ae940b3b6f5227d927316329cdf3caeaa
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 75 ++++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.h |  3 +
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 29 ++++++++
+ 3 files changed, 107 insertions(+)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+index 284ee14b8d03..a9781e1197f7 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+@@ -462,6 +462,81 @@ void ppe_port_get_stats64(struct ppe_port *ppe_port,
+       }
+ }
++/**
++ * ppe_port_set_mac_address() - Set PPE port MAC address
++ * @ppe_port: PPE port
++ * @addr: MAC address
++ *
++ * Description: Set MAC address for the given PPE port.
++ *
++ * Return: 0 upon success or a negative error upon failure.
++ */
++int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int port = ppe_port->port_id;
++      u32 reg, val;
++      int ret;
++
++      if (ppe_port->mac_type == PPE_MAC_TYPE_GMAC) {
++              reg = PPE_PORT_GMAC_ADDR(port);
++              val = (addr[5] << 8) | addr[4];
++              ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR0_ADDR, val);
++              if (ret)
++                      return ret;
++
++              val = (addr[0] << 24) | (addr[1] << 16) |
++                    (addr[2] << 8) | addr[3];
++              ret = regmap_write(ppe_dev->regmap, reg + GMAC_GOL_ADDR1_ADDR, val);
++              if (ret)
++                      return ret;
++      } else {
++              reg = PPE_PORT_XGMAC_ADDR(port);
++              val = (addr[5] << 8) | addr[4] | XGMAC_ADDR_EN;
++              ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_H_ADDR, val);
++              if (ret)
++                      return ret;
++
++              val = (addr[3] << 24) | (addr[2] << 16) |
++                    (addr[1] << 8) | addr[0];
++              ret = regmap_write(ppe_dev->regmap, reg + XGMAC_ADDR0_L_ADDR, val);
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
++/**
++ * ppe_port_set_mac_eee() - Set EEE configuration for PPE port MAC
++ * @ppe_port: PPE port
++ * @eee: EEE settings
++ *
++ * Description: Set port MAC EEE settings for the given PPE port.
++ *
++ * Return: 0 upon success or a negative error upon failure.
++ */
++int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      int port = ppe_port->port_id;
++      u32 val;
++      int ret;
++
++      ret = regmap_read(ppe_dev->regmap, PPE_LPI_EN_ADDR, &val);
++      if (ret)
++              return ret;
++
++      if (eee->tx_lpi_enabled)
++              val |= PPE_LPI_PORT_EN(port);
++      else
++              val &= ~PPE_LPI_PORT_EN(port);
++
++      ret = regmap_write(ppe_dev->regmap, PPE_LPI_EN_ADDR, val);
++
++      return ret;
++}
++
+ /* PPE port and MAC reset */
+ static int ppe_port_mac_reset(struct ppe_port *ppe_port)
+ {
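
Note how the two MACs pack the six address bytes differently: the
GMAC places addr[0] in the top byte of its second register, while the
XGMAC places addr[0] in the low byte of its low register. A
standalone illustration of both layouts (the address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            /* GMAC: ADDR0 holds bytes 5:4, ADDR1 holds bytes 0:3. */
            uint32_t gmac_addr0 = (addr[5] << 8) | addr[4];
            uint32_t gmac_addr1 = (addr[0] << 24) | (addr[1] << 16) |
                                  (addr[2] << 8) | addr[3];

            /* XGMAC: low register holds bytes 3:0, high holds 5:4. */
            uint32_t xgmac_l = (addr[3] << 24) | (addr[2] << 16) |
                               (addr[1] << 8) | addr[0];
            uint32_t xgmac_h = (addr[5] << 8) | addr[4];

            printf("GMAC:  ADDR0=0x%08x ADDR1=0x%08x\n", gmac_addr0, gmac_addr1);
            printf("XGMAC: L=0x%08x H=0x%08x (XGMAC_ADDR_EN ORed in separately)\n",
                   xgmac_l, xgmac_h);
            return 0;
    }
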
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+index a524d90e1446..2234c9bfbd9a 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+@@ -8,6 +8,7 @@
+ #include <linux/phylink.h>
++struct ethtool_eee;
+ struct rtnl_link_stats64;
+ /**
+@@ -86,4 +87,6 @@ void ppe_port_get_strings(struct ppe_port *ppe_port, u32 stringset, u8 *data);
+ void ppe_port_get_ethtool_stats(struct ppe_port *ppe_port, u64 *data);
+ void ppe_port_get_stats64(struct ppe_port *ppe_port,
+                         struct rtnl_link_stats64 *s);
++int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
++int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 2cd5bd9fa446..6e6e469247c8 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -18,6 +18,16 @@
+ #define PPE_PORT5_SEL_PCS1                    BIT(4)
+ #define PPE_PORT_SEL_XGMAC(x)                 (BIT(8) << ((x) - 1))
++/* PPE port LPI enable register */
++#define PPE_LPI_EN_ADDR                               0x400
++#define PPE_LPI_PORT1_EN                      BIT(0)
++#define PPE_LPI_PORT2_EN                      BIT(1)
++#define PPE_LPI_PORT3_EN                      BIT(2)
++#define PPE_LPI_PORT4_EN                      BIT(3)
++#define PPE_LPI_PORT5_EN                      BIT(4)
++#define PPE_LPI_PORT6_EN                      BIT(5)
++#define PPE_LPI_PORT_EN(x)                    (BIT(0) << ((x) - 1))
++
+ /* There are 15 BM ports and 4 BM groups supported by PPE,
+  * BM port (0-7) is matched to EDMA port 0, BM port (8-13) is matched
+  * to PPE physical port 1-6, BM port 14 is matched to EIP.
+@@ -580,6 +590,17 @@
+ #define GMAC_SPEED_100                                1
+ #define GMAC_SPEED_1000                               2
++/* GMAC MAC address register */
++#define GMAC_GOL_ADDR0_ADDR                   0x8
++#define GMAC_ADDR_BYTE5                               GENMASK(15, 8)
++#define GMAC_ADDR_BYTE4                               GENMASK(7, 0)
++
++#define GMAC_GOL_ADDR1_ADDR                   0xC
++#define GMAC_ADDR_BYTE0                               GENMASK(31, 24)
++#define GMAC_ADDR_BYTE1                               GENMASK(23, 16)
++#define GMAC_ADDR_BYTE2                               GENMASK(15, 8)
++#define GMAC_ADDR_BYTE3                               GENMASK(7, 0)
++
+ /* GMAC control register */
+ #define GMAC_CTRL_ADDR                                0x18
+ #define GMAC_TX_THD_M                         GENMASK(27, 24)
+@@ -705,6 +726,14 @@
+ #define XGMAC_RX_FLOW_CTRL_ADDR                       0x90
+ #define XGMAC_RXFCEN                          BIT(0)
++/* XGMAC MAC address register */
++#define XGMAC_ADDR0_H_ADDR                    0x300
++#define XGMAC_ADDR_EN                         BIT(31)
++#define XGMAC_ADDRH                           GENMASK(15, 0)
++
++#define XGMAC_ADDR0_L_ADDR                    0x304
++#define XGMAC_ADDRL                           GENMASK(31, 0)
++
+ /* XGMAC management counters control register */
+ #define XGMAC_MMC_CTRL_ADDR                   0x800
+ #define XGMAC_MCF                             BIT(3)
+-- 
+2.45.2
+
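
The per-port LPI enable bits follow a simple (port - 1) mapping, so
the EEE toggle in ppe_port_set_mac_eee() is a plain read-modify-write
of one register. The bit arithmetic in isolation (no register access):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define PPE_LPI_PORT_EN(x)      (BIT(0) << ((x) - 1))

    int main(void)
    {
            uint32_t lpi_en = 0;

            /* Enable LPI on ports 1 and 3, then disable port 1 again. */
            lpi_en |= PPE_LPI_PORT_EN(1);
            lpi_en |= PPE_LPI_PORT_EN(3);
            lpi_en &= ~PPE_LPI_PORT_EN(1);

            printf("PPE_LPI_EN = 0x%02x (port 3 maps to bit 2)\n", lpi_en);
            return 0;
    }
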
diff --git a/target/linux/qualcommbe/patches-6.6/103-35-net-ethernet-qualcomm-Add-API-to-configure-PPE-port-.patch b/target/linux/qualcommbe/patches-6.6/103-35-net-ethernet-qualcomm-Add-API-to-configure-PPE-port-.patch
new file mode 100644 (file)
index 0000000..e75a9c0
--- /dev/null
@@ -0,0 +1,85 @@
+From cf3e71b3c8bd63cd832c0512386700cac6a2c363 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Tue, 5 Mar 2024 16:42:56 +0800
+Subject: [PATCH 35/50] net: ethernet: qualcomm: Add API to configure PPE port
+ max frame size
+
+This function is called when the MTU of an ethernet port is
+configured. It limits the size of packets passed through the
+ethernet port.
+
+Change-Id: I2a4dcd04407156d73770d2becbb7cbc0d56b3754
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.c | 44 ++++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.h |  1 +
+ 2 files changed, 45 insertions(+)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+index a9781e1197f7..52820e2eedf8 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+@@ -537,6 +537,50 @@ int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_keee *eee)
+       return ret;
+ }
++/**
++ * ppe_port_set_maxframe() - Set port maximum frame size
++ * @ppe_port: PPE port structure
++ * @maxframe_size: Maximum frame size supported by PPE port
++ *
++ * Description: Set the maximum frame size of the network interface specified by @ppe_port.
++ *
++ * Return: 0 upon success or a negative error upon failure.
++ */
++int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size)
++{
++      struct ppe_device *ppe_dev = ppe_port->ppe_dev;
++      u32 reg, val, mru_mtu_val[3];
++      int port = ppe_port->port_id;
++      int ret;
++
++      /* In the PPE, the max frame size is the MTU plus ETH_HLEN. */
++      maxframe_size += ETH_HLEN;
++
++      /* The MAC includes the FCS when calculating the frame size. */
++      if (maxframe_size > PPE_PORT_MAC_MAX_FRAME_SIZE - ETH_FCS_LEN)
++              return -EINVAL;
++
++      reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
++      val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU, maxframe_size);
++      ret = regmap_update_bits(ppe_dev->regmap, reg,
++                               PPE_MC_MTU_CTRL_TBL_MTU,
++                               val);
++      if (ret)
++              return ret;
++
++      reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
++      ret = regmap_bulk_read(ppe_dev->regmap, reg,
++                             mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++      if (ret)
++              return ret;
++
++      PPE_MRU_MTU_CTRL_SET_MRU(mru_mtu_val, maxframe_size);
++      PPE_MRU_MTU_CTRL_SET_MTU(mru_mtu_val, maxframe_size);
++
++      return regmap_bulk_write(ppe_dev->regmap, reg,
++                               mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
++}
++
+ /* PPE port and MAC reset */
+ static int ppe_port_mac_reset(struct ppe_port *ppe_port)
+ {
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+index 2234c9bfbd9a..8234e86fb401 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.h
+@@ -89,4 +89,5 @@ void ppe_port_get_stats64(struct ppe_port *ppe_port,
+                         struct rtnl_link_stats64 *s);
+ int ppe_port_set_mac_address(struct ppe_port *ppe_port, const u8 *addr);
+ int ppe_port_set_mac_eee(struct ppe_port *ppe_port, struct ethtool_eee *eee);
++int ppe_port_set_maxframe(struct ppe_port *ppe_port, int maxframe_size);
+ #endif
+-- 
+2.45.2
+
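
ppe_port_set_maxframe() is meant to back a netdev's .ndo_change_mtu
callback. A hypothetical hook, reusing the assumed edma_port_priv
layout from the earlier ethtool sketch (the real netdevice ops are
added by later patches in the series):

    #include <linux/netdevice.h>

    #include "ppe_port.h"

    /* Assumed netdev private layout; not defined by this patch. */
    struct edma_port_priv {
            struct ppe_port *port;
    };

    static int edma_ndo_change_mtu(struct net_device *netdev, int new_mtu)
    {
            struct edma_port_priv *priv = netdev_priv(netdev);
            int ret;

            /* Program the PPE first; commit the new MTU only on success. */
            ret = ppe_port_set_maxframe(priv->port, new_mtu);
            if (ret)
                    return ret;

            netdev->mtu = new_mtu;

            return 0;
    }
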
diff --git a/target/linux/qualcommbe/patches-6.6/103-38-net-ethernet-qualcomm-Add-EDMA-support-for-QCOM-IPQ9.patch b/target/linux/qualcommbe/patches-6.6/103-38-net-ethernet-qualcomm-Add-EDMA-support-for-QCOM-IPQ9.patch
new file mode 100644 (file)
index 0000000..8aab02b
--- /dev/null
@@ -0,0 +1,905 @@
+From f9246c9597e89510ae016c33ffa3b367ed83cf2d Mon Sep 17 00:00:00 2001
+From: Pavithra R <quic_pavir@quicinc.com>
+Date: Wed, 28 Feb 2024 11:25:15 +0530
+Subject: [PATCH 38/50] net: ethernet: qualcomm: Add EDMA support for QCOM
+ IPQ9574 chipset.
+
+Add the infrastructure pieces such as the Makefile, the EDMA
+hardware configuration, and the clock and IRQ initialization.
+
+Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile   |   3 +
+ drivers/net/ethernet/qualcomm/ppe/edma.c     | 456 +++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma.h     |  99 ++++
+ drivers/net/ethernet/qualcomm/ppe/ppe.c      |  10 +-
+ drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
+ 5 files changed, 820 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 76cdc423a8cc..7fea135ceb36 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -5,3 +5,6 @@
+ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
++
++#EDMA
++qcom-ppe-objs += edma.o
+\ No newline at end of file
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+new file mode 100644
+index 000000000000..d7bf1f39e9e1
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -0,0 +1,456 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* Qualcomm Ethernet DMA driver setup, HW configuration, clock and
++ * interrupt initialization.
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/of_irq.h>
++#include <linux/platform_device.h>
++#include <linux/printk.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++
++#include "edma.h"
++#include "ppe_regs.h"
++
++#define EDMA_IRQ_NAME_SIZE            32
++
++/* Global EDMA context. */
++struct edma_context *edma_ctx;
++
++/* Priority to multi-queue mapping. */
++static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
++      0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
++
++enum edma_clk_id {
++      EDMA_CLK,
++      EDMA_CFG_CLK,
++      EDMA_CLK_MAX
++};
++
++static const char * const clock_name[EDMA_CLK_MAX] = {
++      [EDMA_CLK] = "edma",
++      [EDMA_CFG_CLK] = "edma-cfg",
++};
++
++/* Rx Fill ring info for IPQ9574. */
++static struct edma_ring_info ipq9574_rxfill_ring_info = {
++      .max_rings = 8,
++      .ring_start = 4,
++      .num_rings = 4,
++};
++
++/* Rx ring info for IPQ9574. */
++static struct edma_ring_info ipq9574_rx_ring_info = {
++      .max_rings = 24,
++      .ring_start = 20,
++      .num_rings = 4,
++};
++
++/* Tx ring info for IPQ9574. */
++static struct edma_ring_info ipq9574_tx_ring_info = {
++      .max_rings = 32,
++      .ring_start = 8,
++      .num_rings = 24,
++};
++
++/* Tx complete ring info for IPQ9574. */
++static struct edma_ring_info ipq9574_txcmpl_ring_info = {
++      .max_rings = 32,
++      .ring_start = 8,
++      .num_rings = 24,
++};
++
++/* HW info for IPQ9574. */
++static struct edma_hw_info ipq9574_hw_info = {
++      .rxfill = &ipq9574_rxfill_ring_info,
++      .rx = &ipq9574_rx_ring_info,
++      .tx = &ipq9574_tx_ring_info,
++      .txcmpl = &ipq9574_txcmpl_ring_info,
++      .max_ports = 6,
++      .napi_budget_rx = 128,
++      .napi_budget_tx = 512,
++};
++
++static int edma_clock_set_and_enable(struct device *dev,
++                                   const char *id, unsigned long rate)
++{
++      struct device_node *edma_np;
++      struct clk *clk = NULL;
++      int ret;
++
++      edma_np = of_get_child_by_name(dev->of_node, "edma");
++
++      clk = devm_get_clk_from_child(dev, edma_np, id);
++      if (IS_ERR(clk)) {
++              dev_err(dev, "clk %s get failed\n", id);
++              of_node_put(edma_np);
++              return PTR_ERR(clk);
++      }
++
++      ret = clk_set_rate(clk, rate);
++      if (ret) {
++              dev_err(dev, "set %lu rate for %s failed\n", rate, id);
++              of_node_put(edma_np);
++              return ret;
++      }
++
++      ret = clk_prepare_enable(clk);
++      if (ret) {
++              dev_err(dev, "clk %s enable failed\n", id);
++              of_node_put(edma_np);
++              return ret;
++      }
++
++      of_node_put(edma_np);
++
++      dev_dbg(dev, "set %lu rate for %s\n", rate, id);
++
++      return 0;
++}
++
++static int edma_clock_init(void)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++      unsigned long ppe_rate;
++      int ret;
++
++      ppe_rate = ppe_dev->clk_rate;
++
++      ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
++                                      ppe_rate);
++      if (ret)
++              return ret;
++
++      ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
++                                      ppe_rate);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++/**
++ * edma_configure_ucast_prio_map_tbl() - Configure unicast priority map table.
++ *
++ * Map the int_priority values to priority classes and initialize the
++ * unicast priority map table for the default profile_id.
++ */
++static int edma_configure_ucast_prio_map_tbl(void)
++{
++      u8 pri_class, int_pri;
++      int ret = 0;
++
++      /* Set the priority class value for every possible priority. */
++      for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
++              pri_class = edma_pri_map[int_pri];
++
++              /* Priority offset should be less than maximum supported
++               * queue priority.
++               */
++              if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
++                      pr_err("Configured incorrect priority offset: %d\n",
++                             pri_class);
++                      return -EINVAL;
++              }
++
++              ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
++                                                 PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
++
++              if (ret) {
++                      pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
++                             ret, int_pri, 0);
++                      return ret;
++              }
++
++              pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
++                       0, int_pri, pri_class);
++      }
++
++      return ret;
++}
++
++static int edma_irq_init(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct edma_ring_info *rx = hw_info->rx;
++      char edma_irq_name[EDMA_IRQ_NAME_SIZE];
++      struct device *dev = ppe_dev->dev;
++      struct platform_device *pdev;
++      struct device_node *edma_np;
++      u32 i;
++
++      pdev = to_platform_device(dev);
++      edma_np = of_get_child_by_name(dev->of_node, "edma");
++      edma_ctx->intr_info.intr_txcmpl = kzalloc((sizeof(*edma_ctx->intr_info.intr_txcmpl) *
++                                                txcmpl->num_rings), GFP_KERNEL);
++      if (!edma_ctx->intr_info.intr_txcmpl) {
++              of_node_put(edma_np);
++              return -ENOMEM;
++      }
++
++      /* Get TXCMPL rings IRQ numbers. */
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
++                       txcmpl->ring_start + i);
++              edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
++              if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
++                      dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
++                              edma_np->name, i);
++                      of_node_put(edma_np);
++                      kfree(edma_ctx->intr_info.intr_txcmpl);
++                      return edma_ctx->intr_info.intr_txcmpl[i];
++              }
++
++              dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %u\n",
++                      edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
++      }
++
++      edma_ctx->intr_info.intr_rx = kzalloc((sizeof(*edma_ctx->intr_info.intr_rx) *
++                                            rx->num_rings), GFP_KERNEL);
++      if (!edma_ctx->intr_info.intr_rx) {
++              of_node_put(edma_np);
++              kfree(edma_ctx->intr_info.intr_txcmpl);
++              return -ENOMEM;
++      }
++
++      /* Get RXDESC rings IRQ numbers. */
++      for (i = 0; i < rx->num_rings; i++) {
++              snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
++                       rx->ring_start + i);
++              edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
++              if (edma_ctx->intr_info.intr_rx[i] < 0) {
++                      dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
++                              edma_np->name, i);
++                      of_node_put(edma_np);
++                      kfree(edma_ctx->intr_info.intr_rx);
++                      kfree(edma_ctx->intr_info.intr_txcmpl);
++                      return edma_ctx->intr_info.intr_rx[i];
++              }
++
++              dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %u\n",
++                      edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
++      }
++
++      /* Get misc IRQ number. */
++      edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
++      if (edma_ctx->intr_info.intr_misc < 0) {
++              dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
++              of_node_put(edma_np);
++              kfree(edma_ctx->intr_info.intr_rx);
++              kfree(edma_ctx->intr_info.intr_txcmpl);
++              return edma_ctx->intr_info.intr_misc;
++      }
++
++      dev_dbg(dev, "%s: misc IRQ:%u\n", edma_np->name,
++              edma_ctx->intr_info.intr_misc);
++
++      of_node_put(edma_np);
++
++      return 0;
++}
++
++static int edma_hw_reset(void)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++      struct reset_control *edma_hw_rst;
++      struct device_node *edma_np;
++      const char *reset_string;
++      u32 count, i;
++      int ret;
++
++      /* Count and parse the reset names from the device tree. */
++      edma_np = of_get_child_by_name(dev->of_node, "edma");
++      count = of_property_count_strings(edma_np, "reset-names");
++      if (count < 0) {
++              dev_err(dev, "EDMA reset entry not found\n");
++              of_node_put(edma_np);
++              return -EINVAL;
++      }
++
++      for (i = 0; i < count; i++) {
++              ret = of_property_read_string_index(edma_np, "reset-names",
++                                                  i, &reset_string);
++              if (ret) {
++                      dev_err(dev, "Error reading reset-names\n");
++                      of_node_put(edma_np);
++                      return -EINVAL;
++              }
++
++              edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
++              if (IS_ERR(edma_hw_rst)) {
++                      of_node_put(edma_np);
++                      return PTR_ERR(edma_hw_rst);
++              }
++
++              /* 100ms delay is required by hardware to reset EDMA. */
++              reset_control_assert(edma_hw_rst);
++              fsleep(100);
++
++              reset_control_deassert(edma_hw_rst);
++              fsleep(100);
++
++              reset_control_put(edma_hw_rst);
++              dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
++      }
++
++      of_node_put(edma_np);
++
++      return 0;
++}
++
++static int edma_hw_configure(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 data, reg;
++      int ret;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
++      ret = regmap_read(regmap, reg, &data);
++      if (ret)
++              return ret;
++
++      pr_debug("EDMA ver %d hw init\n", data);
++
++      /* Setup private data structure. */
++      edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
++      edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
++
++      /* Reset EDMA. */
++      ret = edma_hw_reset();
++      if (ret) {
++              pr_err("Error in resetting the hardware. ret: %d\n", ret);
++              return ret;
++      }
++
++      /* Allocate memory for netdevices. */
++      edma_ctx->netdev_arr = kcalloc(hw_info->max_ports,
++                                     sizeof(*edma_ctx->netdev_arr),
++                                     GFP_KERNEL);
++      if (!edma_ctx->netdev_arr)
++              return -ENOMEM;
++
++      /* Configure DMA request priority, DMA read burst length,
++       * and AXI write size.
++       */
++      data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
++      data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
++      data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
++      data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
++      data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
++      ret = regmap_write(regmap, reg, data);
++      if (ret)
++              return ret;
++
++      /* Configure Tx Timeout Threshold. */
++      data = EDMA_TX_TIMEOUT_THRESH_VAL;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
++      ret = regmap_write(regmap, reg, data);
++      if (ret)
++              return ret;
++
++      /* Set Miscellaneous error mask. */
++      data = EDMA_MISC_AXI_RD_ERR_MASK |
++              EDMA_MISC_AXI_WR_ERR_MASK |
++              EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
++              EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
++              EDMA_MISC_TX_SRAM_FULL_MASK |
++              EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
++              EDMA_MISC_DATA_LEN_ERR_MASK;
++      data |= EDMA_MISC_TX_TIMEOUT_MASK;
++      edma_ctx->intr_info.intr_mask_misc = data;
++
++      /* Global EDMA enable and padding enable. */
++      data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
++      ret = regmap_write(regmap, reg, data);
++      if (ret)
++              return ret;
++
++      /* Initialize unicast priority map table. */
++      ret = edma_configure_ucast_prio_map_tbl();
++      if (ret) {
++              pr_err("Failed to initialize unicast priority map table: %d\n",
++                     ret);
++              kfree(edma_ctx->netdev_arr);
++              return ret;
++      }
++
++      return 0;
++}
++
++/**
++ * edma_destroy() - Destroy EDMA resources.
++ * @ppe_dev: PPE device
++ *
++ * Free the memory allocated during setup.
++ */
++void edma_destroy(struct ppe_device *ppe_dev)
++{
++      kfree(edma_ctx->intr_info.intr_rx);
++      kfree(edma_ctx->intr_info.intr_txcmpl);
++      kfree(edma_ctx->netdev_arr);
++}
++
++/**
++ * edma_setup() - Set up EDMA.
++ * @ppe_dev: PPE device
++ *
++ * Configure the Ethernet global context, clocks, hardware and interrupts.
++ *
++ * Return: 0 on success, or a negative error code on failure.
++ */
++int edma_setup(struct ppe_device *ppe_dev)
++{
++      struct device *dev = ppe_dev->dev;
++      int ret;
++
++      edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
++      if (!edma_ctx)
++              return -ENOMEM;
++
++      edma_ctx->hw_info = &ipq9574_hw_info;
++      edma_ctx->ppe_dev = ppe_dev;
++
++      /* Configure the EDMA common clocks. */
++      ret = edma_clock_init();
++      if (ret) {
++              dev_err(dev, "Error in configuring the EDMA clocks\n");
++              return ret;
++      }
++
++      dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
++
++      ret = edma_hw_configure();
++      if (ret) {
++              dev_err(dev, "Error in edma configuration\n");
++              return ret;
++      }
++
++      ret = edma_irq_init();
++      if (ret) {
++              dev_err(dev, "Error in irq initialization\n");
++              return ret;
++      }
++
++      dev_info(dev, "EDMA configuration successful\n");
++
++      return 0;
++}
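
The priority map validated by edma_configure_ucast_prio_map_tbl()
collapses the sixteen internal PPE priorities onto the eight per-core
classes, clamping everything above 7. The mapping and its bounds
check can be exercised in isolation:

    #include <stdio.h>

    #define PPE_QUEUE_INTER_PRI_NUM 16
    #define EDMA_PRI_MAX_PER_CORE   8

    /* Same shape as the driver's edma_pri_map[]. */
    static const unsigned char pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
            0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7
    };

    int main(void)
    {
            for (int pri = 0; pri < PPE_QUEUE_INTER_PRI_NUM; pri++) {
                    if (pri_map[pri] > EDMA_PRI_MAX_PER_CORE - 1) {
                            printf("invalid class for priority %d\n", pri);
                            return 1;
                    }
                    printf("int_pri %2d -> class %d\n", pri, pri_map[pri]);
            }
            return 0;
    }
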
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+new file mode 100644
+index 000000000000..6bad51c976dd
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -0,0 +1,99 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_MAIN__
++#define __EDMA_MAIN__
++
++#include "ppe_api.h"
++
++/* One clock cycle = 1/(EDMA clock frequency in MHz) microseconds.
++ *
++ * One timer unit is 128 clock cycles.
++ *
++ * Therefore, the microsecond to timer unit conversion is:
++ * Timer unit = time in microseconds / (one clock cycle in microseconds * cycles per timer unit)
++ *            = ('x' microseconds * EDMA clock frequency in MHz ('y')) / 128.
++ */
++#define EDMA_CYCLE_PER_TIMER_UNIT     128
++#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y)     ((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
++#define MHZ                   1000000UL
++
++/* EDMA profile ID. */
++#define EDMA_CPU_PORT_PROFILE_ID  0
++
++/* Number of PPE queue priorities supported per ARM core. */
++#define EDMA_PRI_MAX_PER_CORE 8
++
++/**
++ * struct edma_ring_info - EDMA ring data structure.
++ * @max_rings: Maximum number of rings
++ * @ring_start: Ring start ID
++ * @num_rings: Number of rings
++ */
++struct edma_ring_info {
++      u32 max_rings;
++      u32 ring_start;
++      u32 num_rings;
++};
++
++/**
++ * struct edma_hw_info - EDMA hardware data structure.
++ * @rxfill: Rx Fill ring information
++ * @rx: Rx Desc ring information
++ * @tx: Tx Desc ring information
++ * @txcmpl: Tx complete ring information
++ * @max_ports: Maximum number of ports
++ * @napi_budget_rx: Rx NAPI budget
++ * @napi_budget_tx: Tx NAPI budget
++ */
++struct edma_hw_info {
++      struct edma_ring_info *rxfill;
++      struct edma_ring_info *rx;
++      struct edma_ring_info *tx;
++      struct edma_ring_info *txcmpl;
++      u32 max_ports;
++      u32 napi_budget_rx;
++      u32 napi_budget_tx;
++};
++
++/**
++ * struct edma_intr_info - EDMA interrupt data structure.
++ * @intr_mask_rx: RX interrupt mask
++ * @intr_rx: Rx interrupts
++ * @intr_mask_txcmpl: Tx completion interrupt mask
++ * @intr_txcmpl: Tx completion interrupts
++ * @intr_mask_misc: Miscellaneous interrupt mask
++ * @intr_misc: Miscellaneous interrupts
++ */
++struct edma_intr_info {
++      u32 intr_mask_rx;
++      u32 *intr_rx;
++      u32 intr_mask_txcmpl;
++      u32 *intr_txcmpl;
++      u32 intr_mask_misc;
++      u32 intr_misc;
++};
++
++/**
++ * struct edma_context - EDMA context.
++ * @netdev_arr: Net device for each EDMA port
++ * @ppe_dev: PPE device
++ * @hw_info: EDMA Hardware info
++ * @intr_info: EDMA Interrupt info
++ */
++struct edma_context {
++      struct net_device **netdev_arr;
++      struct ppe_device *ppe_dev;
++      struct edma_hw_info *hw_info;
++      struct edma_intr_info intr_info;
++};
++
++/* Global EDMA context. */
++extern struct edma_context *edma_ctx;
++
++void edma_destroy(struct ppe_device *ppe_dev);
++int edma_setup(struct ppe_device *ppe_dev);
++
++#endif
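
A worked example for the timer-unit conversion documented above,
assuming an illustrative 353 MHz EDMA clock (the actual rate comes
from ppe_dev->clk_rate at runtime):

    #include <stdio.h>

    #define EDMA_CYCLE_PER_TIMER_UNIT               128
    #define EDMA_MICROSEC_TO_TIMER_UNIT(x, y)       ((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
    #define MHZ                                     1000000UL

    int main(void)
    {
            unsigned long clk_hz = 353000000UL;     /* assumed EDMA clock */
            unsigned long usecs = 64;               /* desired coalescing delay */
            unsigned long units;

            units = EDMA_MICROSEC_TO_TIMER_UNIT(usecs, clk_hz / MHZ);

            /* 64 us * 353 cycles/us = 22592 cycles -> 176 timer units. */
            printf("%lu us at %lu MHz -> %lu timer units\n",
                   usecs, clk_hz / MHZ, units);
            return 0;
    }
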
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+index bcf21c838e05..93f92be9dc41 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
+@@ -14,6 +14,7 @@
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
++#include "edma.h"
+ #include "ppe.h"
+ #include "ppe_config.h"
+ #include "ppe_debugfs.h"
+@@ -208,10 +209,16 @@ static int qcom_ppe_probe(struct platform_device *pdev)
+       if (ret)
+               return dev_err_probe(dev, ret, "PPE HW config failed\n");
+-      ret = ppe_port_mac_init(ppe_dev);
++      ret = edma_setup(ppe_dev);
+       if (ret)
++              return dev_err_probe(dev, ret, "EDMA setup failed\n");
++
++      ret = ppe_port_mac_init(ppe_dev);
++      if (ret) {
++              edma_destroy(ppe_dev);
+               return dev_err_probe(dev, ret,
+                                    "PPE Port MAC initialization failed\n");
++      }
+       ppe_debugfs_setup(ppe_dev);
+       platform_set_drvdata(pdev, ppe_dev);
+@@ -226,6 +233,7 @@ static void qcom_ppe_remove(struct platform_device *pdev)
+       ppe_dev = platform_get_drvdata(pdev);
+       ppe_debugfs_teardown(ppe_dev);
+       ppe_port_mac_deinit(ppe_dev);
++      edma_destroy(ppe_dev);
+       platform_set_drvdata(pdev, NULL);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+index 6e6e469247c8..f2a60776a40a 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
+@@ -788,4 +788,257 @@
+ #define XGMAC_RXDISCARD_GB_ADDR                       0x9AC
+ #define XGMAC_RXDISCARDBYTE_GB_ADDR           0x9B4
++#define EDMA_BASE_OFFSET                      0xb00000
++
++/* EDMA register offsets */
++#define EDMA_REG_MAS_CTRL_ADDR                        0x0
++#define EDMA_REG_PORT_CTRL_ADDR                       0x4
++#define EDMA_REG_VLAN_CTRL_ADDR                       0x8
++#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR               0x14
++#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR               0x18
++#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR               0x1c
++#define EDMA_REG_TXQ_CTRL_ADDR                        0x20
++#define EDMA_REG_TXQ_CTRL_2_ADDR              0x24
++#define EDMA_REG_TXQ_FC_0_ADDR                        0x28
++#define EDMA_REG_TXQ_FC_1_ADDR                        0x30
++#define EDMA_REG_TXQ_FC_2_ADDR                        0x34
++#define EDMA_REG_TXQ_FC_3_ADDR                        0x38
++#define EDMA_REG_RXQ_CTRL_ADDR                        0x3c
++#define EDMA_REG_MISC_ERR_QID_ADDR            0x40
++#define EDMA_REG_RXQ_FC_THRE_ADDR             0x44
++#define EDMA_REG_DMAR_CTRL_ADDR                       0x48
++#define EDMA_REG_AXIR_CTRL_ADDR                       0x4c
++#define EDMA_REG_AXIW_CTRL_ADDR                       0x50
++#define EDMA_REG_MIN_MSS_ADDR                 0x54
++#define EDMA_REG_LOOPBACK_CTRL_ADDR           0x58
++#define EDMA_REG_MISC_INT_STAT_ADDR           0x5c
++#define EDMA_REG_MISC_INT_MASK_ADDR           0x60
++#define EDMA_REG_DBG_CTRL_ADDR                        0x64
++#define EDMA_REG_DBG_DATA_ADDR                        0x68
++#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR               0x6c
++#define EDMA_REG_REQ0_FIFO_THRESH_ADDR                0x80
++#define EDMA_REG_WB_OS_THRESH_ADDR            0x84
++#define EDMA_REG_MISC_ERR_QID_REG2_ADDR               0x88
++#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR               0x8c
++#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR               0x90
++#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR               0x94
++#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR               0x98
++#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR               0x9c
++#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR               0xa0
++
++/* Tx descriptor ring configuration register addresses */
++#define EDMA_REG_TXDESC_BA(n)         (0x1000 + (0x1000 * (n)))
++#define EDMA_REG_TXDESC_PROD_IDX(n)   (0x1004 + (0x1000 * (n)))
++#define EDMA_REG_TXDESC_CONS_IDX(n)   (0x1008 + (0x1000 * (n)))
++#define EDMA_REG_TXDESC_RING_SIZE(n)  (0x100c + (0x1000 * (n)))
++#define EDMA_REG_TXDESC_CTRL(n)               (0x1010 + (0x1000 * (n)))
++#define EDMA_REG_TXDESC_BA2(n)                (0x1014 + (0x1000 * (n)))
++
++/* RxFill ring configuration register addresses */
++#define EDMA_REG_RXFILL_BA(n)         (0x29000 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_PROD_IDX(n)   (0x29004 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_CONS_IDX(n)   (0x29008 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_RING_SIZE(n)  (0x2900c + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_BUFFER1_SIZE(n)       (0x29010 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_FC_THRE(n)    (0x29014 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_UGT_THRE(n)   (0x29018 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_RING_EN(n)    (0x2901c + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_DISABLE(n)    (0x29020 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_DISABLE_DONE(n)       (0x29024 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_INT_STAT(n)   (0x31000 + (0x1000 * (n)))
++#define EDMA_REG_RXFILL_INT_MASK(n)   (0x31004 + (0x1000 * (n)))
++
++/* Rx descriptor ring configuration register addresses */
++#define EDMA_REG_RXDESC_BA(n)         (0x39000 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_PROD_IDX(n)   (0x39004 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_CONS_IDX(n)   (0x39008 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_RING_SIZE(n)  (0x3900c + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_FC_THRE(n)    (0x39010 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_UGT_THRE(n)   (0x39014 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_CTRL(n)               (0x39018 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_BPC(n)                (0x3901c + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_DISABLE(n)    (0x39020 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_DISABLE_DONE(n)       (0x39024 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_PREHEADER_BA(n)       (0x39028 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_INT_STAT(n)   (0x59000 + (0x1000 * (n)))
++#define EDMA_REG_RXDESC_INT_MASK(n)   (0x59004 + (0x1000 * (n)))
++
++#define EDMA_REG_RX_MOD_TIMER(n)      (0x59008 + (0x1000 * (n)))
++#define EDMA_REG_RX_INT_CTRL(n)               (0x5900c + (0x1000 * (n)))
++
++/* Tx completion ring configuration register addresses */
++#define EDMA_REG_TXCMPL_BA(n)         (0x79000 + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_PROD_IDX(n)   (0x79004 + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_CONS_IDX(n)   (0x79008 + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_RING_SIZE(n)  (0x7900c + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_UGT_THRE(n)   (0x79010 + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_CTRL(n)               (0x79014 + (0x1000 * (n)))
++#define EDMA_REG_TXCMPL_BPC(n)                (0x79018 + (0x1000 * (n)))
++
++#define EDMA_REG_TX_INT_STAT(n)               (0x99000 + (0x1000 * (n)))
++#define EDMA_REG_TX_INT_MASK(n)               (0x99004 + (0x1000 * (n)))
++#define EDMA_REG_TX_MOD_TIMER(n)      (0x99008 + (0x1000 * (n)))
++#define EDMA_REG_TX_INT_CTRL(n)               (0x9900c + (0x1000 * (n)))
++
++/* EDMA_QID2RID_TABLE_MEM register field masks */
++#define EDMA_RX_RING_ID_QUEUE0_MASK   GENMASK(7, 0)
++#define EDMA_RX_RING_ID_QUEUE1_MASK   GENMASK(15, 8)
++#define EDMA_RX_RING_ID_QUEUE2_MASK   GENMASK(23, 16)
++#define EDMA_RX_RING_ID_QUEUE3_MASK   GENMASK(31, 24)
++
++/* EDMA_REG_PORT_CTRL register bit definitions */
++#define EDMA_PORT_PAD_EN                      0x1
++#define EDMA_PORT_EDMA_EN                     0x2
++
++/* EDMA_REG_DMAR_CTRL register field masks */
++#define EDMA_DMAR_REQ_PRI_MASK                        GENMASK(2, 0)
++#define EDMA_DMAR_BURST_LEN_MASK              BIT(3)
++#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK GENMASK(8, 4)
++#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK GENMASK(11, 9)
++#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK GENMASK(14, 12)
++
++#define EDMA_BURST_LEN_ENABLE                 0
++
++/* Tx timeout threshold */
++#define EDMA_TX_TIMEOUT_THRESH_VAL            0xFFFF
++
++/* Rx descriptor ring base address mask */
++#define EDMA_RXDESC_BA_MASK                   0xffffffff
++
++/* Rx Descriptor ring pre-header base address mask */
++#define EDMA_RXDESC_PREHEADER_BA_MASK         0xffffffff
++
++/* Tx descriptor prod ring index mask */
++#define EDMA_TXDESC_PROD_IDX_MASK             0xffff
++
++/* Tx descriptor consumer ring index mask */
++#define EDMA_TXDESC_CONS_IDX_MASK             0xffff
++
++/* Tx descriptor ring size mask */
++#define EDMA_TXDESC_RING_SIZE_MASK            0xffff
++
++/* Tx descriptor ring enable */
++#define EDMA_TXDESC_TX_ENABLE                 0x1
++
++#define EDMA_TXDESC_CTRL_TXEN_MASK            BIT(0)
++#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK               GENMASK(3, 1)
++
++/* Tx completion ring prod index mask */
++#define EDMA_TXCMPL_PROD_IDX_MASK             0xffff
++
++/* Tx completion ring urgent threshold mask */
++#define EDMA_TXCMPL_LOW_THRE_MASK             0xffff
++#define EDMA_TXCMPL_LOW_THRE_SHIFT            0
++
++/* EDMA_REG_TX_MOD_TIMER mask */
++#define EDMA_TX_MOD_TIMER_INIT_MASK           0xffff
++#define EDMA_TX_MOD_TIMER_INIT_SHIFT          0
++
++/* Rx fill ring prod index mask */
++#define EDMA_RXFILL_PROD_IDX_MASK             0xffff
++
++/* Rx fill ring consumer index mask */
++#define EDMA_RXFILL_CONS_IDX_MASK             0xffff
++
++/* Rx fill ring size mask */
++#define EDMA_RXFILL_RING_SIZE_MASK            0xffff
++
++/* Rx fill ring flow control threshold masks */
++#define EDMA_RXFILL_FC_XON_THRE_MASK          0x7ff
++#define EDMA_RXFILL_FC_XON_THRE_SHIFT         12
++#define EDMA_RXFILL_FC_XOFF_THRE_MASK         0x7ff
++#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT                0
++
++/* Rx fill ring enable bit */
++#define EDMA_RXFILL_RING_EN                   0x1
++
++/* Rx desc ring prod index mask */
++#define EDMA_RXDESC_PROD_IDX_MASK             0xffff
++
++/* Rx descriptor ring cons index mask */
++#define EDMA_RXDESC_CONS_IDX_MASK             0xffff
++
++/* Rx descriptor ring size masks */
++#define EDMA_RXDESC_RING_SIZE_MASK            0xffff
++#define EDMA_RXDESC_PL_OFFSET_MASK            0x1ff
++#define EDMA_RXDESC_PL_OFFSET_SHIFT           16
++#define EDMA_RXDESC_PL_DEFAULT_VALUE          0
++
++/* Rx descriptor ring flow control threshold masks */
++#define EDMA_RXDESC_FC_XON_THRE_MASK          0x7ff
++#define EDMA_RXDESC_FC_XON_THRE_SHIFT         12
++#define EDMA_RXDESC_FC_XOFF_THRE_MASK         0x7ff
++#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT                0
++
++/* Rx descriptor ring urgent threshold mask */
++#define EDMA_RXDESC_LOW_THRE_MASK             0xffff
++#define EDMA_RXDESC_LOW_THRE_SHIFT            0
++
++/* Rx descriptor ring enable bit */
++#define EDMA_RXDESC_RX_EN                     0x1
++
++/* Tx interrupt status bit */
++#define EDMA_TX_INT_MASK_PKT_INT              0x1
++
++/* Rx interrupt mask */
++#define EDMA_RXDESC_INT_MASK_PKT_INT          0x1
++
++#define EDMA_MASK_INT_DISABLE                 0x0
++#define EDMA_MASK_INT_CLEAR                   0x0
++
++/* EDMA_REG_RX_MOD_TIMER register field masks */
++#define EDMA_RX_MOD_TIMER_INIT_MASK           0xffff
++#define EDMA_RX_MOD_TIMER_INIT_SHIFT          0
++
++/* EDMA Ring mask */
++#define EDMA_RING_DMA_MASK                    0xffffffff
++
++/* RXDESC threshold interrupt. */
++#define EDMA_RXDESC_UGT_INT_STAT              0x2
++
++/* RXDESC timer interrupt */
++#define EDMA_RXDESC_PKT_INT_STAT              0x1
++
++/* RXDESC Interrupt status mask */
++#define EDMA_RXDESC_RING_INT_STATUS_MASK \
++      (EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
++
++/* TXCMPL threshold interrupt. */
++#define EDMA_TXCMPL_UGT_INT_STAT              0x2
++
++/* TXCMPL timer interrupt */
++#define EDMA_TXCMPL_PKT_INT_STAT              0x1
++
++/* TXCMPL Interrupt status mask */
++#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
++      (EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
++
++#define EDMA_TXCMPL_RETMODE_OPAQUE            0x0
++
++#define EDMA_RXDESC_LOW_THRE                  0
++#define EDMA_RX_MOD_TIMER_INIT                        1000
++#define EDMA_RX_NE_INT_EN                     0x2
++
++#define EDMA_TX_MOD_TIMER                     150
++
++#define EDMA_TX_INITIAL_PROD_IDX              0x0
++#define EDMA_TX_NE_INT_EN                     0x2
++
++/* EDMA misc error mask */
++#define EDMA_MISC_AXI_RD_ERR_MASK             BIT(0)
++#define EDMA_MISC_AXI_WR_ERR_MASK             BIT(1)
++#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK      BIT(2)
++#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK                BIT(3)
++#define EDMA_MISC_TX_SRAM_FULL_MASK           BIT(4)
++#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK               BIT(5)
++
++#define EDMA_MISC_DATA_LEN_ERR_MASK           BIT(6)
++#define EDMA_MISC_TX_TIMEOUT_MASK             BIT(7)
++
++/* EDMA txdesc2cmpl map */
++#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK              0x1F
++
++/* EDMA rxdesc2fill map */
++#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK      0x7
++
+ #endif
+-- 
+2.45.2
+
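Note on the register block above: the EDMA macros are byte offsets from
EDMA_BASE_OFFSET inside the PPE register space, so every access goes
through the PPE regmap. A minimal sketch of programming one Rx descriptor
ring's size register, assuming a valid ppe_dev handle and the definitions
from ppe_regs.h; example_set_rxdesc_ring_size() is an illustrative name,
not a driver function (patch 103-40 below performs the equivalent
sequence in edma_cfg_rx_desc_ring_configure()):

    #include <linux/regmap.h>

    static void example_set_rxdesc_ring_size(struct ppe_device *ppe_dev,
                                             u32 ring_id, u32 count)
    {
            u32 reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(ring_id);
            u32 data = count & EDMA_RXDESC_RING_SIZE_MASK;

            /* The payload offset shares this register, bits 16-24. */
            data |= (EDMA_RXDESC_PL_DEFAULT_VALUE & EDMA_RXDESC_PL_OFFSET_MASK)
                    << EDMA_RXDESC_PL_OFFSET_SHIFT;
            regmap_write(ppe_dev->regmap, reg, data);
    }
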
diff --git a/target/linux/qualcommbe/patches-6.6/103-39-net-ethernet-qualcomm-Add-netdevice-support-for-QCOM.patch b/target/linux/qualcommbe/patches-6.6/103-39-net-ethernet-qualcomm-Add-netdevice-support-for-QCOM.patch
new file mode 100644 (file)
index 0000000..4d89b87
--- /dev/null
@@ -0,0 +1,414 @@
+From cbcaf81cd148b77ee0570a482b536f269a9f6657 Mon Sep 17 00:00:00 2001
+From: Suruchi Agarwal <quic_suruchia@quicinc.com>
+Date: Thu, 21 Mar 2024 16:14:46 -0700
+Subject: [PATCH 39/50] net: ethernet: qualcomm: Add netdevice support for QCOM
+ IPQ9574 chipset.
+
+Add EDMA ports and netdevice operations for QCOM IPQ9574 chipset.
+
+Change-Id: I08b2eff52b4ef0d6d428c1c416f5580ef010973f
+Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |   3 +
+ drivers/net/ethernet/qualcomm/ppe/edma_port.c | 270 ++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma_port.h |  31 ++
+ drivers/net/ethernet/qualcomm/ppe/ppe_port.c  |  19 ++
+ 5 files changed, 324 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_port.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 7fea135ceb36..e26677644aa9 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+ #EDMA
+-qcom-ppe-objs += edma.o
+\ No newline at end of file
++qcom-ppe-objs += edma.o edma_port.o
+\ No newline at end of file
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index 6bad51c976dd..5261002f883d 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -26,6 +26,9 @@
+ /* Number of PPE queue priorities supported per ARM core. */
+ #define EDMA_PRI_MAX_PER_CORE 8
++/* Interface ID start. */
++#define EDMA_START_IFNUM   1
++
+ /**
+  * struct edma_ring_info - EDMA ring data structure.
+  * @max_rings: Maximum number of rings
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.c b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+new file mode 100644
+index 000000000000..6292b83d746d
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+@@ -0,0 +1,270 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* EDMA port initialization, configuration and netdevice ops handling */
++
++#include <linux/etherdevice.h>
++#include <linux/net.h>
++#include <linux/netdevice.h>
++#include <linux/of_net.h>
++#include <linux/phylink.h>
++#include <linux/printk.h>
++
++#include "edma.h"
++#include "edma_port.h"
++#include "ppe_regs.h"
++
++/* Number of netdev queues. */
++#define EDMA_NETDEV_QUEUE_NUM 4
++
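++/* One Tx queue is exposed per ARM core (EDMA_NETDEV_QUEUE_NUM matches
++ * the four cores of the IPQ9574), so edma_port_select_queue() below
++ * can return the current CPU id directly as the queue index.
++ */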
++static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
++                                               __maybe_unused struct sk_buff *skb,
++                                               __maybe_unused struct net_device *sb_dev)
++{
++      int cpu = get_cpu();
++
++      put_cpu();
++
++      return cpu;
++}
++
++static int edma_port_open(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct ppe_port *ppe_port;
++
++      if (!port_priv)
++              return -EINVAL;
++
++      /* Inform the Linux networking stack of the hardware's checksum
++       * offload and other feature capabilities. Each port is
++       * responsible for maintaining the feature set it supports.
++       */
++      netdev->features |= EDMA_NETDEV_FEATURES;
++      netdev->hw_features |= EDMA_NETDEV_FEATURES;
++      netdev->vlan_features |= EDMA_NETDEV_FEATURES;
++      netdev->wanted_features |= EDMA_NETDEV_FEATURES;
++
++      ppe_port = port_priv->ppe_port;
++
++      if (ppe_port->phylink)
++              phylink_start(ppe_port->phylink);
++
++      netif_start_queue(netdev);
++
++      return 0;
++}
++
++static int edma_port_close(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct ppe_port *ppe_port;
++
++      if (!port_priv)
++              return -EINVAL;
++
++      netif_stop_queue(netdev);
++
++      ppe_port = port_priv->ppe_port;
++
++      /* Phylink close. */
++      if (ppe_port->phylink)
++              phylink_stop(ppe_port->phylink);
++
++      return 0;
++}
++
++static int edma_port_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct ppe_port *ppe_port;
++      int ret = -EINVAL;
++
++      if (!port_priv)
++              return -EINVAL;
++
++      ppe_port = port_priv->ppe_port;
++      if (ppe_port->phylink)
++              return phylink_mii_ioctl(ppe_port->phylink, ifr, cmd);
++
++      return ret;
++}
++
++static int edma_port_change_mtu(struct net_device *netdev, int mtu)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      netdev->mtu = mtu;
++
++      return ppe_port_set_maxframe(port_priv->ppe_port, mtu);
++}
++
++static netdev_features_t edma_port_feature_check(__maybe_unused struct sk_buff *skb,
++                                               __maybe_unused struct net_device *netdev,
++                                               netdev_features_t features)
++{
++      return features;
++}
++
++static void edma_port_get_stats64(struct net_device *netdev,
++                                struct rtnl_link_stats64 *stats)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return;
++
++      ppe_port_get_stats64(port_priv->ppe_port, stats);
++}
++
++static int edma_port_set_mac_address(struct net_device *netdev, void *macaddr)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct sockaddr *addr = (struct sockaddr *)macaddr;
++      int ret;
++
++      if (!port_priv)
++              return -EINVAL;
++
++      netdev_dbg(netdev, "AddrFamily: %d, %pM\n",
++                 addr->sa_family, addr->sa_data);
++
++      ret = eth_prepare_mac_addr_change(netdev, addr);
++      if (ret)
++              return ret;
++
++      if (ppe_port_set_mac_address(port_priv->ppe_port, addr->sa_data)) {
++              netdev_err(netdev, "set mac address failed for dev: %s\n", netdev->name);
++              return -EINVAL;
++      }
++
++      eth_commit_mac_addr_change(netdev, addr);
++
++      return 0;
++}
++
++static const struct net_device_ops edma_port_netdev_ops = {
++      .ndo_open = edma_port_open,
++      .ndo_stop = edma_port_close,
++      .ndo_get_stats64 = edma_port_get_stats64,
++      .ndo_set_mac_address = edma_port_set_mac_address,
++      .ndo_validate_addr = eth_validate_addr,
++      .ndo_change_mtu = edma_port_change_mtu,
++      .ndo_eth_ioctl = edma_port_ioctl,
++      .ndo_features_check = edma_port_feature_check,
++      .ndo_select_queue = edma_port_select_queue,
++};
++
++/**
++ * edma_port_destroy - EDMA port destroy.
++ * @port: PPE port
++ *
++ * Unregister and free the netdevice.
++ */
++void edma_port_destroy(struct ppe_port *port)
++{
++      int port_id = port->port_id;
++      struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
++
++      unregister_netdev(netdev);
++      ppe_port_phylink_destroy(port);
++      free_netdev(netdev);
++      edma_ctx->netdev_arr[port_id - 1] = NULL;
++}
++
++/**
++ * edma_port_setup - EDMA port Setup.
++ * @port: PPE port
++ *
++ * Initialize and register the netdevice.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int edma_port_setup(struct ppe_port *port)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device_node *np = port->np;
++      struct edma_port_priv *port_priv;
++      int port_id = port->port_id;
++      struct net_device *netdev;
++      u8 mac_addr[ETH_ALEN];
++      int ret = 0;
++      u8 *maddr;
++
++      netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
++                                  EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
++      if (!netdev) {
++              pr_err("alloc_etherdev_mqs() failed\n");
++              return -ENOMEM;
++      }
++
++      SET_NETDEV_DEV(netdev, ppe_dev->dev);
++      netdev->dev.of_node = np;
++
++      /* max_mtu is set to 1500 in ether_setup(). */
++      netdev->max_mtu = ETH_MAX_MTU;
++
++      port_priv = netdev_priv(netdev);
++      memset((void *)port_priv, 0, sizeof(struct edma_port_priv));
++
++      port_priv->ppe_port = port;
++      port_priv->netdev = netdev;
++      netdev->watchdog_timeo = 5 * HZ;
++      netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
++      netdev->netdev_ops = &edma_port_netdev_ops;
++      netdev->gso_max_segs = GSO_MAX_SEGS;
++
++      maddr = mac_addr;
++      if (of_get_mac_address(np, maddr))
++              maddr = NULL;
++
++      if (maddr && is_valid_ether_addr(maddr)) {
++              eth_hw_addr_set(netdev, maddr);
++      } else {
++              eth_hw_addr_random(netdev);
++              netdev_info(netdev, "GMAC%d Using random MAC address - %pM\n",
++                          port_id, netdev->dev_addr);
++      }
++
++      netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
++                 netdev->name, port_id);
++
++      /* We expect 'port_id' to correspond to the port numbers on the
++       * SoC. These begin from '1', hence we subtract one when using
++       * it as an array index.
++       */
++      edma_ctx->netdev_arr[port_id - 1] = netdev;
++
++      /* Setup phylink. */
++      ret = ppe_port_phylink_setup(port, netdev);
++      if (ret) {
++              netdev_dbg(netdev, "EDMA port phylink setup failed for netdevice %s\n",
++                         netdev->name);
++              goto port_phylink_setup_fail;
++      }
++
++      /* Register the network interface. */
++      ret = register_netdev(netdev);
++      if (ret) {
++              netdev_dbg(netdev, "Error registering netdevice %s\n",
++                         netdev->name);
++              goto register_netdev_fail;
++      }
++
++      netdev_dbg(netdev, "Setup EDMA port GMAC%d done\n", port_id);
++      return ret;
++
++register_netdev_fail:
++      ppe_port_phylink_destroy(port);
++port_phylink_setup_fail:
++      free_netdev(netdev);
++      edma_ctx->netdev_arr[port_id - 1] = NULL;
++
++      return ret;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.h b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+new file mode 100644
+index 000000000000..0f2deb39556e
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_PORTS__
++#define __EDMA_PORTS__
++
++#include "ppe_port.h"
++
++#define EDMA_NETDEV_FEATURES          (NETIF_F_FRAGLIST \
++                                      | NETIF_F_SG \
++                                      | NETIF_F_RXCSUM \
++                                      | NETIF_F_HW_CSUM \
++                                      | NETIF_F_TSO \
++                                      | NETIF_F_TSO6)
++
++/**
++ * struct edma_port_priv - EDMA port priv structure.
++ * @ppe_port: Pointer to PPE port
++ * @netdev: Corresponding netdevice
++ * @flags: Feature flags
++ */
++struct edma_port_priv {
++      struct ppe_port *ppe_port;
++      struct net_device *netdev;
++      unsigned long flags;
++};
++
++void edma_port_destroy(struct ppe_port *port);
++int edma_port_setup(struct ppe_port *port);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+index 52820e2eedf8..05c52ba07aef 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_port.c
+@@ -13,6 +13,7 @@
+ #include <linux/regmap.h>
+ #include <linux/rtnetlink.h>
++#include "edma_port.h"
+ #include "ppe.h"
+ #include "ppe_port.h"
+ #include "ppe_regs.h"
+@@ -1277,12 +1278,26 @@ int ppe_port_mac_init(struct ppe_device *ppe_dev)
+                       goto err_port_node;
+               }
++              ret = edma_port_setup(&ppe_ports->port[i]);
++              if (ret) {
++                      dev_err(ppe_dev->dev, "QCOM EDMA port setup failed\n");
++                      goto err_port_setup;
++              }
++
+               i++;
+       }
+       of_node_put(ports_node);
+       return 0;
++err_port_setup:
++      /* Destroy the EDMA ports created so far, preserving 'i' so the
++       * fall-through to err_port_clk still releases the port clocks.
++       */
++      for (j = 0; j < i; j++)
++              edma_port_destroy(&ppe_ports->port[j]);
++
+ err_port_clk:
+       for (j = 0; j < i; j++)
+               ppe_port_clock_deinit(&ppe_ports->port[j]);
+@@ -1307,6 +1322,10 @@ void ppe_port_mac_deinit(struct ppe_device *ppe_dev)
+       for (i = 0; i < ppe_dev->ports->num; i++) {
+               ppe_port = &ppe_dev->ports->port[i];
++
++              /* Destroy all phylinks and edma ports */
++              edma_port_destroy(ppe_port);
++
+               ppe_port_clock_deinit(ppe_port);
+       }
+ }
+-- 
+2.45.2
+
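The Rx path in the next patch distributes PPE queues across cores with a
fixed linear rule; the helper below is an illustrative sketch (not a
driver function) of the mapping encoded by its edma_rx_ring_queue_map
table, assuming the EDMA_PRI_MAX_PER_CORE definition from edma.h:

    static inline u32 example_rx_queue_id(u32 core, u32 priority)
    {
            /* 8 queue priorities per core: core 0 gets queues 0..7,
             * core 1 gets 8..15, up to queue 31 on the quad-core
             * IPQ9574.
             */
            return core * EDMA_PRI_MAX_PER_CORE + priority;
    }
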
diff --git a/target/linux/qualcommbe/patches-6.6/103-40-net-ethernet-qualcomm-Add-Rx-Ethernet-DMA-support.patch b/target/linux/qualcommbe/patches-6.6/103-40-net-ethernet-qualcomm-Add-Rx-Ethernet-DMA-support.patch
new file mode 100644 (file)
index 0000000..9392ed8
--- /dev/null
@@ -0,0 +1,2420 @@
+From 7c7baa32e0d110d5446113f5513fca84731bddd3 Mon Sep 17 00:00:00 2001
+From: Suruchi Agarwal <quic_suruchia@quicinc.com>
+Date: Thu, 21 Mar 2024 16:21:19 -0700
+Subject: [PATCH 40/50] net: ethernet: qualcomm: Add Rx Ethernet DMA support
+
+Add Rx queues, rings, descriptors configurations and
+DMA support for the EDMA.
+
+Change-Id: I612bcd661e74d5bf3ecb33de10fd5298d18ff7e9
+Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/edma.c      | 171 +++-
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |  18 +-
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c   | 964 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h   |  48 +
+ drivers/net/ethernet/qualcomm/ppe/edma_port.c |  39 +-
+ drivers/net/ethernet/qualcomm/ppe/edma_port.h |  31 +
+ drivers/net/ethernet/qualcomm/ppe/edma_rx.c   | 622 +++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma_rx.h   | 287 ++++++
+ 9 files changed, 2177 insertions(+), 5 deletions(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index e26677644aa9..3fd607ce42de 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+ #EDMA
+-qcom-ppe-objs += edma.o edma_port.o
+\ No newline at end of file
++qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
+\ No newline at end of file
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+index d7bf1f39e9e1..134f6b95c294 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -18,12 +18,23 @@
+ #include <linux/reset.h>
+ #include "edma.h"
++#include "edma_cfg_rx.h"
+ #include "ppe_regs.h"
+ #define EDMA_IRQ_NAME_SIZE            32
+ /* Global EDMA context. */
+ struct edma_context *edma_ctx;
++static char **edma_rxdesc_irq_name;
++
++/* Module params. */
++static int page_mode;
++module_param(page_mode, int, 0);
++MODULE_PARM_DESC(page_mode, "Enable page mode (default:0)");
++
++static int rx_buff_size;
++module_param(rx_buff_size, int, 0640);
++MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
+ /* Priority to multi-queue mapping. */
+ static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
+@@ -178,6 +189,59 @@ static int edma_configure_ucast_prio_map_tbl(void)
+       return ret;
+ }
++static int edma_irq_register(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      int ret;
++      u32 i;
++
++      /* Request IRQ for RXDESC rings. */
++      edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
++                                     GFP_KERNEL);
++      if (!edma_rxdesc_irq_name)
++              return -ENOMEM;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              edma_rxdesc_irq_name[i] = kzalloc(EDMA_IRQ_NAME_SIZE,
++                                                GFP_KERNEL);
++              if (!edma_rxdesc_irq_name[i]) {
++                      ret = -ENOMEM;
++                      goto rxdesc_irq_name_alloc_fail;
++              }
++
++              snprintf(edma_rxdesc_irq_name[i], EDMA_IRQ_NAME_SIZE,
++                       "edma_rxdesc_%d", rx->ring_start + i);
++
++              irq_set_status_flags(edma_ctx->intr_info.intr_rx[i], IRQ_DISABLE_UNLAZY);
++
++              ret = request_irq(edma_ctx->intr_info.intr_rx[i],
++                                edma_rx_handle_irq, IRQF_SHARED,
++                                edma_rxdesc_irq_name[i],
++                                (void *)&edma_ctx->rx_rings[i]);
++              if (ret) {
++                      pr_err("RXDESC ring IRQ:%d request failed\n",
++                             edma_ctx->intr_info.intr_rx[i]);
++                      goto rx_desc_ring_intr_req_fail;
++              }
++
++              pr_debug("RXDESC ring: %d IRQ:%d request success: %s\n",
++                       rx->ring_start + i,
++                       edma_ctx->intr_info.intr_rx[i],
++                       edma_rxdesc_irq_name[i]);
++      }
++
++      return 0;
++
++rx_desc_ring_intr_req_fail:
++      /* Release the IRQs already requested before the failure. */
++      while (i--)
++              free_irq(edma_ctx->intr_info.intr_rx[i],
++                       (void *)&edma_ctx->rx_rings[i]);
++rxdesc_irq_name_alloc_fail:
++      for (i = 0; i < rx->num_rings; i++)
++              kfree(edma_rxdesc_irq_name[i]);
++      kfree(edma_rxdesc_irq_name);
++
++      return ret;
++}
++
+ static int edma_irq_init(void)
+ {
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
+@@ -260,6 +324,16 @@ static int edma_irq_init(void)
+       return 0;
+ }
++static int edma_alloc_rings(void)
++{
++      if (edma_cfg_rx_rings_alloc()) {
++              pr_err("Error in allocating Rx rings\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
+ static int edma_hw_reset(void)
+ {
+       struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+@@ -343,6 +417,40 @@ static int edma_hw_configure(void)
+       if (!edma_ctx->netdev_arr)
+               return -ENOMEM;
++      edma_ctx->dummy_dev = alloc_netdev_dummy(0);
++      if (!edma_ctx->dummy_dev) {
++              ret = -ENOMEM;
++              pr_err("Failed to allocate dummy device. ret: %d\n", ret);
++              goto dummy_dev_alloc_failed;
++      }
++
++      /* Set EDMA jumbo MRU if enabled or set page mode. */
++      if (edma_ctx->rx_buf_size) {
++              edma_ctx->rx_page_mode = false;
++              pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
++      } else {
++              edma_ctx->rx_page_mode = page_mode;
++      }
++
++      ret = edma_alloc_rings();
++      if (ret) {
++              pr_err("Error in initializing the rings. ret: %d\n", ret);
++              goto edma_alloc_rings_failed;
++      }
++
++      /* Disable interrupts. */
++      edma_cfg_rx_disable_interrupts();
++
++      edma_cfg_rx_rings_disable();
++
++      edma_cfg_rx_ring_mappings();
++
++      ret = edma_cfg_rx_rings();
++      if (ret) {
++              pr_err("Error in configuring Rx rings. ret: %d\n", ret);
++              goto edma_cfg_rx_rings_failed;
++      }
++
+       /* Configure DMA request priority, DMA read burst length,
+        * and AXI write size.
+        */
+@@ -376,6 +484,10 @@ static int edma_hw_configure(void)
+       data |= EDMA_MISC_TX_TIMEOUT_MASK;
+       edma_ctx->intr_info.intr_mask_misc = data;
++      edma_cfg_rx_rings_enable();
++      edma_cfg_rx_napi_add();
++      edma_cfg_rx_napi_enable();
++
+       /* Global EDMA enable and padding enable. */
+       data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
+@@ -389,11 +501,32 @@ static int edma_hw_configure(void)
+       if (ret) {
+               pr_err("Failed to initialize unicast priority map table: %d\n",
+                      ret);
+-              kfree(edma_ctx->netdev_arr);
+-              return ret;
++              goto configure_ucast_prio_map_tbl_failed;
++      }
++
++      /* Initialize RPS hash map table. */
++      ret = edma_cfg_rx_rps_hash_map();
++      if (ret) {
++              pr_err("Failed to configure RPS hash map table: %d\n",
++                     ret);
++              goto edma_cfg_rx_rps_hash_map_failed;
+       }
+       return 0;
++
++edma_cfg_rx_rps_hash_map_failed:
++configure_ucast_prio_map_tbl_failed:
++      edma_cfg_rx_napi_disable();
++      edma_cfg_rx_napi_delete();
++      edma_cfg_rx_rings_disable();
++edma_cfg_rx_rings_failed:
++      edma_cfg_rx_rings_cleanup();
++edma_alloc_rings_failed:
++      free_netdev(edma_ctx->dummy_dev);
++dummy_dev_alloc_failed:
++      kfree(edma_ctx->netdev_arr);
++
++      return ret;
+ }
+ /**
+@@ -404,8 +537,31 @@ static int edma_hw_configure(void)
+  */
+ void edma_destroy(struct ppe_device *ppe_dev)
+ {
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      /* Disable interrupts. */
++      edma_cfg_rx_disable_interrupts();
++
++      /* Free IRQ for RXDESC rings. */
++      for (i = 0; i < rx->num_rings; i++) {
++              synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
++              free_irq(edma_ctx->intr_info.intr_rx[i],
++                       (void *)&edma_ctx->rx_rings[i]);
++              kfree(edma_rxdesc_irq_name[i]);
++      }
++      kfree(edma_rxdesc_irq_name);
++
+       kfree(edma_ctx->intr_info.intr_rx);
+       kfree(edma_ctx->intr_info.intr_txcmpl);
++
++      edma_cfg_rx_napi_disable();
++      edma_cfg_rx_napi_delete();
++      edma_cfg_rx_rings_disable();
++      edma_cfg_rx_rings_cleanup();
++
++      free_netdev(edma_ctx->dummy_dev);
+       kfree(edma_ctx->netdev_arr);
+ }
+@@ -428,6 +584,7 @@ int edma_setup(struct ppe_device *ppe_dev)
+       edma_ctx->hw_info = &ipq9574_hw_info;
+       edma_ctx->ppe_dev = ppe_dev;
++      edma_ctx->rx_buf_size = rx_buff_size;
+       /* Configure the EDMA common clocks. */
+       ret = edma_clock_init();
+@@ -450,6 +607,16 @@ int edma_setup(struct ppe_device *ppe_dev)
+               return ret;
+       }
++      ret = edma_irq_register();
++      if (ret) {
++              dev_err(dev, "Error in IRQ registration\n");
++              kfree(edma_ctx->intr_info.intr_rx);
++              kfree(edma_ctx->intr_info.intr_txcmpl);
++              return ret;
++      }
++
++      edma_cfg_rx_enable_interrupts();
++
+       dev_info(dev, "EDMA configuration successful\n");
+       return 0;
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index 5261002f883d..778df7997d9f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -6,6 +6,7 @@
+ #define __EDMA_MAIN__
+ #include "ppe_api.h"
++#include "edma_rx.h"
+ /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
+  *
+@@ -29,6 +30,11 @@
+ /* Interface ID start. */
+ #define EDMA_START_IFNUM   1
++#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
++                      typeof(_max) (max) = (_max); \
++                      ((((head) - (tail)) + \
++                      (max)) & ((max) - 1)); })
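++/* Example: in a 256-entry ring with head = 5 and tail = 250, this
++ * yields ((5 - 250) + 256) & 255 = 11 available descriptors; 'max'
++ * must be a power of two for the masking to be valid.
++ */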
++
+ /**
+  * struct edma_ring_info - EDMA ring data structure.
+  * @max_rings: Maximum number of rings
+@@ -82,18 +88,28 @@ struct edma_intr_info {
+ /**
+  * struct edma_context - EDMA context.
+  * @netdev_arr: Net device for each EDMA port
++ * @dummy_dev: Dummy netdevice for RX DMA
+  * @ppe_dev: PPE device
+  * @hw_info: EDMA Hardware info
+  * @intr_info: EDMA Interrupt info
++ * @rxfill_rings: Rx fill Rings, SW is producer
++ * @rx_rings: Rx Desc Rings, SW is consumer
++ * @rx_page_mode: Page mode enabled or disabled
++ * @rx_buf_size: Rx buffer size for Jumbo MRU
+  */
+ struct edma_context {
+       struct net_device **netdev_arr;
++      struct net_device *dummy_dev;
+       struct ppe_device *ppe_dev;
+       struct edma_hw_info *hw_info;
+       struct edma_intr_info intr_info;
++      struct edma_rxfill_ring *rxfill_rings;
++      struct edma_rxdesc_ring *rx_rings;
++      u32 rx_page_mode;
++      u32 rx_buf_size;
+ };
+-/* Global EDMA context. */
++/* Global EDMA context */
+ extern struct edma_context *edma_ctx;
+ void edma_destroy(struct ppe_device *ppe_dev);
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+new file mode 100644
+index 000000000000..18e4ada6a076
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+@@ -0,0 +1,964 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* Configure rings, Buffers and NAPI for receive path along with
++ * providing APIs to enable, disable, clean and map the Rx rings.
++ */
++
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++#include <linux/regmap.h>
++#include <linux/skbuff.h>
++
++#include "edma.h"
++#include "edma_cfg_rx.h"
++#include "ppe.h"
++#include "ppe_regs.h"
++
++/* EDMA Queue ID to Ring ID Table. */
++#define EDMA_QID2RID_TABLE_MEM(q)     (0xb9000 + (0x4 * (q)))
++
++/* Rx ring queue offset. */
++#define EDMA_QUEUE_OFFSET(q_id)       ((q_id) / EDMA_MAX_PRI_PER_CORE)
++
++/* Rx EDMA maximum queue supported. */
++#define EDMA_CPU_PORT_QUEUE_MAX(queue_start)  \
++                      ((queue_start) + (EDMA_MAX_PRI_PER_CORE * num_possible_cpus()) - 1)
++
++/* EDMA Queue ID to Ring ID configuration. */
++#define EDMA_QID2RID_NUM_PER_REG      4
++
++static int rx_queues[] = {0, 8, 16, 24};
++
++static u32 edma_rx_ring_queue_map[][EDMA_MAX_CORE] = {{ 0, 8, 16, 24 },
++                                              { 1, 9, 17, 25 },
++                                              { 2, 10, 18, 26 },
++                                              { 3, 11, 19, 27 },
++                                              { 4, 12, 20, 28 },
++                                              { 5, 13, 21, 29 },
++                                              { 6, 14, 22, 30 },
++                                              { 7, 15, 23, 31 }};
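++
++/* Rows above index the PPE queue priority (0..7) and columns the ARM
++ * core / Rx ring, i.e. queue_id = core * 8 + priority.
++ */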
++
++static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++      int ret;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev, rxdesc_ring->ring_id,
++                                                   ARRAY_SIZE(rx_queues), rx_queues);
++              if (ret) {
++                      pr_err("Error in unmapping rxdesc ring %d from its PPE queues to disable backpressure\n",
++                             i);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++static int edma_cfg_rx_desc_ring_reset_queue_priority(u32 rxdesc_ring_idx)
++{
++      u32 i, queue_id;
++      int ret;
++
++      for (i = 0; i < EDMA_MAX_PRI_PER_CORE; i++) {
++              queue_id = edma_rx_ring_queue_map[i][rxdesc_ring_idx];
++
++              ret = ppe_queue_priority_set(edma_ctx->ppe_dev, queue_id, i);
++              if (ret) {
++                      pr_err("Error in resetting %u queue's priority\n",
++                             queue_id);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++static int edma_cfg_rx_desc_ring_reset_queue_config(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++      int ret;
++
++      if (unlikely(rx->num_rings > num_possible_cpus())) {
++              pr_err("Invalid count of rxdesc rings: %d\n",
++                     rx->num_rings);
++              return -EINVAL;
++      }
++
++      /* Unmap Rxdesc ring to PPE queue mapping */
++      ret = edma_cfg_rx_desc_rings_reset_queue_mapping();
++      if (ret) {
++              pr_err("Error in resetting Rx desc ring backpressure config\n");
++              return ret;
++      }
++
++      /* Reset the priority for PPE queues mapped to Rx rings */
++      for (i = 0; i < rx->num_rings; i++) {
++              ret = edma_cfg_rx_desc_ring_reset_queue_priority(i);
++              if (ret) {
++                      pr_err("Error in resetting ring:%d queue's priority\n",
++                             i + rx->ring_start);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++static int edma_cfg_rx_desc_ring_to_queue_mapping(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++      int ret;
++
++      /* Rxdesc ring to PPE queue mapping */
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev,
++                                                   rxdesc_ring->ring_id,
++                                                   ARRAY_SIZE(rx_queues), rx_queues);
++              if (ret) {
++                      pr_err("Error in configuring Rx ring to PPE queue mapping, ret: %d, id: %d\n",
++                             ret, rxdesc_ring->ring_id);
++                      if (edma_cfg_rx_desc_rings_reset_queue_mapping())
++                              pr_err("Error in resetting Rx desc ring backpressure configuration\n");
++
++                      return ret;
++              }
++
++              pr_debug("Rx desc ring %d mapped to PPE queues for backpressure\n",
++                       rxdesc_ring->ring_id);
++      }
++
++      return 0;
++}
++
++static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 data, reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_BA(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, (u32)(rxdesc_ring->pdma & EDMA_RXDESC_BA_MASK));
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PREHEADER_BA(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, (u32)(rxdesc_ring->sdma & EDMA_RXDESC_PREHEADER_BA_MASK));
++
++      data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
++      data |= (EDMA_RXDESC_PL_DEFAULT_VALUE & EDMA_RXDESC_PL_OFFSET_MASK)
++               << EDMA_RXDESC_PL_OFFSET_SHIFT;
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, data);
++
++      /* Configure the Mitigation timer */
++      data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
++                                         ppe_dev->clk_rate / MHZ);
++      data = ((data & EDMA_RX_MOD_TIMER_INIT_MASK)
++                      << EDMA_RX_MOD_TIMER_INIT_SHIFT);
++      pr_debug("EDMA Rx mitigation timer value: %d\n", data);
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RX_MOD_TIMER(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, data);
++
++      /* Configure the Mitigation packet count */
++      data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
++                      << EDMA_RXDESC_LOW_THRE_SHIFT;
++      pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, data);
++
++      /* Configure the ring's interrupt control. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RX_INT_CTRL(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, EDMA_RX_NE_INT_EN);
++}
++
++static void edma_cfg_rx_qid_to_rx_desc_ring_mapping(void)
++{
++      u32 desc_index, ring_index, reg_index, data, q_id;
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 mcast_start, mcast_end, reg;
++      int ret;
++
++      desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
++
++      /* Map all unicast queues to their Rx rings. */
++      for (q_id = EDMA_RX_QUEUE_START;
++              q_id <= EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START);
++                      q_id += EDMA_QID2RID_NUM_PER_REG) {
++              reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
++              ring_index = desc_index + EDMA_QUEUE_OFFSET(q_id);
++
++              data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, ring_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, ring_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, ring_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, ring_index);
++
++              reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
++              regmap_write(regmap, reg, data);
++              pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x, desc_index: %d, reg_index: %d\n",
++                       q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data, desc_index, reg_index);
++      }
++
++      ret = ppe_edma_queue_resource_get(edma_ctx->ppe_dev, PPE_RES_MCAST,
++                                        &mcast_start, &mcast_end);
++      if (ret < 0) {
++              pr_err("Error in extracting multicast queue values\n");
++              return;
++      }
++
++      /* Map multicast queues to the first Rx ring. */
++      desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
++      for (q_id = mcast_start; q_id <= mcast_end;
++                      q_id += EDMA_QID2RID_NUM_PER_REG) {
++              reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
++
++              data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, desc_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, desc_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, desc_index);
++              data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, desc_index);
++
++              reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
++              regmap_write(regmap, reg, data);
++
++              pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x\n",
++                       q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data);
++      }
++}
++
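++/* Each Rx descriptor ring's fill-ring number is a 3-bit field packed
++ * into one of the three RXDESC2FILL_MAP registers (rings 0-9, 10-19
++ * and 20+, respectively) at bit position (ring_id % 10) * 3.
++ */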
++static void edma_cfg_rx_rings_to_rx_fill_mapping(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i, data, reg;
++
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR, 0);
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
++              u32 ring_id;
++
++              ring_id = rxdesc_ring->ring_id;
++              if (ring_id <= 9)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
++              else if (ring_id >= 10 && ring_id <= 19)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
++              else
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
++
++              pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
++                       ring_id,
++                       rxdesc_ring->rxfill->ring_id);
++
++              /* Set the Rx fill ring number in the mapping register. */
++              regmap_read(regmap, reg, &data);
++              data |= (rxdesc_ring->rxfill->ring_id &
++                       EDMA_RXDESC2FILL_MAP_RXDESC_MASK) <<
++                       ((ring_id % 10) * 3);
++              regmap_write(regmap, reg, data);
++      }
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_RXDESC2FILL_MAP_0_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_RXDESC2FILL_MAP_1_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_RXDESC2FILL_MAP_2_ADDR: 0x%x\n", data);
++}
++
++/**
++ * edma_cfg_rx_rings_enable - Enable Rx and Rxfill rings
++ *
++ * Enable Rx and Rxfill rings.
++ */
++void edma_cfg_rx_rings_enable(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i, reg;
++
++      /* Enable Rx rings */
++      for (i = rx->ring_start; i < rx->ring_start + rx->num_rings; i++) {
++              u32 data;
++
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(i);
++              regmap_read(regmap, reg, &data);
++              data |= EDMA_RXDESC_RX_EN;
++              regmap_write(regmap, reg, data);
++      }
++
++      for (i = rxfill->ring_start; i < rxfill->ring_start + rxfill->num_rings; i++) {
++              u32 data;
++
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(i);
++              regmap_read(regmap, reg, &data);
++              data |= EDMA_RXFILL_RING_EN;
++              regmap_write(regmap, reg, data);
++      }
++}
++
++/**
++ * edma_cfg_rx_rings_disable - Disable Rx and Rxfill rings
++ *
++ * Disable Rx and Rxfill rings.
++ */
++void edma_cfg_rx_rings_disable(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i, reg;
++
++      /* Disable Rx rings */
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring = NULL;
++              u32 data;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(rxdesc_ring->ring_id);
++              regmap_read(regmap, reg, &data);
++              data &= ~EDMA_RXDESC_RX_EN;
++              regmap_write(regmap, reg, data);
++      }
++
++      /* Disable RxFill Rings */
++      for (i = 0; i < rxfill->num_rings; i++) {
++              struct edma_rxfill_ring *rxfill_ring = NULL;
++              u32 data;
++
++              rxfill_ring = &edma_ctx->rxfill_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(rxfill_ring->ring_id);
++              regmap_read(regmap, reg, &data);
++              data &= ~EDMA_RXFILL_RING_EN;
++              regmap_write(regmap, reg, data);
++      }
++}
++
++/**
++ * edma_cfg_rx_ring_mappings - Set up Rx ring mappings
++ *
++ * Set up the queue ID to Rx descriptor ring mapping and the Rx
++ * descriptor ring to Rx fill ring mapping.
++ */
++void edma_cfg_rx_ring_mappings(void)
++{
++      edma_cfg_rx_qid_to_rx_desc_ring_mapping();
++      edma_cfg_rx_rings_to_rx_fill_mapping();
++}
++
++static void edma_cfg_rx_fill_ring_cleanup(struct edma_rxfill_ring *rxfill_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct device *dev = ppe_dev->dev;
++      u16 cons_idx, curr_idx;
++      u32 data, reg;
++
++      /* Get RxFill ring producer index */
++      curr_idx = rxfill_ring->prod_idx & EDMA_RXFILL_PROD_IDX_MASK;
++
++      /* Get RxFill ring consumer index */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->ring_id);
++      regmap_read(regmap, reg, &data);
++      cons_idx = data & EDMA_RXFILL_CONS_IDX_MASK;
++
++      while (curr_idx != cons_idx) {
++              struct edma_rxfill_desc *rxfill_desc;
++              struct sk_buff *skb;
++
++              /* Get RxFill descriptor */
++              rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx);
++
++              cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
++
++              /* Get skb from opaque */
++              skb = (struct sk_buff *)EDMA_RXFILL_OPAQUE_GET(rxfill_desc);
++              if (unlikely(!skb)) {
++                      pr_err("Empty skb reference at index:%d\n",
++                             cons_idx);
++                      continue;
++              }
++
++              dev_kfree_skb_any(skb);
++      }
++
++      /* Free RxFill ring descriptors */
++      dma_free_coherent(dev, (sizeof(struct edma_rxfill_desc)
++                         * rxfill_ring->count),
++                         rxfill_ring->desc, rxfill_ring->dma);
++      rxfill_ring->desc = NULL;
++      rxfill_ring->dma = (dma_addr_t)0;
++}
++
++static int edma_cfg_rx_fill_ring_dma_alloc(struct edma_rxfill_ring *rxfill_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++
++      /* Allocate RxFill ring descriptors */
++      rxfill_ring->desc = dma_alloc_coherent(dev, (sizeof(struct edma_rxfill_desc)
++                                             * rxfill_ring->count),
++                                             &rxfill_ring->dma,
++                                             GFP_KERNEL | __GFP_ZERO);
++      if (unlikely(!rxfill_ring->desc))
++              return -ENOMEM;
++
++      return 0;
++}
++
++static int edma_cfg_rx_desc_ring_dma_alloc(struct edma_rxdesc_ring *rxdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++
++      rxdesc_ring->pdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_pri)
++                                              * rxdesc_ring->count),
++                              &rxdesc_ring->pdma, GFP_KERNEL | __GFP_ZERO);
++      if (unlikely(!rxdesc_ring->pdesc))
++              return -ENOMEM;
++
++      rxdesc_ring->sdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_sec)
++                              * rxdesc_ring->count),
++                              &rxdesc_ring->sdma, GFP_KERNEL | __GFP_ZERO);
++      if (unlikely(!rxdesc_ring->sdesc)) {
++              dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
++                                * rxdesc_ring->count),
++                                rxdesc_ring->pdesc,
++                                rxdesc_ring->pdma);
++              rxdesc_ring->pdesc = NULL;
++              rxdesc_ring->pdma = (dma_addr_t)0;
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static void edma_cfg_rx_desc_ring_cleanup(struct edma_rxdesc_ring *rxdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct device *dev = ppe_dev->dev;
++      u32 prod_idx, cons_idx, reg;
++
++      /* Get Rxdesc consumer & producer indices */
++      cons_idx = rxdesc_ring->cons_idx & EDMA_RXDESC_CONS_IDX_MASK;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
++      regmap_read(regmap, reg, &prod_idx);
++      prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
++
++      /* Free any buffers assigned to any descriptors */
++      while (cons_idx != prod_idx) {
++              struct edma_rxdesc_pri *rxdesc_pri =
++                      EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
++              struct sk_buff *skb;
++
++              /* Get opaque from Rxdesc */
++              skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(rxdesc_pri);
++              if (unlikely(!skb))
++                      pr_warn("Empty skb reference at index:%d\n",
++                              cons_idx);
++              else
++                      dev_kfree_skb_any(skb);
++
++              /* Update consumer index */
++              cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
++      }
++
++      /* Update the consumer index */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, cons_idx);
++
++      /* Free primary Rxdesc ring descriptors */
++      dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
++                        * rxdesc_ring->count), rxdesc_ring->pdesc,
++                        rxdesc_ring->pdma);
++      rxdesc_ring->pdesc = NULL;
++      rxdesc_ring->pdma = (dma_addr_t)0;
++
++      /* Free secondary Rxdesc ring descriptors */
++      dma_free_coherent(dev, (sizeof(struct edma_rxdesc_sec)
++                        * rxdesc_ring->count), rxdesc_ring->sdesc,
++                        rxdesc_ring->sdma);
++      rxdesc_ring->sdesc = NULL;
++      rxdesc_ring->sdma = (dma_addr_t)0;
++}
++
++static int edma_cfg_rx_rings_setup(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 ring_idx, alloc_size, buf_len;
++
++      /* Set buffer allocation size */
++      if (edma_ctx->rx_buf_size) {
++              alloc_size = edma_ctx->rx_buf_size +
++                              EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
++              buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
++      } else if (edma_ctx->rx_page_mode) {
++              alloc_size = EDMA_RX_PAGE_MODE_SKB_SIZE +
++                              EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
++              buf_len = PAGE_SIZE;
++      } else {
++              alloc_size = EDMA_RX_BUFFER_SIZE;
++              buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
++      }
++
++      pr_debug("EDMA ctx:%p rx_ring alloc_size=%u, buf_len=%u\n",
++               edma_ctx, alloc_size, buf_len);
++
++      /* Allocate Rx fill ring descriptors */
++      for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++) {
++              int ret;
++              struct edma_rxfill_ring *rxfill_ring = NULL;
++
++              rxfill_ring = &edma_ctx->rxfill_rings[ring_idx];
++              rxfill_ring->count = EDMA_RX_RING_SIZE;
++              rxfill_ring->ring_id = rxfill->ring_start + ring_idx;
++              rxfill_ring->alloc_size = alloc_size;
++              rxfill_ring->buf_len = buf_len;
++              rxfill_ring->page_mode = edma_ctx->rx_page_mode;
++
++              ret = edma_cfg_rx_fill_ring_dma_alloc(rxfill_ring);
++              if (ret) {
++                      pr_err("Error in setting up rxfill ring %u. ret: %d\n",
++                             rxfill_ring->ring_id, ret);
++                      while (ring_idx--)
++                              edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
++
++                      return -ENOMEM;
++              }
++      }
++
++      /* Allocate RxDesc ring descriptors */
++      for (ring_idx = 0; ring_idx < rx->num_rings; ring_idx++) {
++              u32 index, queue_id = EDMA_RX_QUEUE_START;
++              struct edma_rxdesc_ring *rxdesc_ring = NULL;
++              int ret;
++
++              rxdesc_ring = &edma_ctx->rx_rings[ring_idx];
++              rxdesc_ring->count = EDMA_RX_RING_SIZE;
++              rxdesc_ring->ring_id = rx->ring_start + ring_idx;
++
++              if (queue_id > EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START)) {
++                      pr_err("Invalid queue_id: %u\n", queue_id);
++                      while (ring_idx--)
++                              edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
++
++                      goto rxdesc_mem_alloc_fail;
++              }
++
++              /* Create a mapping between the Rx descriptor ring and the
++               * Rx fill ring. There are fewer fill rings than descriptor
++               * rings, so the fill rings are shared across the
++               * descriptor rings.
++               */
++              index = rxfill->ring_start +
++                              (ring_idx % rxfill->num_rings);
++              rxdesc_ring->rxfill = &edma_ctx->rxfill_rings[index
++                                              - rxfill->ring_start];
++
++              ret = edma_cfg_rx_desc_ring_dma_alloc(rxdesc_ring);
++              if (ret) {
++                      pr_err("Error in setting up rxdesc ring %u. ret: %d\n",
++                             rxdesc_ring->ring_id, ret);
++                      while (ring_idx--)
++                              edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
++
++                      goto rxdesc_mem_alloc_fail;
++              }
++      }
++
++      pr_debug("Rx descriptor count for Rx desc and Rx fill rings: %d\n",
++               EDMA_RX_RING_SIZE);
++
++      return 0;
++
++rxdesc_mem_alloc_fail:
++      for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++)
++              edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
++
++      return -ENOMEM;
++}
++
++/**
++ * edma_cfg_rx_buff_size_setup - Configure EDMA Rx jumbo buffer
++ *
++ * Configure EDMA Rx jumbo buffer
++ */
++void edma_cfg_rx_buff_size_setup(void)
++{
++      if (edma_ctx->rx_buf_size) {
++              edma_ctx->rx_page_mode = false;
++              pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
++      }
++}
++
++/**
++ * edma_cfg_rx_rings_alloc - Allocate EDMA Rx rings
++ *
++ * Allocate EDMA Rx rings.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int edma_cfg_rx_rings_alloc(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct edma_ring_info *rx = hw_info->rx;
++      int ret;
++
++      edma_ctx->rxfill_rings = kzalloc((sizeof(*edma_ctx->rxfill_rings) *
++                                       rxfill->num_rings),
++                                       GFP_KERNEL);
++      if (!edma_ctx->rxfill_rings)
++              return -ENOMEM;
++
++      edma_ctx->rx_rings = kzalloc((sizeof(*edma_ctx->rx_rings) *
++                                       rx->num_rings),
++                                       GFP_KERNEL);
++      if (!edma_ctx->rx_rings)
++              goto rxdesc_ring_alloc_fail;
++
++      pr_debug("RxDesc:%u rx (%u-%u) RxFill:%u (%u-%u)\n",
++               rx->num_rings, rx->ring_start,
++               (rx->ring_start + rx->num_rings - 1),
++               rxfill->num_rings, rxfill->ring_start,
++               (rxfill->ring_start + rxfill->num_rings - 1));
++
++      if (edma_cfg_rx_rings_setup()) {
++              pr_err("Error in setting up Rx rings\n");
++              goto rx_rings_setup_fail;
++      }
++
++      /* Reset Rx descriptor ring mapped queue's configurations */
++      ret = edma_cfg_rx_desc_ring_reset_queue_config();
++      if (ret) {
++              pr_err("Error in resetting the Rx descriptor rings configurations\n");
++              edma_cfg_rx_rings_cleanup();
++              return ret;
++      }
++
++      return 0;
++
++rx_rings_setup_fail:
++      kfree(edma_ctx->rx_rings);
++      edma_ctx->rx_rings = NULL;
++rxdesc_ring_alloc_fail:
++      kfree(edma_ctx->rxfill_rings);
++      edma_ctx->rxfill_rings = NULL;
++
++      return -ENOMEM;
++}
++
++/**
++ * edma_cfg_rx_rings_cleanup - Cleanup EDMA Rx rings
++ *
++ * Cleanup EDMA Rx rings
++ */
++void edma_cfg_rx_rings_cleanup(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      /* Free RxFill ring descriptors */
++      for (i = 0; i < rxfill->num_rings; i++)
++              edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[i]);
++
++      /* Free Rx completion ring descriptors */
++      for (i = 0; i < rx->num_rings; i++)
++              edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[i]);
++
++      kfree(edma_ctx->rxfill_rings);
++      kfree(edma_ctx->rx_rings);
++      edma_ctx->rxfill_rings = NULL;
++      edma_ctx->rx_rings = NULL;
++}
++
++static void edma_cfg_rx_fill_ring_configure(struct edma_rxfill_ring *rxfill_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 ring_sz, reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_BA(rxfill_ring->ring_id);
++      regmap_write(regmap, reg, (u32)(rxfill_ring->dma & EDMA_RING_DMA_MASK));
++
++      ring_sz = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->ring_id);
++      regmap_write(regmap, reg, ring_sz);
++
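++      /* Pre-fill the ring with receive buffers. One slot is left unused,
++       * as is usual for producer/consumer rings, so that a full ring can
++       * be told apart from an empty one by the indices alone.
++       */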
++      edma_rx_alloc_buffer(rxfill_ring, rxfill_ring->count - 1);
++}
++
++static void edma_cfg_rx_desc_ring_flow_control(u32 threshold_xoff, u32 threshold_xon)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 data, i, reg;
++
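++      /* Pack the XOFF and XON thresholds into a single value and program
++       * it into the flow-control threshold register of each Rx
++       * descriptor ring.
++       */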
++      data = (threshold_xoff & EDMA_RXDESC_FC_XOFF_THRE_MASK) << EDMA_RXDESC_FC_XOFF_THRE_SHIFT;
++      data |= ((threshold_xon & EDMA_RXDESC_FC_XON_THRE_MASK) << EDMA_RXDESC_FC_XON_THRE_SHIFT);
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_FC_THRE(rxdesc_ring->ring_id);
++              regmap_write(regmap, reg, data);
++      }
++}
++
++static void edma_cfg_rx_fill_ring_flow_control(int threshold_xoff, int threshold_xon)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 data, i, reg;
++
++      data = (threshold_xoff & EDMA_RXFILL_FC_XOFF_THRE_MASK) << EDMA_RXFILL_FC_XOFF_THRE_SHIFT;
++      data |= ((threshold_xon & EDMA_RXFILL_FC_XON_THRE_MASK) << EDMA_RXFILL_FC_XON_THRE_SHIFT);
++
++      for (i = 0; i < rxfill->num_rings; i++) {
++              struct edma_rxfill_ring *rxfill_ring;
++
++              rxfill_ring = &edma_ctx->rxfill_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_FC_THRE(rxfill_ring->ring_id);
++              regmap_write(regmap, reg, data);
++      }
++}
++
++/**
++ * edma_cfg_rx_rings - Configure EDMA Rx rings.
++ *
++ * Configure EDMA Rx rings and map the Rx descriptor rings to PPE queues.
++ *
++ * Return 0 on success, negative error code on failure.
++ */
++int edma_cfg_rx_rings(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      for (i = 0; i < rxfill->num_rings; i++)
++              edma_cfg_rx_fill_ring_configure(&edma_ctx->rxfill_rings[i]);
++
++      for (i = 0; i < rx->num_rings; i++)
++              edma_cfg_rx_desc_ring_configure(&edma_ctx->rx_rings[i]);
++
++      /* Configure Rx flow control configurations */
++      edma_cfg_rx_desc_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
++      edma_cfg_rx_fill_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
++
++      return edma_cfg_rx_desc_ring_to_queue_mapping();
++}
++
++/**
++ * edma_cfg_rx_disable_interrupts - EDMA disable RX interrupts
++ *
++ * Disable RX interrupt masks
++ */
++void edma_cfg_rx_disable_interrupts(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i, reg;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring =
++                              &edma_ctx->rx_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
++              regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
++      }
++}
++
++/**
++ * edma_cfg_rx_enable_interrupts - EDMA enable RX interrupts
++ *
++ * Enable RX interrupt masks
++ */
++void edma_cfg_rx_enable_interrupts(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i, reg;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring =
++                              &edma_ctx->rx_rings[i];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
++              regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
++      }
++}
++
++/**
++ * edma_cfg_rx_napi_disable - Disable NAPI for Rx
++ *
++ * Disable NAPI for Rx
++ */
++void edma_cfg_rx_napi_disable(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              if (!rxdesc_ring->napi_added)
++                      continue;
++
++              napi_disable(&rxdesc_ring->napi);
++      }
++}
++
++/**
++ * edma_cfg_rx_napi_enable - Enable NAPI for Rx
++ *
++ * Enable NAPI for Rx
++ */
++void edma_cfg_rx_napi_enable(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              if (!rxdesc_ring->napi_added)
++                      continue;
++
++              napi_enable(&rxdesc_ring->napi);
++      }
++}
++
++/**
++ * edma_cfg_rx_napi_delete - Delete Rx NAPI
++ *
++ * Delete RX NAPI
++ */
++void edma_cfg_rx_napi_delete(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              if (!rxdesc_ring->napi_added)
++                      continue;
++
++              netif_napi_del(&rxdesc_ring->napi);
++              rxdesc_ring->napi_added = false;
++      }
++}
++
++/**
++ * edma_cfg_rx_napi_add - Add Rx NAPI
++ *
++ * Add RX NAPI
++ */
++void edma_cfg_rx_napi_add(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rx = hw_info->rx;
++      u32 i;
++
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
++
++              netif_napi_add_weight(edma_ctx->dummy_dev, &rxdesc_ring->napi,
++                                    edma_rx_napi_poll, hw_info->napi_budget_rx);
++              rxdesc_ring->napi_added = true;
++      }
++
++      netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
++}
++
++/**
++ * edma_cfg_rx_rps_hash_map - Configure rx rps hash map.
++ *
++ * Initialize and configure RPS hash map for queues
++ */
++int edma_cfg_rx_rps_hash_map(void)
++{
++      cpumask_t edma_rps_cpumask = {{EDMA_RX_DEFAULT_BITMAP}};
++      int map_len = 0, idx = 0, ret = 0;
++      u32 q_off = EDMA_RX_QUEUE_START;
++      u32 q_map[EDMA_MAX_CORE] = {0};
++      u32 hash, cpu;
++
++      /* Map all possible hash values to queues used by the EDMA Rx
++       * rings based on a bitmask, which represents the cores to be mapped.
++       * These queues are expected to be mapped to different Rx rings
++       * which are assigned to different cores using IRQ affinity configuration.
++       */
++      for_each_cpu(cpu, &edma_rps_cpumask) {
++              q_map[map_len] = q_off + (cpu * EDMA_MAX_PRI_PER_CORE);
++              map_len++;
++      }
++
++      for (hash = 0; hash < PPE_QUEUE_HASH_NUM; hash++) {
++              ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
++                                                 PPE_QUEUE_CLASS_HASH, hash, q_map[idx]);
++              if (ret)
++                      return ret;
++
++              pr_debug("profile_id: %u, hash: %u, q_off: %u\n",
++                       EDMA_CPU_PORT_PROFILE_ID, hash, q_map[idx]);
++              idx = (idx + 1) % map_len;
++      }
++
++      return 0;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+new file mode 100644
+index 000000000000..3c84ef4ea85c
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+@@ -0,0 +1,48 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_CFG_RX__
++#define __EDMA_CFG_RX__
++
++struct ctl_table;
++
++/* SKB payload size used in page mode */
++#define EDMA_RX_PAGE_MODE_SKB_SIZE    256
++
++/* Rx flow control X-OFF default value */
++#define EDMA_RX_FC_XOFF_DEF   32
++
++/* Rx flow control X-ON default value */
++#define EDMA_RX_FC_XON_DEF    64
++
++/* Rx AC flow control original threshold */
++#define EDMA_RX_AC_FC_THRE_ORIG               0x190
++
++/* Rx AC flow control default threshold */
++#define EDMA_RX_AC_FC_THRES_DEF               0x104
++
++/* Rx mitigation timer's default value in microseconds */
++#define EDMA_RX_MITIGATION_TIMER_DEF  25
++
++/* Rx mitigation packet count's default value */
++#define EDMA_RX_MITIGATION_PKT_CNT_DEF        16
++
++/* Default bitmap of cores for RPS to ARM cores */
++#define EDMA_RX_DEFAULT_BITMAP        ((1 << EDMA_MAX_CORE) - 1)
++
++int edma_cfg_rx_rings(void);
++int edma_cfg_rx_rings_alloc(void);
++void edma_cfg_rx_ring_mappings(void);
++void edma_cfg_rx_rings_cleanup(void);
++void edma_cfg_rx_disable_interrupts(void);
++void edma_cfg_rx_enable_interrupts(void);
++void edma_cfg_rx_napi_disable(void);
++void edma_cfg_rx_napi_enable(void);
++void edma_cfg_rx_napi_delete(void);
++void edma_cfg_rx_napi_add(void);
++void edma_cfg_rx_mapping(void);
++void edma_cfg_rx_rings_enable(void);
++void edma_cfg_rx_rings_disable(void);
++void edma_cfg_rx_buff_size_setup(void);
++int edma_cfg_rx_rps_hash_map(void);
++int edma_cfg_rx_rps(struct ctl_table *table, int write,
++                  void *buffer, size_t *lenp, loff_t *ppos);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.c b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+index 6292b83d746d..bbb5823408fd 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+@@ -12,12 +12,39 @@
+ #include <linux/printk.h>
+ #include "edma.h"
++#include "edma_cfg_rx.h"
+ #include "edma_port.h"
+ #include "ppe_regs.h"
+ /* Number of netdev queues. */
+ #define EDMA_NETDEV_QUEUE_NUM 4
++static int edma_port_stats_alloc(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      /* Allocate per-cpu stats memory. */
++      port_priv->pcpu_stats.rx_stats =
++              netdev_alloc_pcpu_stats(struct edma_port_rx_stats);
++      if (!port_priv->pcpu_stats.rx_stats) {
++              netdev_err(netdev, "Per-cpu EDMA Rx stats alloc failed for %s\n",
++                         netdev->name);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static void edma_port_stats_free(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = netdev_priv(netdev);
++
++      free_percpu(port_priv->pcpu_stats.rx_stats);
++}
++
+ static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
+                                                __maybe_unused struct sk_buff *skb,
+                               __maybe_unused struct net_device *sb_dev)
+@@ -172,6 +199,7 @@ void edma_port_destroy(struct ppe_port *port)
+       int port_id = port->port_id;
+       struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
++      edma_port_stats_free(netdev);
+       unregister_netdev(netdev);
+       free_netdev(netdev);
+       ppe_port_phylink_destroy(port);
+@@ -232,6 +260,13 @@ int edma_port_setup(struct ppe_port *port)
+                           port_id, netdev->dev_addr);
+       }
++      /* Allocate memory for EDMA port statistics. */
++      ret = edma_port_stats_alloc(netdev);
++      if (ret) {
++              netdev_dbg(netdev, "EDMA port stats alloc failed\n");
++              goto stats_alloc_fail;
++      }
++
+       netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
+                  netdev->name, port_id);
+@@ -263,8 +298,10 @@ int edma_port_setup(struct ppe_port *port)
+ register_netdev_fail:
+       ppe_port_phylink_destroy(port);
+ port_phylink_setup_fail:
+-      free_netdev(netdev);
+       edma_ctx->netdev_arr[port_id - 1] = NULL;
++      edma_port_stats_free(netdev);
++stats_alloc_fail:
++      free_netdev(netdev);
+       return ret;
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.h b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+index 0f2deb39556e..75f544a4f324 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+@@ -14,15 +14,46 @@
+                                       | NETIF_F_TSO \
+                                       | NETIF_F_TSO6)
++/**
++ * struct edma_port_rx_stats - EDMA RX per CPU stats for the port.
++ * @rx_pkts: Number of Rx packets
++ * @rx_bytes: Number of Rx bytes
++ * @rx_drops: Number of Rx drops
++ * @rx_nr_frag_pkts: Number of Rx nr_frags packets
++ * @rx_fraglist_pkts: Number of Rx fraglist packets
++ * @rx_nr_frag_headroom_err: nr_frags headroom error packets
++ * @syncp: Synchronization for statistics updates
++ */
++struct edma_port_rx_stats {
++      u64 rx_pkts;
++      u64 rx_bytes;
++      u64 rx_drops;
++      u64 rx_nr_frag_pkts;
++      u64 rx_fraglist_pkts;
++      u64 rx_nr_frag_headroom_err;
++      struct u64_stats_sync syncp;
++};
++
++/**
++ * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
++ * @rx_stats: Per CPU Rx statistics
++ */
++struct edma_port_pcpu_stats {
++      struct edma_port_rx_stats __percpu *rx_stats;
++};
++
+ /**
+  * struct edma_port_priv - EDMA port priv structure.
+  * @ppe_port: Pointer to PPE port
+  * @netdev: Corresponding netdevice
++ * @pcpu_stats: Per CPU netdev statistics
+  * @flags: Feature flags
+  */
+ struct edma_port_priv {
+       struct ppe_port *ppe_port;
+       struct net_device *netdev;
++      struct edma_port_pcpu_stats pcpu_stats;
+       unsigned long flags;
+ };
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_rx.c b/drivers/net/ethernet/qualcomm/ppe/edma_rx.c
+new file mode 100644
+index 000000000000..a1eb533410ce
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.c
+@@ -0,0 +1,622 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* Provides APIs to allocate Rx buffers, reap completed buffers, and
++ * receive and process both linear and scatter-gather packets.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/etherdevice.h>
++#include <linux/irqreturn.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/platform_device.h>
++#include <linux/printk.h>
++#include <linux/regmap.h>
++
++#include "edma.h"
++#include "edma_cfg_rx.h"
++#include "edma_port.h"
++#include "ppe.h"
++#include "ppe_regs.h"
++
++static int edma_rx_alloc_buffer_list(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
++{
++      struct edma_rxfill_stats *rxfill_stats = &rxfill_ring->rxfill_stats;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      u32 rx_alloc_size = rxfill_ring->alloc_size;
++      struct regmap *regmap = ppe_dev->regmap;
++      bool page_mode = rxfill_ring->page_mode;
++      struct edma_rxfill_desc *rxfill_desc;
++      u32 buf_len = rxfill_ring->buf_len;
++      struct device *dev = ppe_dev->dev;
++      u16 prod_idx, start_idx;
++      u16 num_alloc = 0;
++      u32 reg;
++
++      prod_idx = rxfill_ring->prod_idx;
++      start_idx = prod_idx;
++
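++      /* Buffers are published in a batch: descriptors are filled from
++       * the software producer index onwards and the hardware producer
++       * index register is written only once, after the loop.
++       */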
++      while (likely(alloc_count--)) {
++              dma_addr_t buff_addr;
++              struct sk_buff *skb;
++              struct page *pg;
++
++              rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, prod_idx);
++
++              skb = dev_alloc_skb(rx_alloc_size);
++              if (unlikely(!skb)) {
++                      u64_stats_update_begin(&rxfill_stats->syncp);
++                      ++rxfill_stats->alloc_failed;
++                      u64_stats_update_end(&rxfill_stats->syncp);
++                      break;
++              }
++
++              skb_reserve(skb, EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN);
++
++              if (likely(!page_mode)) {
++                      buff_addr = dma_map_single(dev, skb->data, rx_alloc_size, DMA_FROM_DEVICE);
++                      if (dma_mapping_error(dev, buff_addr)) {
++                              dev_dbg(dev, "edma_context:%p Unable to dma for non page mode",
++                                      edma_ctx);
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++              } else {
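++                      /* Page mode: receive the payload into a separately
++                       * allocated page, attached below as the skb's first
++                       * fragment.
++                       */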
++                      pg = alloc_page(GFP_ATOMIC);
++                      if (unlikely(!pg)) {
++                              u64_stats_update_begin(&rxfill_stats->syncp);
++                              ++rxfill_stats->page_alloc_failed;
++                              u64_stats_update_end(&rxfill_stats->syncp);
++                              dev_kfree_skb_any(skb);
++                              dev_dbg(dev, "edma_context:%p Unable to allocate page",
++                                      edma_ctx);
++                              break;
++                      }
++
++                      buff_addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++                      if (dma_mapping_error(dev, buff_addr)) {
++                              dev_dbg(dev, "edma_context:%p Mapping error for page mode",
++                                      edma_ctx);
++                              __free_page(pg);
++                              dev_kfree_skb_any(skb);
++                              break;
++                      }
++
++                      skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
++              }
++
++              EDMA_RXFILL_BUFFER_ADDR_SET(rxfill_desc, buff_addr);
++
++              EDMA_RXFILL_OPAQUE_LO_SET(rxfill_desc, skb);
++#ifdef __LP64__
++              EDMA_RXFILL_OPAQUE_HI_SET(rxfill_desc, skb);
++#endif
++              EDMA_RXFILL_PACKET_LEN_SET(rxfill_desc,
++                                         (u32)(buf_len) & EDMA_RXFILL_BUF_SIZE_MASK);
++              prod_idx = (prod_idx + 1) & EDMA_RX_RING_SIZE_MASK;
++              num_alloc++;
++      }
++
++      if (likely(num_alloc)) {
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->ring_id);
++              regmap_write(regmap, reg, prod_idx);
++              rxfill_ring->prod_idx = prod_idx;
++      }
++
++      return num_alloc;
++}
++
++/**
++ * edma_rx_alloc_buffer - EDMA Rx alloc buffer.
++ * @rxfill_ring: EDMA Rxfill ring
++ * @alloc_count: Number of buffers to allocate
++ *
++ * Alloc Rx buffers for RxFill ring.
++ *
++ * Return the number of buffers allocated.
++ */
++int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
++{
++      return edma_rx_alloc_buffer_list(rxfill_ring, alloc_count);
++}
++
++/* Mark ip_summed appropriately in the skb as per the L3/L4 checksum
++ * status in descriptor.
++ */
++static void edma_rx_checksum_verify(struct edma_rxdesc_pri *rxdesc_pri,
++                                  struct sk_buff *skb)
++{
++      u8 pid = EDMA_RXDESC_PID_GET(rxdesc_pri);
++
++      skb_checksum_none_assert(skb);
++
++      if (likely(EDMA_RX_PID_IS_IPV4(pid))) {
++              if (likely(EDMA_RXDESC_L3CSUM_STATUS_GET(rxdesc_pri)) &&
++                  likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
++                      skb->ip_summed = CHECKSUM_UNNECESSARY;
++      } else if (likely(EDMA_RX_PID_IS_IPV6(pid))) {
++              if (likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
++                      skb->ip_summed = CHECKSUM_UNNECESSARY;
++      }
++}
++
++static void edma_rx_process_last_segment(struct edma_rxdesc_ring *rxdesc_ring,
++                                       struct edma_rxdesc_pri *rxdesc_pri,
++                                       struct sk_buff *skb)
++{
++      bool page_mode = rxdesc_ring->rxfill->page_mode;
++      struct edma_port_pcpu_stats *pcpu_stats;
++      struct edma_port_rx_stats *rx_stats;
++      struct edma_port_priv *port_dev;
++      struct sk_buff *skb_head;
++      struct net_device *dev;
++      u32 pkt_length;
++
++      /* Get packet length. */
++      pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
++
++      skb_head = rxdesc_ring->head;
++      dev = skb_head->dev;
++
++      /* Check Rx checksum offload status. */
++      if (likely(dev->features & NETIF_F_RXCSUM))
++              edma_rx_checksum_verify(rxdesc_pri, skb_head);
++
++      /* Get stats for the netdevice. */
++      port_dev = netdev_priv(dev);
++      pcpu_stats = &port_dev->pcpu_stats;
++      rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
++
++      if (unlikely(page_mode)) {
++              if (unlikely(!pskb_may_pull(skb_head, ETH_HLEN))) {
++                      /* Discard the SKB that we have been building,
++                       * in addition to the SKB linked to current descriptor.
++                       */
++                      dev_kfree_skb_any(skb_head);
++                      rxdesc_ring->head = NULL;
++                      rxdesc_ring->last = NULL;
++                      rxdesc_ring->pdesc_head = NULL;
++
++                      u64_stats_update_begin(&rx_stats->syncp);
++                      rx_stats->rx_nr_frag_headroom_err++;
++                      u64_stats_update_end(&rx_stats->syncp);
++
++                      return;
++              }
++      }
++
++      if (unlikely(!pskb_pull(skb_head, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_ring->pdesc_head)))) {
++              dev_kfree_skb_any(skb_head);
++              rxdesc_ring->head = NULL;
++              rxdesc_ring->last = NULL;
++              rxdesc_ring->pdesc_head = NULL;
++
++              u64_stats_update_begin(&rx_stats->syncp);
++              rx_stats->rx_nr_frag_headroom_err++;
++              u64_stats_update_end(&rx_stats->syncp);
++
++              return;
++      }
++
++      u64_stats_update_begin(&rx_stats->syncp);
++      rx_stats->rx_pkts++;
++      rx_stats->rx_bytes += skb_head->len;
++      rx_stats->rx_nr_frag_pkts += (u64)page_mode;
++      rx_stats->rx_fraglist_pkts += (u64)(!page_mode);
++      u64_stats_update_end(&rx_stats->syncp);
++
++      pr_debug("edma_context:%p skb:%p Jumbo pkt_length:%u\n",
++               edma_ctx, skb_head, skb_head->len);
++
++      skb_head->protocol = eth_type_trans(skb_head, dev);
++
++      /* Send packet up the stack. */
++      if (dev->features & NETIF_F_GRO)
++              napi_gro_receive(&rxdesc_ring->napi, skb_head);
++      else
++              netif_receive_skb(skb_head);
++
++      rxdesc_ring->head = NULL;
++      rxdesc_ring->last = NULL;
++      rxdesc_ring->pdesc_head = NULL;
++}
++
++static void edma_rx_handle_frag_list(struct edma_rxdesc_ring *rxdesc_ring,
++                                   struct edma_rxdesc_pri *rxdesc_pri,
++                                   struct sk_buff *skb)
++{
++      u32 pkt_length;
++
++      /* Get packet length. */
++      pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
++      pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
++               edma_ctx, skb, pkt_length);
++
++      if (!(rxdesc_ring->head)) {
++              skb_put(skb, pkt_length);
++              rxdesc_ring->head = skb;
++              rxdesc_ring->last = NULL;
++              rxdesc_ring->pdesc_head = rxdesc_pri;
++
++              return;
++      }
++
++      /* Append it to the fraglist of head if this is second frame
++       * If not second frame append to tail.
++       */
++      skb_put(skb, pkt_length);
++      if (!skb_has_frag_list(rxdesc_ring->head))
++              skb_shinfo(rxdesc_ring->head)->frag_list = skb;
++      else
++              rxdesc_ring->last->next = skb;
++
++      rxdesc_ring->last = skb;
++      rxdesc_ring->last->next = NULL;
++      rxdesc_ring->head->len += pkt_length;
++      rxdesc_ring->head->data_len += pkt_length;
++      rxdesc_ring->head->truesize += skb->truesize;
++
++      /* If there are more segments for this packet,
++       * then we have nothing to do. Otherwise process
++       * last segment and send packet to stack.
++       */
++      if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
++              return;
++
++      edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
++}
++
++static void edma_rx_handle_nr_frags(struct edma_rxdesc_ring *rxdesc_ring,
++                                  struct edma_rxdesc_pri *rxdesc_pri,
++                                  struct sk_buff *skb)
++{
++      skb_frag_t *frag = NULL;
++      u32 pkt_length;
++
++      /* Get packet length. */
++      pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
++      pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
++               edma_ctx, skb, pkt_length);
++
++      if (!(rxdesc_ring->head)) {
++              skb->len = pkt_length;
++              skb->data_len = pkt_length;
++              skb->truesize = SKB_TRUESIZE(PAGE_SIZE);
++              rxdesc_ring->head = skb;
++              rxdesc_ring->last = NULL;
++              rxdesc_ring->pdesc_head = rxdesc_pri;
++
++              return;
++      }
++
++      frag = &skb_shinfo(skb)->frags[0];
++
++      /* Append current frag at correct index as nr_frag of parent. */
++      skb_add_rx_frag(rxdesc_ring->head, skb_shinfo(rxdesc_ring->head)->nr_frags,
++                      skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
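++      /* Detach the frag from the donor skb so that freeing it below does
++       * not drop the page reference just handed to the head skb.
++       */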
++      skb_shinfo(skb)->nr_frags = 0;
++
++      /* Free the SKB after we have appended its frag page to the head skb. */
++      dev_kfree_skb_any(skb);
++
++      /* If there are more segments for this packet,
++       * then we have nothing to do. Otherwise process
++       * last segment and send packet to stack.
++       */
++      if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
++              return;
++
++      edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
++}
++
++static bool edma_rx_handle_linear_packets(struct edma_rxdesc_ring *rxdesc_ring,
++                                        struct edma_rxdesc_pri *rxdesc_pri,
++                                        struct sk_buff *skb)
++{
++      bool page_mode = rxdesc_ring->rxfill->page_mode;
++      struct edma_port_pcpu_stats *pcpu_stats;
++      struct edma_port_rx_stats *rx_stats;
++      struct edma_port_priv *port_dev;
++      skb_frag_t *frag = NULL;
++      u32 pkt_length;
++
++      /* Get stats for the netdevice. */
++      port_dev = netdev_priv(skb->dev);
++      pcpu_stats = &port_dev->pcpu_stats;
++      rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
++
++      /* Get packet length. */
++      pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
++
++      if (likely(!page_mode)) {
++              skb_put(skb, pkt_length);
++              goto send_to_stack;
++      }
++
++      /* Handle linear packet in page mode. */
++      frag = &skb_shinfo(skb)->frags[0];
++      skb_add_rx_frag(skb, 0, skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
++
++      /* Pull ethernet header into SKB data area for header processing. */
++      if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
++              u64_stats_update_begin(&rx_stats->syncp);
++              rx_stats->rx_nr_frag_headroom_err++;
++              u64_stats_update_end(&rx_stats->syncp);
++              dev_kfree_skb_any(skb);
++
++              return false;
++      }
++
++send_to_stack:
++
++      __skb_pull(skb, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_pri));
++
++      /* Check Rx checksum offload status. */
++      if (likely(skb->dev->features & NETIF_F_RXCSUM))
++              edma_rx_checksum_verify(rxdesc_pri, skb);
++
++      u64_stats_update_begin(&rx_stats->syncp);
++      rx_stats->rx_pkts++;
++      rx_stats->rx_bytes += pkt_length;
++      rx_stats->rx_nr_frag_pkts += (u64)page_mode;
++      u64_stats_update_end(&rx_stats->syncp);
++
++      skb->protocol = eth_type_trans(skb, skb->dev);
++      if (skb->dev->features & NETIF_F_GRO)
++              napi_gro_receive(&rxdesc_ring->napi, skb);
++      else
++              netif_receive_skb(skb);
++
++      netdev_dbg(skb->dev, "edma_context:%p, skb:%p pkt_length:%u\n",
++                 edma_ctx, skb, skb->len);
++
++      return true;
++}
++
++static struct net_device *edma_rx_get_src_dev(struct edma_rxdesc_stats *rxdesc_stats,
++                                            struct edma_rxdesc_pri *rxdesc_pri,
++                                            struct sk_buff *skb)
++{
++      u32 src_info = EDMA_RXDESC_SRC_INFO_GET(rxdesc_pri);
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct net_device *ndev = NULL;
++      u8 src_port_num;
++
++      /* Check src_info. */
++      if (likely((src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK)
++          == EDMA_RXDESC_SRCINFO_TYPE_PORTID)) {
++              src_port_num = src_info & EDMA_RXDESC_PORTNUM_BITS;
++      } else {
++              if (net_ratelimit()) {
++                      pr_warn("Invalid src info_type:0x%x. Drop skb:%p\n",
++                              (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK), skb);
++              }
++
++              u64_stats_update_begin(&rxdesc_stats->syncp);
++              ++rxdesc_stats->src_port_inval_type;
++              u64_stats_update_end(&rxdesc_stats->syncp);
++
++              return NULL;
++      }
++
++      /* Packet with PP source. */
++      if (likely(src_port_num <= hw_info->max_ports)) {
++              if (unlikely(src_port_num < EDMA_START_IFNUM)) {
++                      if (net_ratelimit())
++                              pr_warn("Port number error :%d. Drop skb:%p\n",
++                                      src_port_num, skb);
++
++                      u64_stats_update_begin(&rxdesc_stats->syncp);
++                      ++rxdesc_stats->src_port_inval;
++                      u64_stats_update_end(&rxdesc_stats->syncp);
++
++                      return NULL;
++              }
++
++              /* Get netdev for this port using the source port
++               * number as index into the netdev array. We need to
++               * subtract one since the indices start from '0' and
++               * port numbers start from '1'.
++               */
++              ndev = edma_ctx->netdev_arr[src_port_num - 1];
++      }
++
++      if (likely(ndev))
++              return ndev;
++
++      if (net_ratelimit())
++              pr_warn("Netdev Null src_info_type:0x%x src port num:%d Drop skb:%p\n",
++                      (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK),
++                      src_port_num, skb);
++
++      u64_stats_update_begin(&rxdesc_stats->syncp);
++      ++rxdesc_stats->src_port_inval_netdev;
++      u64_stats_update_end(&rxdesc_stats->syncp);
++
++      return NULL;
++}
++
++static int edma_rx_reap(struct edma_rxdesc_ring *rxdesc_ring, int budget)
++{
++      struct edma_rxdesc_stats *rxdesc_stats = &rxdesc_ring->rxdesc_stats;
++      u32 alloc_size = rxdesc_ring->rxfill->alloc_size;
++      bool page_mode = rxdesc_ring->rxfill->page_mode;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct edma_rxdesc_pri *next_rxdesc_pri;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct device *dev = ppe_dev->dev;
++      u32 prod_idx, cons_idx, end_idx;
++      u32 work_to_do, work_done = 0;
++      struct sk_buff *next_skb;
++      u32 work_leftover, reg;
++
++      /* Get Rx ring producer and consumer indices. */
++      cons_idx = rxdesc_ring->cons_idx;
++
++      if (likely(rxdesc_ring->work_leftover > EDMA_RX_MAX_PROCESS)) {
++              work_to_do = rxdesc_ring->work_leftover;
++      } else {
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
++              regmap_read(regmap, reg, &prod_idx);
++              prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
++              work_to_do = EDMA_DESC_AVAIL_COUNT(prod_idx,
++                                                 cons_idx, EDMA_RX_RING_SIZE);
++              rxdesc_ring->work_leftover = work_to_do;
++      }
++
++      if (work_to_do > budget)
++              work_to_do = budget;
++
++      rxdesc_ring->work_leftover -= work_to_do;
++      end_idx = (cons_idx + work_to_do) & EDMA_RX_RING_SIZE_MASK;
++      next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
++
++      /* Get opaque from RXDESC. */
++      next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
++
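++      /* Descriptors are reaped in batches of EDMA_RX_MAX_PROCESS; any
++       * remainder that does not fill a complete batch is refilled after
++       * the main loop.
++       */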
++      work_leftover = work_to_do & (EDMA_RX_MAX_PROCESS - 1);
++      while (likely(work_to_do--)) {
++              struct edma_rxdesc_pri *rxdesc_pri;
++              struct net_device *ndev;
++              struct sk_buff *skb;
++              dma_addr_t dma_addr;
++
++              skb = next_skb;
++              rxdesc_pri = next_rxdesc_pri;
++              dma_addr = EDMA_RXDESC_BUFFER_ADDR_GET(rxdesc_pri);
++
++              if (!page_mode)
++                      dma_unmap_single(dev, dma_addr, alloc_size,
++                                       DMA_FROM_DEVICE);
++              else
++                      dma_unmap_page(dev, dma_addr, PAGE_SIZE,
++                                     DMA_FROM_DEVICE);
++
++              /* Update consumer index. */
++              cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
++
++              /* Get the next Rx descriptor. */
++              next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
++
++              /* Handle linear packets or initial segments first. */
++              if (likely(!(rxdesc_ring->head))) {
++                      ndev = edma_rx_get_src_dev(rxdesc_stats, rxdesc_pri, skb);
++                      if (unlikely(!ndev)) {
++                              dev_kfree_skb_any(skb);
++                              goto next_rx_desc;
++                      }
++
++                      /* Update skb fields for head skb. */
++                      skb->dev = ndev;
++                      skb->skb_iif = ndev->ifindex;
++
++                      /* Handle linear packets. */
++                      if (likely(!EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))) {
++                              next_skb =
++                                      (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
++
++                              if (unlikely(!edma_rx_handle_linear_packets(rxdesc_ring,
++                                                                          rxdesc_pri,
++                                                                          skb)))
++                                      dev_kfree_skb_any(skb);
++
++                              goto next_rx_desc;
++                      }
++              }
++
++              next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
++
++              /* Handle scatter frame processing for first/middle/last segments. */
++              if (page_mode)
++                      edma_rx_handle_nr_frags(rxdesc_ring, rxdesc_pri, skb);
++              else
++                      edma_rx_handle_frag_list(rxdesc_ring, rxdesc_pri, skb);
++
++next_rx_desc:
++              /* Update work done. */
++              work_done++;
++
++              /* Check if we can refill EDMA_RX_MAX_PROCESS worth buffers,
++               * if yes, refill and update index before continuing.
++               */
++              if (unlikely(!(work_done & (EDMA_RX_MAX_PROCESS - 1)))) {
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
++                      regmap_write(regmap, reg, cons_idx);
++                      rxdesc_ring->cons_idx = cons_idx;
++                      edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, EDMA_RX_MAX_PROCESS);
++              }
++      }
++
++      /* Check if we need to refill and update
++       * index for any buffers before exit.
++       */
++      if (unlikely(work_leftover)) {
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
++              regmap_write(regmap, reg, cons_idx);
++              rxdesc_ring->cons_idx = cons_idx;
++              edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, work_leftover);
++      }
++
++      return work_done;
++}
++
++/**
++ * edma_rx_napi_poll - EDMA Rx napi poll.
++ * @napi: NAPI structure
++ * @budget: Rx NAPI budget
++ *
++ * EDMA RX NAPI handler to handle the NAPI poll.
++ *
++ * Return the number of packets processed.
++ */
++int edma_rx_napi_poll(struct napi_struct *napi, int budget)
++{
++      struct edma_rxdesc_ring *rxdesc_ring =
++              container_of(napi, struct edma_rxdesc_ring, napi);
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      int work_done = 0;
++      u32 status, reg;
++
++      do {
++              work_done += edma_rx_reap(rxdesc_ring, budget - work_done);
++              if (likely(work_done >= budget))
++                      return work_done;
++
++              /* Check if there are more packets to process. */
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->ring_id);
++              regmap_read(regmap, reg, &status);
++              status = status & EDMA_RXDESC_RING_INT_STATUS_MASK;
++      } while (likely(status));
++
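++      /* No work left: complete NAPI and unmask the Rx descriptor
++       * interrupt that was disabled in the IRQ handler.
++       */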
++      napi_complete(napi);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
++      regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
++
++      return work_done;
++}
++
++/**
++ * edma_rx_handle_irq - EDMA Rx handle irq.
++ * @irq: Interrupt to handle
++ * @ctx: Context
++ *
++ * Process RX IRQ and schedule NAPI.
++ *
++ * Return IRQ_HANDLED.
++ */
++irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
++{
++      struct edma_rxdesc_ring *rxdesc_ring = ctx;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 reg;
++
++      if (likely(napi_schedule_prep(&rxdesc_ring->napi))) {
++              /* Disable RxDesc interrupt. */
++              reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
++              regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
++              __napi_schedule(&rxdesc_ring->napi);
++      }
++
++      return IRQ_HANDLED;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_rx.h b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+new file mode 100644
+index 000000000000..4a262a066808
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+@@ -0,0 +1,287 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_RX__
++#define __EDMA_RX__
++
++#include <linux/netdevice.h>
++
++#define EDMA_RXFILL_RING_PER_CORE_MAX 1
++#define EDMA_RXDESC_RING_PER_CORE_MAX 1
++
++/* Max Rx processing without replenishing RxFill ring. */
++#define EDMA_RX_MAX_PROCESS           32
++
++#define EDMA_RX_SKB_HEADROOM          128
++#define EDMA_RX_QUEUE_START           0
++#define EDMA_RX_BUFFER_SIZE           1984
++#define EDMA_MAX_CORE                 4
++
++#define EDMA_GET_DESC(R, i, type)     (&(((type *)((R)->desc))[(i)]))
++#define EDMA_GET_PDESC(R, i, type)    (&(((type *)((R)->pdesc))[(i)]))
++#define EDMA_GET_SDESC(R, i, type)    (&(((type *)((R)->sdesc))[(i)]))
++#define EDMA_RXFILL_DESC(R, i)                EDMA_GET_DESC(R, i, \
++                                              struct edma_rxfill_desc)
++#define EDMA_RXDESC_PRI_DESC(R, i)    EDMA_GET_PDESC(R, i, \
++                                              struct edma_rxdesc_pri)
++#define EDMA_RXDESC_SEC_DESC(R, i)    EDMA_GET_SDESC(R, i, \
++                                              struct edma_rxdesc_sec)
++
++#define EDMA_RX_RING_SIZE     2048
++
++#define EDMA_RX_RING_SIZE_MASK        (EDMA_RX_RING_SIZE - 1)
++#define EDMA_RX_RING_ID_MASK          0x1F
++
++#define EDMA_MAX_PRI_PER_CORE      8
++#define EDMA_RX_PID_IPV4_MAX          0x3
++#define EDMA_RX_PID_IPV6              0x4
++#define EDMA_RX_PID_IS_IPV4(pid)      (!((pid) & (~EDMA_RX_PID_IPV4_MAX)))
++#define EDMA_RX_PID_IS_IPV6(pid)      (!!((pid) & EDMA_RX_PID_IPV6))
++
++#define EDMA_RXDESC_BUFFER_ADDR_GET(desc)     \
++                              ((u32)(le32_to_cpu((__force __le32)((desc)->word0))))
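++
++/* The skb pointer is stashed in the descriptor's opaque words (word2 and
++ * word3 on 64-bit) when the buffer is posted, and recovered here once the
++ * descriptor completes.
++ */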
++#define EDMA_RXDESC_OPAQUE_GET(_desc) ({ \
++                      typeof(_desc) (desc) = (_desc); \
++                      ((uintptr_t)((u64)((desc)->word2) | \
++                      ((u64)((desc)->word3) << 0x20))); })
++
++#define EDMA_RXDESC_SRCINFO_TYPE_PORTID               0x2000
++#define EDMA_RXDESC_SRCINFO_TYPE_MASK         0xF000
++#define EDMA_RXDESC_L3CSUM_STATUS_MASK                BIT(13)
++#define EDMA_RXDESC_L4CSUM_STATUS_MASK                BIT(12)
++#define EDMA_RXDESC_PORTNUM_BITS              0x0FFF
++
++#define EDMA_RXDESC_PACKET_LEN_MASK           0x3FFFF
++#define EDMA_RXDESC_PACKET_LEN_GET(_desc) ({ \
++                      typeof(_desc) (desc) = (_desc); \
++                      ((le32_to_cpu((__force __le32)((desc)->word5))) & \
++                      EDMA_RXDESC_PACKET_LEN_MASK); })
++
++#define EDMA_RXDESC_MORE_BIT_MASK             0x40000000
++#define EDMA_RXDESC_MORE_BIT_GET(desc)                ((le32_to_cpu((__force __le32)((desc)->word1))) & \
++                                              EDMA_RXDESC_MORE_BIT_MASK)
++#define EDMA_RXDESC_SRC_DST_INFO_GET(desc)    \
++                              ((u32)((le32_to_cpu((__force __le32)((desc)->word4)))))
++
++#define EDMA_RXDESC_L3_OFFSET_MASK    GENMASK(23, 16)
++#define EDMA_RXDESC_L3_OFFSET_GET(desc)       FIELD_GET(EDMA_RXDESC_L3_OFFSET_MASK, \
++                                      le32_to_cpu((__force __le32)((desc)->word7)))
++
++#define EDMA_RXDESC_PID_MASK          GENMASK(15, 12)
++#define EDMA_RXDESC_PID_GET(desc)     FIELD_GET(EDMA_RXDESC_PID_MASK, \
++                                      le32_to_cpu((__force __le32)((desc)->word7)))
++
++#define EDMA_RXDESC_DST_INFO_MASK     GENMASK(31, 16)
++#define EDMA_RXDESC_DST_INFO_GET(desc)        FIELD_GET(EDMA_RXDESC_DST_INFO_MASK, \
++                                      le32_to_cpu((__force __le32)((desc)->word4)))
++
++#define EDMA_RXDESC_SRC_INFO_MASK     GENMASK(15, 0)
++#define EDMA_RXDESC_SRC_INFO_GET(desc)        FIELD_GET(EDMA_RXDESC_SRC_INFO_MASK, \
++                                      le32_to_cpu((__force __le32)((desc)->word4)))
++
++#define EDMA_RXDESC_PORT_ID_MASK      GENMASK(11, 0)
++#define EDMA_RXDESC_PORT_ID_GET(x)    FIELD_GET(EDMA_RXDESC_PORT_ID_MASK, x)
++
++#define EDMA_RXDESC_SRC_PORT_ID_GET(desc)     (EDMA_RXDESC_PORT_ID_GET \
++                                              (EDMA_RXDESC_SRC_INFO_GET(desc)))
++#define EDMA_RXDESC_DST_PORT_ID_GET(desc)     (EDMA_RXDESC_PORT_ID_GET \
++                                              (EDMA_RXDESC_DST_INFO_GET(desc)))
++
++#define EDMA_RXDESC_DST_PORT          (0x2 << EDMA_RXDESC_PID_SHIFT)
++
++#define EDMA_RXDESC_L3CSUM_STATUS_GET(desc)   FIELD_GET(EDMA_RXDESC_L3CSUM_STATUS_MASK, \
++                                              le32_to_cpu((__force __le32)(desc)->word6))
++#define EDMA_RXDESC_L4CSUM_STATUS_GET(desc)   FIELD_GET(EDMA_RXDESC_L4CSUM_STATUS_MASK, \
++                                              le32_to_cpu((__force __le32)(desc)->word6))
++
++#define EDMA_RXDESC_DATA_OFFSET_MASK          GENMASK(11, 0)
++#define EDMA_RXDESC_DATA_OFFSET_GET(desc)     FIELD_GET(EDMA_RXDESC_DATA_OFFSET_MASK, \
++                                              le32_to_cpu((__force __le32)(desc)->word6))
++
++#define EDMA_RXFILL_BUF_SIZE_MASK             0xFFFF
++#define EDMA_RXFILL_BUF_SIZE_SHIFT            16
++
++/* Opaque values are not accessed by the EDMA HW,
++ * so endianness conversion is not needed.
++ */
++
++#define EDMA_RXFILL_OPAQUE_LO_SET(desc, ptr)  (((desc)->word2) = \
++                                              (u32)(uintptr_t)(ptr))
++#ifdef __LP64__
++#define EDMA_RXFILL_OPAQUE_HI_SET(desc, ptr)  (((desc)->word3) = \
++                                              (u32)((u64)(ptr) >> 0x20))
++#endif
++
++#define EDMA_RXFILL_OPAQUE_GET(_desc) ({ \
++                      typeof(_desc) (desc) = (_desc); \
++                      ((uintptr_t)((u64)((desc)->word2) | \
++                      ((u64)((desc)->word3) << 0x20))); })
++
++#define EDMA_RXFILL_PACKET_LEN_SET(desc, len) { \
++      (((desc)->word1) = (u32)((((u32)len) << EDMA_RXFILL_BUF_SIZE_SHIFT) & \
++                                              0xFFFF0000)); \
++}
++
++#define EDMA_RXFILL_BUFFER_ADDR_SET(desc, addr)       (((desc)->word0) = (u32)(addr))
++
++/* Opaque values are set in word2 and word3, they are not accessed by the EDMA HW,
++ * so endianness conversion is not needed.
++ */
++#define EDMA_RXFILL_ENDIAN_SET(_desc) ({ \
++      typeof(_desc) (desc) = (_desc); \
++      cpu_to_le32s(&((desc)->word0)); \
++      cpu_to_le32s(&((desc)->word1)); \
++})
++
++/* RX DESC size shift to obtain index from descriptor pointer. */
++#define EDMA_RXDESC_SIZE_SHIFT                5
++
++/**
++ * struct edma_rxdesc_stats - RX descriptor ring stats.
++ * @src_port_inval: Invalid source port number
++ * @src_port_inval_type: Source type is not PORT ID
++ * @src_port_inval_netdev: Invalid net device for the source port
++ * @syncp: Synchronization for statistics updates
++ */
++struct edma_rxdesc_stats {
++      u64 src_port_inval;
++      u64 src_port_inval_type;
++      u64 src_port_inval_netdev;
++      struct u64_stats_sync syncp;
++};
++
++/**
++ * struct edma_rxfill_stats - Rx fill descriptor ring stats.
++ * @alloc_failed: Buffer allocation failure count
++ * @page_alloc_failed: Page allocation failure count for page mode
++ * @syncp: Synchronization for statistics updates
++ */
++struct edma_rxfill_stats {
++      u64 alloc_failed;
++      u64 page_alloc_failed;
++      struct u64_stats_sync syncp;
++};
++
++/**
++ * struct edma_rxdesc_pri - Rx descriptor.
++ * @word0: Buffer address
++ * @word1: More bit, priority bit, service code
++ * @word2: Opaque low bits
++ * @word3: Opaque high bits
++ * @word4: Destination and source information
++ * @word5: WiFi QoS, data length
++ * @word6: Hash value, check sum status
++ * @word7: DSCP, packet offsets
++ */
++struct edma_rxdesc_pri {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++      u32 word4;
++      u32 word5;
++      u32 word6;
++      u32 word7;
++};
++
++/**
++ * struct edma_rxdesc_sec - Rx secondary descriptor.
++ * @word0: Timestamp
++ * @word1: Secondary checksum status
++ * @word2: QoS tag
++ * @word3: Flow index details
++ * @word4: Secondary packet offsets
++ * @word5: Multicast bit, checksum
++ * @word6: SVLAN, CVLAN
++ * @word7: Secondary SVLAN, CVLAN
++ */
++struct edma_rxdesc_sec {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++      u32 word4;
++      u32 word5;
++      u32 word6;
++      u32 word7;
++};
++
++/**
++ * struct edma_rxfill_desc - RxFill descriptor.
++ * @word0: Buffer address
++ * @word1: Buffer size
++ * @word2: Opaque low bits
++ * @word3: Opaque high bits
++ */
++struct edma_rxfill_desc {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++};
++
++/**
++ * struct edma_rxfill_ring - RxFill ring
++ * @ring_id: RxFill ring number
++ * @count: Number of descriptors in the ring
++ * @prod_idx: Ring producer index
++ * @alloc_size: Buffer size to allocate
++ * @desc: Descriptor ring virtual address
++ * @dma: Descriptor ring physical address
++ * @buf_len: Buffer length for rxfill descriptor
++ * @page_mode: Page mode for Rx processing
++ * @rxfill_stats: Rx fill ring statistics
++ */
++struct edma_rxfill_ring {
++      u32 ring_id;
++      u32 count;
++      u32 prod_idx;
++      u32 alloc_size;
++      struct edma_rxfill_desc *desc;
++      dma_addr_t dma;
++      u32 buf_len;
++      bool page_mode;
++      struct edma_rxfill_stats rxfill_stats;
++};
++
++/**
++ * struct edma_rxdesc_ring - RxDesc ring
++ * @napi: Pointer to napi
++ * @ring_id: Rxdesc ring number
++ * @count: Number of descriptors in the ring
++ * @work_leftover: Leftover descriptors to be processed
++ * @cons_idx: Ring consumer index
++ * @pdesc: Primary descriptor ring virtual address
++ * @pdesc_head: Primary descriptor head in case of scatter-gather frame
++ * @sdesc: Secondary descriptor ring virtual address
++ * @rxdesc_stats: Rx descriptor ring statistics
++ * @rxfill: RxFill ring used
++ * @napi_added: Flag to indicate NAPI add status
++ * @pdma: Primary descriptor ring physical address
++ * @sdma: Secondary descriptor ring physical address
++ * @head: Head of the skb list in case of scatter-gather frame
++ * @last: Last skb of the skb list in case of scatter-gather frame
++ */
++struct edma_rxdesc_ring {
++      struct napi_struct napi;
++      u32 ring_id;
++      u32 count;
++      u32 work_leftover;
++      u32 cons_idx;
++      struct edma_rxdesc_pri *pdesc;
++      struct edma_rxdesc_pri *pdesc_head;
++      struct edma_rxdesc_sec *sdesc;
++      struct edma_rxdesc_stats rxdesc_stats;
++      struct edma_rxfill_ring *rxfill;
++      bool napi_added;
++      dma_addr_t pdma;
++      dma_addr_t sdma;
++      struct sk_buff *head;
++      struct sk_buff *last;
++};
++
++irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
++int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
++int edma_rx_napi_poll(struct napi_struct *napi, int budget);
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-41-net-ethernet-qualcomm-Add-Tx-Ethernet-DMA-support.patch b/target/linux/qualcommbe/patches-6.6/103-41-net-ethernet-qualcomm-Add-Tx-Ethernet-DMA-support.patch
new file mode 100644 (file)
index 0000000..9a190c7
--- /dev/null
@@ -0,0 +1,2390 @@
+From 1c2736afc17435d3bca18a84f9ed2620a5b03830 Mon Sep 17 00:00:00 2001
+From: Suruchi Agarwal <quic_suruchia@quicinc.com>
+Date: Thu, 21 Mar 2024 16:26:29 -0700
+Subject: [PATCH 41/50] net: ethernet: qualcomm: Add Tx Ethernet DMA support
+
+Add Tx queues, rings, descriptors configurations and
+DMA support for the EDMA.
+
+Change-Id: Idfb0e1fe5ac494d614097d6c97dd15d63bbce8e6
+Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/edma.c      |  97 ++-
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |   7 +
+ .../net/ethernet/qualcomm/ppe/edma_cfg_tx.c   | 648 ++++++++++++++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_tx.h   |  28 +
+ drivers/net/ethernet/qualcomm/ppe/edma_port.c | 136 +++
+ drivers/net/ethernet/qualcomm/ppe/edma_port.h |  35 +
+ drivers/net/ethernet/qualcomm/ppe/edma_tx.c   | 808 ++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma_tx.h   | 302 +++++++
+ 9 files changed, 2055 insertions(+), 8 deletions(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_tx.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 3fd607ce42de..b358bfd781fb 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+ #EDMA
+-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
+\ No newline at end of file
++qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+index 134f6b95c294..739fcfbde0f9 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -18,6 +18,7 @@
+ #include <linux/reset.h>
+ #include "edma.h"
++#include "edma_cfg_tx.h"
+ #include "edma_cfg_rx.h"
+ #include "ppe_regs.h"
+@@ -25,6 +26,7 @@
+ /* Global EDMA context. */
+ struct edma_context *edma_ctx;
++static char **edma_txcmpl_irq_name;
+ static char **edma_rxdesc_irq_name;
+ /* Module params. */
+@@ -192,22 +194,59 @@ static int edma_configure_ucast_prio_map_tbl(void)
+ static int edma_irq_register(void)
+ {
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
+       struct edma_ring_info *rx = hw_info->rx;
+       int ret;
+       u32 i;
++      /* Request IRQ for TXCMPL rings. */
++      edma_txcmpl_irq_name = kzalloc((sizeof(char *) * txcmpl->num_rings), GFP_KERNEL);
++      if (!edma_txcmpl_irq_name)
++              return -ENOMEM;
++
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              edma_txcmpl_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
++                                                GFP_KERNEL);
++              if (!edma_txcmpl_irq_name[i]) {
++                      ret = -ENOMEM;
++                      goto txcmpl_ring_irq_name_alloc_fail;
++              }
++
++              snprintf(edma_txcmpl_irq_name[i], EDMA_IRQ_NAME_SIZE, "edma_txcmpl_%d",
++                       txcmpl->ring_start + i);
++
++              irq_set_status_flags(edma_ctx->intr_info.intr_txcmpl[i], IRQ_DISABLE_UNLAZY);
++
++              ret = request_irq(edma_ctx->intr_info.intr_txcmpl[i],
++                                edma_tx_handle_irq, IRQF_SHARED,
++                                edma_txcmpl_irq_name[i],
++                                (void *)&edma_ctx->txcmpl_rings[i]);
++              if (ret) {
++                      pr_err("TXCMPL ring:%d IRQ:%d request failed\n",
++                             i, edma_ctx->intr_info.intr_txcmpl[i]);
++                      goto txcmpl_ring_intr_req_fail;
++              }
++
++              pr_debug("TXCMPL ring: %d IRQ:%d request success: %s\n",
++                       txcmpl->ring_start + i,
++                       edma_ctx->intr_info.intr_txcmpl[i],
++                       edma_txcmpl_irq_name[i]);
++      }
++
+       /* Request IRQ for RXDESC rings. */
+       edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
+                                      GFP_KERNEL);
+-      if (!edma_rxdesc_irq_name)
+-              return -ENOMEM;
++      if (!edma_rxdesc_irq_name) {
++              ret = -ENOMEM;
++              goto rxdesc_irq_name_alloc_fail;
++      }
+       for (i = 0; i < rx->num_rings; i++) {
+               edma_rxdesc_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
+                                                 GFP_KERNEL);
+               if (!edma_rxdesc_irq_name[i]) {
+                       ret = -ENOMEM;
+-                      goto rxdesc_irq_name_alloc_fail;
++                      goto rxdesc_ring_irq_name_alloc_fail;
+               }
+               snprintf(edma_rxdesc_irq_name[i], 20, "edma_rxdesc_%d",
+@@ -236,8 +275,19 @@ static int edma_irq_register(void)
+ rx_desc_ring_intr_req_fail:
+       for (i = 0; i < rx->num_rings; i++)
+               kfree(edma_rxdesc_irq_name[i]);
+-rxdesc_irq_name_alloc_fail:
++rxdesc_ring_irq_name_alloc_fail:
+       kfree(edma_rxdesc_irq_name);
++rxdesc_irq_name_alloc_fail:
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              synchronize_irq(edma_ctx->intr_info.intr_txcmpl[i]);
++              free_irq(edma_ctx->intr_info.intr_txcmpl[i],
++                       (void *)&edma_ctx->txcmpl_rings[i]);
++      }
++txcmpl_ring_intr_req_fail:
++      for (i = 0; i < txcmpl->num_rings; i++)
++              kfree(edma_txcmpl_irq_name[i]);
++txcmpl_ring_irq_name_alloc_fail:
++      kfree(edma_txcmpl_irq_name);
+       return ret;
+ }
+@@ -326,12 +376,22 @@ static int edma_irq_init(void)
+ static int edma_alloc_rings(void)
+ {
++      if (edma_cfg_tx_rings_alloc()) {
++              pr_err("Error in allocating Tx rings\n");
++              return -ENOMEM;
++      }
++
+       if (edma_cfg_rx_rings_alloc()) {
+               pr_err("Error in allocating Rx rings\n");
+-              return -ENOMEM;
++              goto rx_rings_alloc_fail;
+       }
+       return 0;
++
++rx_rings_alloc_fail:
++      edma_cfg_tx_rings_cleanup();
++
++      return -ENOMEM;
+ }
+ static int edma_hw_reset(void)
+@@ -389,7 +449,7 @@ static int edma_hw_configure(void)
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
+       struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+       struct regmap *regmap = ppe_dev->regmap;
+-      u32 data, reg;
++      u32 data, reg, i;
+       int ret;
+       reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
+@@ -439,11 +499,17 @@ static int edma_hw_configure(void)
+       }
+       /* Disable interrupts. */
++      for (i = 1; i <= hw_info->max_ports; i++)
++              edma_cfg_tx_disable_interrupts(i);
++
+       edma_cfg_rx_disable_interrupts();
+       edma_cfg_rx_rings_disable();
+       edma_cfg_rx_ring_mappings();
++      edma_cfg_tx_ring_mappings();
++
++      edma_cfg_tx_rings();
+       ret = edma_cfg_rx_rings();
+       if (ret) {
+@@ -520,6 +586,7 @@ static int edma_hw_configure(void)
+       edma_cfg_rx_napi_delete();
+       edma_cfg_rx_rings_disable();
+ edma_cfg_rx_rings_failed:
++      edma_cfg_tx_rings_cleanup();
+       edma_cfg_rx_rings_cleanup();
+ edma_alloc_rings_failed:
+       free_netdev(edma_ctx->dummy_dev);
+@@ -538,13 +605,27 @@ static int edma_hw_configure(void)
+ void edma_destroy(struct ppe_device *ppe_dev)
+ {
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
+       struct edma_ring_info *rx = hw_info->rx;
+       u32 i;
+       /* Disable interrupts. */
++      for (i = 1; i <= hw_info->max_ports; i++)
++              edma_cfg_tx_disable_interrupts(i);
++
+       edma_cfg_rx_disable_interrupts();
+-      /* Free IRQ for RXDESC rings. */
++      /* Free IRQ for TXCMPL rings. */
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              synchronize_irq(edma_ctx->intr_info.intr_txcmpl[i]);
++
++              free_irq(edma_ctx->intr_info.intr_txcmpl[i],
++                       (void *)&edma_ctx->txcmpl_rings[i]);
++              kfree(edma_txcmpl_irq_name[i]);
++      }
++      kfree(edma_txcmpl_irq_name);
++
++      /* Free IRQ for RXDESC rings */
+       for (i = 0; i < rx->num_rings; i++) {
+               synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
+               free_irq(edma_ctx->intr_info.intr_rx[i],
+@@ -560,6 +641,7 @@ void edma_destroy(struct ppe_device *ppe_dev)
+       edma_cfg_rx_napi_delete();
+       edma_cfg_rx_rings_disable();
+       edma_cfg_rx_rings_cleanup();
++      edma_cfg_tx_rings_cleanup();
+       free_netdev(edma_ctx->dummy_dev);
+       kfree(edma_ctx->netdev_arr);
+@@ -585,6 +667,7 @@ int edma_setup(struct ppe_device *ppe_dev)
+       edma_ctx->hw_info = &ipq9574_hw_info;
+       edma_ctx->ppe_dev = ppe_dev;
+       edma_ctx->rx_buf_size = rx_buff_size;
++      edma_ctx->tx_requeue_stop = false;
+       /* Configure the EDMA common clocks. */
+       ret = edma_clock_init();
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index 778df7997d9f..fb8ccbfbaf41 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -7,6 +7,7 @@
+ #include "ppe_api.h"
+ #include "edma_rx.h"
++#include "edma_tx.h"
+ /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
+  *
+@@ -94,8 +95,11 @@ struct edma_intr_info {
+  * @intr_info: EDMA Interrupt info
+  * @rxfill_rings: Rx fill Rings, SW is producer
+  * @rx_rings: Rx Desc Rings, SW is consumer
++ * @tx_rings: Tx Descriptor Ring, SW is producer
++ * @txcmpl_rings: Tx complete Ring, SW is consumer
+  * @rx_page_mode: Page mode enabled or disabled
+  * @rx_buf_size: Rx buffer size for Jumbo MRU
++ * @tx_requeue_stop: Tx requeue stop enabled or disabled
+  */
+ struct edma_context {
+       struct net_device **netdev_arr;
+@@ -105,8 +109,11 @@ struct edma_context {
+       struct edma_intr_info intr_info;
+       struct edma_rxfill_ring *rxfill_rings;
+       struct edma_rxdesc_ring *rx_rings;
++      struct edma_txdesc_ring *tx_rings;
++      struct edma_txcmpl_ring *txcmpl_rings;
+       u32 rx_page_mode;
+       u32 rx_buf_size;
++      bool tx_requeue_stop;
+ };
+ /* Global EDMA context */
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+new file mode 100644
+index 000000000000..f704c654b2cd
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+@@ -0,0 +1,648 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* Configure rings, buffers and NAPI for the transmit path, and provide
++ * APIs to enable, disable, clean and map the Tx rings.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++#include <linux/regmap.h>
++#include <linux/skbuff.h>
++
++#include "edma.h"
++#include "edma_cfg_tx.h"
++#include "edma_port.h"
++#include "ppe.h"
++#include "ppe_regs.h"
++
++static void edma_cfg_txcmpl_ring_cleanup(struct edma_txcmpl_ring *txcmpl_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev =  ppe_dev->dev;
++
++      /* Free any buffers assigned to any descriptors. */
++      edma_tx_complete(EDMA_TX_RING_SIZE - 1, txcmpl_ring);
++
++      /* Free TxCmpl ring descriptors. */
++      dma_free_coherent(dev, sizeof(struct edma_txcmpl_desc)
++                        * txcmpl_ring->count, txcmpl_ring->desc,
++                        txcmpl_ring->dma);
++      txcmpl_ring->desc = NULL;
++      txcmpl_ring->dma = (dma_addr_t)0;
++}
++
++static int edma_cfg_txcmpl_ring_setup(struct edma_txcmpl_ring *txcmpl_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev =  ppe_dev->dev;
++
++      /* Allocate TxCmpl ring descriptors. */
++      txcmpl_ring->desc = dma_alloc_coherent(dev, sizeof(struct edma_txcmpl_desc)
++                                             * txcmpl_ring->count,
++                                             &txcmpl_ring->dma,
++                                             GFP_KERNEL | __GFP_ZERO);
++
++      if (unlikely(!txcmpl_ring->desc))
++              return -ENOMEM;
++
++      return 0;
++}
++
++static void edma_cfg_tx_desc_ring_cleanup(struct edma_txdesc_ring *txdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txdesc_pri *txdesc = NULL;
++      struct device *dev =  ppe_dev->dev;
++      u32 prod_idx, cons_idx, data, reg;
++      struct sk_buff *skb = NULL;
++
++      /* Free any buffers assigned to any descriptors. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
++      regmap_read(regmap, reg, &data);
++      prod_idx = data & EDMA_TXDESC_PROD_IDX_MASK;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id);
++      regmap_read(regmap, reg, &data);
++      cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK;
++
++      /* Walk active list, obtain skb from descriptor and free it. */
++      while (cons_idx != prod_idx) {
++              txdesc = EDMA_TXDESC_PRI_DESC(txdesc_ring, cons_idx);
++              skb = (struct sk_buff *)EDMA_TXDESC_OPAQUE_GET(txdesc);
++              dev_kfree_skb_any(skb);
++
++              cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
++      }
++
++      /* Free Tx ring descriptors. */
++      dma_free_coherent(dev, (sizeof(struct edma_txdesc_pri)
++                        * txdesc_ring->count),
++                        txdesc_ring->pdesc,
++                        txdesc_ring->pdma);
++      txdesc_ring->pdesc = NULL;
++      txdesc_ring->pdma = (dma_addr_t)0;
++
++      /* Free secondary Tx ring descriptors. */
++      dma_free_coherent(dev, (sizeof(struct edma_txdesc_sec)
++                        * txdesc_ring->count),
++                        txdesc_ring->sdesc,
++                        txdesc_ring->sdma);
++      txdesc_ring->sdesc = NULL;
++      txdesc_ring->sdma = (dma_addr_t)0;
++}
++
++static int edma_cfg_tx_desc_ring_setup(struct edma_txdesc_ring *txdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++
++      /* Allocate primary Tx ring descriptors. */
++      txdesc_ring->pdesc = dma_alloc_coherent(dev, sizeof(struct edma_txdesc_pri)
++                                              * txdesc_ring->count,
++                                              &txdesc_ring->pdma,
++                                              GFP_KERNEL | __GFP_ZERO);
++
++      if (unlikely(!txdesc_ring->pdesc))
++              return -ENOMEM;
++
++      txdesc_ring->sdesc = dma_alloc_coherent(dev, sizeof(struct edma_txdesc_sec)
++                                              * txdesc_ring->count,
++                                              &txdesc_ring->sdma,
++                                              GFP_KERNEL | __GFP_ZERO);
++
++      if (unlikely(!txdesc_ring->sdesc)) {
++              dma_free_coherent(dev, (sizeof(struct edma_txdesc_pri)
++                                * txdesc_ring->count),
++                                txdesc_ring->pdesc,
++                                txdesc_ring->pdma);
++              txdesc_ring->pdesc = NULL;
++              txdesc_ring->pdma = (dma_addr_t)0;
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static void edma_cfg_tx_desc_ring_configure(struct edma_txdesc_ring *txdesc_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 data, reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA(txdesc_ring->id);
++      regmap_write(regmap, reg, (u32)(txdesc_ring->pdma & EDMA_RING_DMA_MASK));
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_BA2(txdesc_ring->id);
++      regmap_write(regmap, reg, (u32)(txdesc_ring->sdma & EDMA_RING_DMA_MASK));
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_RING_SIZE(txdesc_ring->id);
++      regmap_write(regmap, reg, (u32)(txdesc_ring->count & EDMA_TXDESC_RING_SIZE_MASK));
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
++      regmap_write(regmap, reg, (u32)EDMA_TX_INITIAL_PROD_IDX);
++
++      data = FIELD_PREP(EDMA_TXDESC_CTRL_FC_GRP_ID_MASK, txdesc_ring->fc_grp_id);
++
++      /* Configure group ID for flow control for this Tx ring. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
++      regmap_write(regmap, reg, data);
++}
++
++static void edma_cfg_txcmpl_ring_configure(struct edma_txcmpl_ring *txcmpl_ring)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 data, reg;
++
++      /* Configure TxCmpl ring base address. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_BA(txcmpl_ring->id);
++      regmap_write(regmap, reg, (u32)(txcmpl_ring->dma & EDMA_RING_DMA_MASK));
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_RING_SIZE(txcmpl_ring->id);
++      regmap_write(regmap, reg, (u32)(txcmpl_ring->count & EDMA_TXDESC_RING_SIZE_MASK));
++
++      /* Set TxCmpl ret mode to opaque. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id);
++      regmap_write(regmap, reg, EDMA_TXCMPL_RETMODE_OPAQUE);
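++
++      /* In opaque return mode the HW hands the 64-bit opaque value (the
++       * skb pointer stored at transmit time) back in the TxCmpl
++       * descriptor, which edma_tx_complete() reads via
++       * EDMA_TXCMPL_OPAQUE_GET().
++       */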
++
++      /* Configure the Mitigation timer. */
++      data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_TX_MITIGATION_TIMER_DEF,
++                                         ppe_dev->clk_rate / MHZ);
++      data = ((data & EDMA_TX_MOD_TIMER_INIT_MASK)
++              << EDMA_TX_MOD_TIMER_INIT_SHIFT);
++      pr_debug("EDMA Tx mitigation timer value: %d\n", data);
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id);
++      regmap_write(regmap, reg, data);
++
++      /* Configure the Mitigation packet count. */
++      data = (EDMA_TX_MITIGATION_PKT_CNT_DEF & EDMA_TXCMPL_LOW_THRE_MASK)
++              << EDMA_TXCMPL_LOW_THRE_SHIFT;
++      pr_debug("EDMA Tx mitigation packet count value: %d\n", data);
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_UGT_THRE(txcmpl_ring->id);
++      regmap_write(regmap, reg, data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_CTRL(txcmpl_ring->id);
++      regmap_write(regmap, reg, EDMA_TX_NE_INT_EN);
++}
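++
++/* Worked example for the mitigation timer (assuming, per the cycle-time
++ * note in edma.h, that EDMA_MICROSEC_TO_TIMER_UNIT() multiplies
++ * microseconds by the clock rate in MHz, and a hypothetical 350 MHz EDMA
++ * clock): EDMA_MICROSEC_TO_TIMER_UNIT(250, 350) = 87500 timer units, so a
++ * TxCmpl interrupt fires at most roughly once every 250 us per ring.
++ */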
++
++/**
++ * edma_cfg_tx_fill_per_port_tx_map - Fill Tx ring mapping.
++ * @netdev: Netdevice.
++ * @port_id: Port ID.
++ *
++ * Fill per-port Tx ring mapping in net device private area.
++ */
++void edma_cfg_tx_fill_per_port_tx_map(struct net_device *netdev, u32 port_id)
++{
++      u32 i;
++
++      /* Ring to core mapping is done in order starting from 0 for port 1. */
++      for_each_possible_cpu(i) {
++              struct edma_port_priv *port_dev = (struct edma_port_priv *)netdev_priv(netdev);
++              struct edma_txdesc_ring *txdesc_ring;
++              u32 txdesc_ring_id;
++
++              txdesc_ring_id = ((port_id - 1) * num_possible_cpus()) + i;
++              txdesc_ring = &edma_ctx->tx_rings[txdesc_ring_id];
++              port_dev->txr_map[i] = txdesc_ring;
++      }
++}
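++
++/* Worked example (assuming four possible CPUs): port 1 uses Tx desc rings
++ * 0-3 on CPUs 0-3 and port 2 uses rings 4-7, i.e.
++ * ring = (port_id - 1) * num_possible_cpus() + cpu.
++ */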
++
++/**
++ * edma_cfg_tx_rings_enable - Enable Tx rings.
++ * @port_id: Port ID.
++ *
++ * Enable the Tx descriptor rings mapped to the given port.
++ */
++void edma_cfg_tx_rings_enable(u32 port_id)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txdesc_ring *txdesc_ring;
++      u32 i, ring_idx, reg;
++
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txdesc_ring = &edma_ctx->tx_rings[ring_idx];
++              u32 data;
++
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
++              regmap_read(regmap, reg, &data);
++              data |= FIELD_PREP(EDMA_TXDESC_CTRL_TXEN_MASK, EDMA_TXDESC_TX_ENABLE);
++
++              regmap_write(regmap, reg, data);
++      }
++}
++
++/**
++ * edma_cfg_tx_rings_disable - Disable Tx rings.
++ * @port_id: Port ID.
++ *
++ * Disable the Tx descriptor rings mapped to the given port.
++ */
++void edma_cfg_tx_rings_disable(u32 port_id)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txdesc_ring *txdesc_ring;
++      u32 i, ring_idx, reg;
++
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txdesc_ring = &edma_ctx->tx_rings[ring_idx];
++              u32 data;
++
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CTRL(txdesc_ring->id);
++              regmap_read(regmap, reg, &data);
++              data &= ~EDMA_TXDESC_TX_ENABLE;
++              regmap_write(regmap, reg, data);
++      }
++}
++
++/**
++ * edma_cfg_tx_ring_mappings - Map Tx to Tx complete rings.
++ *
++ * Map Tx to Tx complete rings.
++ */
++void edma_cfg_tx_ring_mappings(void)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_ring_info *tx = hw_info->tx;
++      u32 desc_index, i, data, reg;
++
++      /* Clear the TXDESC2CMPL_MAP_xx registers before setting up
++       * the mapping. These registers hold the TXDESC to TXCMPL ring
++       * mapping.
++       */
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR, 0);
++      regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR, 0);
++      desc_index = txcmpl->ring_start;
++
++      /* Six registers hold the completion mapping for a total of 32
++       * TX desc rings (0-5, 6-11, 12-17, 18-23, 24-29 and the rest).
++       * In each register, 5 bits hold the mapping for a particular TX
++       * desc ring.
++       */
++      for (i = tx->ring_start; i < tx->ring_start + tx->num_rings; i++) {
++              u32 reg, data;
++
++              if (i >= 0 && i <= 5)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR;
++              else if (i >= 6 && i <= 11)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR;
++              else if (i >= 12 && i <= 17)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR;
++              else if (i >= 18 && i <= 23)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR;
++              else if (i >= 24 && i <= 29)
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR;
++              else
++                      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR;
++
++              pr_debug("Configure Tx desc:%u to use TxCmpl:%u\n", i, desc_index);
++
++              /* Set the Tx complete descriptor ring number in the mapping register.
++               * E.g. if (txcmpl ring) desc_index = 31 and (txdesc ring) i = 28:
++               *      reg = EDMA_REG_TXDESC2CMPL_MAP_4_ADDR
++               *      data |= (desc_index & 0x1F) << ((i % 6) * 5);
++               *      i.e. data |= (0x1F << 20), which sets 11111 at bit 20 of
++               *      register EDMA_REG_TXDESC2CMPL_MAP_4_ADDR.
++               */
++              regmap_read(regmap, reg, &data);
++              data |= (desc_index & EDMA_TXDESC2CMPL_MAP_TXDESC_MASK) << ((i % 6) * 5);
++              regmap_write(regmap, reg, data);
++
++              desc_index++;
++              if (desc_index == txcmpl->ring_start + txcmpl->num_rings)
++                      desc_index = txcmpl->ring_start;
++      }
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_0_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_0_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_1_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_1_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_2_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_2_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_3_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_3_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_4_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_4_ADDR: 0x%x\n", data);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC2CMPL_MAP_5_ADDR;
++      regmap_read(regmap, reg, &data);
++      pr_debug("EDMA_REG_TXDESC2CMPL_MAP_5_ADDR: 0x%x\n", data);
++}
++
++static int edma_cfg_tx_rings_setup(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct edma_ring_info *tx = hw_info->tx;
++      u32 i, j = 0;
++
++      /* Set Txdesc flow control group id, same as port number. */
++      for (i = 0; i < hw_info->max_ports; i++) {
++              for_each_possible_cpu(j) {
++                      struct edma_txdesc_ring *txdesc_ring = NULL;
++                      u32 txdesc_idx = (i * num_possible_cpus()) + j;
++
++                      txdesc_ring = &edma_ctx->tx_rings[txdesc_idx];
++                      txdesc_ring->fc_grp_id = i + 1;
++              }
++      }
++
++      /* Allocate TxDesc ring descriptors. */
++      for (i = 0; i < tx->num_rings; i++) {
++              struct edma_txdesc_ring *txdesc_ring = NULL;
++              int ret;
++
++              txdesc_ring = &edma_ctx->tx_rings[i];
++              txdesc_ring->count = EDMA_TX_RING_SIZE;
++              txdesc_ring->id = tx->ring_start + i;
++
++              ret = edma_cfg_tx_desc_ring_setup(txdesc_ring);
++              if (ret) {
++                      pr_err("Error in setting up %d txdesc ring. ret: %d",
++                             txdesc_ring->id, ret);
++                      /* i is unsigned: clean up only rings set up so far. */
++                      while (i--)
++                              edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
++
++                      return -ENOMEM;
++              }
++      }
++
++      /* Allocate TxCmpl ring descriptors. */
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              struct edma_txcmpl_ring *txcmpl_ring = NULL;
++              int ret;
++
++              txcmpl_ring = &edma_ctx->txcmpl_rings[i];
++              txcmpl_ring->count = EDMA_TX_RING_SIZE;
++              txcmpl_ring->id = txcmpl->ring_start + i;
++
++              ret = edma_cfg_txcmpl_ring_setup(txcmpl_ring);
++              if (ret != 0) {
++                      pr_err("Error in setting up %d TxCmpl ring. ret: %d",
++                             txcmpl_ring->id, ret);
++                      /* i is unsigned: clean up only rings set up so far. */
++                      while (i--)
++                              edma_cfg_txcmpl_ring_cleanup(&edma_ctx->txcmpl_rings[i]);
++
++                      goto txcmpl_mem_alloc_fail;
++              }
++      }
++
++      pr_debug("Tx descriptor count for Tx desc and Tx complete rings: %d\n",
++               EDMA_TX_RING_SIZE);
++
++      return 0;
++
++txcmpl_mem_alloc_fail:
++      for (i = 0; i < tx->num_rings; i++)
++              edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
++
++      return -ENOMEM;
++}
++
++/**
++ * edma_cfg_tx_rings_alloc - Allocate EDMA Tx rings.
++ *
++ * Allocate EDMA Tx rings.
++ */
++int edma_cfg_tx_rings_alloc(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct edma_ring_info *tx = hw_info->tx;
++
++      edma_ctx->tx_rings = kzalloc((sizeof(*edma_ctx->tx_rings) * tx->num_rings),
++                                   GFP_KERNEL);
++      if (!edma_ctx->tx_rings)
++              return -ENOMEM;
++
++      edma_ctx->txcmpl_rings = kzalloc((sizeof(*edma_ctx->txcmpl_rings) * txcmpl->num_rings),
++                                       GFP_KERNEL);
++      if (!edma_ctx->txcmpl_rings)
++              goto txcmpl_ring_alloc_fail;
++
++      pr_debug("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
++               tx->num_rings, tx->ring_start,
++              (tx->ring_start + tx->num_rings - 1),
++              txcmpl->num_rings, txcmpl->ring_start,
++              (txcmpl->ring_start + txcmpl->num_rings - 1));
++
++      if (edma_cfg_tx_rings_setup()) {
++              pr_err("Error in setting up tx rings\n");
++              goto tx_rings_setup_fail;
++      }
++
++      return 0;
++
++tx_rings_setup_fail:
++      kfree(edma_ctx->txcmpl_rings);
++      edma_ctx->txcmpl_rings = NULL;
++
++txcmpl_ring_alloc_fail:
++      kfree(edma_ctx->tx_rings);
++      edma_ctx->tx_rings = NULL;
++
++      return -ENOMEM;
++}
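++
++/* Typical bring-up order, as used by the EDMA core and port code in this
++ * patch: edma_cfg_tx_rings_alloc() -> edma_cfg_tx_ring_mappings() ->
++ * edma_cfg_tx_rings() -> edma_cfg_tx_rings_enable(port_id) for each port.
++ */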
++
++/**
++ * edma_cfg_tx_rings_cleanup - Cleanup EDMA Tx rings.
++ *
++ * Cleanup EDMA Tx rings.
++ */
++void edma_cfg_tx_rings_cleanup(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct edma_ring_info *tx = hw_info->tx;
++      u32 i;
++
++      /* Free any buffers assigned to any descriptors. */
++      for (i = 0; i < tx->num_rings; i++)
++              edma_cfg_tx_desc_ring_cleanup(&edma_ctx->tx_rings[i]);
++
++      /* Free Tx completion descriptors. */
++      for (i = 0; i < txcmpl->num_rings; i++)
++              edma_cfg_txcmpl_ring_cleanup(&edma_ctx->txcmpl_rings[i]);
++
++      kfree(edma_ctx->tx_rings);
++      kfree(edma_ctx->txcmpl_rings);
++      edma_ctx->tx_rings = NULL;
++      edma_ctx->txcmpl_rings = NULL;
++}
++
++/**
++ * edma_cfg_tx_rings - Configure EDMA Tx rings.
++ *
++ * Configure EDMA Tx rings.
++ */
++void edma_cfg_tx_rings(void)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct edma_ring_info *tx = hw_info->tx;
++      u32 i;
++
++      /* Configure Tx desc ring. */
++      for (i = 0; i < tx->num_rings; i++)
++              edma_cfg_tx_desc_ring_configure(&edma_ctx->tx_rings[i]);
++
++      /* Configure TxCmpl ring. */
++      for (i = 0; i < txcmpl->num_rings; i++)
++              edma_cfg_txcmpl_ring_configure(&edma_ctx->txcmpl_rings[i]);
++}
++
++/**
++ * edma_cfg_tx_disable_interrupts - Disable EDMA TX interrupts.
++ * @port_id: Port ID.
++ *
++ * Disable the TX interrupt masks for the given port.
++ */
++void edma_cfg_tx_disable_interrupts(u32 port_id)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx, reg;
++
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
++              regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
++      }
++}
++
++/**
++ * edma_cfg_tx_enable_interrupts - Enable EDMA TX interrupts.
++ * @port_id: Port ID.
++ *
++ * Enable the TX interrupt masks for the given port.
++ */
++void edma_cfg_tx_enable_interrupts(u32 port_id)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx, reg;
++
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
++              regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_txcmpl);
++      }
++}
++
++/**
++ * edma_cfg_tx_napi_enable - Enable EDMA Tx NAPI.
++ * @port_id: Port ID.
++ *
++ * Enable Tx NAPI.
++ */
++void edma_cfg_tx_napi_enable(u32 port_id)
++{
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx;
++
++      /* Enable Tx NAPI for each queue of the interface. */
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              if (!txcmpl_ring->napi_added)
++                      continue;
++
++              napi_enable(&txcmpl_ring->napi);
++      }
++}
++
++/**
++ * edma_cfg_tx_napi_disable - Disable Tx NAPI.
++ * @port_id: Port ID.
++ *
++ * Disable Tx NAPI.
++ */
++void edma_cfg_tx_napi_disable(u32 port_id)
++{
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx;
++
++      /* Disable Tx NAPI for each queue of the interface. */
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              if (!txcmpl_ring->napi_added)
++                      continue;
++
++              napi_disable(&txcmpl_ring->napi);
++      }
++}
++
++/**
++ * edma_cfg_tx_napi_delete - Delete Tx NAPI.
++ * @port_id: Port ID.
++ *
++ * Delete Tx NAPI.
++ */
++void edma_cfg_tx_napi_delete(u32 port_id)
++{
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx;
++
++      /* Delete Tx NAPI for each queue of the interface. */
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              if (!txcmpl_ring->napi_added)
++                      continue;
++
++              netif_napi_del(&txcmpl_ring->napi);
++              txcmpl_ring->napi_added = false;
++      }
++}
++
++/**
++ * edma_cfg_tx_napi_add - Add TX NAPI.
++ * @netdev: Netdevice.
++ * @port_id: Port ID.
++ *
++ * Add TX NAPI for the given net device and port.
++ */
++void edma_cfg_tx_napi_add(struct net_device *netdev, u32 port_id)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_txcmpl_ring *txcmpl_ring;
++      u32 i, ring_idx;
++
++      /* Add Tx NAPI for each queue of the interface. */
++      for_each_possible_cpu(i) {
++              ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
++              txcmpl_ring = &edma_ctx->txcmpl_rings[ring_idx];
++              netif_napi_add_weight(netdev, &txcmpl_ring->napi,
++                                    edma_tx_napi_poll, hw_info->napi_budget_tx);
++              txcmpl_ring->napi_added = true;
++              netdev_dbg(netdev, "Napi added for txcmpl ring: %u\n", txcmpl_ring->id);
++      }
++
++      netdev_dbg(netdev, "Tx NAPI budget: %d\n", hw_info->napi_budget_tx);
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+new file mode 100644
+index 000000000000..4840c601fc86
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+@@ -0,0 +1,28 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_CFG_TX__
++#define __EDMA_CFG_TX__
++
++/* Tx mitigation timer's default value, in microseconds. */
++#define EDMA_TX_MITIGATION_TIMER_DEF  250
++
++/* Tx mitigation packet count default value. */
++#define EDMA_TX_MITIGATION_PKT_CNT_DEF        16
++
++void edma_cfg_tx_rings(void);
++int edma_cfg_tx_rings_alloc(void);
++void edma_cfg_tx_rings_cleanup(void);
++void edma_cfg_tx_disable_interrupts(u32 port_id);
++void edma_cfg_tx_enable_interrupts(u32 port_id);
++void edma_cfg_tx_napi_enable(u32 port_id);
++void edma_cfg_tx_napi_disable(u32 port_id);
++void edma_cfg_tx_napi_delete(u32 port_id);
++void edma_cfg_tx_napi_add(struct net_device *netdev, u32 port_id);
++void edma_cfg_tx_ring_mappings(void);
++void edma_cfg_txcmpl_mapping_fill(void);
++void edma_cfg_tx_rings_enable(u32 port_id);
++void edma_cfg_tx_rings_disable(u32 port_id);
++void edma_cfg_tx_fill_per_port_tx_map(struct net_device *netdev, u32 port_id);
++#endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.c b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+index bbb5823408fd..afa2b6479822 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+@@ -13,6 +13,7 @@
+ #include "edma.h"
+ #include "edma_cfg_rx.h"
++#include "edma_cfg_tx.h"
+ #include "edma_port.h"
+ #include "ppe_regs.h"
+@@ -35,6 +36,15 @@ static int edma_port_stats_alloc(struct net_device *netdev)
+               return -ENOMEM;
+       }
++      port_priv->pcpu_stats.tx_stats =
++              netdev_alloc_pcpu_stats(struct edma_port_tx_stats);
++      if (!port_priv->pcpu_stats.tx_stats) {
++              netdev_err(netdev, "Per-cpu EDMA Tx stats alloc failed for %s\n",
++                         netdev->name);
++              free_percpu(port_priv->pcpu_stats.rx_stats);
++              return -ENOMEM;
++      }
++
+       return 0;
+ }
+@@ -43,6 +53,28 @@ static void edma_port_stats_free(struct net_device *netdev)
+       struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+       free_percpu(port_priv->pcpu_stats.rx_stats);
++      free_percpu(port_priv->pcpu_stats.tx_stats);
++}
++
++static void edma_port_configure(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct ppe_port *port =  port_priv->ppe_port;
++      int port_id = port->port_id;
++
++      edma_cfg_tx_fill_per_port_tx_map(netdev, port_id);
++      edma_cfg_tx_rings_enable(port_id);
++      edma_cfg_tx_napi_add(netdev, port_id);
++}
++
++static void edma_port_deconfigure(struct net_device *netdev)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct ppe_port *port =  port_priv->ppe_port;
++      int port_id = port->port_id;
++
++      edma_cfg_tx_napi_delete(port_id);
++      edma_cfg_tx_rings_disable(port_id);
+ }
+ static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
+@@ -60,6 +92,7 @@ static int edma_port_open(struct net_device *netdev)
+ {
+       struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+       struct ppe_port *ppe_port;
++      int port_id;
+       if (!port_priv)
+               return -EINVAL;
+@@ -74,10 +107,14 @@ static int edma_port_open(struct net_device *netdev)
+       netdev->wanted_features |= EDMA_NETDEV_FEATURES;
+       ppe_port  = port_priv->ppe_port;
++      port_id = ppe_port->port_id;
+       if (ppe_port->phylink)
+               phylink_start(ppe_port->phylink);
++      edma_cfg_tx_napi_enable(port_id);
++      edma_cfg_tx_enable_interrupts(port_id);
++
+       netif_start_queue(netdev);
+       return 0;
+@@ -87,13 +124,21 @@ static int edma_port_close(struct net_device *netdev)
+ {
+       struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
+       struct ppe_port *ppe_port;
++      int port_id;
+       if (!port_priv)
+               return -EINVAL;
+       netif_stop_queue(netdev);
++      /* A 20ms delay provides plenty of margin for in-flight packets to drain. */
++      msleep(20);
++
+       ppe_port  = port_priv->ppe_port;
++      port_id = ppe_port->port_id;
++
++      edma_cfg_tx_disable_interrupts(port_id);
++      edma_cfg_tx_napi_disable(port_id);
+       /* Phylink close. */
+       if (ppe_port->phylink)
+@@ -137,6 +182,92 @@ static netdev_features_t edma_port_feature_check(__maybe_unused struct sk_buff *
+       return features;
+ }
++static netdev_tx_t edma_port_xmit(struct sk_buff *skb,
++                                struct net_device *dev)
++{
++      struct edma_port_priv *port_priv = NULL;
++      struct edma_port_pcpu_stats *pcpu_stats;
++      struct edma_txdesc_ring *txdesc_ring;
++      struct edma_port_tx_stats *stats;
++      enum edma_tx_gso_status result;
++      struct sk_buff *segs = NULL;
++      u8 cpu_id;
++      u32 skbq;
++      int ret;
++
++      if (!skb || !dev)
++              return NETDEV_TX_OK;
++
++      port_priv = netdev_priv(dev);
++
++      /* Select a TX ring. */
++      skbq = (skb_get_queue_mapping(skb) & (num_possible_cpus() - 1));
++
++      txdesc_ring = (struct edma_txdesc_ring *)port_priv->txr_map[skbq];
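++
++      /* Example (assuming four possible CPUs): queue_mapping 6 is masked
++       * to skbq = 2, selecting this port's third Tx descriptor ring. The
++       * mask only works as intended for power-of-two CPU counts.
++       */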
++
++      pcpu_stats = &port_priv->pcpu_stats;
++      stats = this_cpu_ptr(pcpu_stats->tx_stats);
++
++      /* HW does not support TSO for packets with 32 or more segments.
++       * Perform SW GSO for such packets.
++       */
++      result = edma_tx_gso_segment(skb, dev, &segs);
++      if (likely(result == EDMA_TX_GSO_NOT_NEEDED)) {
++              /* Transmit the packet. */
++              ret = edma_tx_ring_xmit(dev, skb, txdesc_ring, stats);
++
++              if (unlikely(ret == EDMA_TX_FAIL_NO_DESC)) {
++                      if (likely(!edma_ctx->tx_requeue_stop)) {
++                              cpu_id = smp_processor_id();
++                              netdev_dbg(dev, "Stopping tx queue due to lack of tx descriptors\n");
++                              u64_stats_update_begin(&stats->syncp);
++                              ++stats->tx_queue_stopped[cpu_id];
++                              u64_stats_update_end(&stats->syncp);
++                              netif_tx_stop_queue(netdev_get_tx_queue(dev, skbq));
++                              return NETDEV_TX_BUSY;
++                      }
++              }
++
++              if (unlikely(ret != EDMA_TX_OK)) {
++                      dev_kfree_skb_any(skb);
++                      u64_stats_update_begin(&stats->syncp);
++                      ++stats->tx_drops;
++                      u64_stats_update_end(&stats->syncp);
++              }
++
++              return NETDEV_TX_OK;
++      } else if (unlikely(result == EDMA_TX_GSO_FAIL)) {
++              netdev_dbg(dev, "%p: SW GSO failed for segment size: %d\n",
++                         skb, skb_shinfo(skb)->gso_segs);
++              dev_kfree_skb_any(skb);
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->tx_gso_drop_pkts;
++              u64_stats_update_end(&stats->syncp);
++              return NETDEV_TX_OK;
++      }
++
++      u64_stats_update_begin(&stats->syncp);
++      ++stats->tx_gso_pkts;
++      u64_stats_update_end(&stats->syncp);
++
++      dev_kfree_skb_any(skb);
++      while (segs) {
++              skb = segs;
++              segs = segs->next;
++
++              /* Transmit the packet. */
++              ret = edma_tx_ring_xmit(dev, skb, txdesc_ring, stats);
++              if (unlikely(ret != EDMA_TX_OK)) {
++                      dev_kfree_skb_any(skb);
++                      u64_stats_update_begin(&stats->syncp);
++                      ++stats->tx_drops;
++                      u64_stats_update_end(&stats->syncp);
++              }
++      }
++
++      return NETDEV_TX_OK;
++}
++
+ static void edma_port_get_stats64(struct net_device *netdev,
+                                 struct rtnl_link_stats64 *stats)
+ {
+@@ -179,6 +310,7 @@ static int edma_port_set_mac_address(struct net_device *netdev, void *macaddr)
+ static const struct net_device_ops edma_port_netdev_ops = {
+       .ndo_open = edma_port_open,
+       .ndo_stop = edma_port_close,
++      .ndo_start_xmit = edma_port_xmit,
+       .ndo_get_stats64 = edma_port_get_stats64,
+       .ndo_set_mac_address = edma_port_set_mac_address,
+       .ndo_validate_addr = eth_validate_addr,
+@@ -199,6 +331,7 @@ void edma_port_destroy(struct ppe_port *port)
+       int port_id = port->port_id;
+       struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
++      edma_port_deconfigure(netdev);
+       edma_port_stats_free(netdev);
+       unregister_netdev(netdev);
+       free_netdev(netdev);
+@@ -276,6 +409,8 @@ int edma_port_setup(struct ppe_port *port)
+        */
+       edma_ctx->netdev_arr[port_id - 1] = netdev;
++      edma_port_configure(netdev);
++
+       /* Setup phylink. */
+       ret = ppe_port_phylink_setup(port, netdev);
+       if (ret) {
+@@ -298,6 +433,7 @@ int edma_port_setup(struct ppe_port *port)
+ register_netdev_fail:
+       ppe_port_phylink_destroy(port);
+ port_phylink_setup_fail:
++      edma_port_deconfigure(netdev);
+       edma_ctx->netdev_arr[port_id - 1] = NULL;
+       edma_port_stats_free(netdev);
+ stats_alloc_fail:
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.h b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+index 75f544a4f324..b67eddabd41c 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
+@@ -7,6 +7,8 @@
+ #include "ppe_port.h"
++#define EDMA_PORT_MAX_CORE            4
++
+ #define EDMA_NETDEV_FEATURES          (NETIF_F_FRAGLIST \
+                                       | NETIF_F_SG \
+                                       | NETIF_F_RXCSUM \
+@@ -34,12 +36,44 @@ struct edma_port_rx_stats {
+       struct u64_stats_sync syncp;
+ };
++/**
++ * struct edma_port_tx_stats - EDMA Tx per-CPU stats for the port.
++ * @tx_pkts: Number of Tx packets
++ * @tx_bytes: Number of Tx bytes
++ * @tx_drops: Number of Tx drops
++ * @tx_nr_frag_pkts: Number of Tx nr_frag packets
++ * @tx_fraglist_pkts: Number of Tx fraglist packets
++ * @tx_fraglist_with_nr_frags_pkts:  Number of Tx packets with fraglist and nr_frags
++ * @tx_tso_pkts: Number of Tx TSO packets
++ * @tx_tso_drop_pkts: Number of Tx TSO drop packets
++ * @tx_gso_pkts: Number of Tx GSO packets
++ * @tx_gso_drop_pkts: Number of Tx GSO drop packets
++ * @tx_queue_stopped: Per-core count of Tx queue stop events
++ * @syncp: Synchronization pointer
++ */
++struct edma_port_tx_stats {
++      u64 tx_pkts;
++      u64 tx_bytes;
++      u64 tx_drops;
++      u64 tx_nr_frag_pkts;
++      u64 tx_fraglist_pkts;
++      u64 tx_fraglist_with_nr_frags_pkts;
++      u64 tx_tso_pkts;
++      u64 tx_tso_drop_pkts;
++      u64 tx_gso_pkts;
++      u64 tx_gso_drop_pkts;
++      u64 tx_queue_stopped[EDMA_PORT_MAX_CORE];
++      struct u64_stats_sync syncp;
++};
++
+ /**
+  * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
+  * @rx_stats: Per CPU Rx statistics
++ * @tx_stats: Per CPU Tx statistics
+  */
+ struct edma_port_pcpu_stats {
+       struct edma_port_rx_stats __percpu *rx_stats;
++      struct edma_port_tx_stats __percpu *tx_stats;
+ };
+ /**
+@@ -54,6 +88,7 @@ struct edma_port_priv {
+       struct ppe_port *ppe_port;
+       struct net_device *netdev;
+       struct edma_port_pcpu_stats pcpu_stats;
++      struct edma_txdesc_ring *txr_map[EDMA_PORT_MAX_CORE];
+       unsigned long flags;
+ };
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_tx.c b/drivers/net/ethernet/qualcomm/ppe/edma_tx.c
+new file mode 100644
+index 000000000000..47876c142df5
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.c
+@@ -0,0 +1,808 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* Provide APIs to alloc Tx Buffers, fill the Tx descriptors and transmit
++ * Scatter Gather and linear packets, Tx complete to free the skb after transmit.
++ */
++
++#include <linux/dma-mapping.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/platform_device.h>
++#include <linux/printk.h>
++#include <net/gso.h>
++#include <linux/regmap.h>
++
++#include "edma.h"
++#include "edma_cfg_tx.h"
++#include "edma_port.h"
++#include "ppe.h"
++#include "ppe_regs.h"
++
++static u32 edma_tx_num_descs_for_sg(struct sk_buff *skb)
++{
++      u32 nr_frags_first = 0, num_tx_desc_needed = 0;
++
++      /* Check if we have enough Tx descriptors for SG. */
++      if (unlikely(skb_shinfo(skb)->nr_frags)) {
++              nr_frags_first = skb_shinfo(skb)->nr_frags;
++              WARN_ON_ONCE(nr_frags_first > MAX_SKB_FRAGS);
++              num_tx_desc_needed += nr_frags_first;
++      }
++
++      /* Walk through fraglist skbs making a note of nr_frags
++       * One Tx desc for fraglist skb. Fraglist skb may have
++       * further nr_frags.
++       */
++      if (unlikely(skb_has_frag_list(skb))) {
++              struct sk_buff *iter_skb;
++
++              skb_walk_frags(skb, iter_skb) {
++                      u32 nr_frags = skb_shinfo(iter_skb)->nr_frags;
++
++                      WARN_ON_ONCE(nr_frags > MAX_SKB_FRAGS);
++                      num_tx_desc_needed += (1 + nr_frags);
++              }
++      }
++
++      return (num_tx_desc_needed + 1);
++}
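++
++/* Worked example: an skb with 3 nr_frags plus a fraglist of two skbs,
++ * each with 1 nr_frag, needs 3 + 2 * (1 + 1) + 1 (head skb) = 8 Tx
++ * descriptors.
++ */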
++
++/**
++ * edma_tx_gso_segment - Tx GSO.
++ * @skb: Socket Buffer.
++ * @netdev: Netdevice.
++ * @segs: SKB segments from GSO.
++ *
++ * Segment the skb via software GSO when the HW cannot transmit it as-is.
++ *
++ * Return EDMA_TX_GSO_NOT_NEEDED if no segmentation is required,
++ * EDMA_TX_GSO_SUCCEED if the skb was segmented into *segs, or
++ * EDMA_TX_GSO_FAIL on failure.
++ */
++enum edma_tx_gso_status edma_tx_gso_segment(struct sk_buff *skb,
++                                          struct net_device *netdev, struct sk_buff **segs)
++{
++      u32 num_tx_desc_needed;
++
++      /* Check if the skb is non-linear before proceeding. */
++      if (likely(!skb_is_nonlinear(skb)))
++              return EDMA_TX_GSO_NOT_NEEDED;
++
++      /* If TSO is enabled and the packet fits within the HW descriptor
++       * budget, the skb does not need to be segmented by Linux.
++       */
++      if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
++              num_tx_desc_needed = edma_tx_num_descs_for_sg(skb);
++              if (likely(num_tx_desc_needed <= EDMA_TX_TSO_SEG_MAX))
++                      return EDMA_TX_GSO_NOT_NEEDED;
++      }
++
++      /* GSO segmentation of the skb into multiple segments. */
++      *segs = skb_gso_segment(skb, netdev->features
++              & ~(NETIF_F_TSO | NETIF_F_TSO6));
++
++      /* Check for error in GSO segmentation. */
++      if (IS_ERR_OR_NULL(*segs)) {
++              netdev_info(netdev, "Tx gso fail\n");
++              return EDMA_TX_GSO_FAIL;
++      }
++
++      return EDMA_TX_GSO_SUCCEED;
++}
++
++/**
++ * edma_tx_complete - Reap Tx completion descriptors.
++ * @work_to_do: Work to do.
++ * @txcmpl_ring: Tx Completion ring.
++ *
++ * Reap Tx completion descriptors of the transmitted
++ * packets and free the corresponding SKBs.
++ *
++ * Return the number of descriptors for which Tx completion was processed.
++ */
++u32 edma_tx_complete(u32 work_to_do, struct edma_txcmpl_ring *txcmpl_ring)
++{
++      struct edma_txcmpl_stats *txcmpl_stats = &txcmpl_ring->txcmpl_stats;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 cons_idx, end_idx, data, cpu_id;
++      struct device *dev = ppe_dev->dev;
++      u32 avail, count, txcmpl_errors;
++      struct edma_txcmpl_desc *txcmpl;
++      u32 prod_idx = 0, more_bit = 0;
++      struct netdev_queue *nq;
++      struct sk_buff *skb;
++      u32 reg;
++
++      cons_idx = txcmpl_ring->cons_idx;
++
++      if (likely(txcmpl_ring->avail_pkt >= work_to_do)) {
++              avail = work_to_do;
++      } else {
++              /* Get TXCMPL ring producer index. */
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_PROD_IDX(txcmpl_ring->id);
++              regmap_read(regmap, reg, &data);
++              prod_idx = data & EDMA_TXCMPL_PROD_IDX_MASK;
++
++              avail = EDMA_DESC_AVAIL_COUNT(prod_idx, cons_idx, EDMA_TX_RING_SIZE);
++              txcmpl_ring->avail_pkt = avail;
++
++              if (unlikely(!avail)) {
++                      dev_dbg(dev, "No available descriptors are pending for %d txcmpl ring\n",
++                              txcmpl_ring->id);
++                      u64_stats_update_begin(&txcmpl_stats->syncp);
++                      ++txcmpl_stats->no_pending_desc;
++                      u64_stats_update_end(&txcmpl_stats->syncp);
++                      return 0;
++              }
++
++              avail = min(avail, work_to_do);
++      }
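++
++      /* Example: with prod_idx = 5, cons_idx = 250 and a 256-entry ring,
++       * EDMA_DESC_AVAIL_COUNT() yields (5 - 250 + 256) % 256 = 11 pending
++       * completions (assuming the usual modular ring-occupancy definition
++       * of the macro).
++       */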
++
++      count = avail;
++
++      end_idx = (cons_idx + avail) & EDMA_TX_RING_SIZE_MASK;
++      txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
++
++      /* Instead of freeing the skb, it might be better to save it and
++       * reuse it for Rx fill.
++       */
++      while (likely(avail--)) {
++              /* The last descriptor holds the SKB pointer for scattered frames.
++               * So skip the descriptors with more bit set.
++               */
++              more_bit = EDMA_TXCMPL_MORE_BIT_GET(txcmpl);
++              if (unlikely(more_bit)) {
++                      u64_stats_update_begin(&txcmpl_stats->syncp);
++                      ++txcmpl_stats->desc_with_more_bit;
++                      u64_stats_update_end(&txcmpl_stats->syncp);
++                      cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
++                      txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
++                      continue;
++              }
++
++              /* Find and free the skb for Tx completion. */
++              skb = (struct sk_buff *)EDMA_TXCMPL_OPAQUE_GET(txcmpl);
++              if (unlikely(!skb)) {
++                      if (net_ratelimit())
++                              dev_warn(dev, "Invalid cons_idx:%u prod_idx:%u word2:%x word3:%x\n",
++                                       cons_idx, prod_idx, txcmpl->word2, txcmpl->word3);
++
++                      u64_stats_update_begin(&txcmpl_stats->syncp);
++                      ++txcmpl_stats->invalid_buffer;
++                      u64_stats_update_end(&txcmpl_stats->syncp);
++              } else {
++                      dev_dbg(dev, "TXCMPL: skb:%p, skb->len %d, skb->data_len %d, cons_idx:%d prod_idx:%d word2:0x%x word3:0x%x\n",
++                              skb, skb->len, skb->data_len, cons_idx, prod_idx,
++                              txcmpl->word2, txcmpl->word3);
++
++                      txcmpl_errors = EDMA_TXCOMP_RING_ERROR_GET(txcmpl->word3);
++                      if (unlikely(txcmpl_errors)) {
++                              if (net_ratelimit())
++                                      dev_err(dev, "Error 0x%0x observed in tx complete %d ring\n",
++                                              txcmpl_errors, txcmpl_ring->id);
++
++                              u64_stats_update_begin(&txcmpl_stats->syncp);
++                              ++txcmpl_stats->errors;
++                              u64_stats_update_end(&txcmpl_stats->syncp);
++                      }
++
++                      /* Retrieve the pool ID for unmapping: 0 means the
++                       * linear skb data, otherwise (pool ID - 1) is the
++                       * index into nr_frags.
++                       */
++                      if (!EDMA_TXCOMP_POOL_ID_GET(txcmpl)) {
++                              dma_unmap_single(dev, virt_to_phys(skb->data),
++                                               skb->len, DMA_TO_DEVICE);
++                      } else {
++                              u8 frag_index = (EDMA_TXCOMP_POOL_ID_GET(txcmpl) - 1);
++                              skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_index];
++
++                              dma_unmap_page(dev, virt_to_phys(frag),
++                                             PAGE_SIZE, DMA_TO_DEVICE);
++                      }
++
++                      dev_kfree_skb(skb);
++              }
++
++              cons_idx = ((cons_idx + 1) & EDMA_TX_RING_SIZE_MASK);
++              txcmpl = EDMA_TXCMPL_DESC(txcmpl_ring, cons_idx);
++      }
++
++      txcmpl_ring->cons_idx = cons_idx;
++      txcmpl_ring->avail_pkt -= count;
++
++      dev_dbg(dev, "TXCMPL:%u count:%u prod_idx:%u cons_idx:%u\n",
++              txcmpl_ring->id, count, prod_idx, cons_idx);
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id);
++      regmap_write(regmap, reg, cons_idx);
++
++      /* If tx_requeue_stop is disabled (tx_requeue_stop == 0), fetch the
++       * Tx queue of the interface and check whether it is stopped; if the
++       * queue is stopped and the interface is up, wake the queue up.
++       */
++      if (unlikely(!edma_ctx->tx_requeue_stop)) {
++              cpu_id = smp_processor_id();
++              nq = netdev_get_tx_queue(txcmpl_ring->napi.dev, cpu_id);
++              if (unlikely(netif_tx_queue_stopped(nq)) &&
++                  netif_carrier_ok(txcmpl_ring->napi.dev)) {
++                      dev_dbg(dev, "Waking queue number %d, for interface %s\n",
++                              cpu_id, txcmpl_ring->napi.dev->name);
++                      __netif_tx_lock(nq, cpu_id);
++                      netif_tx_wake_queue(nq);
++                      __netif_tx_unlock(nq);
++              }
++      }
++
++      return count;
++}
++
++/**
++ * edma_tx_napi_poll - EDMA TX NAPI handler.
++ * @napi: NAPI structure.
++ * @budget: Tx NAPI budget.
++ *
++ * Process Tx completions until either the budget is exhausted or no
++ * completion interrupt status is pending, then re-enable the Tx
++ * complete interrupt.
++ *
++ * Return: the number of Tx completion descriptors processed.
++ */
++int edma_tx_napi_poll(struct napi_struct *napi, int budget)
++{
++      struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)napi;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 txcmpl_intr_status;
++      int work_done = 0;
++      u32 data, reg;
++
++      do {
++              work_done += edma_tx_complete(budget - work_done, txcmpl_ring);
++              if (work_done >= budget)
++                      return work_done;
++
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_STAT(txcmpl_ring->id);
++              regmap_read(regmap, reg, &data);
++              txcmpl_intr_status = data & EDMA_TXCMPL_RING_INT_STATUS_MASK;
++      } while (txcmpl_intr_status);
++
++      /* No more packets to process. Finish NAPI processing. */
++      napi_complete(napi);
++
++      /* Set TXCMPL ring interrupt mask. */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
++      regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_txcmpl);
++
++      return work_done;
++}
++
++/**
++ * edma_tx_handle_irq - Tx IRQ Handler.
++ * @irq: Interrupt request.
++ * @ctx: Context.
++ *
++ * Process TX IRQ and schedule NAPI.
++ *
++ * Return: IRQ_HANDLED.
++ */
++irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
++{
++      struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)ctx;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 reg;
++
++      pr_debug("irq: irq=%d txcmpl_ring_id=%u\n", irq, txcmpl_ring->id);
++      if (likely(napi_schedule_prep(&txcmpl_ring->napi))) {
++              /* Disable TxCmpl intr. */
++              reg = EDMA_BASE_OFFSET + EDMA_REG_TX_INT_MASK(txcmpl_ring->id);
++              regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
++              __napi_schedule(&txcmpl_ring->napi);
++      }
++
++      return IRQ_HANDLED;
++}
++
++static void edma_tx_dma_unmap_frags(struct sk_buff *skb, u32 nr_frags)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++      u32 buf_len = 0;
++      u8 i = 0;
++
++      for (i = 0; i < skb_shinfo(skb)->nr_frags - nr_frags; i++) {
++              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++              /* DMA mapping was not done for zero size segments. */
++              buf_len = skb_frag_size(frag);
++              if (unlikely(buf_len == 0))
++                      continue;
++
++              dma_unmap_page(dev, virt_to_phys(frag), PAGE_SIZE,
++                             DMA_TO_DEVICE);
++      }
++}
++
++static u32 edma_tx_skb_nr_frags(struct edma_txdesc_ring *txdesc_ring,
++                              struct edma_txdesc_pri **txdesc, struct sk_buff *skb,
++                              u32 *hw_next_to_use, u32 *invalid_frag)
++{
++      u32 nr_frags = 0, buf_len = 0, num_descs = 0, start_idx = 0, end_idx = 0;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      u32 start_hw_next_to_use = *hw_next_to_use;
++      struct edma_txdesc_pri *txd = *txdesc;
++      struct device *dev = ppe_dev->dev;
++      u8 i = 0;
++
++      /* Hold onto the index mapped to *txdesc.
++       * This will be the index previous to that of current *hw_next_to_use.
++       */
++      start_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK)
++              & EDMA_TX_RING_SIZE_MASK);
++
++      /* Handle if the skb has nr_frags. */
++      nr_frags = skb_shinfo(skb)->nr_frags;
++      num_descs = nr_frags;
++      i = 0;
++
++      while (nr_frags--) {
++              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++              dma_addr_t buff_addr;
++
++              buf_len = skb_frag_size(frag);
++
++              /* A zero-size segment can cause the EDMA HW to hang, so we
++               * don't process them. Zero-size segments can occur during TSO
++               * if there is nothing but the header in the primary segment.
++               */
++              if (unlikely(buf_len == 0)) {
++                      num_descs--;
++                      i++;
++                      continue;
++              }
++
++              /* Set the MORE bit on the previous Tx descriptor.
++               * Note: this descriptor will be flushed later as well.
++               */
++              EDMA_TXDESC_MORE_BIT_SET(txd, 1);
++              EDMA_TXDESC_ENDIAN_SET(txd);
++
++              txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
++              memset(txd, 0, sizeof(struct edma_txdesc_pri));
++              buff_addr = skb_frag_dma_map(dev, frag, 0, buf_len,
++                                           DMA_TO_DEVICE);
++              if (dma_mapping_error(dev, buff_addr)) {
++                      dev_dbg(dev, "Unable to dma first descriptor for nr_frags tx\n");
++                      *hw_next_to_use = start_hw_next_to_use;
++                      *invalid_frag = nr_frags;
++                      return 0;
++              }
++
++              EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
++              EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
++              EDMA_TXDESC_POOL_ID_SET(txd, (i + 1));
++
++              *hw_next_to_use = ((*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK);
++              i++;
++      }
++
++      EDMA_TXDESC_ENDIAN_SET(txd);
++
++      /* This will be the index previous to that of current *hw_next_to_use. */
++      end_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK) & EDMA_TX_RING_SIZE_MASK);
++
++      *txdesc = txd;
++
++      return num_descs;
++}
++
++static void edma_tx_fill_pp_desc(struct edma_port_priv *port_priv,
++                               struct edma_txdesc_pri *txd, struct sk_buff *skb,
++                               struct edma_port_tx_stats *stats)
++{
++      struct ppe_port *port = port_priv->ppe_port;
++      int port_id = port->port_id;
++
++      /* Offload L3/L4 checksum computation. */
++      if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
++              EDMA_TXDESC_ADV_OFFLOAD_SET(txd);
++              EDMA_TXDESC_IP_CSUM_SET(txd);
++              EDMA_TXDESC_L4_CSUM_SET(txd);
++      }
++
++      /* Check if the packet needs TSO. This will mostly be true for
++       * SG packets.
++       */
++      if (unlikely(skb_is_gso(skb))) {
++              if ((skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4) ||
++                  (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
++                      u32 mss = skb_shinfo(skb)->gso_size;
++
++                      /* HW supports MSS in the range [256, 10K]; clamp the
++                       * MSS accordingly, as an out-of-range MSS makes the HW
++                       * report error 0x200000 in the Tx completion.
++                       */
++                      if (mss < EDMA_TX_TSO_MSS_MIN)
++                              mss = EDMA_TX_TSO_MSS_MIN;
++                      else if (mss > EDMA_TX_TSO_MSS_MAX)
++                              mss = EDMA_TX_TSO_MSS_MAX;
++
++                      EDMA_TXDESC_TSO_ENABLE_SET(txd, 1);
++                      EDMA_TXDESC_MSS_SET(txd, mss);
++
++                      /* Update tso stats. */
++                      u64_stats_update_begin(&stats->syncp);
++                      stats->tx_tso_pkts++;
++                      u64_stats_update_end(&stats->syncp);
++              }
++      }
++
++      /* Set destination information in the descriptor. */
++      EDMA_TXDESC_SERVICE_CODE_SET(txd, PPE_EDMA_SC_BYPASS_ID);
++      EDMA_DST_INFO_SET(txd, port_id);
++}
++
++static struct edma_txdesc_pri *edma_tx_skb_first_desc(struct edma_port_priv *port_priv,
++                                                    struct edma_txdesc_ring *txdesc_ring,
++                                                    struct sk_buff *skb, u32 *hw_next_to_use,
++                                                    struct edma_port_tx_stats *stats)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct edma_txdesc_pri *txd = NULL;
++      struct device *dev = ppe_dev->dev;
++      dma_addr_t buff_addr;
++      u32 buf_len = 0;
++
++      /* Get the packet length. */
++      buf_len = skb_headlen(skb);
++      txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
++      memset(txd, 0, sizeof(struct edma_txdesc_pri));
++
++      /* Set the data pointer as the buffer address in the descriptor. */
++      buff_addr = dma_map_single(dev, skb->data, buf_len, DMA_TO_DEVICE);
++      if (dma_mapping_error(dev, buff_addr)) {
++              dev_dbg(dev, "Unable to dma first descriptor for tx\n");
++              return NULL;
++      }
++
++      EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
++      EDMA_TXDESC_POOL_ID_SET(txd, 0);
++      edma_tx_fill_pp_desc(port_priv, txd, skb, stats);
++
++      /* Set packet length in the descriptor. */
++      EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
++      *hw_next_to_use = (*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK;
++
++      return txd;
++}
++
++static void edma_tx_handle_dma_err(struct sk_buff *skb, u32 num_sg_frag_list)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct device *dev = ppe_dev->dev;
++      struct sk_buff *iter_skb = NULL;
++      u32 cnt_sg_frag_list = 0;
++
++      /* Walk through all fraglist skbs. */
++      skb_walk_frags(skb, iter_skb) {
++              if (skb_headlen(iter_skb)) {
++                      dma_unmap_single(dev, virt_to_phys(iter_skb->data),
++                                       skb_headlen(iter_skb), DMA_TO_DEVICE);
++                      cnt_sg_frag_list += 1;
++              }
++
++              if (cnt_sg_frag_list == num_sg_frag_list)
++                      return;
++
++              /* If the fraglist skb had nr_frags, unmap that memory too. */
++              u32 nr_frags = skb_shinfo(iter_skb)->nr_frags;
++
++              if (nr_frags == 0)
++                      continue;
++
++              for (int i = 0; i < nr_frags; i++) {
++                      skb_frag_t *frag = &skb_shinfo(iter_skb)->frags[i];
++
++                      /* DMA mapping was not done for zero size segments. */
++                      if (unlikely(skb_frag_size(frag) == 0))
++                              continue;
++
++                      dma_unmap_page(dev, virt_to_phys(frag),
++                                     PAGE_SIZE, DMA_TO_DEVICE);
++                      cnt_sg_frag_list += 1;
++                      if (cnt_sg_frag_list == num_sg_frag_list)
++                              return;
++              }
++      }
++}
++
++static u32 edma_tx_skb_sg_fill_desc(struct edma_txdesc_ring *txdesc_ring,
++                                  struct edma_txdesc_pri **txdesc,
++                                  struct sk_buff *skb, u32 *hw_next_to_use,
++                                  struct edma_port_tx_stats *stats)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      u32 start_hw_next_to_use = 0, invalid_frag = 0;
++      struct edma_txdesc_pri *txd = *txdesc;
++      struct device *dev = ppe_dev->dev;
++      struct sk_buff *iter_skb = NULL;
++      u32 buf_len = 0, num_descs = 0;
++      u32 num_sg_frag_list = 0;
++
++      /* Head skb processed already. */
++      num_descs++;
++
++      if (unlikely(skb_has_frag_list(skb))) {
++              struct edma_txdesc_pri *start_desc = NULL;
++              u32 start_idx = 0, end_idx = 0;
++
++              /* Hold onto the index mapped to txd.
++               * This will be the index previous to that of current *hw_next_to_use.
++               */
++              start_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK)
++                           & EDMA_TX_RING_SIZE_MASK);
++              start_desc = txd;
++              start_hw_next_to_use = *hw_next_to_use;
++
++              /* Walk through all fraglist skbs. */
++              skb_walk_frags(skb, iter_skb) {
++                      dma_addr_t buff_addr;
++                      u32 num_nr_frag = 0;
++
++                      /* This can happen during packet decapsulation, where
++                       * all header content might have been removed.
++                       */
++                      buf_len = skb_headlen(iter_skb);
++                      if (unlikely(buf_len == 0))
++                              goto skip_primary;
++
++                      /* We make sure to flush this descriptor later. */
++                      EDMA_TXDESC_MORE_BIT_SET(txd, 1);
++                      EDMA_TXDESC_ENDIAN_SET(txd);
++
++                      txd = EDMA_TXDESC_PRI_DESC(txdesc_ring, *hw_next_to_use);
++                      memset(txd, 0, sizeof(struct edma_txdesc_pri));
++                      buff_addr = dma_map_single(dev, iter_skb->data,
++                                                 buf_len, DMA_TO_DEVICE);
++                      if (dma_mapping_error(dev, buff_addr)) {
++                              dev_dbg(dev, "Unable to dma for fraglist\n");
++                              goto dma_err;
++                      }
++
++                      EDMA_TXDESC_BUFFER_ADDR_SET(txd, buff_addr);
++                      EDMA_TXDESC_DATA_LEN_SET(txd, buf_len);
++                      EDMA_TXDESC_POOL_ID_SET(txd, 0);
++
++                      *hw_next_to_use = (*hw_next_to_use + 1) & EDMA_TX_RING_SIZE_MASK;
++                      num_descs += 1;
++                      num_sg_frag_list += 1;
++
++                      /* A fraglist skb can itself have nr_frags. */
++skip_primary:
++                      if (unlikely(skb_shinfo(iter_skb)->nr_frags)) {
++                              num_nr_frag = edma_tx_skb_nr_frags(txdesc_ring, &txd,
++                                                                 iter_skb, hw_next_to_use,
++                                                                 &invalid_frag);
++                              if (unlikely(!num_nr_frag)) {
++                                      dev_dbg(dev, "No descriptor available for ring %d\n",
++                                              txdesc_ring->id);
++                                      edma_tx_dma_unmap_frags(iter_skb, invalid_frag);
++                                      goto dma_err;
++                              }
++
++                              num_descs += num_nr_frag;
++                              num_sg_frag_list += num_nr_frag;
++
++                              /* Update fraglist with nr_frag stats. */
++                              u64_stats_update_begin(&stats->syncp);
++                              stats->tx_fraglist_with_nr_frags_pkts++;
++                              u64_stats_update_end(&stats->syncp);
++                      }
++              }
++
++              EDMA_TXDESC_ENDIAN_SET(txd);
++
++              /* This will be the index previous to
++               * that of current *hw_next_to_use.
++               */
++              end_idx = (((*hw_next_to_use) + EDMA_TX_RING_SIZE_MASK) &
++                         EDMA_TX_RING_SIZE_MASK);
++
++              /* Update frag_list stats. */
++              u64_stats_update_begin(&stats->syncp);
++              stats->tx_fraglist_pkts++;
++              u64_stats_update_end(&stats->syncp);
++      } else {
++              /* Process skb with nr_frags. */
++              num_descs += edma_tx_skb_nr_frags(txdesc_ring, &txd, skb,
++                                                hw_next_to_use, &invalid_frag);
++              if (unlikely(!num_descs)) {
++                      dev_dbg(dev, "No descriptor available for ring %d\n", txdesc_ring->id);
++                      edma_tx_dma_unmap_frags(skb, invalid_frag);
++                      *txdesc = NULL;
++                      return num_descs;
++              }
++
++              u64_stats_update_begin(&stats->syncp);
++              stats->tx_nr_frag_pkts++;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      dev_dbg(dev, "skb:%p num_descs_filled: %u, nr_frags %u, frag_list fragments %u\n",
++              skb, num_descs, skb_shinfo(skb)->nr_frags, num_sg_frag_list);
++
++      *txdesc = txd;
++
++      return num_descs;
++
++dma_err:
++      if (!num_sg_frag_list)
++              goto reset_state;
++
++      edma_tx_handle_dma_err(skb, num_sg_frag_list);
++
++reset_state:
++      *hw_next_to_use = start_hw_next_to_use;
++      *txdesc = NULL;
++
++      return 0;
++}
++
++static u32 edma_tx_avail_desc(struct edma_txdesc_ring *txdesc_ring,
++                            u32 hw_next_to_use)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      u32 data = 0, avail = 0, hw_next_to_clean = 0;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id);
++      regmap_read(regmap, reg, &data);
++      hw_next_to_clean = data & EDMA_TXDESC_CONS_IDX_MASK;
++
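++      /* Count the free slots between the HW consumer index and the SW
++       * producer index; using (cons - 1) keeps one descriptor in reserve
++       * so a completely full ring is never mistaken for an empty one.
++       */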
++      avail = EDMA_DESC_AVAIL_COUNT(hw_next_to_clean - 1,
++                                    hw_next_to_use, EDMA_TX_RING_SIZE);
++
++      return avail;
++}
++
++/**
++ * edma_tx_ring_xmit - Transmit a packet.
++ * @netdev: Netdevice.
++ * @skb: Socket Buffer.
++ * @txdesc_ring: Tx Descriptor ring.
++ * @stats: EDMA Tx Statistics.
++ *
++ * Check for available descriptors, fill the descriptors
++ * and transmit both linear and non-linear packets.
++ *
++ * Return: EDMA_TX_OK on success, or an edma_tx_status error code on failure.
++ */
++enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
++                                    struct sk_buff *skb, struct edma_txdesc_ring *txdesc_ring,
++                                    struct edma_port_tx_stats *stats)
++{
++      struct edma_txdesc_stats *txdesc_stats = &txdesc_ring->txdesc_stats;
++      struct edma_port_priv *port_priv = netdev_priv(netdev);
++      u32 num_tx_desc_needed = 0, num_desc_filled = 0;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct ppe_port *port = port_priv->ppe_port;
++      struct regmap *regmap = ppe_dev->regmap;
++      struct edma_txdesc_pri *txdesc = NULL;
++      struct device *dev = ppe_dev->dev;
++      int port_id = port->port_id;
++      u32 hw_next_to_use = 0;
++      u32 reg;
++
++      hw_next_to_use = txdesc_ring->prod_idx;
++
++      if (unlikely(!(txdesc_ring->avail_desc)))  {
++              txdesc_ring->avail_desc = edma_tx_avail_desc(txdesc_ring,
++                                                           hw_next_to_use);
++              if (unlikely(!txdesc_ring->avail_desc)) {
++                      netdev_dbg(netdev, "No available descriptors are present at %d ring\n",
++                                 txdesc_ring->id);
++
++                      u64_stats_update_begin(&txdesc_stats->syncp);
++                      ++txdesc_stats->no_desc_avail;
++                      u64_stats_update_end(&txdesc_stats->syncp);
++                      return EDMA_TX_FAIL_NO_DESC;
++              }
++      }
++
++      /* Process the head skb for a linear skb.
++       * Process head skb + nr_frags + fraglist for a non-linear skb.
++       */
++      if (likely(!skb_is_nonlinear(skb))) {
++              txdesc = edma_tx_skb_first_desc(port_priv, txdesc_ring, skb,
++                                              &hw_next_to_use, stats);
++              if (unlikely(!txdesc)) {
++                      netdev_dbg(netdev, "No descriptor available for ring %d\n",
++                                 txdesc_ring->id);
++                      u64_stats_update_begin(&txdesc_stats->syncp);
++                      ++txdesc_stats->no_desc_avail;
++                      u64_stats_update_end(&txdesc_stats->syncp);
++                      return EDMA_TX_FAIL_NO_DESC;
++              }
++
++              EDMA_TXDESC_ENDIAN_SET(txdesc);
++              num_desc_filled++;
++      } else {
++              num_tx_desc_needed = edma_tx_num_descs_for_sg(skb);
++
++              /* HW does not support TSO for packets with more than 32
++               * segments and hangs if it sees more. The kernel performs GSO
++               * for such packets, as netdev gso_max_segs is set to 32.
++               */
++              if (unlikely(num_tx_desc_needed > EDMA_TX_TSO_SEG_MAX)) {
++                      netdev_dbg(netdev, "Number of segments %u more than %u for %d ring\n",
++                                 num_tx_desc_needed, EDMA_TX_TSO_SEG_MAX, txdesc_ring->id);
++                      u64_stats_update_begin(&txdesc_stats->syncp);
++                      ++txdesc_stats->tso_max_seg_exceed;
++                      u64_stats_update_end(&txdesc_stats->syncp);
++
++                      u64_stats_update_begin(&stats->syncp);
++                      stats->tx_tso_drop_pkts++;
++                      u64_stats_update_end(&stats->syncp);
++
++                      return EDMA_TX_FAIL;
++              }
++
++              if (unlikely(num_tx_desc_needed > txdesc_ring->avail_desc)) {
++                      txdesc_ring->avail_desc = edma_tx_avail_desc(txdesc_ring,
++                                                                   hw_next_to_use);
++                      if (num_tx_desc_needed > txdesc_ring->avail_desc) {
++                              u64_stats_update_begin(&txdesc_stats->syncp);
++                              ++txdesc_stats->no_desc_avail;
++                              u64_stats_update_end(&txdesc_stats->syncp);
++                              netdev_dbg(netdev, "Not enough available descriptors are present at %d ring for SG packet. Needed %d, currently available %d\n",
++                                         txdesc_ring->id, num_tx_desc_needed,
++                                         txdesc_ring->avail_desc);
++                              return EDMA_TX_FAIL_NO_DESC;
++                      }
++              }
++
++              txdesc = edma_tx_skb_first_desc(port_priv, txdesc_ring, skb,
++                                              &hw_next_to_use, stats);
++              if (unlikely(!txdesc)) {
++                      netdev_dbg(netdev, "No non-linear descriptor available for ring %d\n",
++                                 txdesc_ring->id);
++                      u64_stats_update_begin(&txdesc_stats->syncp);
++                      ++txdesc_stats->no_desc_avail;
++                      u64_stats_update_end(&txdesc_stats->syncp);
++                      return EDMA_TX_FAIL_NO_DESC;
++              }
++
++              num_desc_filled = edma_tx_skb_sg_fill_desc(txdesc_ring,
++                                                         &txdesc, skb, &hw_next_to_use, stats);
++              if (unlikely(!txdesc)) {
++                      netdev_dbg(netdev, "No descriptor available for ring %d\n",
++                                 txdesc_ring->id);
++                      dma_unmap_single(dev, virt_to_phys(skb->data),
++                                       skb->len, DMA_TO_DEVICE);
++                      u64_stats_update_begin(&txdesc_stats->syncp);
++                      ++txdesc_stats->no_desc_avail;
++                      u64_stats_update_end(&txdesc_stats->syncp);
++                      return EDMA_TX_FAIL_NO_DESC;
++              }
++      }
++
++      /* Store the skb pointer in the opaque field(s) of the last
++       * descriptor of the packet/SG packet.
++       */
++      EDMA_TXDESC_OPAQUE_SET(txdesc, skb);
++
++      /* Update producer index. */
++      txdesc_ring->prod_idx = hw_next_to_use & EDMA_TXDESC_PROD_IDX_MASK;
++      txdesc_ring->avail_desc -= num_desc_filled;
++
++      netdev_dbg(netdev, "%s: skb:%p tx_ring:%u proto:0x%x skb->len:%d port:%u prod_idx:%u ip_summed:0x%x\n",
++                 netdev->name, skb, txdesc_ring->id, ntohs(skb->protocol),
++                 skb->len, port_id, hw_next_to_use, skb->ip_summed);
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id);
++      regmap_write(regmap, reg, txdesc_ring->prod_idx);
++
++      u64_stats_update_begin(&stats->syncp);
++      stats->tx_pkts++;
++      stats->tx_bytes += skb->len;
++      u64_stats_update_end(&stats->syncp);
++
++      return EDMA_TX_OK;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_tx.h b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+new file mode 100644
+index 000000000000..c09a4e0f6a42
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+@@ -0,0 +1,302 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef __EDMA_TX__
++#define __EDMA_TX__
++
++#include "edma_port.h"
++
++#define EDMA_GET_DESC(R, i, type)     (&(((type *)((R)->desc))[(i)]))
++#define EDMA_GET_PDESC(R, i, type)    (&(((type *)((R)->pdesc))[(i)]))
++#define EDMA_GET_SDESC(R, i, type)    (&(((type *)((R)->sdesc))[(i)]))
++#define EDMA_TXCMPL_DESC(R, i)                EDMA_GET_DESC(R, i, \
++                                              struct edma_txcmpl_desc)
++#define EDMA_TXDESC_PRI_DESC(R, i)    EDMA_GET_PDESC(R, i, \
++                                              struct edma_txdesc_pri)
++#define EDMA_TXDESC_SEC_DESC(R, i)    EDMA_GET_SDESC(R, i, \
++                                              struct edma_txdesc_sec)
++
++#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
++                      typeof(_max) (max) = (_max); \
++                      ((((head) - (tail)) + \
++                      (max)) & ((max) - 1)); })
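++
++/* Example: with max = 2048, head = 10 and tail = 2040, this computes
++ * ((10 - 2040) + 2048) & 2047 = 18 available descriptors, wrapping
++ * correctly across the ring boundary.
++ */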
++
++#define EDMA_TX_RING_SIZE               2048
++#define EDMA_TX_RING_SIZE_MASK                (EDMA_TX_RING_SIZE - 1)
++
++/* Max segment processing capacity of HW for TSO. */
++#define EDMA_TX_TSO_SEG_MAX           32
++
++/* HW defined low and high MSS size. */
++#define EDMA_TX_TSO_MSS_MIN           256
++#define EDMA_TX_TSO_MSS_MAX           10240
++
++#define EDMA_DST_PORT_TYPE            2
++#define EDMA_DST_PORT_TYPE_SHIFT      28
++#define EDMA_DST_PORT_TYPE_MASK               (0xf << EDMA_DST_PORT_TYPE_SHIFT)
++#define EDMA_DST_PORT_ID_SHIFT                16
++#define EDMA_DST_PORT_ID_MASK         (0xfff << EDMA_DST_PORT_ID_SHIFT)
++
++#define EDMA_DST_PORT_TYPE_SET(x)     (((x) << EDMA_DST_PORT_TYPE_SHIFT) & \
++                                                      EDMA_DST_PORT_TYPE_MASK)
++#define EDMA_DST_PORT_ID_SET(x)               (((x) << EDMA_DST_PORT_ID_SHIFT) & \
++                                                      EDMA_DST_PORT_ID_MASK)
++#define EDMA_DST_INFO_SET(desc, x)    ((desc)->word4 |= \
++      (EDMA_DST_PORT_TYPE_SET(EDMA_DST_PORT_TYPE) | EDMA_DST_PORT_ID_SET(x)))
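++
++/* Worked example: EDMA_DST_INFO_SET(desc, 1) ORs
++ * (2 << 28) | (1 << 16) into word4, i.e. destination port type 2
++ * with destination port ID 1.
++ */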
++
++#define EDMA_TXDESC_TSO_ENABLE_MASK           BIT(24)
++#define EDMA_TXDESC_TSO_ENABLE_SET(desc, x)   ((desc)->word5 |= \
++                              FIELD_PREP(EDMA_TXDESC_TSO_ENABLE_MASK, x))
++#define EDMA_TXDESC_MSS_MASK                  GENMASK(31, 16)
++#define EDMA_TXDESC_MSS_SET(desc, x)          ((desc)->word6 |= \
++                                      FIELD_PREP(EDMA_TXDESC_MSS_MASK, x))
++#define EDMA_TXDESC_MORE_BIT_MASK     BIT(30)
++#define EDMA_TXDESC_MORE_BIT_SET(desc, x)     ((desc)->word1 |= \
++                              FIELD_PREP(EDMA_TXDESC_MORE_BIT_MASK, x))
++
++#define EDMA_TXDESC_ADV_OFFSET_BIT    BIT(31)
++#define EDMA_TXDESC_ADV_OFFLOAD_SET(desc)     ((desc)->word5 |= \
++                                      FIELD_PREP(EDMA_TXDESC_ADV_OFFSET_BIT, 1))
++#define EDMA_TXDESC_IP_CSUM_BIT               BIT(25)
++#define EDMA_TXDESC_IP_CSUM_SET(desc)         ((desc)->word5 |= \
++                                      FIELD_PREP(EDMA_TXDESC_IP_CSUM_BIT, 1))
++
++#define EDMA_TXDESC_L4_CSUM_SET_MASK   GENMASK(27, 26)
++#define EDMA_TXDESC_L4_CSUM_SET(desc)  ((desc)->word5 |= \
++                             (FIELD_PREP(EDMA_TXDESC_L4_CSUM_SET_MASK, 1)))
++
++#define EDMA_TXDESC_POOL_ID_SET_MASK  GENMASK(24, 18)
++#define EDMA_TXDESC_POOL_ID_SET(desc, x)      ((desc)->word5 |= \
++                              (FIELD_PREP(EDMA_TXDESC_POOL_ID_SET_MASK, x)))
++
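++/* The data length occupies the low 17 bits (0x1ffff) of word5. */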
++#define EDMA_TXDESC_DATA_LEN_SET(desc, x)     ((desc)->word5 |= ((x) & 0x1ffff))
++#define EDMA_TXDESC_SERVICE_CODE_MASK GENMASK(24, 16)
++#define EDMA_TXDESC_SERVICE_CODE_SET(desc, x) ((desc)->word1 |= \
++                              (FIELD_PREP(EDMA_TXDESC_SERVICE_CODE_MASK, x)))
++#define EDMA_TXDESC_BUFFER_ADDR_SET(desc, addr)       (((desc)->word0) = (addr))
++
++#ifdef __LP64__
++#define EDMA_TXDESC_OPAQUE_GET(_desc) ({ \
++                      typeof(_desc) (desc) = (_desc); \
++                      (((u64)(desc)->word3 << 32) | (desc)->word2); })
++
++#define EDMA_TXCMPL_OPAQUE_GET(_desc) ({ \
++                      typeof(_desc) (desc) = (_desc); \
++                      (((u64)(desc)->word1 << 32) | \
++                      (desc)->word0); })
++
++#define EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr)  ((desc)->word2 = \
++                                              (u32)(uintptr_t)(ptr))
++
++#define EDMA_TXDESC_OPAQUE_HI_SET(desc, ptr)  ((desc)->word3 = \
++                                              (u32)((u64)(ptr) >> 32))
++
++#define EDMA_TXDESC_OPAQUE_SET(_desc, _ptr)   do { \
++      typeof(_desc) (desc) = (_desc); \
++      typeof(_ptr) (ptr) = (_ptr); \
++      EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr); \
++      EDMA_TXDESC_OPAQUE_HI_SET(desc, ptr); \
++} while (0)
++#else
++#define EDMA_TXCMPL_OPAQUE_GET(desc)          ((desc)->word0)
++#define EDMA_TXDESC_OPAQUE_GET(desc)          ((desc)->word2)
++#define EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr)  ((desc)->word2 = (u32)(uintptr_t)ptr)
++
++#define EDMA_TXDESC_OPAQUE_SET(desc, ptr)     \
++                                      EDMA_TXDESC_OPAQUE_LO_SET(desc, ptr)
++#endif
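++
++/* The opaque value set on the Tx descriptor at transmit time (the skb
++ * pointer) is carried by the HW into the Tx completion descriptor,
++ * where edma_tx_complete() recovers it via EDMA_TXCMPL_OPAQUE_GET()
++ * to free the skb.
++ */
++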
++#define EDMA_TXCMPL_MORE_BIT_MASK     BIT(30)
++
++#define EDMA_TXCMPL_MORE_BIT_GET(desc)        ((le32_to_cpu((__force __le32)((desc)->word2))) & \
++                                      EDMA_TXCMPL_MORE_BIT_MASK)
++
++#define EDMA_TXCOMP_RING_ERROR_MASK   GENMASK(22, 0)
++
++#define EDMA_TXCOMP_RING_ERROR_GET(x) ((le32_to_cpu((__force __le32)(x))) & \
++                                      EDMA_TXCOMP_RING_ERROR_MASK)
++
++#define EDMA_TXCOMP_POOL_ID_MASK      GENMASK(5, 0)
++
++#define EDMA_TXCOMP_POOL_ID_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word2))) & \
++                                      EDMA_TXCOMP_POOL_ID_MASK)
++
++/* Opaque values are set in word2 and word3,
++ * they are not accessed by the EDMA HW,
++ * so endianness conversion is not needed.
++ */
++#define EDMA_TXDESC_ENDIAN_SET(_desc) ({ \
++      typeof(_desc) (desc) = (_desc); \
++      cpu_to_le32s(&((desc)->word0)); \
++      cpu_to_le32s(&((desc)->word1)); \
++      cpu_to_le32s(&((desc)->word4)); \
++      cpu_to_le32s(&((desc)->word5)); \
++      cpu_to_le32s(&((desc)->word6)); \
++      cpu_to_le32s(&((desc)->word7)); \
++})
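++
++/* Note: cpu_to_le32s() is a no-op on little-endian kernels; the swap
++ * only takes effect on big-endian builds such as this target, where
++ * descriptor words must be converted before the HW reads them.
++ */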
++
++/* EDMA Tx status */
++enum edma_tx_status {
++      EDMA_TX_OK = 0,                 /* Tx success. */
++      EDMA_TX_FAIL_NO_DESC = 1,       /* Not enough descriptors. */
++      EDMA_TX_FAIL = 2,               /* Tx failure. */
++};
++
++/* EDMA Tx GSO status */
++enum edma_tx_gso_status {
++      EDMA_TX_GSO_NOT_NEEDED = 0,     /* Segment count is below EDMA_TX_TSO_SEG_MAX. */
++      EDMA_TX_GSO_SUCCEED = 1,        /* GSO succeeded. */
++      EDMA_TX_GSO_FAIL = 2,           /* GSO failed, drop the packet. */
++};
++
++/**
++ * struct edma_txcmpl_stats - EDMA TX complete ring statistics.
++ * @invalid_buffer: Invalid buffer address received.
++ * @errors: Other Tx complete descriptor errors indicated by the hardware.
++ * @desc_with_more_bit: Count of completion descriptors with the MORE bit set.
++ * @no_pending_desc: Count of polls that found no descriptor pending.
++ * @syncp: Synchronization pointer.
++ */
++struct edma_txcmpl_stats {
++      u64 invalid_buffer;
++      u64 errors;
++      u64 desc_with_more_bit;
++      u64 no_pending_desc;
++      struct u64_stats_sync syncp;
++};
++
++/**
++ * struct edma_txdesc_stats - EDMA Tx descriptor ring statistics.
++ * @no_desc_avail: No descriptor available to transmit.
++ * @tso_max_seg_exceed: Packets exceeding EDMA_TX_TSO_SEG_MAX segments.
++ * @syncp: Synchronization pointer.
++ */
++struct edma_txdesc_stats {
++      u64 no_desc_avail;
++      u64 tso_max_seg_exceed;
++      struct u64_stats_sync syncp;
++};
++
++/**
++ * struct edma_txdesc_pri - EDMA primary TX descriptor.
++ * @word0: Low 32-bit of buffer address.
++ * @word1: Buffer recycling, PTP tag flag, PRI valid flag.
++ * @word2: Low 32-bit of opaque value.
++ * @word3: High 32-bit of opaque value.
++ * @word4: Source/Destination port info.
++ * @word5: VLAN offload, csum mode, ip_csum_en, tso_en, data len.
++ * @word6: MSS/hash_value/PTP tag, data offset.
++ * @word7: L4/L3 offset, PROT type, L2 type, CVLAN/SVLAN tag, service code.
++ */
++struct edma_txdesc_pri {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++      u32 word4;
++      u32 word5;
++      u32 word6;
++      u32 word7;
++};
++
++/**
++ * struct edma_txdesc_sec - EDMA secondary TX descriptor.
++ * @word0: Reserved.
++ * @word1: Custom csum offset, payload offset, TTL/NAT action.
++ * @word2: NAPT translated port, DSCP value, TTL value.
++ * @word3: Flow index value and valid flag.
++ * @word4: Reserved.
++ * @word5: Reserved.
++ * @word6: CVLAN/SVLAN command.
++ * @word7: CVLAN/SVLAN tag value.
++ */
++struct edma_txdesc_sec {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++      u32 word4;
++      u32 word5;
++      u32 word6;
++      u32 word7;
++};
++
++/**
++ * struct edma_txcmpl_desc - EDMA TX complete descriptor.
++ * @word0: Low 32-bit opaque value.
++ * @word1: High 32-bit opaque value.
++ * @word2: More fragment, transmit ring id, pool id.
++ * @word3: Error indications.
++ */
++struct edma_txcmpl_desc {
++      u32 word0;
++      u32 word1;
++      u32 word2;
++      u32 word3;
++};
++
++/**
++ * struct edma_txdesc_ring - EDMA TX descriptor ring
++ * @prod_idx: Producer index
++ * @id: Tx ring number
++ * @avail_desc: Number of available descriptor to process
++ * @pdesc: Primary descriptor ring virtual address
++ * @pdma: Primary descriptor ring physical address
++ * @sdesc: Secondary descriptor ring virtual address
++ * @txdesc_stats: Tx descriptor ring statistics
++ * @sdma: Secondary descriptor ring physical address
++ * @count: Number of descriptors
++ * @fc_grp_id: Flow control group ID
++ */
++struct edma_txdesc_ring {
++      u32 prod_idx;
++      u32 id;
++      u32 avail_desc;
++      struct edma_txdesc_pri *pdesc;
++      dma_addr_t pdma;
++      struct edma_txdesc_sec *sdesc;
++      struct edma_txdesc_stats txdesc_stats;
++      dma_addr_t sdma;
++      u32 count;
++      u8 fc_grp_id;
++};
++
++/**
++ * struct edma_txcmpl_ring - EDMA TX complete ring
++ * @napi: NAPI
++ * @cons_idx: Consumer index
++ * @avail_pkt: Number of available packets to process
++ * @desc: Descriptor ring virtual address
++ * @id: Txcmpl ring number
++ * @txcmpl_stats: Tx complete ring statistics
++ * @dma: Descriptor ring physical address
++ * @count: Number of descriptors in the ring
++ * @napi_added: Flag to indicate NAPI add status
++ */
++struct edma_txcmpl_ring {
++      struct napi_struct napi;
++      u32 cons_idx;
++      u32 avail_pkt;
++      struct edma_txcmpl_desc *desc;
++      u32 id;
++      struct edma_txcmpl_stats txcmpl_stats;
++      dma_addr_t dma;
++      u32 count;
++      bool napi_added;
++};
++
++enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
++                                    struct sk_buff *skb,
++                                    struct edma_txdesc_ring *txdesc_ring,
++                                    struct edma_port_tx_stats *stats);
++u32 edma_tx_complete(u32 work_to_do,
++                   struct edma_txcmpl_ring *txcmpl_ring);
++irqreturn_t edma_tx_handle_irq(int irq, void *ctx);
++int edma_tx_napi_poll(struct napi_struct *napi, int budget);
++enum edma_tx_gso_status edma_tx_gso_segment(struct sk_buff *skb,
++                                          struct net_device *netdev, struct sk_buff **segs);
++
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/103-42-net-ethernet-qualcomm-Add-miscellaneous-error-interr.patch b/target/linux/qualcommbe/patches-6.6/103-42-net-ethernet-qualcomm-Add-miscellaneous-error-interr.patch
new file mode 100644 (file)
index 0000000..df61e9f
--- /dev/null
@@ -0,0 +1,743 @@
+From 4dfbbaa1e9ab01f1126c9e7a89583aad0b6600da Mon Sep 17 00:00:00 2001
+From: Suruchi Agarwal <quic_suruchia@quicinc.com>
+Date: Thu, 21 Mar 2024 16:31:04 -0700
+Subject: [PATCH 42/50] net: ethernet: qualcomm: Add miscellaneous error
+ interrupts and counters
+
+Add miscellaneous error interrupt handling, and expose EDMA Tx/Rx and
+error counters through the debugfs framework.
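+
+With the new nodes in place the counters can be inspected at runtime,
+for example (the exact debugfs path depends on the parent dentry
+created in ppe_debugfs.c):
+
+  cat /sys/kernel/debug/<ppe>/EDMA_ERR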
+
+Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
+Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/edma.c      | 162 ++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |  31 +-
+ .../net/ethernet/qualcomm/ppe/edma_debugfs.c  | 370 ++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/ppe_debugfs.c   |  17 +
+ 5 files changed, 580 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index b358bfd781fb..45e1b103ec7a 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+ #EDMA
+-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
++qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+index 739fcfbde0f9..0e16f8ab545f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -151,6 +151,42 @@ static int edma_clock_init(void)
+       return 0;
+ }
++/**
++ * edma_err_stats_alloc - Allocate stats memory
++ *
++ * Allocate memory for per-CPU error stats.
++ *
++ * Return: 0 on success, -ENOMEM on failure.
++ */
++int edma_err_stats_alloc(void)
++{
++      u32 i;
++
++      edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
++      if (!edma_ctx->err_stats)
++              return -ENOMEM;
++
++      for_each_possible_cpu(i) {
++              struct edma_err_stats *stats;
++
++              stats = per_cpu_ptr(edma_ctx->err_stats, i);
++              u64_stats_init(&stats->syncp);
++      }
++
++      return 0;
++}
++
++/**
++ * edma_err_stats_free - Free stats memory
++ *
++ * Free memory of per-CPU error stats.
++ */
++void edma_err_stats_free(void)
++{
++      if (edma_ctx->err_stats) {
++              free_percpu(edma_ctx->err_stats);
++              edma_ctx->err_stats = NULL;
++      }
++}
++
+ /**
+  * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
+  *
+@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map_tbl(void)
+       return ret;
+ }
++static void edma_disable_misc_interrupt(void)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
++      regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
++}
++
++static void edma_enable_misc_interrupt(void)
++{
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 reg;
++
++      reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
++      regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
++}
++
++static irqreturn_t edma_misc_handle_irq(int irq,
++                                      __maybe_unused void *ctx)
++{
++      struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
++      struct regmap *regmap = ppe_dev->regmap;
++      u32 misc_intr_status, data, reg;
++
++      /* Read Misc intr status */
++      reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
++      regmap_read(regmap, reg, &data);
++      misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
++
++      pr_debug("Received misc irq %d, status: %d\n", irq, misc_intr_status);
++
++      if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
++              pr_err("MISC AXI read error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_axi_read_err;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
++              pr_err("MISC AXI write error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_axi_write_err;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC Rx descriptor fifo full error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_rxdesc_fifo_full;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC Rx buffer size error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_rx_buf_size_err;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC Tx SRAM full error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_tx_sram_full;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC Tx complete buffer full error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_txcmpl_buf_full;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC data length error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_tx_data_len_err;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
++              if (net_ratelimit())
++                      pr_err("MISC Tx timeout error received\n");
++              u64_stats_update_begin(&stats->syncp);
++              ++stats->edma_tx_timeout;
++              u64_stats_update_end(&stats->syncp);
++      }
++
++      return IRQ_HANDLED;
++}
++
+ static int edma_irq_register(void)
+ {
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
+       struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+       struct edma_ring_info *rx = hw_info->rx;
++      struct device *dev = ppe_dev->dev;
+       int ret;
+       u32 i;
+@@ -270,8 +408,25 @@ static int edma_irq_register(void)
+                        edma_rxdesc_irq_name[i]);
+       }
++      /* Request Misc IRQ */
++      ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
++                        IRQF_SHARED, "edma_misc",
++                        (void *)dev);
++      if (ret) {
++              pr_err("MISC IRQ:%d request failed\n",
++                     edma_ctx->intr_info.intr_misc);
++              goto misc_intr_req_fail;
++      }
++
+       return 0;
++misc_intr_req_fail:
++      /* Free IRQ for RXDESC rings */
++      for (i = 0; i < rx->num_rings; i++) {
++              synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
++              free_irq(edma_ctx->intr_info.intr_rx[i],
++                       (void *)&edma_ctx->rx_rings[i]);
++      }
+ rx_desc_ring_intr_req_fail:
+       for (i = 0; i < rx->num_rings; i++)
+               kfree(edma_rxdesc_irq_name[i]);
+@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
+               edma_cfg_tx_disable_interrupts(i);
+       edma_cfg_rx_disable_interrupts();
++      edma_disable_misc_interrupt();
+       edma_cfg_rx_rings_disable();
+@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe_dev)
+               edma_cfg_tx_disable_interrupts(i);
+       edma_cfg_rx_disable_interrupts();
++      edma_disable_misc_interrupt();
+       /* Free IRQ for TXCMPL rings. */
+       for (i = 0; i < txcmpl->num_rings; i++) {
+@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe_dev)
+       }
+       kfree(edma_rxdesc_irq_name);
++      /* Free Misc IRQ */
++      synchronize_irq(edma_ctx->intr_info.intr_misc);
++      free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
++
+       kfree(edma_ctx->intr_info.intr_rx);
+       kfree(edma_ctx->intr_info.intr_txcmpl);
+@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_dev)
+       }
+       edma_cfg_rx_enable_interrupts();
++      edma_enable_misc_interrupt();
+       dev_info(dev, "EDMA configuration successful\n");
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index fb8ccbfbaf41..6500d21b9eba 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -36,6 +36,30 @@
+                       ((((head) - (tail)) + \
+                       (max)) & ((max) - 1)); })
++/**
++ * struct edma_err_stats - EDMA error stats
++ * @edma_axi_read_err: AXI read error
++ * @edma_axi_write_err: AXI write error
++ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
++ * @edma_rx_buf_size_err: Rx buffer size too small error
++ * @edma_tx_sram_full: Tx packet SRAM buffer full error
++ * @edma_tx_data_len_err: Tx data length error
++ * @edma_tx_timeout: Tx timeout error
++ * @edma_txcmpl_buf_full: Tx completion buffer full error
++ * @syncp: Synchronization pointer
++ */
++struct edma_err_stats {
++      u64 edma_axi_read_err;
++      u64 edma_axi_write_err;
++      u64 edma_rxdesc_fifo_full;
++      u64 edma_rx_buf_size_err;
++      u64 edma_tx_sram_full;
++      u64 edma_tx_data_len_err;
++      u64 edma_tx_timeout;
++      u64 edma_txcmpl_buf_full;
++      struct u64_stats_sync syncp;
++};
++
+ /**
+  * struct edma_ring_info - EDMA ring data structure.
+  * @max_rings: Maximum number of rings
+@@ -97,6 +121,7 @@ struct edma_intr_info {
+  * @rx_rings: Rx Desc Rings, SW is consumer
+  * @tx_rings: Tx Descriptor Ring, SW is producer
+  * @txcmpl_rings: Tx complete Ring, SW is consumer
++ * @err_stats: Per CPU error statistics
+  * @rx_page_mode: Page mode enabled or disabled
+  * @rx_buf_size: Rx buffer size for Jumbo MRU
+  * @tx_requeue_stop: Tx requeue stop enabled or disabled
+@@ -111,6 +136,7 @@ struct edma_context {
+       struct edma_rxdesc_ring *rx_rings;
+       struct edma_txdesc_ring *tx_rings;
+       struct edma_txcmpl_ring *txcmpl_rings;
++      struct edma_err_stats __percpu *err_stats;
+       u32 rx_page_mode;
+       u32 rx_buf_size;
+       bool tx_requeue_stop;
+@@ -119,7 +145,10 @@ struct edma_context {
+ /* Global EDMA context */
+ extern struct edma_context *edma_ctx;
++int edma_err_stats_alloc(void);
++void edma_err_stats_free(void);
+ void edma_destroy(struct ppe_device *ppe_dev);
+ int edma_setup(struct ppe_device *ppe_dev);
+-
++void edma_debugfs_teardown(void);
++int edma_debugfs_setup(struct ppe_device *ppe_dev);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
+new file mode 100644
+index 000000000000..671062d4ee72
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
+@@ -0,0 +1,370 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* EDMA debugfs routines for display of Tx/Rx counters. */
++
++#include <linux/cpumask.h>
++#include <linux/debugfs.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++
++#include "edma.h"
++
++#define EDMA_STATS_BANNER_MAX_LEN       80
++#define EDMA_RX_RING_STATS_NODE_NAME    "EDMA_RX"
++#define EDMA_TX_RING_STATS_NODE_NAME    "EDMA_TX"
++#define EDMA_ERR_STATS_NODE_NAME        "EDMA_ERR"
++
++static struct dentry *edma_dentry;
++static struct dentry *stats_dentry;
++
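++/* Print an EDMA_STATS_BANNER_MAX_LEN wide banner: a rule of
++ * underscores, the node name centred between runs of '<' and '>'
++ * (e.g. "<<<< EDMA_RX >>>>"), then another rule.
++ */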
++static void edma_debugfs_print_banner(struct seq_file *m, char *node)
++{
++      u32 banner_char_len, i;
++
++      for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
++              seq_puts(m, "_");
++      banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
++      seq_puts(m, "\n\n");
++
++      for (i = 0; i < banner_char_len; i++)
++              seq_puts(m, "<");
++      seq_printf(m, " %s ", node);
++
++      for (i = 0; i < banner_char_len; i++)
++              seq_puts(m, ">");
++      seq_puts(m, "\n");
++
++      for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
++              seq_puts(m, "_");
++      seq_puts(m, "\n\n");
++}
++
++static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
++                                          void __maybe_unused *p)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *rxfill = hw_info->rxfill;
++      struct edma_rxfill_stats *rxfill_stats;
++      struct edma_rxdesc_stats *rxdesc_stats;
++      struct edma_ring_info *rx = hw_info->rx;
++      unsigned int start;
++      u32 i;
++
++      rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
++      if (!rxfill_stats)
++              return -ENOMEM;
++
++      rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
++      if (!rxdesc_stats) {
++              kfree(rxfill_stats);
++              return -ENOMEM;
++      }
++
++      /* Get stats for Rx fill rings. */
++      for (i = 0; i < rxfill->num_rings; i++) {
++              struct edma_rxfill_ring *rxfill_ring;
++              struct edma_rxfill_stats *stats;
++
++              rxfill_ring = &edma_ctx->rxfill_rings[i];
++              stats = &rxfill_ring->rxfill_stats;
++              do {
++                      start = u64_stats_fetch_begin(&stats->syncp);
++                      rxfill_stats[i].alloc_failed = stats->alloc_failed;
++                      rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
++              } while (u64_stats_fetch_retry(&stats->syncp, start));
++      }
++
++      /* Get stats for Rx Desc rings. */
++      for (i = 0; i < rx->num_rings; i++) {
++              struct edma_rxdesc_ring *rxdesc_ring;
++              struct edma_rxdesc_stats *stats;
++
++              rxdesc_ring = &edma_ctx->rx_rings[i];
++              stats = &rxdesc_ring->rxdesc_stats;
++              do {
++                      start = u64_stats_fetch_begin(&stats->syncp);
++                      rxdesc_stats[i].src_port_inval = stats->src_port_inval;
++                      rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
++                      rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
++              } while (u64_stats_fetch_retry(&stats->syncp, start));
++      }
++
++      edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
++
++      seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
++      for (i = 0; i < rx->num_rings; i++) {
++              seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
++              seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
++                         i + rx->ring_start, rxdesc_stats[i].src_port_inval);
++              seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
++                         i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
++              seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
++                         i + rx->ring_start,
++                         rxdesc_stats[i].src_port_inval_netdev);
++              seq_puts(m, "\n");
++      }
++
++      seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
++      for (i = 0; i < rxfill->num_rings; i++) {
++              seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
++              seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
++                         i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
++              seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
++                         i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
++              seq_puts(m, "\n");
++      }
++
++      kfree(rxfill_stats);
++      kfree(rxdesc_stats);
++      return 0;
++}
++
++static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
++                                          void __maybe_unused *p)
++{
++      struct edma_hw_info *hw_info = edma_ctx->hw_info;
++      struct edma_ring_info *txcmpl = hw_info->txcmpl;
++      struct edma_ring_info *tx = hw_info->tx;
++      struct edma_txcmpl_stats *txcmpl_stats;
++      struct edma_txdesc_stats *txdesc_stats;
++      unsigned int start;
++      u32 i;
++
++      txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
++      if (!txcmpl_stats)
++              return -ENOMEM;
++
++      txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
++      if (!txdesc_stats) {
++              kfree(txcmpl_stats);
++              return -ENOMEM;
++      }
++
++      /* Get stats for Tx desc rings. */
++      for (i = 0; i < tx->num_rings; i++) {
++              struct edma_txdesc_ring *txdesc_ring;
++              struct edma_txdesc_stats *stats;
++
++              txdesc_ring = &edma_ctx->tx_rings[i];
++              stats = &txdesc_ring->txdesc_stats;
++              do {
++                      start = u64_stats_fetch_begin(&stats->syncp);
++                      txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
++                      txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
++              } while (u64_stats_fetch_retry(&stats->syncp, start));
++      }
++
++      /* Get stats for Tx Complete rings. */
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              struct edma_txcmpl_ring *txcmpl_ring;
++              struct edma_txcmpl_stats *stats;
++
++              txcmpl_ring = &edma_ctx->txcmpl_rings[i];
++              stats = &txcmpl_ring->txcmpl_stats;
++              do {
++                      start = u64_stats_fetch_begin(&stats->syncp);
++                      txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
++                      txcmpl_stats[i].errors = stats->errors;
++                      txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
++                      txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
++              } while (u64_stats_fetch_retry(&stats->syncp, start));
++      }
++
++      edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
++
++      seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
++      for (i = 0; i < txcmpl->num_rings; i++) {
++              seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
++              seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
++                         i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
++              seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
++                         i + txcmpl->ring_start, txcmpl_stats[i].errors);
++              seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
++                         i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
++              seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
++                         i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
++              seq_puts(m, "\n");
++      }
++
++      seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
++      for (i = 0; i < tx->num_rings; i++) {
++              seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
++              seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
++                         i + tx->ring_start, txdesc_stats[i].no_desc_avail);
++              seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
++                         i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
++              seq_puts(m, "\n");
++      }
++
++      kfree(txcmpl_stats);
++      kfree(txdesc_stats);
++      return 0;
++}
++
++static int edma_debugfs_err_stats_show(struct seq_file *m,
++                                     void __maybe_unused *p)
++{
++      struct edma_err_stats *err_stats, *pcpu_err_stats;
++      unsigned int start;
++      u32 cpu;
++
++      err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
++      if (!err_stats)
++              return -ENOMEM;
++
++      /* Get percpu EDMA miscellaneous stats. */
++      for_each_possible_cpu(cpu) {
++              pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
++              do {
++                      start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
++                      err_stats->edma_axi_read_err +=
++                              pcpu_err_stats->edma_axi_read_err;
++                      err_stats->edma_axi_write_err +=
++                              pcpu_err_stats->edma_axi_write_err;
++                      err_stats->edma_rxdesc_fifo_full +=
++                              pcpu_err_stats->edma_rxdesc_fifo_full;
++                      err_stats->edma_rx_buf_size_err +=
++                              pcpu_err_stats->edma_rx_buf_size_err;
++                      err_stats->edma_tx_sram_full +=
++                              pcpu_err_stats->edma_tx_sram_full;
++                      err_stats->edma_tx_data_len_err +=
++                              pcpu_err_stats->edma_tx_data_len_err;
++                      err_stats->edma_tx_timeout +=
++                              pcpu_err_stats->edma_tx_timeout;
++                      err_stats->edma_txcmpl_buf_full +=
++                              pcpu_err_stats->edma_txcmpl_buf_full;
++              } while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
++      }
++
++      edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
++
++      seq_puts(m, "\n#EDMA error stats:\n\n");
++      seq_printf(m, "\t\t axi read error = %llu\n",
++                 err_stats->edma_axi_read_err);
++      seq_printf(m, "\t\t axi write error = %llu\n",
++                 err_stats->edma_axi_write_err);
++      seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
++                 err_stats->edma_rxdesc_fifo_full);
++      seq_printf(m, "\t\t Rx buffer size error = %llu\n",
++                 err_stats->edma_rx_buf_size_err);
++      seq_printf(m, "\t\t Tx SRAM full = %llu\n",
++                 err_stats->edma_tx_sram_full);
++      seq_printf(m, "\t\t Tx data length error = %llu\n",
++                 err_stats->edma_tx_data_len_err);
++      seq_printf(m, "\t\t Tx timeout = %llu\n",
++                 err_stats->edma_tx_timeout);
++      seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
++                 err_stats->edma_txcmpl_buf_full);
++
++      kfree(err_stats);
++      return 0;
++}
++
++static int edma_debugfs_rx_rings_stats_open(struct inode *inode,
++                                          struct file *file)
++{
++      return single_open(file, edma_debugfs_rx_rings_stats_show,
++                         inode->i_private);
++}
++
++static const struct file_operations edma_debugfs_rx_rings_file_ops = {
++      .open = edma_debugfs_rx_rings_stats_open,
++      .read = seq_read,
++      .llseek = seq_lseek,
++      .release = single_release
++};
++
++static int edma_debugfs_tx_rings_stats_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
++}
++
++static const struct file_operations edma_debugfs_tx_rings_file_ops = {
++      .open = edma_debugfs_tx_rings_stats_open,
++      .read = seq_read,
++      .llseek = seq_lseek,
++      .release = single_release
++};
++
++static int edma_debugfs_err_stats_open(struct inode *inode, struct file *file)
++{
++      return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
++}
++
++static const struct file_operations edma_debugfs_misc_file_ops = {
++      .open = edma_debugfs_err_stats_open,
++      .read = seq_read,
++      .llseek = seq_lseek,
++      .release = single_release
++};
++
++/**
++ * edma_debugfs_teardown - EDMA debugfs teardown.
++ *
++ * EDMA debugfs teardown and free stats memory.
++ */
++void edma_debugfs_teardown(void)
++{
++      /* Free EDMA miscellaneous stats memory */
++      edma_err_stats_free();
++
++      debugfs_remove_recursive(edma_dentry);
++      edma_dentry = NULL;
++      stats_dentry = NULL;
++}
++
++/**
++ * edma_debugfs_setup - EDMA debugfs setup.
++ * @ppe_dev: PPE Device
++ *
++ * EDMA debugfs setup.
++ */
++int edma_debugfs_setup(struct ppe_device *ppe_dev)
++{
++      edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
++      if (IS_ERR(edma_dentry)) {
++              pr_err("Unable to create debugfs edma directory in debugfs\n");
++              goto debugfs_dir_failed;
++      }
++
++      stats_dentry = debugfs_create_dir("stats", edma_dentry);
++      if (IS_ERR(stats_dentry)) {
++              pr_err("Unable to create debugfs stats directory in debugfs\n");
++              goto debugfs_dir_failed;
++      }
++
++      if (!debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
++                               NULL, &edma_debugfs_rx_rings_file_ops)) {
++              pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
++              goto debugfs_dir_failed;
++      }
++
++      if (!debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
++                               NULL, &edma_debugfs_tx_rings_file_ops)) {
++              pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
++              goto debugfs_dir_failed;
++      }
++
++      /* Allocate memory for EDMA miscellaneous stats */
++      if (edma_err_stats_alloc() < 0) {
++              pr_err("Unable to allocate miscellaneous percpu stats\n");
++              goto debugfs_dir_failed;
++      }
++
++      if (!debugfs_create_file("err_stats", 0444, stats_dentry,
++                               NULL, &edma_debugfs_misc_file_ops)) {
++              pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
++              goto debugfs_dir_failed;
++      }
++
++      return 0;
++
++debugfs_dir_failed:
++      debugfs_remove_recursive(edma_dentry);
++      edma_dentry = NULL;
++      stats_dentry = NULL;
++      return -ENOMEM;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+index 1cd4c491e724..f325fcf1e17e 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
++++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+@@ -6,9 +6,11 @@
+ /* PPE debugfs routines for display of PPE counters useful for debug. */
+ #include <linux/debugfs.h>
++#include <linux/netdevice.h>
+ #include <linux/regmap.h>
+ #include <linux/seq_file.h>
++#include "edma.h"
+ #include "ppe.h"
+ #include "ppe_config.h"
+ #include "ppe_debugfs.h"
+@@ -711,15 +713,30 @@ static const struct file_operations ppe_debugfs_packet_counter_fops = {
+ void ppe_debugfs_setup(struct ppe_device *ppe_dev)
+ {
++      int ret;
++
+       ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+       debugfs_create_file("packet_counter", 0444,
+                           ppe_dev->debugfs_root,
+                           ppe_dev,
+                           &ppe_debugfs_packet_counter_fops);
++
++      if (IS_ERR(ppe_dev->debugfs_root)) {
++              dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
++              return;
++      }
++
++      ret = edma_debugfs_setup(ppe_dev);
++      if (ret) {
++              dev_err(ppe_dev->dev, "Error in EDMA debugfs setup API. ret: %d\n", ret);
++              debugfs_remove_recursive(ppe_dev->debugfs_root);
++              ppe_dev->debugfs_root = NULL;
++      }
+ }
+ void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
+ {
++      edma_debugfs_teardown();
+       debugfs_remove_recursive(ppe_dev->debugfs_root);
+       ppe_dev->debugfs_root = NULL;
+ }
+-- 
+2.45.2
+
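The debugfs readers in the patch above all snapshot their counters through the kernel's u64_stats seqcount protocol: the reader copies the counters and retries until the writer-side generation is stable, so the per-CPU writers on the hot path never take a lock. A minimal sketch of that pattern; demo_stats and demo_read_packets are illustrative names, not driver symbols:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct demo_stats {
            u64 packets;
            struct u64_stats_sync syncp;
    };

    static u64 demo_read_packets(struct demo_stats *s)
    {
            unsigned int start;
            u64 packets;

            /* Retry the copy until no writer update raced with it. */
            do {
                    start = u64_stats_fetch_begin(&s->syncp);
                    packets = s->packets;
            } while (u64_stats_fetch_retry(&s->syncp, start));

            return packets;
    }

With the patch applied, the aggregated snapshots are readable from the rx_ring_stats, tx_ring_stats and err_stats nodes the setup code creates under the ppe/edma/stats debugfs directory.
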
diff --git a/target/linux/qualcommbe/patches-6.6/103-43-net-ethernet-qualcomm-Add-ethtool-support-for-EDMA.patch b/target/linux/qualcommbe/patches-6.6/103-43-net-ethernet-qualcomm-Add-ethtool-support-for-EDMA.patch
new file mode 100644 (file)
index 0000000..c4fd0ae
--- /dev/null
@@ -0,0 +1,352 @@
+From ec30075badd13a3e2ffddd1c5dcb40e3c52202ed Mon Sep 17 00:00:00 2001
+From: Pavithra R <quic_pavir@quicinc.com>
+Date: Thu, 30 May 2024 20:46:36 +0530
+Subject: [PATCH 43/50] net: ethernet: qualcomm: Add ethtool support for EDMA
+
+ethtool ops can be used for EDMA netdevice configuration and statistics.
+
+Change-Id: I57fc19415dacbe51fed000520336463938220609
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |   1 +
+ .../net/ethernet/qualcomm/ppe/edma_ethtool.c  | 294 ++++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma_port.c |   1 +
+ 4 files changed, 297 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
+index 45e1b103ec7a..cb9d30889d06 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
++++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
+@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+ qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
+ #EDMA
+-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
++qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o edma_ethtool.o
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index 6500d21b9eba..ac6d2fcc2983 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -151,4 +151,5 @@ void edma_destroy(struct ppe_device *ppe_dev);
+ int edma_setup(struct ppe_device *ppe_dev);
+ void edma_debugfs_teardown(void);
+ int edma_debugfs_setup(struct ppe_device *ppe_dev);
++void edma_set_ethtool_ops(struct net_device *netdev);
+ #endif
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c b/drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
+new file mode 100644
+index 000000000000..eabc1e11b16f
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_ethtool.c
+@@ -0,0 +1,294 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/* ethtool support for EDMA */
++
++#include <linux/cpumask.h>
++#include <linux/ethtool.h>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/phylink.h>
++
++#include "edma.h"
++#include "edma_port.h"
++
++struct edma_ethtool_stats {
++      u8 stat_string[ETH_GSTRING_LEN];
++      u32 stat_offset;
++};
++
++/**
++ * struct edma_gmac_stats - Per-GMAC statistics.
++ * @rx_packets: Number of RX packets
++ * @rx_bytes: Number of RX bytes
++ * @rx_dropped: Number of RX dropped packets
++ * @rx_fraglist_packets: Number of RX fraglist packets
++ * @rx_nr_frag_packets: Number of RX nr fragment packets
++ * @rx_nr_frag_headroom_err: Number of RX nr fragment packets with headroom error
++ * @tx_packets: Number of TX packets
++ * @tx_bytes: Number of TX bytes
++ * @tx_dropped: Number of TX dropped packets
++ * @tx_nr_frag_packets: Number of TX nr fragment packets
++ * @tx_fraglist_packets: Number of TX fraglist packets
++ * @tx_fraglist_with_nr_frags_packets: Number of TX fraglist packets with nr fragments
++ * @tx_tso_packets: Number of TX TCP segmentation offload packets
++ * @tx_tso_drop_packets: Number of TX TCP segmentation dropped packets
++ * @tx_gso_packets: Number of TX SW GSO packets
++ * @tx_gso_drop_packets: Number of TX SW GSO dropped packets
++ * @tx_queue_stopped: Number of times Queue got stopped
++ */
++struct edma_gmac_stats {
++      u64 rx_packets;
++      u64 rx_bytes;
++      u64 rx_dropped;
++      u64 rx_fraglist_packets;
++      u64 rx_nr_frag_packets;
++      u64 rx_nr_frag_headroom_err;
++      u64 tx_packets;
++      u64 tx_bytes;
++      u64 tx_dropped;
++      u64 tx_nr_frag_packets;
++      u64 tx_fraglist_packets;
++      u64 tx_fraglist_with_nr_frags_packets;
++      u64 tx_tso_packets;
++      u64 tx_tso_drop_packets;
++      u64 tx_gso_packets;
++      u64 tx_gso_drop_packets;
++      u64 tx_queue_stopped[EDMA_MAX_CORE];
++};
++
++#define EDMA_STAT(m)          offsetof(struct edma_gmac_stats, m)
++
++static const struct edma_ethtool_stats edma_gstrings_stats[] = {
++      {"rx_bytes", EDMA_STAT(rx_bytes)},
++      {"rx_packets", EDMA_STAT(rx_packets)},
++      {"rx_dropped", EDMA_STAT(rx_dropped)},
++      {"rx_fraglist_packets", EDMA_STAT(rx_fraglist_packets)},
++      {"rx_nr_frag_packets", EDMA_STAT(rx_nr_frag_packets)},
++      {"rx_nr_frag_headroom_err", EDMA_STAT(rx_nr_frag_headroom_err)},
++      {"tx_bytes", EDMA_STAT(tx_bytes)},
++      {"tx_packets", EDMA_STAT(tx_packets)},
++      {"tx_dropped", EDMA_STAT(tx_dropped)},
++      {"tx_nr_frag_packets", EDMA_STAT(tx_nr_frag_packets)},
++      {"tx_fraglist_packets", EDMA_STAT(tx_fraglist_packets)},
++      {"tx_fraglist_nr_frags_packets", EDMA_STAT(tx_fraglist_with_nr_frags_packets)},
++      {"tx_tso_packets", EDMA_STAT(tx_tso_packets)},
++      {"tx_tso_drop_packets", EDMA_STAT(tx_tso_drop_packets)},
++      {"tx_gso_packets", EDMA_STAT(tx_gso_packets)},
++      {"tx_gso_drop_packets", EDMA_STAT(tx_gso_drop_packets)},
++      {"tx_queue_stopped_cpu0", EDMA_STAT(tx_queue_stopped[0])},
++      {"tx_queue_stopped_cpu1", EDMA_STAT(tx_queue_stopped[1])},
++      {"tx_queue_stopped_cpu2", EDMA_STAT(tx_queue_stopped[2])},
++      {"tx_queue_stopped_cpu3", EDMA_STAT(tx_queue_stopped[3])},
++};
++
++#define EDMA_STATS_LEN                ARRAY_SIZE(edma_gstrings_stats)
++
++static void edma_port_get_stats(struct net_device *netdev,
++                              struct edma_gmac_stats *stats)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct edma_port_rx_stats *pcpu_rx_stats;
++      struct edma_port_tx_stats *pcpu_tx_stats;
++      int i;
++
++      memset(stats, 0, sizeof(*stats));
++
++      for_each_possible_cpu(i) {
++              struct edma_port_rx_stats rxp;
++              struct edma_port_tx_stats txp;
++              unsigned int start;
++
++              pcpu_rx_stats = per_cpu_ptr(port_priv->pcpu_stats.rx_stats, i);
++
++              do {
++                      start = u64_stats_fetch_begin(&pcpu_rx_stats->syncp);
++                      memcpy(&rxp, pcpu_rx_stats, sizeof(*pcpu_rx_stats));
++              } while (u64_stats_fetch_retry(&pcpu_rx_stats->syncp, start));
++
++              stats->rx_packets += rxp.rx_pkts;
++              stats->rx_bytes += rxp.rx_bytes;
++              stats->rx_dropped += rxp.rx_drops;
++              stats->rx_nr_frag_packets += rxp.rx_nr_frag_pkts;
++              stats->rx_fraglist_packets += rxp.rx_fraglist_pkts;
++              stats->rx_nr_frag_headroom_err += rxp.rx_nr_frag_headroom_err;
++
++              pcpu_tx_stats = per_cpu_ptr(port_priv->pcpu_stats.tx_stats, i);
++
++              do {
++                      start = u64_stats_fetch_begin(&pcpu_tx_stats->syncp);
++                      memcpy(&txp, pcpu_tx_stats, sizeof(*pcpu_tx_stats));
++              } while (u64_stats_fetch_retry(&pcpu_tx_stats->syncp, start));
++
++              stats->tx_packets += txp.tx_pkts;
++              stats->tx_bytes += txp.tx_bytes;
++              stats->tx_dropped += txp.tx_drops;
++              stats->tx_nr_frag_packets += txp.tx_nr_frag_pkts;
++              stats->tx_fraglist_packets += txp.tx_fraglist_pkts;
++              stats->tx_fraglist_with_nr_frags_packets += txp.tx_fraglist_with_nr_frags_pkts;
++              stats->tx_tso_packets += txp.tx_tso_pkts;
++              stats->tx_tso_drop_packets += txp.tx_tso_drop_pkts;
++              stats->tx_gso_packets += txp.tx_gso_pkts;
++              stats->tx_gso_drop_packets += txp.tx_gso_drop_pkts;
++              stats->tx_queue_stopped[i] += txp.tx_queue_stopped[i];
++      }
++}
++
++static void edma_get_ethtool_stats(struct net_device *netdev,
++                                 __maybe_unused struct ethtool_stats *stats,
++                                 u64 *data)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      struct edma_gmac_stats edma_stats;
++      u64 *mib_data;
++      int i;
++      u8 *p;
++
++      if (!port_priv)
++              return;
++
++      /* Get the DMA Driver statistics from the data plane if available. */
++      memset(&edma_stats, 0, sizeof(struct edma_gmac_stats));
++      edma_port_get_stats(netdev, &edma_stats);
++
++      /* Populate data plane statistics. */
++      for (i = 0; i < EDMA_STATS_LEN; i++) {
++              p = ((u8 *)(&edma_stats) + edma_gstrings_stats[i].stat_offset);
++              data[i] = *(u64 *)p;
++      }
++
++      /* Get the GMAC MIB statistics along with the DMA driver statistics. */
++      mib_data = &data[EDMA_STATS_LEN];
++      ppe_port_get_ethtool_stats(port_priv->ppe_port, mib_data);
++}
++
++static int edma_get_strset_count(struct net_device *netdev, int sset)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      int sset_count = 0;
++
++      if (!port_priv || sset != ETH_SS_STATS)
++              return 0;
++
++      sset_count = ppe_port_get_sset_count(port_priv->ppe_port, sset);
++
++      return (EDMA_STATS_LEN + sset_count);
++}
++
++static void edma_get_strings(struct net_device *netdev, u32 stringset,
++                           u8 *data)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      int i;
++
++      if (!port_priv || stringset != ETH_SS_STATS)
++              return;
++
++      for (i = 0; i < EDMA_STATS_LEN; i++) {
++              memcpy(data, edma_gstrings_stats[i].stat_string,
++                     strlen(edma_gstrings_stats[i].stat_string));
++              data += ETH_GSTRING_LEN;
++      }
++
++      ppe_port_get_strings(port_priv->ppe_port, stringset, data);
++}
++
++static int edma_get_link_ksettings(struct net_device *netdev,
++                                 struct ethtool_link_ksettings *cmd)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      return phylink_ethtool_ksettings_get(port_priv->ppe_port->phylink, cmd);
++}
++
++static int edma_set_link_ksettings(struct net_device *netdev,
++                                 const struct ethtool_link_ksettings *cmd)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      return phylink_ethtool_ksettings_set(port_priv->ppe_port->phylink, cmd);
++}
++
++static void edma_get_pauseparam(struct net_device *netdev,
++                              struct ethtool_pauseparam *pause)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return;
++
++      phylink_ethtool_get_pauseparam(port_priv->ppe_port->phylink, pause);
++}
++
++static int edma_set_pauseparam(struct net_device *netdev,
++                             struct ethtool_pauseparam *pause)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      return phylink_ethtool_set_pauseparam(port_priv->ppe_port->phylink, pause);
++}
++
++static int edma_get_eee(struct net_device *netdev, struct ethtool_eee *eee)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++
++      if (!port_priv)
++              return -EINVAL;
++
++      return phylink_ethtool_get_eee(port_priv->ppe_port->phylink, eee);
++}
++
++static int edma_set_eee(struct net_device *netdev, struct ethtool_eee *eee)
++{
++      struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
++      int ret;
++
++      if (!port_priv)
++              return -EINVAL;
++
++      ret = ppe_port_set_mac_eee(port_priv->ppe_port, eee);
++      if (ret)
++              return ret;
++
++      return phylink_ethtool_set_eee(port_priv->ppe_port->phylink, eee);
++}
++
++static const struct ethtool_ops edma_ethtool_ops = {
++      .get_strings = edma_get_strings,
++      .get_sset_count = edma_get_strset_count,
++      .get_ethtool_stats = edma_get_ethtool_stats,
++      .get_link = ethtool_op_get_link,
++      .get_link_ksettings = edma_get_link_ksettings,
++      .set_link_ksettings = edma_set_link_ksettings,
++      .get_pauseparam = edma_get_pauseparam,
++      .set_pauseparam = edma_set_pauseparam,
++      .get_eee = edma_get_eee,
++      .set_eee = edma_set_eee,
++};
++
++/**
++ * edma_set_ethtool_ops - Set ethtool operations
++ * @netdev: Netdevice
++ *
++ * Set ethtool operations.
++ */
++void edma_set_ethtool_ops(struct net_device *netdev)
++{
++      netdev->ethtool_ops = &edma_ethtool_ops;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.c b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+index afa2b6479822..0b3b769a4a49 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+@@ -380,6 +380,7 @@ int edma_port_setup(struct ppe_port *port)
+       netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+       netdev->netdev_ops = &edma_port_netdev_ops;
+       netdev->gso_max_segs = GSO_MAX_SEGS;
++      edma_set_ethtool_ops(netdev);
+       maddr = mac_addr;
+       if (of_get_mac_address(np, maddr))
+-- 
+2.45.2
+
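The new edma_ethtool.c drives its statistics output from a name/offset table: each EDMA_STAT() entry records a counter's byte offset inside struct edma_gmac_stats, so one generic loop copies every counter into the ethtool data array. A self-contained sketch of the same offsetof-table technique, using illustrative demo_* names rather than the driver's own:

    #include <linux/kernel.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct demo_stats {
            u64 rx_packets;
            u64 tx_packets;
    };

    struct demo_stat_desc {
            const char *name;
            size_t offset;
    };

    #define DEMO_STAT(f) { #f, offsetof(struct demo_stats, f) }

    static const struct demo_stat_desc demo_descs[] = {
            DEMO_STAT(rx_packets),
            DEMO_STAT(tx_packets),
    };

    /* Table-driven copy of every counter into the ethtool data array. */
    static void demo_fill(const struct demo_stats *s, u64 *data)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(demo_descs); i++)
                    data[i] = *(const u64 *)((const u8 *)s + demo_descs[i].offset);
    }

Once a port netdev has these ops attached, the EDMA counters appear alongside the GMAC MIB counters in the usual `ethtool -S <iface>` output.
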
diff --git a/target/linux/qualcommbe/patches-6.6/103-44-net-ethernet-qualcomm-Add-module-parameters-for-driv.patch b/target/linux/qualcommbe/patches-6.6/103-44-net-ethernet-qualcomm-Add-module-parameters-for-driv.patch
new file mode 100644 (file)
index 0000000..48bca9d
--- /dev/null
@@ -0,0 +1,303 @@
+From c9ad8286ca39c2545f6a6851a8ede8488a9263f3 Mon Sep 17 00:00:00 2001
+From: Pavithra R <quic_pavir@quicinc.com>
+Date: Tue, 11 Jun 2024 00:00:46 +0530
+Subject: [PATCH 44/50] net: ethernet: qualcomm: Add module parameters for
+ driver tunings
+
+Add module params and corresponding functionality for Tx/Rx
+mitigation timer/packet count, napi budget and tx requeue stop.
+
+Change-Id: I1717559c931bba4f355ee06ab89f289818400ca2
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/edma.c      | 35 +++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c   | 29 +++++++++++++--
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h   | 21 +++++++++++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_tx.c   | 29 +++++++++++++--
+ .../net/ethernet/qualcomm/ppe/edma_cfg_tx.h   | 16 +++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma_rx.h   |  4 +++
+ drivers/net/ethernet/qualcomm/ppe/edma_tx.h   |  4 +++
+ 7 files changed, 134 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+index 0e16f8ab545f..ae9ca528fd55 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -38,6 +38,38 @@ static int rx_buff_size;
+ module_param(rx_buff_size, int, 0640);
+ MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
++int edma_rx_napi_budget = EDMA_RX_NAPI_WORK_DEF;
++module_param(edma_rx_napi_budget, int, 0444);
++MODULE_PARM_DESC(edma_rx_napi_budget, "Rx NAPI budget (default:128, min:16, max:512)");
++
++int edma_tx_napi_budget = EDMA_TX_NAPI_WORK_DEF;
++module_param(edma_tx_napi_budget, int, 0444);
++MODULE_PARM_DESC(edma_tx_napi_budget, "Tx NAPI budget (default:512 for ipq95xx, min:16, max:512)");
++
++int edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
++module_param(edma_rx_mitigation_pkt_cnt, int, 0444);
++MODULE_PARM_DESC(edma_rx_mitigation_pkt_cnt,
++               "Rx mitigation packet count value (default:16, min:0, max: 256)");
++
++int edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
++module_param(edma_rx_mitigation_timer, int, 0444);
++MODULE_PARM_DESC(edma_rx_mitigation_timer,
++               "Rx mitigation timer value in microseconds (default:25, min:0, max: 1000)");
++
++int edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
++module_param(edma_tx_mitigation_timer, int, 0444);
++MODULE_PARM_DESC(edma_tx_mitigation_timer,
++               "Tx mitigation timer value in microseconds (default:250, min:0, max: 1000)");
++
++int edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
++module_param(edma_tx_mitigation_pkt_cnt, int, 0444);
++MODULE_PARM_DESC(edma_tx_mitigation_pkt_cnt,
++               "Tx mitigation packet count value (default:16, min:0, max: 256)");
++
++static int tx_requeue_stop;
++module_param(tx_requeue_stop, int, 0640);
++MODULE_PARM_DESC(tx_requeue_stop, "Disable Tx requeue function (default:0)");
++
+ /* Priority to multi-queue mapping. */
+ static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
+@@ -828,7 +860,10 @@ int edma_setup(struct ppe_device *ppe_dev)
+       edma_ctx->hw_info = &ipq9574_hw_info;
+       edma_ctx->ppe_dev = ppe_dev;
+       edma_ctx->rx_buf_size = rx_buff_size;
++
+       edma_ctx->tx_requeue_stop = false;
++      if (tx_requeue_stop != 0)
++              edma_ctx->tx_requeue_stop = true;
+       /* Configure the EDMA common clocks. */
+       ret = edma_clock_init();
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+index 18e4ada6a076..bf8854976328 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+@@ -166,6 +166,24 @@ static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring
+       reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
+       regmap_write(regmap, reg, data);
++      /* Validate mitigation timer value */
++      if (edma_rx_mitigation_timer < EDMA_RX_MITIGATION_TIMER_MIN ||
++          edma_rx_mitigation_timer > EDMA_RX_MITIGATION_TIMER_MAX) {
++              pr_err("Invalid Rx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
++                     edma_rx_mitigation_timer, rxdesc_ring->ring_id,
++                      EDMA_RX_MITIGATION_TIMER_DEF);
++              edma_rx_mitigation_timer = EDMA_RX_MITIGATION_TIMER_DEF;
++      }
++
++      /* Validate mitigation packet count value */
++      if (edma_rx_mitigation_pkt_cnt < EDMA_RX_MITIGATION_PKT_CNT_MIN ||
++          edma_rx_mitigation_pkt_cnt > EDMA_RX_MITIGATION_PKT_CNT_MAX) {
++              pr_err("Invalid Rx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
++                     edma_rx_mitigation_pkt_cnt, rxdesc_ring->ring_id,
++                     EDMA_RX_MITIGATION_PKT_CNT_DEF);
++              edma_rx_mitigation_pkt_cnt = EDMA_RX_MITIGATION_PKT_CNT_DEF;
++      }
++
+       /* Configure the Mitigation timer */
+       data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
+                                          ppe_dev->clk_rate / MHZ);
+@@ -176,7 +194,7 @@ static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring
+       regmap_write(regmap, reg, data);
+       /* Configure the Mitigation packet count */
+-      data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
++      data = (edma_rx_mitigation_pkt_cnt & EDMA_RXDESC_LOW_THRE_MASK)
+                       << EDMA_RXDESC_LOW_THRE_SHIFT;
+       pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
+       reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
+@@ -915,6 +933,13 @@ void edma_cfg_rx_napi_add(void)
+       struct edma_ring_info *rx = hw_info->rx;
+       u32 i;
++      if (edma_rx_napi_budget < EDMA_RX_NAPI_WORK_MIN ||
++          edma_rx_napi_budget > EDMA_RX_NAPI_WORK_MAX) {
++              pr_err("Incorrect Rx NAPI budget: %d, setting to default: %d\n",
++                     edma_rx_napi_budget, hw_info->napi_budget_rx);
++              edma_rx_napi_budget = hw_info->napi_budget_rx;
++      }
++
+       for (i = 0; i < rx->num_rings; i++) {
+               struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
+@@ -923,7 +948,7 @@ void edma_cfg_rx_napi_add(void)
+               rxdesc_ring->napi_added = true;
+       }
+-      netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
++      netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", edma_rx_napi_budget);
+ }
+ /**
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+index 3c84ef4ea85c..bd897dba286a 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+@@ -5,6 +5,15 @@
+ #ifndef __EDMA_CFG_RX__
+ #define __EDMA_CFG_RX__
++/* Rx default NAPI budget */
++#define EDMA_RX_NAPI_WORK_DEF         128
++
++/* RX minimum NAPI budget */
++#define EDMA_RX_NAPI_WORK_MIN         16
++
++/* Rx maximum NAPI budget */
++#define EDMA_RX_NAPI_WORK_MAX         512
++
+ /* SKB payload size used in page mode */
+ #define EDMA_RX_PAGE_MODE_SKB_SIZE    256
+@@ -22,9 +31,21 @@
+ /* Rx mitigation timer's default value in microseconds */
+ #define EDMA_RX_MITIGATION_TIMER_DEF  25
++/* Rx mitigation timer's minimum value in microseconds */
++#define EDMA_RX_MITIGATION_TIMER_MIN  0
++
++/* Rx mitigation timer's maximum value in microseconds */
++#define EDMA_RX_MITIGATION_TIMER_MAX  1000
++
+ /* Rx mitigation packet count's default value */
+ #define EDMA_RX_MITIGATION_PKT_CNT_DEF        16
++/* Rx mitigation packet count's minimum value */
++#define EDMA_RX_MITIGATION_PKT_CNT_MIN        0
++
++/* Rx mitigation packet count's maximum value */
++#define EDMA_RX_MITIGATION_PKT_CNT_MAX        256
++
+ /* Default bitmap of cores for RPS to ARM cores */
+ #define EDMA_RX_DEFAULT_BITMAP        ((1 << EDMA_MAX_CORE) - 1)
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+index f704c654b2cd..771acebdaf75 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.c
+@@ -170,6 +170,24 @@ static void edma_cfg_txcmpl_ring_configure(struct edma_txcmpl_ring *txcmpl_ring)
+       reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id);
+       regmap_write(regmap, reg, EDMA_TXCMPL_RETMODE_OPAQUE);
++      /* Validate mitigation timer value */
++      if (edma_tx_mitigation_timer < EDMA_TX_MITIGATION_TIMER_MIN ||
++          edma_tx_mitigation_timer > EDMA_TX_MITIGATION_TIMER_MAX) {
++              pr_err("Invalid Tx mitigation timer configured:%d for ring:%d. Using the default timer value:%d\n",
++                     edma_tx_mitigation_timer, txcmpl_ring->id,
++                     EDMA_TX_MITIGATION_TIMER_DEF);
++              edma_tx_mitigation_timer = EDMA_TX_MITIGATION_TIMER_DEF;
++      }
++
++      /* Validate mitigation packet count value */
++      if (edma_tx_mitigation_pkt_cnt < EDMA_TX_MITIGATION_PKT_CNT_MIN ||
++          edma_tx_mitigation_pkt_cnt > EDMA_TX_MITIGATION_PKT_CNT_MAX) {
++              pr_err("Invalid Tx mitigation packet count configured:%d for ring:%d. Using the default packet counter value:%d\n",
++                     edma_tx_mitigation_pkt_cnt, txcmpl_ring->id,
++                     EDMA_TX_MITIGATION_PKT_CNT_DEF);
++              edma_tx_mitigation_pkt_cnt = EDMA_TX_MITIGATION_PKT_CNT_DEF;
++      }
++
+       /* Configure the Mitigation timer. */
+       data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_TX_MITIGATION_TIMER_DEF,
+                                          ppe_dev->clk_rate / MHZ);
+@@ -180,7 +198,7 @@ static void edma_cfg_txcmpl_ring_configure(struct edma_txcmpl_ring *txcmpl_ring)
+       regmap_write(regmap, reg, data);
+       /* Configure the Mitigation packet count. */
+-      data = (EDMA_TX_MITIGATION_PKT_CNT_DEF & EDMA_TXCMPL_LOW_THRE_MASK)
++      data = (edma_tx_mitigation_pkt_cnt & EDMA_TXCMPL_LOW_THRE_MASK)
+               << EDMA_TXCMPL_LOW_THRE_SHIFT;
+       pr_debug("EDMA Tx mitigation packet count value: %d\n", data);
+       reg = EDMA_BASE_OFFSET + EDMA_REG_TXCMPL_UGT_THRE(txcmpl_ring->id);
+@@ -634,6 +652,13 @@ void edma_cfg_tx_napi_add(struct net_device *netdev, u32 port_id)
+       struct edma_txcmpl_ring *txcmpl_ring;
+       u32 i, ring_idx;
++      if (edma_tx_napi_budget < EDMA_TX_NAPI_WORK_MIN ||
++          edma_tx_napi_budget > EDMA_TX_NAPI_WORK_MAX) {
++              pr_err("Incorrect Tx NAPI budget: %d, setting to default: %d\n",
++                     edma_tx_napi_budget, hw_info->napi_budget_tx);
++              edma_tx_napi_budget = hw_info->napi_budget_tx;
++      }
++
+       /* Adding tx napi for a interface with each queue. */
+       for_each_possible_cpu(i) {
+               ring_idx = ((port_id - 1) * num_possible_cpus()) + i;
+@@ -644,5 +669,5 @@ void edma_cfg_tx_napi_add(struct net_device *netdev, u32 port_id)
+               netdev_dbg(netdev, "Napi added for txcmpl ring: %u\n", txcmpl_ring->id);
+       }
+-      netdev_dbg(netdev, "Tx NAPI budget: %d\n", hw_info->napi_budget_tx);
++      netdev_dbg(netdev, "Tx NAPI budget: %d\n", edma_tx_napi_budget);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+index 4840c601fc86..608bbc5f93e8 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_tx.h
+@@ -5,12 +5,28 @@
+ #ifndef __EDMA_CFG_TX__
+ #define __EDMA_CFG_TX__
++#define EDMA_TX_NAPI_WORK_DEF 512
++#define EDMA_TX_NAPI_WORK_MIN 16
++#define EDMA_TX_NAPI_WORK_MAX 512
++
+ /* Tx mitigation timer's default value. */
+ #define EDMA_TX_MITIGATION_TIMER_DEF  250
++/* Tx mitigation timer's minimum value in microseconds */
++#define EDMA_TX_MITIGATION_TIMER_MIN  0
++
++/* Tx mitigation timer's maximum value in microseconds */
++#define EDMA_TX_MITIGATION_TIMER_MAX  1000
++
+ /* Tx mitigation packet count default value. */
+ #define EDMA_TX_MITIGATION_PKT_CNT_DEF        16
++/* Tx mitigation packet count's minimum value */
++#define EDMA_TX_MITIGATION_PKT_CNT_MIN        0
++
++/* Tx mitigation packet count's maximum value */
++#define EDMA_TX_MITIGATION_PKT_CNT_MAX        256
++
+ void edma_cfg_tx_rings(void);
+ int edma_cfg_tx_rings_alloc(void);
+ void edma_cfg_tx_rings_cleanup(void);
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_rx.h b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+index 4a262a066808..0ef8138b4530 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
+@@ -281,6 +281,10 @@ struct edma_rxdesc_ring {
+       struct sk_buff *last;
+ };
++extern int edma_rx_napi_budget;
++extern int edma_rx_mitigation_timer;
++extern int edma_rx_mitigation_pkt_cnt;
++
+ irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
+ int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
+ int edma_rx_napi_poll(struct napi_struct *napi, int budget);
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_tx.h b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+index c09a4e0f6a42..c4fa63321d1f 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_tx.h
+@@ -288,6 +288,10 @@ struct edma_txcmpl_ring {
+       bool napi_added;
+ };
++extern int edma_tx_napi_budget;
++extern int edma_tx_mitigation_timer;
++extern int edma_tx_mitigation_pkt_cnt;
++
+ enum edma_tx_status edma_tx_ring_xmit(struct net_device *netdev,
+                                     struct sk_buff *skb,
+                              struct edma_txdesc_ring *txdesc_ring,
+-- 
+2.45.2
+
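All of the parameters added here follow one convention: module load never fails on a bad value; instead, the first consumer range-checks the parameter, logs an error and falls back to the built-in default. A condensed sketch of that validate-at-first-use pattern; demo_budget and its bounds are illustrative, not the driver's symbols:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    #define DEMO_BUDGET_DEF 128
    #define DEMO_BUDGET_MIN 16
    #define DEMO_BUDGET_MAX 512

    static int demo_budget = DEMO_BUDGET_DEF;
    module_param(demo_budget, int, 0444);
    MODULE_PARM_DESC(demo_budget, "Demo NAPI budget (default:128, min:16, max:512)");

    /* Called at configuration time, not at module load. */
    static int demo_budget_sanitized(void)
    {
            if (demo_budget < DEMO_BUDGET_MIN || demo_budget > DEMO_BUDGET_MAX) {
                    pr_err("Invalid demo_budget %d, using default %d\n",
                           demo_budget, DEMO_BUDGET_DEF);
                    demo_budget = DEMO_BUDGET_DEF;
            }

            return demo_budget;
    }

A tuning such as edma_rx_napi_budget=256 would then be passed as a module argument when the qcom-ppe module is loaded.
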
diff --git a/target/linux/qualcommbe/patches-6.6/103-45-net-ethernet-qualcomm-Add-sysctl-for-RPS-bitmap.patch b/target/linux/qualcommbe/patches-6.6/103-45-net-ethernet-qualcomm-Add-sysctl-for-RPS-bitmap.patch
new file mode 100644 (file)
index 0000000..a591b3f
--- /dev/null
@@ -0,0 +1,151 @@
+From a36607b554841358733167483d194ae7d3969444 Mon Sep 17 00:00:00 2001
+From: Pavithra R <quic_pavir@quicinc.com>
+Date: Tue, 11 Jun 2024 01:43:22 +0530
+Subject: [PATCH 45/50] net: ethernet: qualcomm: Add sysctl for RPS bitmap
+
+Add sysctl to configure RPS bitmap for EDMA receive.
+This bitmap is used to configure the set of ARM cores
+used to receive packets from EDMA.
+
+Change-Id: Ie0e7d5971db93ea1494608a9e79c4abb13ce69b6
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/edma.c      | 23 ++++++++++++++++
+ drivers/net/ethernet/qualcomm/ppe/edma.h      |  2 ++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c   | 27 +++++++++++++++++++
+ .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h   |  4 +++
+ 4 files changed, 56 insertions(+)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
+index ae9ca528fd55..428c7b134feb 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
+@@ -797,6 +797,11 @@ void edma_destroy(struct ppe_device *ppe_dev)
+       struct edma_ring_info *rx = hw_info->rx;
+       u32 i;
++      if (edma_ctx->rx_rps_ctl_table_hdr) {
++              unregister_sysctl_table(edma_ctx->rx_rps_ctl_table_hdr);
++              edma_ctx->rx_rps_ctl_table_hdr = NULL;
++      }
++
+       /* Disable interrupts. */
+       for (i = 1; i <= hw_info->max_ports; i++)
+               edma_cfg_tx_disable_interrupts(i);
+@@ -840,6 +845,17 @@ void edma_destroy(struct ppe_device *ppe_dev)
+       kfree(edma_ctx->netdev_arr);
+ }
++/* EDMA Rx RPS core sysctl table */
++static struct ctl_table edma_rx_rps_core_table[] = {
++      {
++              .procname       =       "rps_bitmap_cores",
++              .data           =       &edma_cfg_rx_rps_bitmap_cores,
++              .maxlen         =       sizeof(int),
++              .mode           =       0644,
++              .proc_handler   =       edma_cfg_rx_rps_bitmap
++      },
++};
++
+ /**
+  * edma_setup - EDMA Setup.
+  * @ppe_dev: PPE device
+@@ -865,6 +881,13 @@ int edma_setup(struct ppe_device *ppe_dev)
+       if (tx_requeue_stop != 0)
+               edma_ctx->tx_requeue_stop = true;
++      edma_ctx->rx_rps_ctl_table_hdr = register_sysctl("net/edma",
++                                                       edma_rx_rps_core_table);
++      if (!edma_ctx->rx_rps_ctl_table_hdr) {
++              pr_err("Rx rps sysctl table configuration failed\n");
++              return -EINVAL;
++      }
++
+       /* Configure the EDMA common clocks. */
+       ret = edma_clock_init();
+       if (ret) {
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
+index ac6d2fcc2983..3f3d253476f6 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
+@@ -122,6 +122,7 @@ struct edma_intr_info {
+  * @tx_rings: Tx Descriptor Ring, SW is producer
+  * @txcmpl_rings: Tx complete Ring, SW is consumer
+  * @err_stats: Per CPU error statistics
++ * @rx_rps_ctl_table_hdr: Rx RPS sysctl table
+  * @rx_page_mode: Page mode enabled or disabled
+  * @rx_buf_size: Rx buffer size for Jumbo MRU
+  * @tx_requeue_stop: Tx requeue stop enabled or disabled
+@@ -137,6 +138,7 @@ struct edma_context {
+       struct edma_txdesc_ring *tx_rings;
+       struct edma_txcmpl_ring *txcmpl_rings;
+       struct edma_err_stats __percpu *err_stats;
++      struct ctl_table_header *rx_rps_ctl_table_hdr;
+       u32 rx_page_mode;
+       u32 rx_buf_size;
+       bool tx_requeue_stop;
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+index bf8854976328..58021df6c950 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
+@@ -43,6 +43,8 @@ static u32 edma_rx_ring_queue_map[][EDMA_MAX_CORE] = {{ 0, 8, 16, 24 },
+                                               { 6, 14, 22, 30 },
+                                               { 7, 15, 23, 31 }};
++u32 edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
++
+ static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
+ {
+       struct edma_hw_info *hw_info = edma_ctx->hw_info;
+@@ -987,3 +989,28 @@ int edma_cfg_rx_rps_hash_map(void)
+       return 0;
+ }
++
++/* Configure RPS hash mapping based on bitmap */
++int edma_cfg_rx_rps_bitmap(struct ctl_table *table, int write,
++                         void *buffer, size_t *lenp, loff_t *ppos)
++{
++      int ret;
++
++      ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++      if (!write)
++              return ret;
++
++      if (!edma_cfg_rx_rps_bitmap_cores ||
++          edma_cfg_rx_rps_bitmap_cores > EDMA_RX_DEFAULT_BITMAP) {
++              pr_warn("Incorrect CPU bitmap: %x. Setting it to default value: %d\n",
++                      edma_cfg_rx_rps_bitmap_cores, EDMA_RX_DEFAULT_BITMAP);
++              edma_cfg_rx_rps_bitmap_cores = EDMA_RX_DEFAULT_BITMAP;
++      }
++
++      ret = edma_cfg_rx_rps_hash_map();
++
++      pr_info("EDMA RPS bitmap value: %d\n", edma_cfg_rx_rps_bitmap_cores);
++
++      return ret;
++}
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+index bd897dba286a..53d2e6b39794 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
+@@ -49,6 +49,8 @@
+ /* Default bitmap of cores for RPS to ARM cores */
+ #define EDMA_RX_DEFAULT_BITMAP        ((1 << EDMA_MAX_CORE) - 1)
++extern u32 edma_cfg_rx_rps_bitmap_cores;
++
+ int edma_cfg_rx_rings(void);
+ int edma_cfg_rx_rings_alloc(void);
+ void edma_cfg_rx_ring_mappings(void);
+@@ -66,4 +68,6 @@ void edma_cfg_rx_buff_size_setup(void);
+ int edma_cfg_rx_rps_hash_map(void);
+ int edma_cfg_rx_rps(struct ctl_table *table, int write,
+                   void *buffer, size_t *lenp, loff_t *ppos);
++int edma_cfg_rx_rps_bitmap(struct ctl_table *table, int write,
++                         void *buffer, size_t *lenp, loff_t *ppos);
+ #endif
+-- 
+2.45.2
+
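The sysctl added above pairs proc_dointvec with a custom handler: the generic helper parses and stores the integer, and on writes the handler validates the core bitmap and re-programs the RPS hash mapping. A condensed sketch of that handler shape; the demo_* names are illustrative, and the real handler calls edma_cfg_rx_rps_hash_map() where the comment indicates:

    #include <linux/printk.h>
    #include <linux/sysctl.h>

    static int demo_bitmap = 0xf;       /* default: all four cores */

    static int demo_bitmap_handler(struct ctl_table *table, int write,
                                   void *buffer, size_t *lenp, loff_t *ppos)
    {
            int ret = proc_dointvec(table, write, buffer, lenp, ppos);

            if (ret || !write)
                    return ret;

            /* An empty or oversized core mask falls back to the default. */
            if (!demo_bitmap || demo_bitmap > 0xf) {
                    pr_warn("Invalid bitmap 0x%x, using 0xf\n", demo_bitmap);
                    demo_bitmap = 0xf;
            }

            return 0;   /* the driver re-programs the RPS hash map here */
    }

After the patch, the bitmap is exposed as /proc/sys/net/edma/rps_bitmap_cores, and writing a mask there selects which ARM cores receive EDMA traffic.
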
diff --git a/target/linux/qualcommbe/patches-6.6/104-01-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch b/target/linux/qualcommbe/patches-6.6/104-01-dt-bindings-clock-qcom-Add-CMN-PLL-clock-controller-.patch
new file mode 100644 (file)
index 0000000..124a008
--- /dev/null
@@ -0,0 +1,147 @@
+From be826ce36477e94539f5d2dfe292126dbb39b3a4 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 7 Nov 2024 17:50:22 +0800
+Subject: [PATCH 1/5] dt-bindings: clock: qcom: Add CMN PLL clock controller
+ for IPQ SoC
+
+The CMN PLL controller provides clocks to networking hardware blocks
+and to GCC on Qualcomm IPQ9574 SoC. It receives input clock from the
+on-chip Wi-Fi, and produces output clocks at fixed rates. These output
+rates are predetermined, and are unrelated to the input clock rate.
+The primary purpose of CMN PLL is to supply clocks to the networking
+hardware such as PPE (packet process engine), PCS and the externally
+connected switch or PHY device. The CMN PLL block also outputs fixed
+rate clocks to GCC, such as 24 MHZ as XO clock and 32 KHZ as sleep
+clock supplied to GCC.
+
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../bindings/clock/qcom,ipq9574-cmn-pll.yaml  | 85 +++++++++++++++++++
+ include/dt-bindings/clock/qcom,ipq-cmn-pll.h  | 22 +++++
+ 2 files changed, 107 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+ create mode 100644 include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+
+diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+new file mode 100644
+index 000000000000..db8a3ee56067
+--- /dev/null
++++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+@@ -0,0 +1,85 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/clock/qcom,ipq9574-cmn-pll.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm CMN PLL Clock Controller on IPQ SoC
++
++maintainers:
++  - Bjorn Andersson <andersson@kernel.org>
++  - Luo Jie <quic_luoj@quicinc.com>
++
++description:
++  The CMN (or common) PLL clock controller expects a reference
++  input clock. This reference clock is from the on-board Wi-Fi.
++  The CMN PLL supplies a number of fixed rate output clocks to
++  the devices providing networking functions and to GCC. These
++  networking hardware include PPE (packet process engine), PCS
++  and the externally connected switch or PHY devices. The CMN
++  PLL block also outputs fixed rate clocks to GCC. The PLL's
++  primary function is to enable fixed rate output clocks for
++  networking hardware functions used with the IPQ SoC.
++
++properties:
++  compatible:
++    enum:
++      - qcom,ipq9574-cmn-pll
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    items:
++      - description: The reference clock. The supported clock rates include
++          25000000, 31250000, 40000000, 48000000, 50000000 and 96000000 HZ.
++      - description: The AHB clock
++      - description: The SYS clock
++    description:
++      The reference clock is the source clock of CMN PLL, which is from the
++      Wi-Fi. The AHB and SYS clocks must be enabled to access CMN PLL
++      clock registers.
++
++  clock-names:
++    items:
++      - const: ref
++      - const: ahb
++      - const: sys
++
++  "#clock-cells":
++    const: 1
++
++  assigned-clocks:
++    maxItems: 1
++
++  assigned-clock-rates-u64:
++    maxItems: 1
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - "#clock-cells"
++  - assigned-clocks
++  - assigned-clock-rates-u64
++
++additionalProperties: false
++
++examples:
++  - |
++    #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
++    #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
++
++    cmn_pll: clock-controller@9b000 {
++        compatible = "qcom,ipq9574-cmn-pll";
++        reg = <0x0009b000 0x800>;
++        clocks = <&cmn_pll_ref_clk>,
++                 <&gcc GCC_CMN_12GPLL_AHB_CLK>,
++                 <&gcc GCC_CMN_12GPLL_SYS_CLK>;
++        clock-names = "ref", "ahb", "sys";
++        #clock-cells = <1>;
++        assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
++        assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
++    };
++...
+diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+new file mode 100644
+index 000000000000..936e92b3b62c
+--- /dev/null
++++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
++#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
++
++/* CMN PLL core clock. */
++#define CMN_PLL_CLK                   0
++
++/* The output clocks from CMN PLL of IPQ9574. */
++#define XO_24MHZ_CLK                  1
++#define SLEEP_32KHZ_CLK                       2
++#define PCS_31P25MHZ_CLK              3
++#define NSS_1200MHZ_CLK                       4
++#define PPE_353MHZ_CLK                        5
++#define ETH0_50MHZ_CLK                        6
++#define ETH1_50MHZ_CLK                        7
++#define ETH2_50MHZ_CLK                        8
++#define ETH_25MHZ_CLK                 9
++#endif
+-- 
+2.45.2
+
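A hardware block that consumes one of these fixed-rate outputs references the provider by phandle plus the binding's clock index, and retrieves it on the driver side through the ordinary clk API. A hedged sketch of such a consumer, with demo_probe as an illustrative name rather than an actual IPQ9574 driver:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* Consumer node would carry e.g. clocks = <&cmn_pll PPE_353MHZ_CLK>; */
            clk = devm_clk_get_enabled(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
            return 0;
    }
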
diff --git a/target/linux/qualcommbe/patches-6.6/104-02-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch b/target/linux/qualcommbe/patches-6.6/104-02-clk-qcom-Add-CMN-PLL-clock-controller-driver-for-IPQ.patch
new file mode 100644 (file)
index 0000000..8d35650
--- /dev/null
@@ -0,0 +1,509 @@
+From f9ecde8dc380769d1477f01416d2e3a65c4fd881 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 7 Nov 2024 17:50:23 +0800
+Subject: [PATCH 2/5] clk: qcom: Add CMN PLL clock controller driver for IPQ
+ SoC
+
+The CMN PLL clock controller supplies clocks to the hardware
+blocks that together make up the Ethernet function on Qualcomm
+IPQ SoCs and to GCC. Initial support covers the IPQ9574 SoC.
+
+The CMN PLL clock controller expects a reference input clock
+from the on-board Wi-Fi block acting as clock source. The input
+reference clock needs to be configured to one of the supported
+clock rates.
+
+The controller supplies a number of fixed-rate output clocks.
+For the IPQ9574, there is one output clock of 353 MHZ to PPE
+(Packet Process Engine) hardware block, three 50 MHZ output
+clocks and an additional 25 MHZ output clock supplied to the
+connected Ethernet devices. The PLL also supplies a 24 MHZ
+clock as XO and a 32 KHZ sleep clock to GCC, and one 31.25
+MHZ clock to PCS.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ drivers/clk/qcom/Kconfig       |   9 +
+ drivers/clk/qcom/Makefile      |   1 +
+ drivers/clk/qcom/ipq-cmn-pll.c | 436 +++++++++++++++++++++++++++++++++
+ 3 files changed, 446 insertions(+)
+ create mode 100644 drivers/clk/qcom/ipq-cmn-pll.c
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index b9a5cc9fd8c8..3cc7156f881d 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -190,6 +190,15 @@ config IPQ_APSS_6018
+         Say Y if you want to support CPU frequency scaling on
+         ipq based devices.
++config IPQ_CMN_PLL
++      tristate "IPQ CMN PLL Clock Controller"
++      help
++        Support for CMN PLL clock controller on IPQ platform. The
++        CMN PLL consumes the AHB/SYS clocks from GCC and supplies
++        the output clocks to the networking hardware and GCC blocks.
++        Say Y or M if you want to support CMN PLL clock on the IPQ
++        based devices.
++
+ config IPQ_GCC_4019
+       tristate "IPQ4019 Global Clock Controller"
+       help
+diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
+index 65b825a54c45..d12ed80a3021 100644
+--- a/drivers/clk/qcom/Makefile
++++ b/drivers/clk/qcom/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
+ obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
+ obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+ obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
++obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
+ obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+ obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
+ obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
+diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
+new file mode 100644
+index 000000000000..1da8a4a9a8d5
+--- /dev/null
++++ b/drivers/clk/qcom/ipq-cmn-pll.c
+@@ -0,0 +1,436 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/*
++ * CMN PLL block expects the reference clock from on-board Wi-Fi block,
++ * and supplies fixed rate clocks as output to the networking hardware
++ * blocks and to GCC. The networking related blocks include PPE (packet
++ * process engine), the externally connected PHY or switch devices, and
++ * the PCS.
++ *
++ * On the IPQ9574 SoC, there are three clocks with 50 MHZ and one clock
++ * with 25 MHZ which are output from the CMN PLL to Ethernet PHY (or switch),
++ * and one clock with 353 MHZ to PPE. The other fixed rate output clocks
++ * are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
++ * with 31.25 MHZ.
++ *
++ *               +---------+
++ *               |   GCC   |
++ *               +--+---+--+
++ *           AHB CLK|   |SYS CLK
++ *                  V   V
++ *          +-------+---+------+
++ *          |                  +-------------> eth0-50mhz
++ * REF CLK  |     IPQ9574      |
++ * -------->+                  +-------------> eth1-50mhz
++ *          |  CMN PLL block   |
++ *          |                  +-------------> eth2-50mhz
++ *          |                  |
++ *          +----+----+----+---+-------------> eth-25mhz
++ *               |    |    |
++ *               V    V    V
++ *              GCC  PCS  NSS/PPE
++ */
++
++#include <linux/bitfield.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/pm_clock.h>
++#include <linux/pm_runtime.h>
++#include <linux/regmap.h>
++
++#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
++
++#define CMN_PLL_REFCLK_SRC_SELECTION          0x28
++#define CMN_PLL_REFCLK_SRC_DIV                        GENMASK(9, 8)
++
++#define CMN_PLL_LOCKED                                0x64
++#define CMN_PLL_CLKS_LOCKED                   BIT(8)
++
++#define CMN_PLL_POWER_ON_AND_RESET            0x780
++#define CMN_ANA_EN_SW_RSTN                    BIT(6)
++
++#define CMN_PLL_REFCLK_CONFIG                 0x784
++#define CMN_PLL_REFCLK_EXTERNAL                       BIT(9)
++#define CMN_PLL_REFCLK_DIV                    GENMASK(8, 4)
++#define CMN_PLL_REFCLK_INDEX                  GENMASK(3, 0)
++
++#define CMN_PLL_CTRL                          0x78c
++#define CMN_PLL_CTRL_LOCK_DETECT_EN           BIT(15)
++
++#define CMN_PLL_DIVIDER_CTRL                  0x794
++#define CMN_PLL_DIVIDER_CTRL_FACTOR           GENMASK(9, 0)
++
++/**
++ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
++ * @id:       Clock specifier to be supplied
++ * @name: Clock name to be registered
++ * @rate: Clock rate
++ */
++struct cmn_pll_fixed_output_clk {
++      unsigned int id;
++      const char *name;
++      unsigned long rate;
++};
++
++/**
++ * struct clk_cmn_pll - CMN PLL hardware specific data
++ * @regmap: hardware regmap.
++ * @hw: handle between common and hardware-specific interfaces
++ */
++struct clk_cmn_pll {
++      struct regmap *regmap;
++      struct clk_hw hw;
++};
++
++#define CLK_PLL_OUTPUT(_id, _name, _rate) {           \
++      .id =           _id,                            \
++      .name =         _name,                          \
++      .rate =         _rate,                          \
++}
++
++#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
++
++static const struct regmap_config ipq_cmn_pll_regmap_config = {
++      .reg_bits = 32,
++      .reg_stride = 4,
++      .val_bits = 32,
++      .max_register = 0x7fc,
++      .fast_io = true,
++};
++
++static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
++      CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
++      CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
++      CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
++      CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
++      CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
++      CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
++      CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
++      CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
++      CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
++};
++
++/*
++ * The CMN PLL has a single parent clock, which supports several
++ * possible parent clock rates. Each parent clock rate is reflected
++ * by a specific reference index value in the hardware.
++ */
++static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
++{
++      int index = -EINVAL;
++
++      switch (parent_rate) {
++      case 25000000:
++              index = 3;
++              break;
++      case 31250000:
++              index = 4;
++              break;
++      case 40000000:
++              index = 6;
++              break;
++      case 48000000:
++      case 96000000:
++              /*
++               * Parent clock rates 48 MHZ and 96 MHZ share the same
++               * reference clock index. For 96 MHZ, the source clock
++               * divider additionally needs to be programmed as 2.
++               */
++              index = 7;
++              break;
++      case 50000000:
++              index = 8;
++              break;
++      default:
++              break;
++      }
++
++      return index;
++}
++
++static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
++                                           unsigned long parent_rate)
++{
++      struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
++      u32 val, factor;
++
++      /*
++       * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
++       * by HW according to the parent clock rate.
++       */
++      regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
++      factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
++
++      return parent_rate * 2 * factor;
++}
++
++static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
++                                    struct clk_rate_request *req)
++{
++      int ret;
++
++      /* Validate the rate of the single parent clock. */
++      ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
++
++      return ret < 0 ? ret : 0;
++}
++
++/*
++ * This function initializes the CMN PLL to enable the fixed rate
++ * output clocks. The PLL is expected to be configured only once.
++ */
++static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
++                              unsigned long parent_rate)
++{
++      struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
++      int ret, index;
++      u32 val;
++
++      /*
++       * Configure the reference input clock selection according to the
++       * given parent clock rate. The output clock rates are always fixed.
++       */
++      index = ipq_cmn_pll_find_freq_index(parent_rate);
++      if (index < 0)
++              return index;
++
++      ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
++                               CMN_PLL_REFCLK_INDEX,
++                               FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
++      if (ret)
++              return ret;
++
++      /*
++       * When the parent clock rate is 96 MHZ, update the source clock
++       * rate selection and program the source clock divider as 2.
++       */
++      if (parent_rate == 96000000) {
++              ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
++                                       CMN_PLL_REFCLK_DIV,
++                                       FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
++              if (ret)
++                      return ret;
++
++              ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
++                                       CMN_PLL_REFCLK_SRC_DIV,
++                                       FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
++              if (ret)
++                      return ret;
++      }
++
++      /* Enable PLL locked detect. */
++      ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_CTRL,
++                               CMN_PLL_CTRL_LOCK_DETECT_EN,
++                               CMN_PLL_CTRL_LOCK_DETECT_EN);
++      if (ret)
++              return ret;
++
++      /*
++       * Reset the CMN PLL block to ensure the updated configurations
++       * take effect.
++       */
++      ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
++                               CMN_ANA_EN_SW_RSTN, 0);
++      if (ret)
++              return ret;
++
++      usleep_range(1000, 1200);
++      ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
++                               CMN_ANA_EN_SW_RSTN, CMN_ANA_EN_SW_RSTN);
++      if (ret)
++              return ret;
++
++      /* Stability check of CMN PLL output clocks. */
++      return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
++                                      (val & CMN_PLL_CLKS_LOCKED),
++                                      100, 100 * USEC_PER_MSEC);
++}
++
++static const struct clk_ops clk_cmn_pll_ops = {
++      .recalc_rate = clk_cmn_pll_recalc_rate,
++      .determine_rate = clk_cmn_pll_determine_rate,
++      .set_rate = clk_cmn_pll_set_rate,
++};
++
++static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
++{
++      struct clk_parent_data pdata = { .index = 0 };
++      struct device *dev = &pdev->dev;
++      struct clk_init_data init = {};
++      struct clk_cmn_pll *cmn_pll;
++      struct regmap *regmap;
++      void __iomem *base;
++      int ret;
++
++      base = devm_platform_ioremap_resource(pdev, 0);
++      if (IS_ERR(base))
++              return ERR_CAST(base);
++
++      regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
++      if (IS_ERR(regmap))
++              return ERR_CAST(regmap);
++
++      cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
++      if (!cmn_pll)
++              return ERR_PTR(-ENOMEM);
++
++      init.name = "cmn_pll";
++      init.parent_data = &pdata;
++      init.num_parents = 1;
++      init.ops = &clk_cmn_pll_ops;
++
++      cmn_pll->hw.init = &init;
++      cmn_pll->regmap = regmap;
++
++      ret = devm_clk_hw_register(dev, &cmn_pll->hw);
++      if (ret)
++              return ERR_PTR(ret);
++
++      return &cmn_pll->hw;
++}
++
++static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
++{
++      const struct cmn_pll_fixed_output_clk *fixed_clk;
++      struct clk_hw_onecell_data *hw_data;
++      struct device *dev = &pdev->dev;
++      struct clk_hw *cmn_pll_hw;
++      unsigned int num_clks;
++      struct clk_hw *hw;
++      int ret, i;
++
++      fixed_clk = ipq9574_output_clks;
++      num_clks = ARRAY_SIZE(ipq9574_output_clks);
++
++      hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
++                             GFP_KERNEL);
++      if (!hw_data)
++              return -ENOMEM;
++
++      /*
++       * Register the CMN PLL clock, which is the parent clock of
++       * the fixed rate output clocks.
++       */
++      cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
++      if (IS_ERR(cmn_pll_hw))
++              return PTR_ERR(cmn_pll_hw);
++
++      /* Register the fixed rate output clocks. */
++      for (i = 0; i < num_clks; i++) {
++              hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
++                                                        cmn_pll_hw, 0,
++                                                        fixed_clk[i].rate);
++              if (IS_ERR(hw)) {
++                      ret = PTR_ERR(hw);
++                      goto unregister_fixed_clk;
++              }
++
++              hw_data->hws[fixed_clk[i].id] = hw;
++      }
++
++      /*
++       * Provide the CMN PLL clock. The clock rate of the CMN PLL is
++       * configured to 12 GHZ by the DT property assigned-clock-rates-u64.
++       */
++      hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
++      hw_data->num = num_clks + 1;
++
++      ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
++      if (ret)
++              goto unregister_fixed_clk;
++
++      platform_set_drvdata(pdev, hw_data);
++
++      return 0;
++
++unregister_fixed_clk:
++      while (i > 0)
++              clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
++
++      return ret;
++}
++
++static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
++{
++      struct device *dev = &pdev->dev;
++      int ret;
++
++      ret = devm_pm_runtime_enable(dev);
++      if (ret)
++              return ret;
++
++      ret = devm_pm_clk_create(dev);
++      if (ret)
++              return ret;
++
++      /*
++       * To access the CMN PLL registers, the GCC AHB & SYS clocks
++       * of the CMN PLL block need to be enabled.
++       */
++      ret = pm_clk_add(dev, "ahb");
++      if (ret)
++              return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
++
++      ret = pm_clk_add(dev, "sys");
++      if (ret)
++              return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
++
++      ret = pm_runtime_resume_and_get(dev);
++      if (ret)
++              return ret;
++
++      /* Register CMN PLL clock and fixed rate output clocks. */
++      ret = ipq_cmn_pll_register_clks(pdev);
++      pm_runtime_put(dev);
++      if (ret)
++              return dev_err_probe(dev, ret,
++                                   "Failed to register CMN PLL clocks\n");
++
++      return 0;
++}
++
++static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
++{
++      struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
++      int i;
++
++      /*
++       * The clock with index CMN_PLL_CLK is unregistered by
++       * device management.
++       */
++      for (i = 0; i < hw_data->num; i++) {
++              if (i != CMN_PLL_CLK)
++                      clk_hw_unregister(hw_data->hws[i]);
++      }
++}
++
++static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
++      SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
++};
++
++static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
++      { .compatible = "qcom,ipq9574-cmn-pll", },
++      { }
++};
++MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
++
++static struct platform_driver ipq_cmn_pll_clk_driver = {
++      .probe = ipq_cmn_pll_clk_probe,
++      .remove_new = ipq_cmn_pll_clk_remove,
++      .driver = {
++              .name = "ipq_cmn_pll",
++              .of_match_table = ipq_cmn_pll_clk_ids,
++              .pm = &ipq_cmn_pll_pm_ops,
++      },
++};
++module_platform_driver(ipq_cmn_pll_clk_driver);
++
++MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
++MODULE_LICENSE("GPL");
+-- 
+2.45.2
+
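For readers tracing how these fixed-rate outputs are consumed: a peripheral driver goes through the standard common clock framework API. The sketch below is a hedged illustration only, not part of the patch; the device, the "eth_ref" clock-names entry and the consumer node are assumptions, with the consumer presumed to reference <&cmn_pll ETH0_50MHZ_CLK> in its DT node.

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: grab the 50 MHZ Ethernet reference output. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *ref;
	int ret;

	/* "eth_ref" is an assumed clock-names entry in the consumer node. */
	ref = devm_clk_get(&pdev->dev, "eth_ref");
	if (IS_ERR(ref))
		return PTR_ERR(ref);

	ret = clk_prepare_enable(ref);
	if (ret)
		return ret;

	/* The CMN PLL outputs are fixed rate; this reports 50000000. */
	dev_info(&pdev->dev, "ref rate: %lu\n", clk_get_rate(ref));

	return 0;
}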
diff --git a/target/linux/qualcommbe/patches-6.6/104-04-arm64-dts-qcom-Add-CMN-PLL-node-for-IPQ9574-SoC.patch b/target/linux/qualcommbe/patches-6.6/104-04-arm64-dts-qcom-Add-CMN-PLL-node-for-IPQ9574-SoC.patch
new file mode 100644
index 0000000..1bf4063
--- /dev/null
@@ -0,0 +1,125 @@
+From ed97b7b7c657baf9c9d8e9dfebd9f7703c870593 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 7 Nov 2024 17:50:25 +0800
+Subject: [PATCH 4/5] arm64: dts: qcom: Add CMN PLL node for IPQ9574 SoC
+
+The CMN PLL clock controller allows selection of an input clock rate
+from a defined set of input clock rates. It in turn supplies fixed
+rate output clocks to the hardware blocks that provide the Ethernet
+functions, such as PPE (Packet Process Engine) and the connected
+switch or PHY, and to GCC.
+
+The reference clock of the CMN PLL is routed from XO to the CMN PLL
+through the internal WiFi block:
+XO (48 MHZ or 96 MHZ)-->WiFi (multiplier/divider)-->48 MHZ to CMN PLL.
+
+The reference input clock from WiFi to the CMN PLL is fully controlled
+by the bootstrap pins, which select the XO frequency (48 MHZ or 96 MHZ).
+Based on this frequency, the divider in the internal WiFi block is
+automatically configured by hardware (1 for 48 MHZ, 2 for 96 MHZ) to
+ensure that the output clock to the CMN PLL is 48 MHZ.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ .../boot/dts/qcom/ipq9574-rdp-common.dtsi     | 16 +++++++++++-
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi         | 26 ++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+index 91e104b0f865..78f6a2e053d5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -3,7 +3,7 @@
+  * IPQ9574 RDP board common device tree source
+  *
+  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ /dts-v1/;
+@@ -164,6 +164,20 @@ &usb3 {
+       status = "okay";
+ };
++/*
++ * The bootstrap pins for the board select the XO clock frequency,
++ * which automatically enables the right dividers to ensure the
++ * reference clock output from WiFi is 48 MHZ.
++ */
++&ref_48mhz_clk {
++      clock-div = <1>;
++      clock-mult = <1>;
++};
++
+ &xo_board_clk {
+       clock-frequency = <24000000>;
+ };
++
++&xo_clk {
++      clock-frequency = <48000000>;
++};
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index c113fff22f73..dc4965abff58 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -3,10 +3,11 @@
+  * IPQ9574 SoC device tree source
+  *
+  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ #include <dt-bindings/clock/qcom,apss-ipq.h>
++#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+ #include <dt-bindings/interconnect/qcom,ipq9574.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+@@ -21,6 +22,12 @@ / {
+       #size-cells = <2>;
+       clocks {
++              ref_48mhz_clk: ref-48mhz-clk {
++                      compatible = "fixed-factor-clock";
++                      clocks = <&xo_clk>;
++                      #clock-cells = <0>;
++              };
++
+               sleep_clk: sleep-clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+@@ -30,6 +37,11 @@ xo_board_clk: xo-board-clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+               };
++
++              xo_clk: xo-clk {
++                      compatible = "fixed-clock";
++                      #clock-cells = <0>;
++              };
+       };
+       cpus {
+@@ -245,6 +257,18 @@ mdio: mdio@90000 {
+                       status = "disabled";
+               };
++              cmn_pll: clock-controller@9b000 {
++                      compatible = "qcom,ipq9574-cmn-pll";
++                      reg = <0x0009b000 0x800>;
++                      clocks = <&ref_48mhz_clk>,
++                               <&gcc GCC_CMN_12GPLL_AHB_CLK>,
++                               <&gcc GCC_CMN_12GPLL_SYS_CLK>;
++                      clock-names = "ref", "ahb", "sys";
++                      #clock-cells = <1>;
++                      assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
++                      assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
++              };
++
+               qfprom: efuse@a4000 {
+                       compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
+                       reg = <0x000a4000 0x5a1>;
+-- 
+2.45.2
+
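As a sanity check on the numbers in this patch: clk_cmn_pll_recalc_rate() in the driver above computes the PLL rate as parent_rate * 2 * factor, so the 48 MHZ reference selected here and the 12 GHZ target set via assigned-clock-rates-u64 imply a hardware divider factor of 125. A minimal sketch of that arithmetic, with an illustrative helper name:

#include <linux/math64.h>

/*
 * clk_cmn_pll_recalc_rate() computes rate = parent_rate * 2 * factor,
 * so the hardware-maintained divider factor for the 12 GHZ target is:
 *   12000000000 / (2 * 48000000) = 125
 */
static u64 cmn_pll_expected_factor(unsigned long parent_rate, u64 target_rate)
{
	return div64_ul(target_rate, 2 * parent_rate);
}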
diff --git a/target/linux/qualcommbe/patches-6.6/104-05-arm64-dts-qcom-Update-IPQ9574-xo_board_clk-to-use-fi.patch b/target/linux/qualcommbe/patches-6.6/104-05-arm64-dts-qcom-Update-IPQ9574-xo_board_clk-to-use-fi.patch
new file mode 100644
index 0000000..5fbd85c
--- /dev/null
@@ -0,0 +1,52 @@
+From dcb1e63fbc695c3971d7207238a78f66355a2f9a Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 7 Nov 2024 17:50:26 +0800
+Subject: [PATCH 5/5] arm64: dts: qcom: Update IPQ9574 xo_board_clk to use
+ fixed factor clock
+
+xo_board_clk is fixed to 24 MHZ, which is routed from the 48 MHZ WiFi
+output clock (also the reference clock of the CMN PLL), divided by 2
+in the analog block routing channel.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi | 7 ++++++-
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi            | 3 ++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+index 78f6a2e053d5..9a8692377176 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -174,8 +174,13 @@ &ref_48mhz_clk {
+       clock-mult = <1>;
+ };
++/*
++ * The frequency of xo_board_clk is fixed to 24 MHZ, derived from the
++ * 48 MHZ WiFi output clock divided by 2.
++ */
+ &xo_board_clk {
+-      clock-frequency = <24000000>;
++      clock-div = <2>;
++      clock-mult = <1>;
+ };
+ &xo_clk {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index dc4965abff58..376b75976524 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -34,7 +34,8 @@ sleep_clk: sleep-clk {
+               };
+               xo_board_clk: xo-board-clk {
+-                      compatible = "fixed-clock";
++                      compatible = "fixed-factor-clock";
++                      clocks = <&ref_48mhz_clk>;
+                       #clock-cells = <0>;
+               };
+-- 
+2.45.2
+
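The fixed-factor-clock binding used here has a direct equivalent in the common clock framework. As a hedged sketch (the function name is illustrative, and in practice the DT core registers this clock automatically from the binding above), the same 48 MHZ / 2 = 24 MHZ relation expressed in C would look like:

#include <linux/clk-provider.h>

/*
 * Equivalent of the DT node above: rate = parent * mult / div,
 * i.e. 48000000 * 1 / 2 = 24000000.
 */
static struct clk_hw *register_xo_board_clk(struct device *dev)
{
	return clk_hw_register_fixed_factor(dev, "xo_board_clk",
					    "ref_48mhz_clk", 0,
					    1 /* mult */, 2 /* div */);
}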
diff --git a/target/linux/qualcommbe/patches-6.6/105-01-clk-qcom-clk-alpha-pll-Add-NSS-HUAYRA-ALPHA-PLL-supp.patch b/target/linux/qualcommbe/patches-6.6/105-01-clk-qcom-clk-alpha-pll-Add-NSS-HUAYRA-ALPHA-PLL-supp.patch
new file mode 100644
index 0000000..80ba02b
--- /dev/null
@@ -0,0 +1,55 @@
+From 4305650c92eef5921cc140c999eccbb6de1ab4b8 Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:14 +0530
+Subject: [PATCH 1/7] clk: qcom: clk-alpha-pll: Add NSS HUAYRA ALPHA PLL
+ support for ipq9574
+
+Add support for the NSS Huayra alpha PLL found on ipq9574 SoCs.
+The programming sequence is the same as that of the Huayra type
+alpha PLL, so we can reuse it.
+
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+---
+ drivers/clk/qcom/clk-alpha-pll.c | 11 +++++++++++
+ drivers/clk/qcom/clk-alpha-pll.h |  1 +
+ 2 files changed, 12 insertions(+)
+
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index f9105443d7db..c2e56e9403ff 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+               [PLL_OFF_OPMODE] = 0x30,
+               [PLL_OFF_STATUS] = 0x3c,
+       },
++      [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] =  {
++              [PLL_OFF_L_VAL] = 0x04,
++              [PLL_OFF_ALPHA_VAL] = 0x08,
++              [PLL_OFF_TEST_CTL] = 0x0c,
++              [PLL_OFF_TEST_CTL_U] = 0x10,
++              [PLL_OFF_USER_CTL] = 0x14,
++              [PLL_OFF_CONFIG_CTL] = 0x18,
++              [PLL_OFF_CONFIG_CTL_U] = 0x1c,
++              [PLL_OFF_STATUS] = 0x20,
++      },
++
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index 55eca04b23a1..c6d1b8429f95 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -32,6 +32,7 @@ enum {
+       CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
+       CLK_ALPHA_PLL_TYPE_STROMER,
+       CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
++      CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
+       CLK_ALPHA_PLL_TYPE_MAX,
+ };
+-- 
+2.45.2
+
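For orientation: each entry of clk_alpha_pll_regs is a per-PLL-type table of register offsets, which the clk-alpha-pll helpers add to a PLL's base offset when accessing registers. A rough sketch of that lookup, assuming the usual offset-plus-table addressing and using the ubi32 PLL base (0x28000) from later in this series as the example:

/*
 * Illustrative only, assuming pll->offset + pll->regs[PLL_OFF_*]
 * addressing. With the NSS Huayra table above and base 0x28000:
 *   L value register: 0x28000 + 0x04 = 0x28004
 *   CONFIG_CTL:       0x28000 + 0x18 = 0x28018
 *   STATUS:           0x28000 + 0x20 = 0x28020
 */
static u32 nss_huayra_reg(const struct clk_alpha_pll *pll, unsigned int off)
{
	return pll->offset + pll->regs[off];
}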
diff --git a/target/linux/qualcommbe/patches-6.6/105-02-dt-bindings-clock-gcc-ipq9574-Add-definition-for-GPL.patch b/target/linux/qualcommbe/patches-6.6/105-02-dt-bindings-clock-gcc-ipq9574-Add-definition-for-GPL.patch
new file mode 100644
index 0000000..4cb241c
--- /dev/null
@@ -0,0 +1,28 @@
+From 47f539afdab8fb99d4c047add3e1a1b1dc5a3f2d Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:15 +0530
+Subject: [PATCH 2/7] dt-bindings: clock: gcc-ipq9574: Add definition for
+ GPLL0_OUT_AUX
+
+Add the definition for GPLL0_OUT_AUX clock.
+
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+---
+ include/dt-bindings/clock/qcom,ipq9574-gcc.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+index 52123c5a09fa..05ef3074c9da 100644
+--- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
++++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+@@ -220,4 +220,5 @@
+ #define GCC_PCIE1_PIPE_CLK                            211
+ #define GCC_PCIE2_PIPE_CLK                            212
+ #define GCC_PCIE3_PIPE_CLK                            213
++#define GPLL0_OUT_AUX                                 214
+ #endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/105-03-clk-qcom-gcc-ipq9574-Add-support-for-gpll0_out_aux-c.patch b/target/linux/qualcommbe/patches-6.6/105-03-clk-qcom-gcc-ipq9574-Add-support-for-gpll0_out_aux-c.patch
new file mode 100644
index 0000000..e90b855
--- /dev/null
@@ -0,0 +1,52 @@
+From ac2bd244609c4423f96406005c9cee8b6952cd20 Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:16 +0530
+Subject: [PATCH 3/7] clk: qcom: gcc-ipq9574: Add support for gpll0_out_aux
+ clock
+
+Add support for gpll0_out_aux clock which acts as the parent for
+certain networking subsystem (nss) clocks.
+
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+---
+ drivers/clk/qcom/gcc-ipq9574.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
+index 645109f75b46..a458d69e1a98 100644
+--- a/drivers/clk/qcom/gcc-ipq9574.c
++++ b/drivers/clk/qcom/gcc-ipq9574.c
+@@ -108,6 +108,20 @@ static struct clk_alpha_pll_postdiv gpll0 = {
+       },
+ };
++static struct clk_alpha_pll_postdiv gpll0_out_aux = {
++      .offset = 0x20000,
++      .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "gpll0_out_aux",
++              .parent_hws = (const struct clk_hw *[]) {
++                      &gpll0_main.clkr.hw
++              },
++              .num_parents = 1,
++              .ops = &clk_alpha_pll_postdiv_ro_ops,
++      },
++};
++
+ static struct clk_alpha_pll gpll4_main = {
+       .offset = 0x22000,
+       .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+@@ -4222,6 +4236,7 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
+       [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
+       [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
+       [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
++      [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
+ };
+ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/105-04-dt-bindings-clock-Add-ipq9574-NSSCC-clock-and-reset-.patch b/target/linux/qualcommbe/patches-6.6/105-04-dt-bindings-clock-Add-ipq9574-NSSCC-clock-and-reset-.patch
new file mode 100644 (file)
index 0000000..1efd6ac
--- /dev/null
@@ -0,0 +1,400 @@
+From 3d98604921d4b7216d3d0c8a76160dce083bd040 Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:17 +0530
+Subject: [PATCH 4/7] dt-bindings: clock: Add ipq9574 NSSCC clock and reset
+ definitions
+
+Add NSSCC clock and reset definitions for ipq9574.
+
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+---
+ .../bindings/clock/qcom,ipq9574-nsscc.yaml    |  73 +++++++++
+ .../dt-bindings/clock/qcom,ipq9574-nsscc.h    | 152 ++++++++++++++++++
+ .../dt-bindings/reset/qcom,ipq9574-nsscc.h    | 134 +++++++++++++++
+ 3 files changed, 359 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml
+ create mode 100644 include/dt-bindings/clock/qcom,ipq9574-nsscc.h
+ create mode 100644 include/dt-bindings/reset/qcom,ipq9574-nsscc.h
+
+diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml
+new file mode 100644
+index 000000000000..14a320079dbf
+--- /dev/null
++++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-nsscc.yaml
+@@ -0,0 +1,73 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/clock/qcom,ipq9574-nsscc.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm Networking Sub System Clock & Reset Controller on IPQ9574
++
++maintainers:
++  - Bjorn Andersson <andersson@kernel.org>
++  - Anusha Rao <quic_anusha@quicinc.com>
++
++description: |
++  Qualcomm networking sub system clock control module provides the clocks,
++  resets and power domains on IPQ9574
++
++  See also::
++    include/dt-bindings/clock/qcom,ipq9574-nsscc.h
++    include/dt-bindings/reset/qcom,ipq9574-nsscc.h
++
++properties:
++  compatible:
++    const: qcom,ipq9574-nsscc
++
++  clocks:
++    items:
++      - description: Board XO source
++      - description: CMN_PLL NSS 1200MHz (Bias PLL cc) clock source
++      - description: CMN_PLL PPE 353MHz (Bias PLL ubi nc) clock source
++      - description: GCC GPLL0 OUT AUX clock source
++      - description: Uniphy0 NSS Rx clock source
++      - description: Uniphy0 NSS Tx clock source
++      - description: Uniphy1 NSS Rx clock source
++      - description: Uniphy1 NSS Tx clock source
++      - description: Uniphy2 NSS Rx clock source
++      - description: Uniphy2 NSS Tx clock source
++      - description: GCC NSSCC clock source
++
++  '#interconnect-cells':
++    const: 1
++
++required:
++  - compatible
++  - clocks
++
++allOf:
++  - $ref: qcom,gcc.yaml#
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
++    #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
++    clock-controller@39b00000 {
++      compatible = "qcom,ipq9574-nsscc";
++      reg = <0x39b00000 0x80000>;
++      clocks = <&xo_board_clk>,
++               <&cmn_pll NSS_1200MHZ_CLK>,
++               <&cmn_pll PPE_353MHZ_CLK>,
++               <&gcc GPLL0_OUT_AUX>,
++               <&uniphy 0>,
++               <&uniphy 1>,
++               <&uniphy 2>,
++               <&uniphy 3>,
++               <&uniphy 4>,
++               <&uniphy 5>,
++               <&gcc GCC_NSSCC_CLK>;
++      #clock-cells = <1>;
++      #reset-cells = <1>;
++      #power-domain-cells = <1>;
++    };
++...
+diff --git a/include/dt-bindings/clock/qcom,ipq9574-nsscc.h b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
+new file mode 100644
+index 000000000000..59d57d9c788c
+--- /dev/null
++++ b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
+@@ -0,0 +1,152 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
++#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
++
++#define NSS_CC_CE_APB_CLK                                     0
++#define NSS_CC_CE_AXI_CLK                                     1
++#define NSS_CC_CE_CLK_SRC                                     2
++#define NSS_CC_CFG_CLK_SRC                                    3
++#define NSS_CC_CLC_AXI_CLK                                    4
++#define NSS_CC_CLC_CLK_SRC                                    5
++#define NSS_CC_CRYPTO_CLK                                     6
++#define NSS_CC_CRYPTO_CLK_SRC                                 7
++#define NSS_CC_CRYPTO_PPE_CLK                                 8
++#define NSS_CC_HAQ_AHB_CLK                                    9
++#define NSS_CC_HAQ_AXI_CLK                                    10
++#define NSS_CC_HAQ_CLK_SRC                                    11
++#define NSS_CC_IMEM_AHB_CLK                                   12
++#define NSS_CC_IMEM_CLK_SRC                                   13
++#define NSS_CC_IMEM_QSB_CLK                                   14
++#define NSS_CC_INT_CFG_CLK_SRC                                        15
++#define NSS_CC_NSS_CSR_CLK                                    16
++#define NSS_CC_NSSNOC_CE_APB_CLK                              17
++#define NSS_CC_NSSNOC_CE_AXI_CLK                              18
++#define NSS_CC_NSSNOC_CLC_AXI_CLK                             19
++#define NSS_CC_NSSNOC_CRYPTO_CLK                              20
++#define NSS_CC_NSSNOC_HAQ_AHB_CLK                             21
++#define NSS_CC_NSSNOC_HAQ_AXI_CLK                             22
++#define NSS_CC_NSSNOC_IMEM_AHB_CLK                            23
++#define NSS_CC_NSSNOC_IMEM_QSB_CLK                            24
++#define NSS_CC_NSSNOC_NSS_CSR_CLK                             25
++#define NSS_CC_NSSNOC_PPE_CFG_CLK                             26
++#define NSS_CC_NSSNOC_PPE_CLK                                 27
++#define NSS_CC_NSSNOC_UBI32_AHB0_CLK                          28
++#define NSS_CC_NSSNOC_UBI32_AXI0_CLK                          29
++#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK                      30
++#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK                     31
++#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK                               32
++#define NSS_CC_PORT1_MAC_CLK                                  33
++#define NSS_CC_PORT1_RX_CLK                                   34
++#define NSS_CC_PORT1_RX_CLK_SRC                                       35
++#define NSS_CC_PORT1_RX_DIV_CLK_SRC                           36
++#define NSS_CC_PORT1_TX_CLK                                   37
++#define NSS_CC_PORT1_TX_CLK_SRC                                       38
++#define NSS_CC_PORT1_TX_DIV_CLK_SRC                           39
++#define NSS_CC_PORT2_MAC_CLK                                  40
++#define NSS_CC_PORT2_RX_CLK                                   41
++#define NSS_CC_PORT2_RX_CLK_SRC                                       42
++#define NSS_CC_PORT2_RX_DIV_CLK_SRC                           43
++#define NSS_CC_PORT2_TX_CLK                                   44
++#define NSS_CC_PORT2_TX_CLK_SRC                                       45
++#define NSS_CC_PORT2_TX_DIV_CLK_SRC                           46
++#define NSS_CC_PORT3_MAC_CLK                                  47
++#define NSS_CC_PORT3_RX_CLK                                   48
++#define NSS_CC_PORT3_RX_CLK_SRC                                       49
++#define NSS_CC_PORT3_RX_DIV_CLK_SRC                           50
++#define NSS_CC_PORT3_TX_CLK                                   51
++#define NSS_CC_PORT3_TX_CLK_SRC                                       52
++#define NSS_CC_PORT3_TX_DIV_CLK_SRC                           53
++#define NSS_CC_PORT4_MAC_CLK                                  54
++#define NSS_CC_PORT4_RX_CLK                                   55
++#define NSS_CC_PORT4_RX_CLK_SRC                                       56
++#define NSS_CC_PORT4_RX_DIV_CLK_SRC                           57
++#define NSS_CC_PORT4_TX_CLK                                   58
++#define NSS_CC_PORT4_TX_CLK_SRC                                       59
++#define NSS_CC_PORT4_TX_DIV_CLK_SRC                           60
++#define NSS_CC_PORT5_MAC_CLK                                  61
++#define NSS_CC_PORT5_RX_CLK                                   62
++#define NSS_CC_PORT5_RX_CLK_SRC                                       63
++#define NSS_CC_PORT5_RX_DIV_CLK_SRC                           64
++#define NSS_CC_PORT5_TX_CLK                                   65
++#define NSS_CC_PORT5_TX_CLK_SRC                                       66
++#define NSS_CC_PORT5_TX_DIV_CLK_SRC                           67
++#define NSS_CC_PORT6_MAC_CLK                                  68
++#define NSS_CC_PORT6_RX_CLK                                   69
++#define NSS_CC_PORT6_RX_CLK_SRC                                       70
++#define NSS_CC_PORT6_RX_DIV_CLK_SRC                           71
++#define NSS_CC_PORT6_TX_CLK                                   72
++#define NSS_CC_PORT6_TX_CLK_SRC                                       73
++#define NSS_CC_PORT6_TX_DIV_CLK_SRC                           74
++#define NSS_CC_PPE_CLK_SRC                                    75
++#define NSS_CC_PPE_EDMA_CFG_CLK                                       76
++#define NSS_CC_PPE_EDMA_CLK                                   77
++#define NSS_CC_PPE_SWITCH_BTQ_CLK                             78
++#define NSS_CC_PPE_SWITCH_CFG_CLK                             79
++#define NSS_CC_PPE_SWITCH_CLK                                 80
++#define NSS_CC_PPE_SWITCH_IPE_CLK                             81
++#define NSS_CC_UBI0_CLK_SRC                                   82
++#define NSS_CC_UBI0_DIV_CLK_SRC                                       83
++#define NSS_CC_UBI1_CLK_SRC                                   84
++#define NSS_CC_UBI1_DIV_CLK_SRC                                       85
++#define NSS_CC_UBI2_CLK_SRC                                   86
++#define NSS_CC_UBI2_DIV_CLK_SRC                                       87
++#define NSS_CC_UBI32_AHB0_CLK                                 88
++#define NSS_CC_UBI32_AHB1_CLK                                 89
++#define NSS_CC_UBI32_AHB2_CLK                                 90
++#define NSS_CC_UBI32_AHB3_CLK                                 91
++#define NSS_CC_UBI32_AXI0_CLK                                 92
++#define NSS_CC_UBI32_AXI1_CLK                                 93
++#define NSS_CC_UBI32_AXI2_CLK                                 94
++#define NSS_CC_UBI32_AXI3_CLK                                 95
++#define NSS_CC_UBI32_CORE0_CLK                                        96
++#define NSS_CC_UBI32_CORE1_CLK                                        97
++#define NSS_CC_UBI32_CORE2_CLK                                        98
++#define NSS_CC_UBI32_CORE3_CLK                                        99
++#define NSS_CC_UBI32_INTR0_AHB_CLK                            100
++#define NSS_CC_UBI32_INTR1_AHB_CLK                            101
++#define NSS_CC_UBI32_INTR2_AHB_CLK                            102
++#define NSS_CC_UBI32_INTR3_AHB_CLK                            103
++#define NSS_CC_UBI32_NC_AXI0_CLK                              104
++#define NSS_CC_UBI32_NC_AXI1_CLK                              105
++#define NSS_CC_UBI32_NC_AXI2_CLK                              106
++#define NSS_CC_UBI32_NC_AXI3_CLK                              107
++#define NSS_CC_UBI32_UTCM0_CLK                                        108
++#define NSS_CC_UBI32_UTCM1_CLK                                        109
++#define NSS_CC_UBI32_UTCM2_CLK                                        110
++#define NSS_CC_UBI32_UTCM3_CLK                                        111
++#define NSS_CC_UBI3_CLK_SRC                                   112
++#define NSS_CC_UBI3_DIV_CLK_SRC                                       113
++#define NSS_CC_UBI_AXI_CLK_SRC                                        114
++#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC                               115
++#define NSS_CC_UNIPHY_PORT1_RX_CLK                            116
++#define NSS_CC_UNIPHY_PORT1_TX_CLK                            117
++#define NSS_CC_UNIPHY_PORT2_RX_CLK                            118
++#define NSS_CC_UNIPHY_PORT2_TX_CLK                            119
++#define NSS_CC_UNIPHY_PORT3_RX_CLK                            120
++#define NSS_CC_UNIPHY_PORT3_TX_CLK                            121
++#define NSS_CC_UNIPHY_PORT4_RX_CLK                            122
++#define NSS_CC_UNIPHY_PORT4_TX_CLK                            123
++#define NSS_CC_UNIPHY_PORT5_RX_CLK                            124
++#define NSS_CC_UNIPHY_PORT5_TX_CLK                            125
++#define NSS_CC_UNIPHY_PORT6_RX_CLK                            126
++#define NSS_CC_UNIPHY_PORT6_TX_CLK                            127
++#define NSS_CC_XGMAC0_PTP_REF_CLK                             128
++#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC                     129
++#define NSS_CC_XGMAC1_PTP_REF_CLK                             130
++#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC                     131
++#define NSS_CC_XGMAC2_PTP_REF_CLK                             132
++#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC                     133
++#define NSS_CC_XGMAC3_PTP_REF_CLK                             134
++#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC                     135
++#define NSS_CC_XGMAC4_PTP_REF_CLK                             136
++#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC                     137
++#define NSS_CC_XGMAC5_PTP_REF_CLK                             138
++#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC                     139
++#define UBI32_PLL                                             140
++#define UBI32_PLL_MAIN                                                141
++
++#endif
+diff --git a/include/dt-bindings/reset/qcom,ipq9574-nsscc.h b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
+new file mode 100644
+index 000000000000..6910db0cff51
+--- /dev/null
++++ b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
+@@ -0,0 +1,134 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
++#define _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
++
++#define EDMA_HW_RESET                   0
++#define NSS_CC_CE_BCR                 1
++#define NSS_CC_CLC_BCR                        2
++#define NSS_CC_EIP197_BCR             3
++#define NSS_CC_HAQ_BCR                        4
++#define NSS_CC_IMEM_BCR                       5
++#define NSS_CC_MAC_BCR                        6
++#define NSS_CC_PPE_BCR                        7
++#define NSS_CC_UBI_BCR                        8
++#define NSS_CC_UNIPHY_BCR             9
++#define UBI3_CLKRST_CLAMP_ENABLE      10
++#define UBI3_CORE_CLAMP_ENABLE                11
++#define UBI2_CLKRST_CLAMP_ENABLE      12
++#define UBI2_CORE_CLAMP_ENABLE                13
++#define UBI1_CLKRST_CLAMP_ENABLE      14
++#define UBI1_CORE_CLAMP_ENABLE                15
++#define UBI0_CLKRST_CLAMP_ENABLE      16
++#define UBI0_CORE_CLAMP_ENABLE                17
++#define NSSNOC_NSS_CSR_ARES           18
++#define NSS_CSR_ARES                  19
++#define PPE_BTQ_ARES                  20
++#define PPE_IPE_ARES                  21
++#define PPE_ARES                      22
++#define PPE_CFG_ARES                  23
++#define PPE_EDMA_ARES                 24
++#define PPE_EDMA_CFG_ARES             25
++#define CRY_PPE_ARES                  26
++#define NSSNOC_PPE_ARES                       27
++#define NSSNOC_PPE_CFG_ARES           28
++#define PORT1_MAC_ARES                        29
++#define PORT2_MAC_ARES                        30
++#define PORT3_MAC_ARES                        31
++#define PORT4_MAC_ARES                        32
++#define PORT5_MAC_ARES                        33
++#define PORT6_MAC_ARES                        34
++#define XGMAC0_PTP_REF_ARES           35
++#define XGMAC1_PTP_REF_ARES           36
++#define XGMAC2_PTP_REF_ARES           37
++#define XGMAC3_PTP_REF_ARES           38
++#define XGMAC4_PTP_REF_ARES           39
++#define XGMAC5_PTP_REF_ARES           40
++#define HAQ_AHB_ARES                  41
++#define HAQ_AXI_ARES                  42
++#define NSSNOC_HAQ_AHB_ARES           43
++#define NSSNOC_HAQ_AXI_ARES           44
++#define CE_APB_ARES                   45
++#define CE_AXI_ARES                   46
++#define NSSNOC_CE_APB_ARES            47
++#define NSSNOC_CE_AXI_ARES            48
++#define CRYPTO_ARES                   49
++#define NSSNOC_CRYPTO_ARES            50
++#define NSSNOC_NC_AXI0_1_ARES         51
++#define UBI0_CORE_ARES                        52
++#define UBI1_CORE_ARES                        53
++#define UBI2_CORE_ARES                        54
++#define UBI3_CORE_ARES                        55
++#define NC_AXI0_ARES                  56
++#define UTCM0_ARES                    57
++#define NC_AXI1_ARES                  58
++#define UTCM1_ARES                    59
++#define NC_AXI2_ARES                  60
++#define UTCM2_ARES                    61
++#define NC_AXI3_ARES                  62
++#define UTCM3_ARES                    63
++#define NSSNOC_NC_AXI0_ARES           64
++#define AHB0_ARES                     65
++#define INTR0_AHB_ARES                        66
++#define AHB1_ARES                     67
++#define INTR1_AHB_ARES                        68
++#define AHB2_ARES                     69
++#define INTR2_AHB_ARES                        70
++#define AHB3_ARES                     71
++#define INTR3_AHB_ARES                        72
++#define NSSNOC_AHB0_ARES              73
++#define NSSNOC_INT0_AHB_ARES          74
++#define AXI0_ARES                     75
++#define AXI1_ARES                     76
++#define AXI2_ARES                     77
++#define AXI3_ARES                     78
++#define NSSNOC_AXI0_ARES              79
++#define IMEM_QSB_ARES                 80
++#define NSSNOC_IMEM_QSB_ARES          81
++#define IMEM_AHB_ARES                 82
++#define NSSNOC_IMEM_AHB_ARES          83
++#define UNIPHY_PORT1_RX_ARES          84
++#define UNIPHY_PORT1_TX_ARES          85
++#define UNIPHY_PORT2_RX_ARES          86
++#define UNIPHY_PORT2_TX_ARES          87
++#define UNIPHY_PORT3_RX_ARES          88
++#define UNIPHY_PORT3_TX_ARES          89
++#define UNIPHY_PORT4_RX_ARES          90
++#define UNIPHY_PORT4_TX_ARES          91
++#define UNIPHY_PORT5_RX_ARES          92
++#define UNIPHY_PORT5_TX_ARES          93
++#define UNIPHY_PORT6_RX_ARES          94
++#define UNIPHY_PORT6_TX_ARES          95
++#define PORT1_RX_ARES                 96
++#define PORT1_TX_ARES                 97
++#define PORT2_RX_ARES                 98
++#define PORT2_TX_ARES                 99
++#define PORT3_RX_ARES                 100
++#define PORT3_TX_ARES                 101
++#define PORT4_RX_ARES                 102
++#define PORT4_TX_ARES                 103
++#define PORT5_RX_ARES                 104
++#define PORT5_TX_ARES                 105
++#define PORT6_RX_ARES                 106
++#define PORT6_TX_ARES                 107
++#define PPE_FULL_RESET                        108
++#define UNIPHY0_SOFT_RESET            109
++#define UNIPHY1_SOFT_RESET            110
++#define UNIPHY2_SOFT_RESET            111
++#define UNIPHY_PORT1_ARES             112
++#define UNIPHY_PORT2_ARES             113
++#define UNIPHY_PORT3_ARES             114
++#define UNIPHY_PORT4_ARES             115
++#define UNIPHY_PORT5_ARES             116
++#define UNIPHY_PORT6_ARES             117
++#define NSSPORT1_RESET                        118
++#define NSSPORT2_RESET                        119
++#define NSSPORT3_RESET                        120
++#define NSSPORT4_RESET                        121
++#define NSSPORT5_RESET                        122
++#define NSSPORT6_RESET                        123
++
++#endif
+-- 
+2.45.2
+
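These reset indices are consumed through the kernel's generic reset controller API. As a hedged sketch (the device, the assumed consumer property resets = <&nsscc PPE_FULL_RESET>, and the settle delay are all illustrative), a driver pulsing the PPE block reset might look like:

#include <linux/delay.h>
#include <linux/reset.h>

static int example_reset_ppe(struct device *dev)
{
	struct reset_control *rst;

	/* Consumer node assumed to carry: resets = <&nsscc PPE_FULL_RESET>; */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(100, 200);	/* assumed settle time */
	return reset_control_deassert(rst);
}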
diff --git a/target/linux/qualcommbe/patches-6.6/105-05-clk-qcom-Add-NSS-clock-Controller-driver-for-IPQ9574.patch b/target/linux/qualcommbe/patches-6.6/105-05-clk-qcom-Add-NSS-clock-Controller-driver-for-IPQ9574.patch
new file mode 100644
index 0000000..cadb416
--- /dev/null
@@ -0,0 +1,3138 @@
+From 86db870d9e00b71fb3bd2c8a1a72cda971d9a77d Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:18 +0530
+Subject: [PATCH 5/7] clk: qcom: Add NSS clock Controller driver for IPQ9574
+
+Add the Networking Sub System Clock Controller (NSSCC) driver for
+ipq9574-based devices.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202410101431.tjpSRNTY-lkp@intel.com/
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+---
+ drivers/clk/qcom/Kconfig         |    7 +
+ drivers/clk/qcom/Makefile        |    1 +
+ drivers/clk/qcom/nsscc-ipq9574.c | 3080 ++++++++++++++++++++++++++++++
+ 3 files changed, 3088 insertions(+)
+ create mode 100644 drivers/clk/qcom/nsscc-ipq9574.c
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index a3e2a09e2105..b9a5cc9fd8c8 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -255,6 +255,13 @@ config IPQ_GCC_9574
+         i2c, USB, SD/eMMC, etc. Select this for the root clock
+         of ipq9574.
++config IPQ_NSSCC_9574
++        tristate "IPQ9574 NSS Clock Controller"
++        depends on ARM64 || COMPILE_TEST
++        depends on IPQ_GCC_9574
++        help
++          Support for NSS clock controller on ipq9574 devices.
++
+ config IPQ_NSSCC_QCA8K
+       tristate "QCA8K(QCA8386 or QCA8084) NSS Clock Controller"
+       depends on MDIO_BUS
+diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
+index 2b378667a63f..65b825a54c45 100644
+--- a/drivers/clk/qcom/Makefile
++++ b/drivers/clk/qcom/Makefile
+@@ -36,6 +36,7 @@ obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
+ obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+ obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
+ obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
++obj-$(CONFIG_IPQ_NSSCC_9574)  += nsscc-ipq9574.o
+ obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+ obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
+ obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
+diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
+new file mode 100644
+index 000000000000..d3e9aa391236
+--- /dev/null
++++ b/drivers/clk/qcom/nsscc-ipq9574.c
+@@ -0,0 +1,3080 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/err.h>
++#include <linux/interconnect-provider.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/regmap.h>
++#include <linux/platform_device.h>
++
++#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
++#include <dt-bindings/interconnect/qcom,ipq9574.h>
++#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
++
++#include "clk-alpha-pll.h"
++#include "clk-branch.h"
++#include "clk-pll.h"
++#include "clk-rcg.h"
++#include "clk-regmap.h"
++#include "clk-regmap-divider.h"
++#include "clk-regmap-mux.h"
++#include "common.h"
++#include "reset.h"
++
++/* Need to match the order of clocks in DT binding */
++enum {
++      DT_XO,
++      DT_BIAS_PLL_CC_CLK,
++      DT_BIAS_PLL_UBI_NC_CLK,
++      DT_GCC_GPLL0_OUT_AUX,
++      DT_UNIPHY0_NSS_RX_CLK,
++      DT_UNIPHY0_NSS_TX_CLK,
++      DT_UNIPHY1_NSS_RX_CLK,
++      DT_UNIPHY1_NSS_TX_CLK,
++      DT_UNIPHY2_NSS_RX_CLK,
++      DT_UNIPHY2_NSS_TX_CLK,
++};
++
++enum {
++      P_XO,
++      P_BIAS_PLL_CC_CLK,
++      P_BIAS_PLL_UBI_NC_CLK,
++      P_GCC_GPLL0_OUT_AUX,
++      P_UBI32_PLL_OUT_MAIN,
++      P_UNIPHY0_NSS_RX_CLK,
++      P_UNIPHY0_NSS_TX_CLK,
++      P_UNIPHY1_NSS_RX_CLK,
++      P_UNIPHY1_NSS_TX_CLK,
++      P_UNIPHY2_NSS_RX_CLK,
++      P_UNIPHY2_NSS_TX_CLK,
++};
++
++static const struct alpha_pll_config ubi32_pll_config = {
++      .l = 0x3e,
++      .alpha = 0x6666,
++      .config_ctl_val = 0x200d4aa8,
++      .config_ctl_hi_val = 0x3c,
++      .main_output_mask = BIT(0),
++      .aux_output_mask = BIT(1),
++      .pre_div_val = 0x0,
++      .pre_div_mask = BIT(12),
++      .post_div_val = 0x0,
++      .post_div_mask = GENMASK(9, 8),
++      .alpha_en_mask = BIT(24),
++      .test_ctl_val = 0x1c0000c0,
++      .test_ctl_hi_val = 0x4000,
++};
++
++static struct clk_alpha_pll ubi32_pll_main = {
++      .offset = 0x28000,
++      .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
++      .flags = SUPPORTS_DYNAMIC_UPDATE,
++      .clkr = {
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "ubi32_pll_main",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .index = DT_XO,
++                      },
++                      .num_parents = 1,
++                      .ops = &clk_alpha_pll_huayra_ops,
++              },
++      },
++};
++
++static struct clk_alpha_pll_postdiv ubi32_pll = {
++      .offset = 0x28000,
++      .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
++      .width = 2,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "ubi32_pll",
++              .parent_hws = (const struct clk_hw *[]) {
++                      &ubi32_pll_main.clkr.hw
++              },
++              .num_parents = 1,
++              .ops = &clk_alpha_pll_postdiv_ro_ops,
++              .flags = CLK_SET_RATE_PARENT,
++      },
++};
++
++static const struct parent_map nss_cc_parent_map_0[] = {
++      { P_XO, 0 },
++      { P_BIAS_PLL_CC_CLK, 1 },
++      { P_UNIPHY0_NSS_RX_CLK, 2 },
++      { P_UNIPHY0_NSS_TX_CLK, 3 },
++      { P_UNIPHY1_NSS_RX_CLK, 4 },
++      { P_UNIPHY1_NSS_TX_CLK, 5 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_0[] = {
++      { .index = DT_XO },
++      { .index = DT_BIAS_PLL_CC_CLK },
++      { .index = DT_UNIPHY0_NSS_RX_CLK },
++      { .index = DT_UNIPHY0_NSS_TX_CLK },
++      { .index = DT_UNIPHY1_NSS_RX_CLK },
++      { .index = DT_UNIPHY1_NSS_TX_CLK },
++};
++
++static const struct parent_map nss_cc_parent_map_1[] = {
++      { P_XO, 0 },
++      { P_BIAS_PLL_UBI_NC_CLK, 1 },
++      { P_GCC_GPLL0_OUT_AUX, 2 },
++      { P_BIAS_PLL_CC_CLK, 6 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_1[] = {
++      { .index = DT_XO },
++      { .index = DT_BIAS_PLL_UBI_NC_CLK },
++      { .index = DT_GCC_GPLL0_OUT_AUX },
++      { .index = DT_BIAS_PLL_CC_CLK },
++};
++
++static const struct parent_map nss_cc_parent_map_2[] = {
++      { P_XO, 0 },
++      { P_UBI32_PLL_OUT_MAIN, 1 },
++      { P_GCC_GPLL0_OUT_AUX, 2 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_2[] = {
++      { .index = DT_XO },
++      { .hw = &ubi32_pll.clkr.hw },
++      { .index = DT_GCC_GPLL0_OUT_AUX },
++};
++
++static const struct parent_map nss_cc_parent_map_3[] = {
++      { P_XO, 0 },
++      { P_BIAS_PLL_CC_CLK, 1 },
++      { P_GCC_GPLL0_OUT_AUX, 2 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_3[] = {
++      { .index = DT_XO },
++      { .index = DT_BIAS_PLL_CC_CLK },
++      { .index = DT_GCC_GPLL0_OUT_AUX },
++};
++
++static const struct parent_map nss_cc_parent_map_4[] = {
++      { P_XO, 0 },
++      { P_BIAS_PLL_CC_CLK, 1 },
++      { P_UNIPHY0_NSS_RX_CLK, 2 },
++      { P_UNIPHY0_NSS_TX_CLK, 3 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_4[] = {
++      { .index = DT_XO },
++      { .index = DT_BIAS_PLL_CC_CLK },
++      { .index = DT_UNIPHY0_NSS_RX_CLK },
++      { .index = DT_UNIPHY0_NSS_TX_CLK },
++};
++
++static const struct parent_map nss_cc_parent_map_5[] = {
++      { P_XO, 0 },
++      { P_BIAS_PLL_CC_CLK, 1 },
++      { P_UNIPHY2_NSS_RX_CLK, 2 },
++      { P_UNIPHY2_NSS_TX_CLK, 3 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_5[] = {
++      { .index = DT_XO },
++      { .index = DT_BIAS_PLL_CC_CLK },
++      { .index = DT_UNIPHY2_NSS_RX_CLK },
++      { .index = DT_UNIPHY2_NSS_TX_CLK },
++};
++
++static const struct parent_map nss_cc_parent_map_6[] = {
++      { P_XO, 0 },
++      { P_GCC_GPLL0_OUT_AUX, 2 },
++      { P_BIAS_PLL_CC_CLK, 6 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_6[] = {
++      { .index = DT_XO },
++      { .index = DT_GCC_GPLL0_OUT_AUX },
++      { .index = DT_BIAS_PLL_CC_CLK },
++};
++
++static const struct parent_map nss_cc_parent_map_7[] = {
++      { P_XO, 0 },
++      { P_UBI32_PLL_OUT_MAIN, 1 },
++      { P_GCC_GPLL0_OUT_AUX, 2 },
++      { P_BIAS_PLL_CC_CLK, 6 },
++};
++
++static const struct clk_parent_data nss_cc_parent_data_7[] = {
++      { .index = DT_XO },
++      { .hw = &ubi32_pll.clkr.hw },
++      { .index = DT_GCC_GPLL0_OUT_AUX },
++      { .index = DT_BIAS_PLL_CC_CLK },
++};
++
++static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
++      F(24000000, P_XO, 1, 0, 0),
++      F(353000000, P_BIAS_PLL_UBI_NC_CLK, 1, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_ce_clk_src = {
++      .cmd_rcgr = 0x28404,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_1,
++      .freq_tbl = ftbl_nss_cc_ce_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ce_clk_src",
++              .parent_data = nss_cc_parent_data_1,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
++      F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_cfg_clk_src = {
++      .cmd_rcgr = 0x28104,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_3,
++      .freq_tbl = ftbl_nss_cc_cfg_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_cfg_clk_src",
++              .parent_data = nss_cc_parent_data_3,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static const struct freq_tbl ftbl_nss_cc_clc_clk_src[] = {
++      F(533333333, P_GCC_GPLL0_OUT_AUX, 1.5, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_clc_clk_src = {
++      .cmd_rcgr = 0x28604,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_6,
++      .freq_tbl = ftbl_nss_cc_clc_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_clc_clk_src",
++              .parent_data = nss_cc_parent_data_6,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_6),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static const struct freq_tbl ftbl_nss_cc_crypto_clk_src[] = {
++      F(24000000, P_XO, 1, 0, 0),
++      F(300000000, P_BIAS_PLL_CC_CLK, 4, 0, 0),
++      F(600000000, P_BIAS_PLL_CC_CLK, 2, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_crypto_clk_src = {
++      .cmd_rcgr = 0x16008,
++      .mnd_width = 16,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_3,
++      .freq_tbl = ftbl_nss_cc_crypto_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_crypto_clk_src",
++              .parent_data = nss_cc_parent_data_3,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_haq_clk_src = {
++      .cmd_rcgr = 0x28304,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_1,
++      .freq_tbl = ftbl_nss_cc_ce_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_haq_clk_src",
++              .parent_data = nss_cc_parent_data_1,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_imem_clk_src = {
++      .cmd_rcgr = 0xe008,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_1,
++      .freq_tbl = ftbl_nss_cc_ce_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_imem_clk_src",
++              .parent_data = nss_cc_parent_data_1,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static const struct freq_tbl ftbl_nss_cc_int_cfg_clk_src[] = {
++      F(200000000, P_GCC_GPLL0_OUT_AUX, 4, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_int_cfg_clk_src = {
++      .cmd_rcgr = 0x287b4,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_3,
++      .freq_tbl = ftbl_nss_cc_int_cfg_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_int_cfg_clk_src",
++              .parent_data = nss_cc_parent_data_3,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
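++/*
++ * The PPE port clocks are sourced from the UNIPHY recovered clocks, whose
++ * rate varies with the negotiated link mode, so a given output rate (e.g.
++ * 25 or 125 MHz) may be reachable through more than one parent/divider
++ * combination. The FM()/FMS() freq_multi_tbl entries list those
++ * alternatives and clk_rcg2_fm_ops selects among them at set_rate time.
++ */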
++static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
++      C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
++      C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
++      FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
++      FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
++      { }
++};
++
++static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
++      C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
++      C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
++      FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
++      FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
++      { }
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_25[] = {
++      C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_125[] = {
++      C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_rx_clk_src_312p5[] = {
++      C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
++      C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port5_rx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port5_rx_clk_src_25),
++      FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port5_rx_clk_src_125),
++      FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
++      FM(312500000, ftbl_nss_cc_port5_rx_clk_src_312p5),
++      { }
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_25[] = {
++      C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_125[] = {
++      C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port5_tx_clk_src_312p5[] = {
++      C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
++      C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port5_tx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port5_tx_clk_src_25),
++      FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port5_tx_clk_src_125),
++      FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
++      FM(312500000, ftbl_nss_cc_port5_tx_clk_src_312p5),
++      { }
++};
++
++static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_25[] = {
++      C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port6_rx_clk_src_125[] = {
++      C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port6_rx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port6_rx_clk_src_25),
++      FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port6_rx_clk_src_125),
++      FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
++      FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
++      { }
++};
++
++static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_25[] = {
++      C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
++      C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
++};
++
++static const struct freq_conf ftbl_nss_cc_port6_tx_clk_src_125[] = {
++      C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
++      C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
++};
++
++static const struct freq_multi_tbl ftbl_nss_cc_port6_tx_clk_src[] = {
++      FMS(24000000, P_XO, 1, 0, 0),
++      FM(25000000, ftbl_nss_cc_port6_tx_clk_src_25),
++      FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
++      FM(125000000, ftbl_nss_cc_port6_tx_clk_src_125),
++      FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
++      FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
++      { }
++};
++
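++/*
++ * Ports 1-4 are UNIPHY0 based and share the port1 frequency tables; port5
++ * may be driven by either UNIPHY0 or UNIPHY1 (see its tables above), and
++ * port6 is served by UNIPHY2.
++ */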
++static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
++      .cmd_rcgr = 0x28110,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port1_rx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
++      .cmd_rcgr = 0x2811c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port1_tx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
++      .cmd_rcgr = 0x28128,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port2_rx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
++      .cmd_rcgr = 0x28134,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port2_tx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
++      .cmd_rcgr = 0x28140,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port3_rx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
++      .cmd_rcgr = 0x2814c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port3_tx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port4_rx_clk_src = {
++      .cmd_rcgr = 0x28158,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port4_rx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port4_tx_clk_src = {
++      .cmd_rcgr = 0x28164,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_4,
++      .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port4_tx_clk_src",
++              .parent_data = nss_cc_parent_data_4,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_4),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port5_rx_clk_src = {
++      .cmd_rcgr = 0x28170,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_0,
++      .freq_multi_tbl = ftbl_nss_cc_port5_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port5_rx_clk_src",
++              .parent_data = nss_cc_parent_data_0,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port5_tx_clk_src = {
++      .cmd_rcgr = 0x2817c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_0,
++      .freq_multi_tbl = ftbl_nss_cc_port5_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port5_tx_clk_src",
++              .parent_data = nss_cc_parent_data_0,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port6_rx_clk_src = {
++      .cmd_rcgr = 0x28188,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_5,
++      .freq_multi_tbl = ftbl_nss_cc_port6_rx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port6_rx_clk_src",
++              .parent_data = nss_cc_parent_data_5,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_port6_tx_clk_src = {
++      .cmd_rcgr = 0x28194,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_5,
++      .freq_multi_tbl = ftbl_nss_cc_port6_tx_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port6_tx_clk_src",
++              .parent_data = nss_cc_parent_data_5,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_5),
++              .ops = &clk_rcg2_fm_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ppe_clk_src = {
++      .cmd_rcgr = 0x28204,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_1,
++      .freq_tbl = ftbl_nss_cc_ce_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ppe_clk_src",
++              .parent_data = nss_cc_parent_data_1,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
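++/*
++ * The two top UBI rates share a pre-divider of 1: with CLK_SET_RATE_PARENT
++ * set, requesting 1689.6 MHz reprograms the UBI32 PLL itself rather than
++ * the RCG divider.
++ */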
++static const struct freq_tbl ftbl_nss_cc_ubi0_clk_src[] = {
++      F(24000000, P_XO, 1, 0, 0),
++      F(187200000, P_UBI32_PLL_OUT_MAIN, 8, 0, 0),
++      F(748800000, P_UBI32_PLL_OUT_MAIN, 2, 0, 0),
++      F(1497600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
++      F(1689600000, P_UBI32_PLL_OUT_MAIN, 1, 0, 0),
++      { }
++};
++
++static struct clk_rcg2 nss_cc_ubi0_clk_src = {
++      .cmd_rcgr = 0x28704,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_2,
++      .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi0_clk_src",
++              .parent_data = nss_cc_parent_data_2,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ubi1_clk_src = {
++      .cmd_rcgr = 0x2870c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_2,
++      .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi1_clk_src",
++              .parent_data = nss_cc_parent_data_2,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ubi2_clk_src = {
++      .cmd_rcgr = 0x28714,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_2,
++      .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi2_clk_src",
++              .parent_data = nss_cc_parent_data_2,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ubi3_clk_src = {
++      .cmd_rcgr = 0x2871c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_2,
++      .freq_tbl = ftbl_nss_cc_ubi0_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi3_clk_src",
++              .parent_data = nss_cc_parent_data_2,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ubi_axi_clk_src = {
++      .cmd_rcgr = 0x28724,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_7,
++      .freq_tbl = ftbl_nss_cc_clc_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi_axi_clk_src",
++              .parent_data = nss_cc_parent_data_7,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_7),
++              .ops = &clk_rcg2_ops,
++      },
++};
++
++static struct clk_rcg2 nss_cc_ubi_nc_axi_bfdcd_clk_src = {
++      .cmd_rcgr = 0x2872c,
++      .mnd_width = 0,
++      .hid_width = 5,
++      .parent_map = nss_cc_parent_map_1,
++      .freq_tbl = ftbl_nss_cc_ce_clk_src,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi_nc_axi_bfdcd_clk_src",
++              .parent_data = nss_cc_parent_data_1,
++              .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_rcg2_ops,
++      },
++};
++
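++/*
++ * Post-dividers sitting between the port RCGs and the branch gates: each
++ * clk_regmap_div describes a plain divider field (reg/shift/width), and
++ * CLK_SET_RATE_PARENT lets rate requests propagate up into the RCG.
++ */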
++static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
++      .reg = 0x28118,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port1_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port1_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
++      .reg = 0x28124,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port1_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port1_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
++      .reg = 0x28130,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port2_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port2_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
++      .reg = 0x2813c,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port2_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port2_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
++      .reg = 0x28148,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port3_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port3_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
++      .reg = 0x28154,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port3_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port3_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port4_rx_div_clk_src = {
++      .reg = 0x28160,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port4_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port4_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port4_tx_div_clk_src = {
++      .reg = 0x2816c,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port4_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port4_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port5_rx_div_clk_src = {
++      .reg = 0x28178,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port5_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port5_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port5_tx_div_clk_src = {
++      .reg = 0x28184,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port5_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port5_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port6_rx_div_clk_src = {
++      .reg = 0x28190,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port6_rx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port6_rx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_port6_tx_div_clk_src = {
++      .reg = 0x2819c,
++      .shift = 0,
++      .width = 9,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_port6_tx_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_port6_tx_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ops,
++      },
++};
++
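++/*
++ * The UBI core dividers use clk_regmap_div_ro_ops: the divider value is
++ * programmed by earlier boot stages and is only read back here, never
++ * written by Linux.
++ */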
++static struct clk_regmap_div nss_cc_ubi0_div_clk_src = {
++      .reg = 0x287a4,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi0_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ubi0_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_ubi1_div_clk_src = {
++      .reg = 0x287a8,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi1_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ubi1_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_ubi2_div_clk_src = {
++      .reg = 0x287ac,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi2_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ubi2_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_ubi3_div_clk_src = {
++      .reg = 0x287b0,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_ubi3_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ubi3_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
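++/* Per-XGMAC PTP reference dividers, all derived from the PPE clock. */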
++static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
++      .reg = 0x28214,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
++      .reg = 0x28218,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
++      .reg = 0x2821c,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_xgmac3_ptp_ref_div_clk_src = {
++      .reg = 0x28220,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac3_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_xgmac4_ptp_ref_div_clk_src = {
++      .reg = 0x28224,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac4_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
++static struct clk_regmap_div nss_cc_xgmac5_ptp_ref_div_clk_src = {
++      .reg = 0x28228,
++      .shift = 0,
++      .width = 4,
++      .clkr.hw.init = &(const struct clk_init_data) {
++              .name = "nss_cc_xgmac5_ptp_ref_div_clk_src",
++              .parent_data = &(const struct clk_parent_data) {
++                      .hw = &nss_cc_ppe_clk_src.clkr.hw,
++              },
++              .num_parents = 1,
++              .flags = CLK_SET_RATE_PARENT,
++              .ops = &clk_regmap_div_ro_ops,
++      },
++};
++
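++/*
++ * Branch (gate) clocks: enable_mask gates the clock at enable_reg, and
++ * BRANCH_HALT makes enable/disable poll halt_reg until the hardware
++ * reports that the branch has actually started or stopped.
++ */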
++static struct clk_branch nss_cc_ce_apb_clk = {
++      .halt_reg = 0x2840c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2840c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ce_apb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ce_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ce_axi_clk = {
++      .halt_reg = 0x28410,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28410,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ce_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ce_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_clc_axi_clk = {
++      .halt_reg = 0x2860c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2860c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_clc_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_clc_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_crypto_clk = {
++      .halt_reg = 0x1601c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x1601c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_crypto_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_crypto_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_crypto_ppe_clk = {
++      .halt_reg = 0x28240,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28240,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_crypto_ppe_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_haq_ahb_clk = {
++      .halt_reg = 0x2830c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2830c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_haq_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_haq_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_haq_axi_clk = {
++      .halt_reg = 0x28310,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28310,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_haq_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_haq_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_imem_ahb_clk = {
++      .halt_reg = 0xe018,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xe018,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_imem_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_imem_qsb_clk = {
++      .halt_reg = 0xe010,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xe010,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_imem_qsb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_imem_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nss_csr_clk = {
++      .halt_reg = 0x281d0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281d0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nss_csr_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
++      .halt_reg = 0x28414,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28414,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ce_apb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ce_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
++      .halt_reg = 0x28418,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28418,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ce_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ce_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_clc_axi_clk = {
++      .halt_reg = 0x28610,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28610,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_clc_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_clc_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_crypto_clk = {
++      .halt_reg = 0x16020,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x16020,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_crypto_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_crypto_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_haq_ahb_clk = {
++      .halt_reg = 0x28314,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28314,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_haq_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_haq_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_haq_axi_clk = {
++      .halt_reg = 0x28318,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28318,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_haq_axi_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_haq_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_imem_ahb_clk = {
++      .halt_reg = 0xe01c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xe01c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_imem_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_imem_qsb_clk = {
++      .halt_reg = 0xe014,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0xe014,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_imem_qsb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_imem_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
++      .halt_reg = 0x281d4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281d4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_nss_csr_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
++      .halt_reg = 0x28248,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28248,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ppe_cfg_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ppe_clk = {
++      .halt_reg = 0x28244,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28244,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ppe_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ubi32_ahb0_clk = {
++      .halt_reg = 0x28788,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28788,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ubi32_ahb0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ubi32_axi0_clk = {
++      .halt_reg = 0x287a0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x287a0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ubi32_axi0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ubi32_int0_ahb_clk = {
++      .halt_reg = 0x2878c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2878c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ubi32_int0_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_1_clk = {
++      .halt_reg = 0x287bc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x287bc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ubi32_nc_axi0_1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_nssnoc_ubi32_nc_axi0_clk = {
++      .halt_reg = 0x28764,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28764,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_nssnoc_ubi32_nc_axi0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port1_mac_clk = {
++      .halt_reg = 0x2824c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2824c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port1_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port1_rx_clk = {
++      .halt_reg = 0x281a0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281a0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port1_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port1_tx_clk = {
++      .halt_reg = 0x281a4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281a4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port1_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port2_mac_clk = {
++      .halt_reg = 0x28250,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28250,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port2_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port2_rx_clk = {
++      .halt_reg = 0x281a8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281a8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port2_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port2_tx_clk = {
++      .halt_reg = 0x281ac,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281ac,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port2_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port3_mac_clk = {
++      .halt_reg = 0x28254,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28254,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port3_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port3_rx_clk = {
++      .halt_reg = 0x281b0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281b0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port3_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port3_tx_clk = {
++      .halt_reg = 0x281b4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281b4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port3_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port4_mac_clk = {
++      .halt_reg = 0x28258,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28258,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port4_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port4_rx_clk = {
++      .halt_reg = 0x281b8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281b8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port4_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port4_tx_clk = {
++      .halt_reg = 0x281bc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281bc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port4_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port5_mac_clk = {
++      .halt_reg = 0x2825c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2825c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port5_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port5_rx_clk = {
++      .halt_reg = 0x281c0,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281c0,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port5_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port5_tx_clk = {
++      .halt_reg = 0x281c4,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281c4,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port5_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port6_mac_clk = {
++      .halt_reg = 0x28260,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28260,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port6_mac_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port6_rx_clk = {
++      .halt_reg = 0x281c8,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281c8,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port6_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_port6_tx_clk = {
++      .halt_reg = 0x281cc,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x281cc,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_port6_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
++      .halt_reg = 0x2823c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2823c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_edma_cfg_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_edma_clk = {
++      .halt_reg = 0x28238,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28238,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_edma_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_switch_btq_clk = {
++      .halt_reg = 0x2827c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2827c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_switch_btq_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
++      .halt_reg = 0x28234,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28234,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_switch_cfg_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_switch_clk = {
++      .halt_reg = 0x28230,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28230,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_switch_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
++      .halt_reg = 0x2822c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2822c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ppe_switch_ipe_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ppe_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_ahb0_clk = {
++      .halt_reg = 0x28768,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28768,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_ahb0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_ahb1_clk = {
++      .halt_reg = 0x28770,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28770,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_ahb1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_ahb2_clk = {
++      .halt_reg = 0x28778,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28778,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_ahb2_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_ahb3_clk = {
++      .halt_reg = 0x28780,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28780,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_ahb3_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_axi0_clk = {
++      .halt_reg = 0x28790,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28790,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_axi0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_axi1_clk = {
++      .halt_reg = 0x28794,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28794,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_axi1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_axi2_clk = {
++      .halt_reg = 0x28798,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28798,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_axi2_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_axi3_clk = {
++      .halt_reg = 0x2879c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2879c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_axi3_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_axi_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_core0_clk = {
++      .halt_reg = 0x28734,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28734,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_core0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi0_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_core1_clk = {
++      .halt_reg = 0x28738,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28738,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_core1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi1_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_core2_clk = {
++      .halt_reg = 0x2873c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2873c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_core2_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi2_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_core3_clk = {
++      .halt_reg = 0x28740,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28740,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_core3_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi3_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_intr0_ahb_clk = {
++      .halt_reg = 0x2876c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2876c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_intr0_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_intr1_ahb_clk = {
++      .halt_reg = 0x28774,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28774,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_intr1_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_intr2_ahb_clk = {
++      .halt_reg = 0x2877c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2877c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_intr2_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_intr3_ahb_clk = {
++      .halt_reg = 0x28784,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28784,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_intr3_ahb_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_int_cfg_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_nc_axi0_clk = {
++      .halt_reg = 0x28744,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28744,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_nc_axi0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_nc_axi1_clk = {
++      .halt_reg = 0x2874c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2874c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_nc_axi1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_nc_axi2_clk = {
++      .halt_reg = 0x28754,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28754,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_nc_axi2_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_nc_axi3_clk = {
++      .halt_reg = 0x2875c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2875c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_nc_axi3_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_utcm0_clk = {
++      .halt_reg = 0x28748,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28748,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_utcm0_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_utcm1_clk = {
++      .halt_reg = 0x28750,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28750,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_utcm1_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_utcm2_clk = {
++      .halt_reg = 0x28758,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28758,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_utcm2_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_ubi32_utcm3_clk = {
++      .halt_reg = 0x28760,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28760,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_ubi32_utcm3_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
++      .halt_reg = 0x28904,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28904,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port1_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port1_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
++      .halt_reg = 0x28908,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28908,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port1_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port1_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
++      .halt_reg = 0x2890c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2890c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port2_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port2_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
++      .halt_reg = 0x28910,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28910,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port2_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port2_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
++      .halt_reg = 0x28914,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28914,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port3_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port3_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
++      .halt_reg = 0x28918,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28918,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port3_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port3_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port4_rx_clk = {
++      .halt_reg = 0x2891c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2891c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port4_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port4_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port4_tx_clk = {
++      .halt_reg = 0x28920,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28920,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port4_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port4_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port5_rx_clk = {
++      .halt_reg = 0x28924,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28924,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port5_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port5_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port5_tx_clk = {
++      .halt_reg = 0x28928,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28928,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port5_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port5_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port6_rx_clk = {
++      .halt_reg = 0x2892c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2892c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port6_rx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port6_rx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_uniphy_port6_tx_clk = {
++      .halt_reg = 0x28930,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28930,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_uniphy_port6_tx_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_port6_tx_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
++      .halt_reg = 0x28264,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28264,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac0_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
++      .halt_reg = 0x28268,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28268,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac1_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
++      .halt_reg = 0x2826c,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x2826c,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac2_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac3_ptp_ref_clk = {
++      .halt_reg = 0x28270,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28270,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac3_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac4_ptp_ref_clk = {
++      .halt_reg = 0x28274,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28274,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac4_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_branch nss_cc_xgmac5_ptp_ref_clk = {
++      .halt_reg = 0x28278,
++      .halt_check = BRANCH_HALT,
++      .clkr = {
++              .enable_reg = 0x28278,
++              .enable_mask = BIT(0),
++              .hw.init = &(const struct clk_init_data) {
++                      .name = "nss_cc_xgmac5_ptp_ref_clk",
++                      .parent_data = &(const struct clk_parent_data) {
++                              .hw = &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr.hw,
++                      },
++                      .num_parents = 1,
++                      .flags = CLK_SET_RATE_PARENT,
++                      .ops = &clk_branch2_ops,
++              },
++      },
++};
++
++static struct clk_regmap *nss_cc_ipq9574_clocks[] = {
++      [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
++      [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
++      [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
++      [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
++      [NSS_CC_CLC_AXI_CLK] = &nss_cc_clc_axi_clk.clkr,
++      [NSS_CC_CLC_CLK_SRC] = &nss_cc_clc_clk_src.clkr,
++      [NSS_CC_CRYPTO_CLK] = &nss_cc_crypto_clk.clkr,
++      [NSS_CC_CRYPTO_CLK_SRC] = &nss_cc_crypto_clk_src.clkr,
++      [NSS_CC_CRYPTO_PPE_CLK] = &nss_cc_crypto_ppe_clk.clkr,
++      [NSS_CC_HAQ_AHB_CLK] = &nss_cc_haq_ahb_clk.clkr,
++      [NSS_CC_HAQ_AXI_CLK] = &nss_cc_haq_axi_clk.clkr,
++      [NSS_CC_HAQ_CLK_SRC] = &nss_cc_haq_clk_src.clkr,
++      [NSS_CC_IMEM_AHB_CLK] = &nss_cc_imem_ahb_clk.clkr,
++      [NSS_CC_IMEM_CLK_SRC] = &nss_cc_imem_clk_src.clkr,
++      [NSS_CC_IMEM_QSB_CLK] = &nss_cc_imem_qsb_clk.clkr,
++      [NSS_CC_INT_CFG_CLK_SRC] = &nss_cc_int_cfg_clk_src.clkr,
++      [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
++      [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
++      [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
++      [NSS_CC_NSSNOC_CLC_AXI_CLK] = &nss_cc_nssnoc_clc_axi_clk.clkr,
++      [NSS_CC_NSSNOC_CRYPTO_CLK] = &nss_cc_nssnoc_crypto_clk.clkr,
++      [NSS_CC_NSSNOC_HAQ_AHB_CLK] = &nss_cc_nssnoc_haq_ahb_clk.clkr,
++      [NSS_CC_NSSNOC_HAQ_AXI_CLK] = &nss_cc_nssnoc_haq_axi_clk.clkr,
++      [NSS_CC_NSSNOC_IMEM_AHB_CLK] = &nss_cc_nssnoc_imem_ahb_clk.clkr,
++      [NSS_CC_NSSNOC_IMEM_QSB_CLK] = &nss_cc_nssnoc_imem_qsb_clk.clkr,
++      [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
++      [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
++      [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
++      [NSS_CC_NSSNOC_UBI32_AHB0_CLK] = &nss_cc_nssnoc_ubi32_ahb0_clk.clkr,
++      [NSS_CC_NSSNOC_UBI32_AXI0_CLK] = &nss_cc_nssnoc_ubi32_axi0_clk.clkr,
++      [NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK] =
++              &nss_cc_nssnoc_ubi32_int0_ahb_clk.clkr,
++      [NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK] =
++              &nss_cc_nssnoc_ubi32_nc_axi0_1_clk.clkr,
++      [NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK] =
++              &nss_cc_nssnoc_ubi32_nc_axi0_clk.clkr,
++      [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
++      [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
++      [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
++      [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
++      [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
++      [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
++      [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
++      [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
++      [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
++      [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
++      [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
++      [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
++      [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
++      [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
++      [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
++      [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
++      [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
++      [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
++      [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
++      [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
++      [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
++      [NSS_CC_PORT4_MAC_CLK] = &nss_cc_port4_mac_clk.clkr,
++      [NSS_CC_PORT4_RX_CLK] = &nss_cc_port4_rx_clk.clkr,
++      [NSS_CC_PORT4_RX_CLK_SRC] = &nss_cc_port4_rx_clk_src.clkr,
++      [NSS_CC_PORT4_RX_DIV_CLK_SRC] = &nss_cc_port4_rx_div_clk_src.clkr,
++      [NSS_CC_PORT4_TX_CLK] = &nss_cc_port4_tx_clk.clkr,
++      [NSS_CC_PORT4_TX_CLK_SRC] = &nss_cc_port4_tx_clk_src.clkr,
++      [NSS_CC_PORT4_TX_DIV_CLK_SRC] = &nss_cc_port4_tx_div_clk_src.clkr,
++      [NSS_CC_PORT5_MAC_CLK] = &nss_cc_port5_mac_clk.clkr,
++      [NSS_CC_PORT5_RX_CLK] = &nss_cc_port5_rx_clk.clkr,
++      [NSS_CC_PORT5_RX_CLK_SRC] = &nss_cc_port5_rx_clk_src.clkr,
++      [NSS_CC_PORT5_RX_DIV_CLK_SRC] = &nss_cc_port5_rx_div_clk_src.clkr,
++      [NSS_CC_PORT5_TX_CLK] = &nss_cc_port5_tx_clk.clkr,
++      [NSS_CC_PORT5_TX_CLK_SRC] = &nss_cc_port5_tx_clk_src.clkr,
++      [NSS_CC_PORT5_TX_DIV_CLK_SRC] = &nss_cc_port5_tx_div_clk_src.clkr,
++      [NSS_CC_PORT6_MAC_CLK] = &nss_cc_port6_mac_clk.clkr,
++      [NSS_CC_PORT6_RX_CLK] = &nss_cc_port6_rx_clk.clkr,
++      [NSS_CC_PORT6_RX_CLK_SRC] = &nss_cc_port6_rx_clk_src.clkr,
++      [NSS_CC_PORT6_RX_DIV_CLK_SRC] = &nss_cc_port6_rx_div_clk_src.clkr,
++      [NSS_CC_PORT6_TX_CLK] = &nss_cc_port6_tx_clk.clkr,
++      [NSS_CC_PORT6_TX_CLK_SRC] = &nss_cc_port6_tx_clk_src.clkr,
++      [NSS_CC_PORT6_TX_DIV_CLK_SRC] = &nss_cc_port6_tx_div_clk_src.clkr,
++      [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
++      [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
++      [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
++      [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
++      [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
++      [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
++      [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
++      [NSS_CC_UBI0_CLK_SRC] = &nss_cc_ubi0_clk_src.clkr,
++      [NSS_CC_UBI0_DIV_CLK_SRC] = &nss_cc_ubi0_div_clk_src.clkr,
++      [NSS_CC_UBI1_CLK_SRC] = &nss_cc_ubi1_clk_src.clkr,
++      [NSS_CC_UBI1_DIV_CLK_SRC] = &nss_cc_ubi1_div_clk_src.clkr,
++      [NSS_CC_UBI2_CLK_SRC] = &nss_cc_ubi2_clk_src.clkr,
++      [NSS_CC_UBI2_DIV_CLK_SRC] = &nss_cc_ubi2_div_clk_src.clkr,
++      [NSS_CC_UBI32_AHB0_CLK] = &nss_cc_ubi32_ahb0_clk.clkr,
++      [NSS_CC_UBI32_AHB1_CLK] = &nss_cc_ubi32_ahb1_clk.clkr,
++      [NSS_CC_UBI32_AHB2_CLK] = &nss_cc_ubi32_ahb2_clk.clkr,
++      [NSS_CC_UBI32_AHB3_CLK] = &nss_cc_ubi32_ahb3_clk.clkr,
++      [NSS_CC_UBI32_AXI0_CLK] = &nss_cc_ubi32_axi0_clk.clkr,
++      [NSS_CC_UBI32_AXI1_CLK] = &nss_cc_ubi32_axi1_clk.clkr,
++      [NSS_CC_UBI32_AXI2_CLK] = &nss_cc_ubi32_axi2_clk.clkr,
++      [NSS_CC_UBI32_AXI3_CLK] = &nss_cc_ubi32_axi3_clk.clkr,
++      [NSS_CC_UBI32_CORE0_CLK] = &nss_cc_ubi32_core0_clk.clkr,
++      [NSS_CC_UBI32_CORE1_CLK] = &nss_cc_ubi32_core1_clk.clkr,
++      [NSS_CC_UBI32_CORE2_CLK] = &nss_cc_ubi32_core2_clk.clkr,
++      [NSS_CC_UBI32_CORE3_CLK] = &nss_cc_ubi32_core3_clk.clkr,
++      [NSS_CC_UBI32_INTR0_AHB_CLK] = &nss_cc_ubi32_intr0_ahb_clk.clkr,
++      [NSS_CC_UBI32_INTR1_AHB_CLK] = &nss_cc_ubi32_intr1_ahb_clk.clkr,
++      [NSS_CC_UBI32_INTR2_AHB_CLK] = &nss_cc_ubi32_intr2_ahb_clk.clkr,
++      [NSS_CC_UBI32_INTR3_AHB_CLK] = &nss_cc_ubi32_intr3_ahb_clk.clkr,
++      [NSS_CC_UBI32_NC_AXI0_CLK] = &nss_cc_ubi32_nc_axi0_clk.clkr,
++      [NSS_CC_UBI32_NC_AXI1_CLK] = &nss_cc_ubi32_nc_axi1_clk.clkr,
++      [NSS_CC_UBI32_NC_AXI2_CLK] = &nss_cc_ubi32_nc_axi2_clk.clkr,
++      [NSS_CC_UBI32_NC_AXI3_CLK] = &nss_cc_ubi32_nc_axi3_clk.clkr,
++      [NSS_CC_UBI32_UTCM0_CLK] = &nss_cc_ubi32_utcm0_clk.clkr,
++      [NSS_CC_UBI32_UTCM1_CLK] = &nss_cc_ubi32_utcm1_clk.clkr,
++      [NSS_CC_UBI32_UTCM2_CLK] = &nss_cc_ubi32_utcm2_clk.clkr,
++      [NSS_CC_UBI32_UTCM3_CLK] = &nss_cc_ubi32_utcm3_clk.clkr,
++      [NSS_CC_UBI3_CLK_SRC] = &nss_cc_ubi3_clk_src.clkr,
++      [NSS_CC_UBI3_DIV_CLK_SRC] = &nss_cc_ubi3_div_clk_src.clkr,
++      [NSS_CC_UBI_AXI_CLK_SRC] = &nss_cc_ubi_axi_clk_src.clkr,
++      [NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC] =
++              &nss_cc_ubi_nc_axi_bfdcd_clk_src.clkr,
++      [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT4_RX_CLK] = &nss_cc_uniphy_port4_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT4_TX_CLK] = &nss_cc_uniphy_port4_tx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT5_RX_CLK] = &nss_cc_uniphy_port5_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT5_TX_CLK] = &nss_cc_uniphy_port5_tx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT6_RX_CLK] = &nss_cc_uniphy_port6_rx_clk.clkr,
++      [NSS_CC_UNIPHY_PORT6_TX_CLK] = &nss_cc_uniphy_port6_tx_clk.clkr,
++      [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
++      [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
++      [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
++      [NSS_CC_XGMAC3_PTP_REF_CLK] = &nss_cc_xgmac3_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac3_ptp_ref_div_clk_src.clkr,
++      [NSS_CC_XGMAC4_PTP_REF_CLK] = &nss_cc_xgmac4_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac4_ptp_ref_div_clk_src.clkr,
++      [NSS_CC_XGMAC5_PTP_REF_CLK] = &nss_cc_xgmac5_ptp_ref_clk.clkr,
++      [NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC] =
++              &nss_cc_xgmac5_ptp_ref_div_clk_src.clkr,
++      [UBI32_PLL] = &ubi32_pll.clkr,
++      [UBI32_PLL_MAIN] = &ubi32_pll_main.clkr,
++};
++
++static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
++      [NSS_CC_CE_BCR] = { 0x28400, 0 },
++      [NSS_CC_CLC_BCR] = { 0x28600, 0 },
++      [NSS_CC_EIP197_BCR] = { 0x16004, 0 },
++      [NSS_CC_HAQ_BCR] = { 0x28300, 0 },
++      [NSS_CC_IMEM_BCR] = { 0xe004, 0 },
++      [NSS_CC_MAC_BCR] = { 0x28100, 0 },
++      [NSS_CC_PPE_BCR] = { 0x28200, 0 },
++      [NSS_CC_UBI_BCR] = { 0x28700, 0 },
++      [NSS_CC_UNIPHY_BCR] = { 0x28900, 0 },
++      [UBI3_CLKRST_CLAMP_ENABLE] = { 0x28a04, 9 },
++      [UBI3_CORE_CLAMP_ENABLE] = { 0x28a04, 8 },
++      [UBI2_CLKRST_CLAMP_ENABLE] = { 0x28a04, 7 },
++      [UBI2_CORE_CLAMP_ENABLE] = { 0x28a04, 6 },
++      [UBI1_CLKRST_CLAMP_ENABLE] = { 0x28a04, 5 },
++      [UBI1_CORE_CLAMP_ENABLE] = { 0x28a04, 4 },
++      [UBI0_CLKRST_CLAMP_ENABLE] = { 0x28a04, 3 },
++      [UBI0_CORE_CLAMP_ENABLE] = { 0x28a04, 2 },
++      [NSSNOC_NSS_CSR_ARES] = { 0x28a04, 1 },
++      [NSS_CSR_ARES] = { 0x28a04, 0 },
++      [PPE_BTQ_ARES] = { 0x28a08, 20 },
++      [PPE_IPE_ARES] = { 0x28a08, 19 },
++      [PPE_ARES] = { 0x28a08, 18 },
++      [PPE_CFG_ARES] = { 0x28a08, 17 },
++      [PPE_EDMA_ARES] = { 0x28a08, 16 },
++      [PPE_EDMA_CFG_ARES] = { 0x28a08, 15 },
++      [CRY_PPE_ARES] = { 0x28a08, 14 },
++      [NSSNOC_PPE_ARES] = { 0x28a08, 13 },
++      [NSSNOC_PPE_CFG_ARES] = { 0x28a08, 12 },
++      [PORT1_MAC_ARES] = { 0x28a08, 11 },
++      [PORT2_MAC_ARES] = { 0x28a08, 10 },
++      [PORT3_MAC_ARES] = { 0x28a08, 9 },
++      [PORT4_MAC_ARES] = { 0x28a08, 8 },
++      [PORT5_MAC_ARES] = { 0x28a08, 7 },
++      [PORT6_MAC_ARES] = { 0x28a08, 6 },
++      [XGMAC0_PTP_REF_ARES] = { 0x28a08, 5 },
++      [XGMAC1_PTP_REF_ARES] = { 0x28a08, 4 },
++      [XGMAC2_PTP_REF_ARES] = { 0x28a08, 3 },
++      [XGMAC3_PTP_REF_ARES] = { 0x28a08, 2 },
++      [XGMAC4_PTP_REF_ARES] = { 0x28a08, 1 },
++      [XGMAC5_PTP_REF_ARES] = { 0x28a08, 0 },
++      [HAQ_AHB_ARES] = { 0x28a0c, 3 },
++      [HAQ_AXI_ARES] = { 0x28a0c, 2 },
++      [NSSNOC_HAQ_AHB_ARES] = { 0x28a0c, 1 },
++      [NSSNOC_HAQ_AXI_ARES] = { 0x28a0c, 0 },
++      [CE_APB_ARES] = { 0x28a10, 3 },
++      [CE_AXI_ARES] = { 0x28a10, 2 },
++      [NSSNOC_CE_APB_ARES] = { 0x28a10, 1 },
++      [NSSNOC_CE_AXI_ARES] = { 0x28a10, 0 },
++      [CRYPTO_ARES] = { 0x28a14, 1 },
++      [NSSNOC_CRYPTO_ARES] = { 0x28a14, 0 },
++      [NSSNOC_NC_AXI0_1_ARES] = { 0x28a1c, 28 },
++      [UBI0_CORE_ARES] = { 0x28a1c, 27 },
++      [UBI1_CORE_ARES] = { 0x28a1c, 26 },
++      [UBI2_CORE_ARES] = { 0x28a1c, 25 },
++      [UBI3_CORE_ARES] = { 0x28a1c, 24 },
++      [NC_AXI0_ARES] = { 0x28a1c, 23 },
++      [UTCM0_ARES] = { 0x28a1c, 22 },
++      [NC_AXI1_ARES] = { 0x28a1c, 21 },
++      [UTCM1_ARES] = { 0x28a1c, 20 },
++      [NC_AXI2_ARES] = { 0x28a1c, 19 },
++      [UTCM2_ARES] = { 0x28a1c, 18 },
++      [NC_AXI3_ARES] = { 0x28a1c, 17 },
++      [UTCM3_ARES] = { 0x28a1c, 16 },
++      [NSSNOC_NC_AXI0_ARES] = { 0x28a1c, 15 },
++      [AHB0_ARES] = { 0x28a1c, 14 },
++      [INTR0_AHB_ARES] = { 0x28a1c, 13 },
++      [AHB1_ARES] = { 0x28a1c, 12 },
++      [INTR1_AHB_ARES] = { 0x28a1c, 11 },
++      [AHB2_ARES] = { 0x28a1c, 10 },
++      [INTR2_AHB_ARES] = { 0x28a1c, 9 },
++      [AHB3_ARES] = { 0x28a1c, 8 },
++      [INTR3_AHB_ARES] = { 0x28a1c, 7 },
++      [NSSNOC_AHB0_ARES] = { 0x28a1c, 6 },
++      [NSSNOC_INT0_AHB_ARES] = { 0x28a1c, 5 },
++      [AXI0_ARES] = { 0x28a1c, 4 },
++      [AXI1_ARES] = { 0x28a1c, 3 },
++      [AXI2_ARES] = { 0x28a1c, 2 },
++      [AXI3_ARES] = { 0x28a1c, 1 },
++      [NSSNOC_AXI0_ARES] = { 0x28a1c, 0 },
++      [IMEM_QSB_ARES] = { 0x28a20, 3 },
++      [NSSNOC_IMEM_QSB_ARES] = { 0x28a20, 2 },
++      [IMEM_AHB_ARES] = { 0x28a20, 1 },
++      [NSSNOC_IMEM_AHB_ARES] = { 0x28a20, 0 },
++      [UNIPHY_PORT1_RX_ARES] = { 0x28a24, 23 },
++      [UNIPHY_PORT1_TX_ARES] = { 0x28a24, 22 },
++      [UNIPHY_PORT2_RX_ARES] = { 0x28a24, 21 },
++      [UNIPHY_PORT2_TX_ARES] = { 0x28a24, 20 },
++      [UNIPHY_PORT3_RX_ARES] = { 0x28a24, 19 },
++      [UNIPHY_PORT3_TX_ARES] = { 0x28a24, 18 },
++      [UNIPHY_PORT4_RX_ARES] = { 0x28a24, 17 },
++      [UNIPHY_PORT4_TX_ARES] = { 0x28a24, 16 },
++      [UNIPHY_PORT5_RX_ARES] = { 0x28a24, 15 },
++      [UNIPHY_PORT5_TX_ARES] = { 0x28a24, 14 },
++      [UNIPHY_PORT6_RX_ARES] = { 0x28a24, 13 },
++      [UNIPHY_PORT6_TX_ARES] = { 0x28a24, 12 },
++      [PORT1_RX_ARES] = { 0x28a24, 11 },
++      [PORT1_TX_ARES] = { 0x28a24, 10 },
++      [PORT2_RX_ARES] = { 0x28a24, 9 },
++      [PORT2_TX_ARES] = { 0x28a24, 8 },
++      [PORT3_RX_ARES] = { 0x28a24, 7 },
++      [PORT3_TX_ARES] = { 0x28a24, 6 },
++      [PORT4_RX_ARES] = { 0x28a24, 5 },
++      [PORT4_TX_ARES] = { 0x28a24, 4 },
++      [PORT5_RX_ARES] = { 0x28a24, 3 },
++      [PORT5_TX_ARES] = { 0x28a24, 2 },
++      [PORT6_RX_ARES] = { 0x28a24, 1 },
++      [PORT6_TX_ARES] = { 0x28a24, 0 },
++      [PPE_FULL_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(20, 17) },
++      [UNIPHY0_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(23, 14) },
++      [UNIPHY1_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
++      [UNIPHY2_SOFT_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
++      [UNIPHY_PORT1_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(23, 22) },
++      [UNIPHY_PORT2_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(21, 20) },
++      [UNIPHY_PORT3_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(19, 18) },
++      [UNIPHY_PORT4_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(17, 16) },
++      [UNIPHY_PORT5_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(15, 14) },
++      [UNIPHY_PORT6_ARES] = { .reg = 0x28a24, .bitmask = GENMASK(13, 12) },
++      [NSSPORT1_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(11, 10) },
++      [NSSPORT2_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(9, 8) },
++      [NSSPORT3_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(7, 6) },
++      [NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
++      [NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
++      [NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
++      [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
++};
++
++static const struct regmap_config nss_cc_ipq9574_regmap_config = {
++      .reg_bits = 32,
++      .reg_stride = 4,
++      .val_bits = 32,
++      .max_register = 0x28a34,
++      .fast_io = true,
++};
++
++static struct qcom_icc_hws_data icc_ipq9574_nss_hws[] = {
++      { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
++      { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
++      { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
++      { MASTER_NSSNOC_IMEM_QSB, SLAVE_NSSNOC_IMEM_QSB, NSS_CC_NSSNOC_IMEM_QSB_CLK },
++      { MASTER_NSSNOC_IMEM_AHB, SLAVE_NSSNOC_IMEM_AHB, NSS_CC_NSSNOC_IMEM_AHB_CLK },
++};
++
++#define IPQ_NSSCC_ID  (9574 * 2) /* some unique value */
++
++static const struct qcom_cc_desc nss_cc_ipq9574_desc = {
++      .config = &nss_cc_ipq9574_regmap_config,
++      .clks = nss_cc_ipq9574_clocks,
++      .num_clks = ARRAY_SIZE(nss_cc_ipq9574_clocks),
++      .resets = nss_cc_ipq9574_resets,
++      .num_resets = ARRAY_SIZE(nss_cc_ipq9574_resets),
++      .icc_hws = icc_ipq9574_nss_hws,
++      .num_icc_hws = ARRAY_SIZE(icc_ipq9574_nss_hws),
++      .icc_first_node_id = IPQ_NSSCC_ID,
++};
++
++static const struct of_device_id nss_cc_ipq9574_match_table[] = {
++      { .compatible = "qcom,ipq9574-nsscc" },
++      { }
++};
++MODULE_DEVICE_TABLE(of, nss_cc_ipq9574_match_table);
++
++static int nss_cc_ipq9574_probe(struct platform_device *pdev)
++{
++      struct regmap *regmap;
++
++      regmap = qcom_cc_map(pdev, &nss_cc_ipq9574_desc);
++      if (IS_ERR(regmap))
++              return PTR_ERR(regmap);
++
++      clk_alpha_pll_configure(&ubi32_pll_main, regmap, &ubi32_pll_config);
++
++      return qcom_cc_really_probe(&pdev->dev, &nss_cc_ipq9574_desc, regmap);
++}
++
++static struct platform_driver nss_cc_ipq9574_driver = {
++      .probe = nss_cc_ipq9574_probe,
++      .driver = {
++              .name = "qcom,nsscc-ipq9574",
++              .of_match_table = nss_cc_ipq9574_match_table,
++              .sync_state = icc_sync_state, /* TODO seems to cause hang */
++      },
++};
++
++module_platform_driver(nss_cc_ipq9574_driver);
++
++MODULE_DESCRIPTION("QTI NSS_CC IPQ9574 Driver");
++MODULE_LICENSE("GPL");
+-- 
+2.45.2
+
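The driver above only registers the clocks, resets and interconnect paths; NSS peripherals consume them through the standard clk and reset frameworks. As a rough, hypothetical sketch (the consumer device, its DT wiring and the delay values are illustrative assumptions, not part of this patch), a driver whose node carries clocks = <&nsscc NSS_CC_PPE_SWITCH_CLK> and resets = <&nsscc PPE_FULL_RESET> could do:

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_ppe_consumer_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	struct clk *clk;
	int ret;

	/* First "clocks" entry: enabled here, disabled again on detach. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* First "resets" entry: PPE_FULL_RESET toggles bits 17..20 of
	 * register 0x28a08 (see the reset map above) in one operation.
	 */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;
	usleep_range(100, 200);

	return reset_control_deassert(rst);
}

Note that, besides the per-block ARES entries, the reset map groups related bits into bulk resets (PPE_FULL_RESET, UNIPHYn_SOFT_RESET, NSSPORTn_RESET, EDMA_HW_RESET) so a consumer can reset a whole block with a single reset control.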
diff --git a/target/linux/qualcommbe/patches-6.6/105-06-arm64-dts-qcom-ipq9574-Add-nsscc-node.patch b/target/linux/qualcommbe/patches-6.6/105-06-arm64-dts-qcom-ipq9574-Add-nsscc-node.patch
new file mode 100644 (file)
index 0000000..bf3f724
--- /dev/null
@@ -0,0 +1,56 @@
+From 03cbf5e97bf4cd863aff002cb5e6def43f2034d0 Mon Sep 17 00:00:00 2001
+From: Devi Priya <quic_devipriy@quicinc.com>
+Date: Fri, 25 Oct 2024 09:25:19 +0530
+Subject: [PATCH 6/7] arm64: dts: qcom: ipq9574: Add nsscc node
+
+Add a node for the NSS clock controller found on IPQ9574-based devices.
+
+Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
+Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 08a82a5cf667..c113fff22f73 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -11,6 +11,8 @@
+ #include <dt-bindings/interconnect/qcom,ipq9574.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
++#include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
++#include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
+ #include <dt-bindings/thermal/thermal.h>
+
+ / {
+@@ -756,6 +758,26 @@ frame@b128000 {
+                               status = "disabled";
+                       };
+               };
++
++              nsscc: clock-controller@39b00000 {
++                      compatible = "qcom,ipq9574-nsscc";
++                      reg = <0x39b00000 0x80000>;
++                      clocks = <&xo_board_clk>,
++                               <&cmn_pll NSS_1200MHZ_CLK>,
++                               <&cmn_pll PPE_353MHZ_CLK>,
++                               <&gcc GPLL0_OUT_AUX>,
++                               <0>,
++                               <0>,
++                               <0>,
++                               <0>,
++                               <0>,
++                               <0>,
++                               <&gcc GCC_NSSCC_CLK>;
++                      #clock-cells = <1>;
++                      #reset-cells = <1>;
++                      #power-domain-cells = <1>;
++                      #interconnect-cells = <1>;
++              };
+       };
+
+       thermal-zones {
+-- 
+2.45.2
+
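Since the node sets #interconnect-cells and the driver registers the icc_ipq9574_nss_hws paths, NSS consumers can also vote on the NoC clocks through the interconnect API. A minimal, hypothetical sketch (the consumer device and its DT wiring are illustrative assumptions, not part of this patch):

#include <linux/interconnect.h>
#include <linux/platform_device.h>

static int example_nss_icc_vote(struct platform_device *pdev)
{
	struct icc_path *path;

	/* Resolves a consumer entry such as
	 * interconnects = <&nsscc MASTER_NSSNOC_PPE &nsscc SLAVE_NSSNOC_PPE>;
	 */
	path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* A nonzero vote keeps the backing NSSNOC clock enabled. */
	return icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(1000));
}

Once every consumer has probed, the driver's sync_state hook (currently marked as a TODO above) would normally drop the votes on any path left unused.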
diff --git a/target/linux/qualcommbe/patches-6.6/200-01-arm64-dts-qcom-ipq9574-Add-PCS-UNIPHY-device-tree-su.patch b/target/linux/qualcommbe/patches-6.6/200-01-arm64-dts-qcom-ipq9574-Add-PCS-UNIPHY-device-tree-su.patch
new file mode 100644 (file)
index 0000000..6f4a69a
--- /dev/null
@@ -0,0 +1,152 @@
+From 3105ff9d7111d15b686b8d14e8b4413a5c2a88ce Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Thu, 1 Feb 2024 13:03:14 +0800
+Subject: [PATCH 13/17] arm64: dts: qcom: ipq9574: Add PCS UNIPHY device tree
+ support
+
+The UNIPHY block in the IPQ SoC provides the PCS/XPCS functions that
+interface the Ethernet MAC to external PHYs.
+
+There are three PCS UNIPHY instances in the IPQ9574 SoC. The first
+UNIPHY has four PCS channels, which can connect to QCA8075 quad PHYs
+in QSGMII mode or to QCA8085 PHYs in 10G-QXGMII mode. The second and
+third UNIPHYs each have one PCS channel, which can connect to a single
+10G-capable PHY, such as the Aquantia 113c PHY, in USXGMII mode.
+
+Change-Id: I7832a71b12730d5bd7926a25f4feda371c09b58e
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 110 +++++++++++++++++++++++++-
+ 1 file changed, 109 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 02cf318e3d17..ce3a1b5d70ea 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -3,7 +3,7 @@
+  * IPQ9574 SoC device tree source
+  *
+  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+
+ #include <dt-bindings/clock/qcom,apss-ipq.h>
+@@ -776,6 +776,114 @@ frame@b128000 {
+                       #power-domain-cells = <1>;
+                       #interconnect-cells = <1>;
+               };
++
++              pcsuniphy0: ethernet-uniphy@7a00000 {
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      compatible = "qcom,ipq9574-uniphy";
++                      reg = <0x7a00000 0x10000>;
++                      clocks = <&gcc GCC_UNIPHY0_SYS_CLK>,
++                               <&gcc GCC_UNIPHY0_AHB_CLK>;
++                      clock-names = "sys",
++                                    "ahb";
++                      resets = <&gcc GCC_UNIPHY0_SYS_RESET>,
++                               <&gcc GCC_UNIPHY0_AHB_RESET>,
++                               <&gcc GCC_UNIPHY0_XPCS_RESET>;
++                      reset-names = "sys",
++                                    "ahb",
++                                    "xpcs";
++                      #clock-cells = <1>;
++                      clock-output-names = "uniphy0_nss_rx_clk",
++                                           "uniphy0_nss_tx_clk";
++
++                      pcsuniphy0_ch0: uniphy-ch@0 {
++                              reg = <0>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT1_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT1_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++
++                      pcsuniphy0_ch1: uniphy-ch@1 {
++                              reg = <1>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT2_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT2_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++
++                      pcsuniphy0_ch2: uniphy-ch@2 {
++                              reg = <2>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT3_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT3_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++
++                      pcsuniphy0_ch3: uniphy-ch@3 {
++                              reg = <3>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT4_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT4_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++              };
++
++              pcsuniphy1: ethernet-uniphy@7a10000 {
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      compatible = "qcom,ipq9574-uniphy";
++                      reg = <0x7a10000 0x10000>;
++                      clocks = <&gcc GCC_UNIPHY1_SYS_CLK>,
++                               <&gcc GCC_UNIPHY1_AHB_CLK>;
++                      clock-names = "sys",
++                                    "ahb";
++                      resets = <&gcc GCC_UNIPHY1_SYS_RESET>,
++                               <&gcc GCC_UNIPHY1_AHB_RESET>,
++                               <&gcc GCC_UNIPHY1_XPCS_RESET>;
++                      reset-names = "sys",
++                                    "ahb",
++                                    "xpcs";
++                      #clock-cells = <1>;
++                      clock-output-names = "uniphy1_nss_rx_clk",
++                                           "uniphy1_nss_tx_clk";
++
++                      pcsuniphy1_ch0: uniphy-ch@0 {
++                              reg = <0>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT5_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT5_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++              };
++
++              pcsuniphy2: ethernet-uniphy@7a20000 {
++                      #address-cells = <1>;
++                      #size-cells = <0>;
++                      compatible = "qcom,ipq9574-uniphy";
++                      reg = <0x7a20000 0x10000>;
++                      clocks = <&gcc GCC_UNIPHY2_SYS_CLK>,
++                               <&gcc GCC_UNIPHY2_AHB_CLK>;
++                      clock-names = "sys",
++                                    "ahb";
++                      resets = <&gcc GCC_UNIPHY2_SYS_RESET>,
++                               <&gcc GCC_UNIPHY2_AHB_RESET>,
++                               <&gcc GCC_UNIPHY2_XPCS_RESET>;
++                      reset-names = "sys",
++                                    "ahb",
++                                    "xpcs";
++                      #clock-cells = <1>;
++                      clock-output-names = "uniphy2_nss_rx_clk",
++                                           "uniphy2_nss_tx_clk";
++
++                      pcsuniphy2_ch0: uniphy-ch@0 {
++                              reg = <0>;
++                              clocks = <&nsscc NSS_CC_UNIPHY_PORT6_RX_CLK>,
++                                       <&nsscc NSS_CC_UNIPHY_PORT6_TX_CLK>;
++                              clock-names = "ch_rx",
++                                            "ch_tx";
++                      };
++              };
+       };
+       thermal-zones {
+-- 
+2.45.2
+
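The per-channel nodes above (pcsuniphy0_ch0 and friends) are the
binding points for MAC ports in board files. As a hedged sketch of the
pattern, which the RDP433 patch further below applies to all six ports,
a port node references one PCS channel through pcs-handle:

    /* Sketch only: binding a MAC port to PCS UNIPHY0 channel 0. */
    port@1 {
            reg = <1>;
            phy-mode = "qsgmii";
            managed = "in-band-status";
            pcs-handle = <&pcsuniphy0_ch0>;
    };
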
diff --git a/target/linux/qualcommbe/patches-6.6/200-02-arm64-dts-qcom-Add-IPQ9574-MDIO-device-node.patch b/target/linux/qualcommbe/patches-6.6/200-02-arm64-dts-qcom-Add-IPQ9574-MDIO-device-node.patch
new file mode 100644 (file)
index 0000000..01ed26d
--- /dev/null
@@ -0,0 +1,54 @@
+From 3e98aaf9e5c6b2206edce3309beb1adeb2b61b60 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Thu, 23 Nov 2023 15:41:20 +0800
+Subject: [PATCH 15/17] arm64: dts: qcom: Add IPQ9574 MDIO device node
+
+The MDIO bus master block is used to access MDIO slave devices
+(such as PHY devices); the dedicated MDIO pins need to be
+configured for it.
+
+Change-Id: Ia64083529e693256dbd8f8af4071c02afdded8f9
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 07a96d26b359..ef82935e7ef5 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -256,6 +256,8 @@ rng: rng@e3000 {
+               mdio: mdio@90000 {
+                       compatible =  "qcom,ipq9574-mdio", "qcom,ipq4019-mdio";
+                       reg = <0x00090000 0x64>;
++                      pinctrl-0 = <&mdio_pins>;
++                      pinctrl-names = "default";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       clocks = <&gcc GCC_MDIO_AHB_CLK>;
+@@ -315,6 +317,22 @@ tlmm: pinctrl@1000000 {
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
++                      mdio_pins: mdio-pins {
++                              mdc-state {
++                                      pins = "gpio38";
++                                      function = "mdc";
++                                      drive-strength = <8>;
++                                      bias-disable;
++                              };
++
++                              mdio-state {
++                                      pins = "gpio39";
++                                      function = "mdio";
++                                      drive-strength = <8>;
++                                      bias-pull-up;
++                              };
++                      };
++
+                       uart2_pins: uart2-state {
+                               pins = "gpio34", "gpio35";
+                               function = "blsp2_uart";
+-- 
+2.45.2
+
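The two hunks above tie together through the standard pinctrl binding:
the mdio_pins state defined under the tlmm node is selected as the MDIO
controller's "default" state, so the MDC/MDIO pin mux is applied
automatically when the bus probes. The pattern, in short:

    /* Sketch only: the pinctrl consumer pattern used above. */
    mdio: mdio@90000 {
            pinctrl-0 = <&mdio_pins>;  /* phandle to the state node under tlmm */
            pinctrl-names = "default"; /* state applied by the driver core at probe */
    };
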
diff --git a/target/linux/qualcommbe/patches-6.6/200-03-arm64-dts-qcom-Add-IPQ9574-PPE-base-device-node.patch b/target/linux/qualcommbe/patches-6.6/200-03-arm64-dts-qcom-Add-IPQ9574-PPE-base-device-node.patch
new file mode 100644 (file)
index 0000000..49c1ef3
--- /dev/null
@@ -0,0 +1,67 @@
+From ff847b8692e877e660b64ff2de4f26c6f7ce932e Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Fri, 1 Mar 2024 14:46:45 +0800
+Subject: [PATCH 14/17] arm64: dts: qcom: Add IPQ9574 PPE base device node
+
+PPE is the packet processing engine on the Qualcomm IPQ platform.
+It is connected to the external switch or PHY device via the
+UNIPHY (PCS).
+
+Change-Id: I254bd48c218aa4eab54f697a2ad149f5a93b682c
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 38 +++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index ce3a1b5d70ea..07a96d26b359 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -884,6 +884,44 @@ pcsuniphy2_ch0: uniphy-ch@0 {
+                                             "ch_tx";
+                       };
+               };
++
++              ethernet@3a000000 {
++                      compatible = "qcom,ipq9574-ppe";
++                      reg = <0x3a000000 0xbef800>;
++                      ranges;
++                      #address-cells = <1>;
++                      #size-cells = <1>;
++                      clocks = <&nsscc NSS_CC_PPE_SWITCH_CLK>,
++                               <&nsscc NSS_CC_PPE_SWITCH_CFG_CLK>,
++                               <&nsscc NSS_CC_PPE_SWITCH_IPE_CLK>,
++                               <&nsscc NSS_CC_PPE_SWITCH_BTQ_CLK>;
++                      clock-names = "ppe",
++                                    "ppe_cfg",
++                                    "ppe_ipe",
++                                    "ppe_btq";
++                      resets = <&nsscc PPE_FULL_RESET>;
++                      interconnects = <&nsscc MASTER_NSSNOC_PPE
++                                       &nsscc SLAVE_NSSNOC_PPE>,
++                                      <&nsscc MASTER_NSSNOC_PPE_CFG
++                                       &nsscc SLAVE_NSSNOC_PPE_CFG>,
++                                      <&gcc MASTER_NSSNOC_QOSGEN_REF
++                                       &gcc SLAVE_NSSNOC_QOSGEN_REF>,
++                                      <&gcc MASTER_NSSNOC_TIMEOUT_REF
++                                       &gcc SLAVE_NSSNOC_TIMEOUT_REF>,
++                                      <&gcc MASTER_MEM_NOC_NSSNOC
++                                       &gcc SLAVE_MEM_NOC_NSSNOC>,
++                                      <&gcc MASTER_NSSNOC_MEMNOC
++                                       &gcc SLAVE_NSSNOC_MEMNOC>,
++                                      <&gcc MASTER_NSSNOC_MEM_NOC_1
++                                       &gcc SLAVE_NSSNOC_MEM_NOC_1>;
++                      interconnect-names = "ppe",
++                                           "ppe_cfg",
++                                           "qos_gen",
++                                           "timeout_ref",
++                                           "nssnoc_memnoc",
++                                           "memnoc_nssnoc",
++                                           "memnoc_nssnoc_1";
++              };
+       };
+       thermal-zones {
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/200-04-arm64-dts-qcom-Add-EDMA-node-for-IPQ9574.patch b/target/linux/qualcommbe/patches-6.6/200-04-arm64-dts-qcom-Add-EDMA-node-for-IPQ9574.patch
new file mode 100644 (file)
index 0000000..61f7d37
--- /dev/null
@@ -0,0 +1,96 @@
+From 14caaa7a23404cfee65a0d74b61d7998f762c70f Mon Sep 17 00:00:00 2001
+From: Pavithra R <quic_pavir@quicinc.com>
+Date: Wed, 6 Mar 2024 22:29:41 +0530
+Subject: [PATCH 16/17] arm64: dts: qcom: Add EDMA node for IPQ9574
+
+Add the EDMA (Ethernet DMA) device tree node for IPQ9574 to
+enable Ethernet support.
+
+Change-Id: I87d7c50f2485c8670948dce305000337f6499f8b
+Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 68 +++++++++++++++++++++++++++
+ 1 file changed, 68 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index ef82935e7ef5..61f9a7ee7282 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -939,6 +939,74 @@ &gcc SLAVE_NSSNOC_MEMNOC>,
+                                            "nssnoc_memnoc",
+                                            "memnoc_nssnoc",
+                                            "memnoc_nssnoc_1";
++
++                      edma {
++                              compatible = "qcom,ipq9574-edma";
++                              clocks = <&nsscc NSS_CC_PPE_EDMA_CLK>,
++                                       <&nsscc NSS_CC_PPE_EDMA_CFG_CLK>;
++                              clock-names = "edma",
++                                            "edma-cfg";
++                              resets = <&nsscc EDMA_HW_RESET>;
++                              reset-names = "edma_rst";
++                              interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 507 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 503 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 502 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
++                                           <GIC_SPI 499 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupt-names = "edma_txcmpl_8",
++                                                "edma_txcmpl_9",
++                                                "edma_txcmpl_10",
++                                                "edma_txcmpl_11",
++                                                "edma_txcmpl_12",
++                                                "edma_txcmpl_13",
++                                                "edma_txcmpl_14",
++                                                "edma_txcmpl_15",
++                                                "edma_txcmpl_16",
++                                                "edma_txcmpl_17",
++                                                "edma_txcmpl_18",
++                                                "edma_txcmpl_19",
++                                                "edma_txcmpl_20",
++                                                "edma_txcmpl_21",
++                                                "edma_txcmpl_22",
++                                                "edma_txcmpl_23",
++                                                "edma_txcmpl_24",
++                                                "edma_txcmpl_25",
++                                                "edma_txcmpl_26",
++                                                "edma_txcmpl_27",
++                                                "edma_txcmpl_28",
++                                                "edma_txcmpl_29",
++                                                "edma_txcmpl_30",
++                                                "edma_txcmpl_31",
++                                                "edma_rxdesc_20",
++                                                "edma_rxdesc_21",
++                                                "edma_rxdesc_22",
++                                                "edma_rxdesc_23",
++                                                "edma_misc";
++                      };
+               };
+       };
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/200-05-arm64-dts-qcom-Add-IPQ9574-RDP433-port-node.patch b/target/linux/qualcommbe/patches-6.6/200-05-arm64-dts-qcom-Add-IPQ9574-RDP433-port-node.patch
new file mode 100644 (file)
index 0000000..73c0c12
--- /dev/null
@@ -0,0 +1,231 @@
+From 9f3d547ccaf1113244f9aeb1a849e553321869ea Mon Sep 17 00:00:00 2001
+From: Lei Wei <quic_leiwei@quicinc.com>
+Date: Tue, 14 May 2024 10:53:27 +0800
+Subject: [PATCH 17/17] arm64: dts: qcom: Add IPQ9574 RDP433 port node
+
+There are six PPE MAC ports available on RDP433. Ports 1-4 are
+connected to QCA8075 quad PHYs through UNIPHY0 PCS channels 0-3.
+Port 5 is connected to an Aquantia PHY through UNIPHY1 PCS channel 0,
+and port 6 is connected to an Aquantia PHY through UNIPHY2 PCS
+channel 0.
+
+Change-Id: Ic16efdef2fe2cff7b1e80245619c0f82afb24cb9
+Signed-off-by: Lei Wei <quic_leiwei@quicinc.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 169 +++++++++++++++++++-
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi       |   2 +-
+ 2 files changed, 169 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+index 1bb8d96c9a82..1bbe4c258c15 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+@@ -3,11 +3,13 @@
+  * IPQ9574 RDP433 board device tree source
+  *
+  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+  */
+ /dts-v1/;
+
++#include <dt-bindings/gpio/gpio.h>
++
+ #include "ipq9574-rdp-common.dtsi"
+
+ / {
+@@ -15,6 +15,46 @@ / {
+       compatible = "qcom,ipq9574-ap-al02-c7", "qcom,ipq9574";
+ };
++&mdio {
++      reset-gpios = <&tlmm 60 GPIO_ACTIVE_LOW>;
++      clock-frequency = <6250000>;
++      status = "okay";
++
++      ethernet-phy-package@0 {
++              compatible = "qcom,qca8075-package";
++              #address-cells = <1>;
++              #size-cells = <0>;
++              reg = <0x10>;
++              qcom,package-mode = "qsgmii";
++
++              phy0: ethernet-phy@10 {
++                      reg = <0x10>;
++              };
++
++              phy1: ethernet-phy@11 {
++                      reg = <0x11>;
++              };
++
++              phy2: ethernet-phy@12 {
++                      reg = <0x12>;
++              };
++
++              phy3: ethernet-phy@13 {
++                      reg = <0x13>;
++              };
++      };
++
++      phy4: ethernet-phy@8 {
++                      compatible = "ethernet-phy-ieee802.3-c45";
++              reg = <8>;
++      };
++
++      phy5: ethernet-phy@0 {
++                      compatible = "ethernet-phy-ieee802.3-c45";
++              reg = <0>;
++      };
++};
++
+ &sdhc_1 {
+       pinctrl-0 = <&sdc_default_state>;
+       pinctrl-names = "default";
+@@ -60,3 +100,130 @@ rclk-pins {
+               };
+       };
+ };
++
++&qcom_ppe {
++      ethernet-ports {
++              #address-cells = <1>;
++              #size-cells = <0>;
++
++              port@1 {
++                      reg = <1>;
++                      phy-mode = "qsgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy0>;
++                      pcs-handle = <&pcsuniphy0_ch0>;
++                      clocks = <&nsscc NSS_CC_PORT1_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT1_RX_CLK>,
++                               <&nsscc NSS_CC_PORT1_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT1_MAC_ARES>,
++                               <&nsscc PORT1_RX_ARES>,
++                               <&nsscc PORT1_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++
++              port@2 {
++                      reg = <2>;
++                      phy-mode = "qsgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy1>;
++                      pcs-handle = <&pcsuniphy0_ch1>;
++                      clocks = <&nsscc NSS_CC_PORT2_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT2_RX_CLK>,
++                               <&nsscc NSS_CC_PORT2_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT2_MAC_ARES>,
++                               <&nsscc PORT2_RX_ARES>,
++                               <&nsscc PORT2_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++
++              port@3 {
++                      reg = <3>;
++                      phy-mode = "qsgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy2>;
++                      pcs-handle = <&pcsuniphy0_ch2>;
++                      clocks = <&nsscc NSS_CC_PORT3_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT3_RX_CLK>,
++                               <&nsscc NSS_CC_PORT3_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT3_MAC_ARES>,
++                               <&nsscc PORT3_RX_ARES>,
++                               <&nsscc PORT3_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++
++              port@4 {
++                      reg = <4>;
++                      phy-mode = "qsgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy3>;
++                      pcs-handle = <&pcsuniphy0_ch3>;
++                      clocks = <&nsscc NSS_CC_PORT4_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT4_RX_CLK>,
++                               <&nsscc NSS_CC_PORT4_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT4_MAC_ARES>,
++                               <&nsscc PORT4_RX_ARES>,
++                               <&nsscc PORT4_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++
++              port@5 {
++                      reg = <5>;
++                      phy-mode = "usxgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy4>;
++                      pcs-handle = <&pcsuniphy1_ch0>;
++                      clocks = <&nsscc NSS_CC_PORT5_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT5_RX_CLK>,
++                               <&nsscc NSS_CC_PORT5_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT5_MAC_ARES>,
++                               <&nsscc PORT5_RX_ARES>,
++                               <&nsscc PORT5_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++
++              port@6 {
++                      reg = <6>;
++                      phy-mode = "usxgmii";
++                      managed = "in-band-status";
++                      phy-handle = <&phy5>;
++                      pcs-handle = <&pcsuniphy2_ch0>;
++                      clocks = <&nsscc NSS_CC_PORT6_MAC_CLK>,
++                               <&nsscc NSS_CC_PORT6_RX_CLK>,
++                               <&nsscc NSS_CC_PORT6_TX_CLK>;
++                      clock-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++                      resets = <&nsscc PORT6_MAC_ARES>,
++                               <&nsscc PORT6_RX_ARES>,
++                               <&nsscc PORT6_TX_ARES>;
++                      reset-names = "port_mac",
++                                    "port_rx",
++                                    "port_tx";
++              };
++      };
++};
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 61f9a7ee7282..5c7b47979b79 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -903,7 +903,7 @@ pcsuniphy2_ch0: uniphy-ch@0 {
+                       };
+               };
+-              ethernet@3a000000 {
++              qcom_ppe: ethernet@3a000000 {
+                       compatible = "qcom,ipq9574-ppe";
+                       reg = <0x3a000000 0xbef800>;
+                       ranges;
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/300-dt-bindings-clock-Add-clock-ID-for-IPQ-PCS-UNIPHY.patch b/target/linux/qualcommbe/patches-6.6/300-dt-bindings-clock-Add-clock-ID-for-IPQ-PCS-UNIPHY.patch
new file mode 100644 (file)
index 0000000..c33bee9
--- /dev/null
@@ -0,0 +1,37 @@
+From 82a6de914d0d6ff82333a4b65c81f71335e1f9d0 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 4 Dec 2024 01:31:46 +0100
+Subject: [PATCH] dt-bindings: clock: Add clock ID for IPQ PCS UNIPHY
+
+IPQ9574 exposes 3 PCS UNIPHYs that all expose the same clock IDs for
+the RX and TX clocks. These IDs are needed to correctly parent the EDMA
+port clocks and scale them to the frequency required by the attached PHY.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+ create mode 100644 include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h
+
+diff --git a/include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h b/include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h
+new file mode 100644
+index 000000000000..c47d7191cda5
+--- /dev/null
++++ b/include/dt-bindings/clock/qcom,ipq-pcs-uniphy.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++
++#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_PCS_UNIPHY_H
++#define _DT_BINDINGS_CLK_QCOM_IPQ_PCS_UNIPHY_H
++
++/*
++ * IPQ9574 exposes 3 PCS UNIPHYs that all expose
++ * the same clock IDs
++ */
++#define UNIPHY_NSS_RX_CLK             0
++#define UNIPHY_NSS_TX_CLK             1
++
++#endif
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/301-net-ethernet-qualcomm-Add-support-for-label-property.patch b/target/linux/qualcommbe/patches-6.6/301-net-ethernet-qualcomm-Add-support-for-label-property.patch
new file mode 100644 (file)
index 0000000..2fb1260
--- /dev/null
@@ -0,0 +1,53 @@
+From cb72c5119463897df2ba4a007b490e6251a15f75 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 4 Dec 2024 01:37:05 +0100
+Subject: [PATCH] net: ethernet: qualcomm: Add support for label property for
+ EDMA port
+
+Add support for the label property for EDMA ports. This is useful to
+define a custom name in DTS for a specific Ethernet port instead of
+assigning a dynamic name at runtime.
+
+This also improves the log output by using modern APIs.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ppe/edma_port.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_port.c b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+index 0b3b769a4a49..6730cee5d6c9 100644
+--- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
++++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
+@@ -355,13 +355,25 @@ int edma_port_setup(struct ppe_port *port)
+       int port_id = port->port_id;
+       struct net_device *netdev;
+       u8 mac_addr[ETH_ALEN];
++      const char *name;
++      int assign_type;
+       int ret = 0;
+       u8 *maddr;
+-      netdev = alloc_etherdev_mqs(sizeof(struct edma_port_priv),
+-                                  EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
++      name = of_get_property(np, "label", NULL);
++      if (name) {
++              assign_type = NET_NAME_PREDICTABLE;
++      } else {
++              name = "eth%d";
++              assign_type = NET_NAME_ENUM;
++      }
++
++      netdev = alloc_netdev_mqs(sizeof(struct edma_port_priv),
++                                name, assign_type,
++                                ether_setup,
++                                EDMA_NETDEV_QUEUE_NUM, EDMA_NETDEV_QUEUE_NUM);
+       if (!netdev) {
+-              pr_err("alloc_etherdev() failed\n");
++              dev_err(ppe_dev->dev, "alloc_netdev_mqs() failed\n");
+               return -ENOMEM;
+       }
+-- 
+2.45.2
+
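With this change the netdev name follows the DTS: a port carrying a
label gets that exact name (NET_NAME_PREDICTABLE), while ports without
one fall back to the enumerated eth%d scheme (NET_NAME_ENUM). A minimal
sketch, which the RDP433 patch below applies to all six ports:

    /* Sketch only: a labelled EDMA port gets a predictable netdev name. */
    port@1 {
            reg = <1>;
            label = "lan1"; /* netdev is named lan1 instead of ethN */
    };
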
diff --git a/target/linux/qualcommbe/patches-6.6/302-arm64-dts-qcom-Add-missing-clock-for-nsscc-from-pcs-.patch b/target/linux/qualcommbe/patches-6.6/302-arm64-dts-qcom-Add-missing-clock-for-nsscc-from-pcs-.patch
new file mode 100644 (file)
index 0000000..34247e9
--- /dev/null
@@ -0,0 +1,47 @@
+From a471ccfd5b2c7810506aac71d4eb3616a3fb18f9 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 4 Dec 2024 01:43:20 +0100
+Subject: [PATCH] arm64: dts: qcom: Add missing clock for nsscc from pcs uniphy
+
+Add the missing clocks for nsscc from the PCS UNIPHYs, so that the
+frequency of each clock can be scaled based on the attached PHY.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 5aa456bc0e03..2785e1ba1ca9 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -14,6 +14,7 @@
+ #include <dt-bindings/reset/qcom,ipq9574-gcc.h>
+ #include <dt-bindings/clock/qcom,ipq9574-nsscc.h>
+ #include <dt-bindings/reset/qcom,ipq9574-nsscc.h>
++#include <dt-bindings/clock/qcom,ipq-pcs-uniphy.h>
+ #include <dt-bindings/thermal/thermal.h>
+ / {
+@@ -809,12 +810,12 @@ nsscc: clock-controller@39b00000 {
+                                <&cmn_pll NSS_1200MHZ_CLK>,
+                                <&cmn_pll PPE_353MHZ_CLK>,
+                                <&gcc GPLL0_OUT_AUX>,
+-                               <0>,
+-                               <0>,
+-                               <0>,
+-                               <0>,
+-                               <0>,
+-                               <0>,
++                               <&pcsuniphy0 UNIPHY_NSS_RX_CLK>,
++                               <&pcsuniphy0 UNIPHY_NSS_TX_CLK>,
++                               <&pcsuniphy1 UNIPHY_NSS_RX_CLK>,
++                               <&pcsuniphy1 UNIPHY_NSS_TX_CLK>,
++                               <&pcsuniphy2 UNIPHY_NSS_RX_CLK>,
++                               <&pcsuniphy2 UNIPHY_NSS_TX_CLK>,
+                                <&gcc GCC_NSSCC_CLK>;
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+-- 
+2.45.2
+
diff --git a/target/linux/qualcommbe/patches-6.6/900-arm64-dts-qcom-Add-label-to-EDMA-port-for-IPQ9574-RD.patch b/target/linux/qualcommbe/patches-6.6/900-arm64-dts-qcom-Add-label-to-EDMA-port-for-IPQ9574-RD.patch
new file mode 100644 (file)
index 0000000..c5815a4
--- /dev/null
@@ -0,0 +1,67 @@
+From a6118ebc98ec0081064ccc01d0d23f029a504d71 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Wed, 4 Dec 2024 01:49:09 +0100
+Subject: [PATCH] arm64: dts: qcom: Add label to EDMA port for IPQ9574 RDP433
+
+Add labels to the EDMA ports of the IPQ9574 RDP433 board.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+index f835ff9f4b28..d56abe92c24e 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp433.dts
+@@ -112,6 +112,7 @@ port@1 {
+                       reg = <1>;
+                       phy-mode = "qsgmii";
+                       managed = "in-band-status";
++                      label = "lan1";
+                       phy-handle = <&phy0>;
+                       pcs-handle = <&pcsuniphy0_ch0>;
+                       clocks = <&nsscc NSS_CC_PORT1_MAC_CLK>,
+@@ -132,6 +133,7 @@ port@2 {
+                       reg = <2>;
+                       phy-mode = "qsgmii";
+                       managed = "in-band-status";
++                      label = "lan2";
+                       phy-handle = <&phy1>;
+                       pcs-handle = <&pcsuniphy0_ch1>;
+                       clocks = <&nsscc NSS_CC_PORT2_MAC_CLK>,
+@@ -152,6 +154,7 @@ port@3 {
+                       reg = <3>;
+                       phy-mode = "qsgmii";
+                       managed = "in-band-status";
++                      label = "lan3";
+                       phy-handle = <&phy2>;
+                       pcs-handle = <&pcsuniphy0_ch2>;
+                       clocks = <&nsscc NSS_CC_PORT3_MAC_CLK>,
+@@ -172,6 +175,7 @@ port@4 {
+                       reg = <4>;
+                       phy-mode = "qsgmii";
+                       managed = "in-band-status";
++                      label = "lan4";
+                       phy-handle = <&phy3>;
+                       pcs-handle = <&pcsuniphy0_ch3>;
+                       clocks = <&nsscc NSS_CC_PORT4_MAC_CLK>,
+@@ -192,6 +196,7 @@ port@5 {
+                       reg = <5>;
+                       phy-mode = "usxgmii";
+                       managed = "in-band-status";
++                      label = "lan5";
+                       phy-handle = <&phy4>;
+                       pcs-handle = <&pcsuniphy1_ch0>;
+                       clocks = <&nsscc NSS_CC_PORT5_MAC_CLK>,
+@@ -212,6 +217,7 @@ port@6 {
+                       reg = <6>;
+                       phy-mode = "usxgmii";
+                       managed = "in-band-status";
++                      label = "wan";
+                       phy-handle = <&phy5>;
+                       pcs-handle = <&pcsuniphy2_ch0>;
+                       clocks = <&nsscc NSS_CC_PORT6_MAC_CLK>,
+-- 
+2.45.2
+