All patches of LSDK 19.03 were ported to the OpenWrt kernel.
We still use an all-in-one patch for each IP/feature for
OpenWrt.
Below are the changes this patch introduced.
- Updated original IP/feature patches to LSDK 19.03.
- Added new IP/feature patches for eTSEC/PTP/TMU.
- Squashed scattered patches into IP/feature patches.
- Updated config-4.14 correspondingly.
- Refreshed all patches.
More info about LSDK and the kernel:
- https://lsdk.github.io/components.html
- https://source.codeaurora.org/external/qoriq/qoriq-components/linux
Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
CONFIG_ARM_UNWIND=y
CONFIG_ARM_VIRT_EXT=y
CONFIG_AT803X_PHY=y
+# CONFIG_AT803X_PHY_SMART_EEE is not set
CONFIG_ATAGS=y
CONFIG_AUTOFS4_FS=y
CONFIG_AUTO_ZRELADDR=y
# CONFIG_FSL_PPFE is not set
CONFIG_FSL_PQ_MDIO=y
# CONFIG_FSL_QDMA is not set
+# CONFIG_FSL_QIXIS is not set
# CONFIG_FSL_SDK_DPA is not set
CONFIG_FSL_XGMAC_MDIO=y
CONFIG_FS_MBCACHE=y
CONFIG_GENERIC_IO=y
CONFIG_GENERIC_IRQ_CHIP=y
CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_GENERIC_MSI_IRQ=y
CONFIG_IMX_SDMA=y
# CONFIG_IMX_WEIM is not set
CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_INPHI_PHY is not set
CONFIG_INPUT=y
# CONFIG_INPUT_MISC is not set
CONFIG_IOMMU_HELPER=y
CONFIG_MCPM=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_BUS=y
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
CONFIG_MDIO_DEVICE=y
# CONFIG_MDIO_FSL_BACKPLANE is not set
# CONFIG_MDIO_GPIO is not set
CONFIG_PCIEPORTBUS=y
CONFIG_PCIE_DW=y
CONFIG_PCIE_DW_HOST=y
+# CONFIG_PCIE_DW_PLAT_HOST is not set
CONFIG_PCIE_PME=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_DOMAINS_GENERIC=y
CONFIG_PSTORE_RAM=y
CONFIG_PSTORE_ZLIB_COMPRESS=y
CONFIG_PTP_1588_CLOCK=y
-CONFIG_PTP_1588_CLOCK_GIANFAR=y
+CONFIG_PTP_1588_CLOCK_QORIQ=y
CONFIG_QORIQ_CPUFREQ=y
# CONFIG_QUICC_ENGINE is not set
CONFIG_RAS=y
# CONFIG_SPI_FSL_QUADSPI is not set
# CONFIG_SPI_IMX is not set
CONFIG_SPI_MASTER=y
+# CONFIG_SPI_NXP_FLEXSPI is not set
CONFIG_SPI_SPIDEV=y
CONFIG_SPI_XILINX=y
CONFIG_SPMI=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_BOUNCE=y
-CONFIG_BPF_JIT=y
# CONFIG_BPF_SYSCALL is not set
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_CRYPTO_CRC32C=y
CONFIG_CRYPTO_CRCT10DIF=y
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC is not set
# CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM is not set
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_FSL_DPAA2_EVB=y
CONFIG_FSL_DPAA2_MAC=y
# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set
+CONFIG_FSL_DPAA2_PTP_CLOCK=y
# CONFIG_FSL_DPAA2_QDMA is not set
# CONFIG_FSL_DPAA_1588 is not set
CONFIG_FSL_DPAA_ADVANCED_DRIVERS=y
# CONFIG_FSL_DPAA_ETH_JUMBO_FRAME is not set
CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT=128
CONFIG_FSL_DPAA_ETH_REFILL_THRESHOLD=80
-CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE=y
# CONFIG_FSL_DPAA_HOOKS is not set
CONFIG_FSL_DPAA_INGRESS_CS_THRESHOLD=0x10000000
CONFIG_FSL_DPAA_OFFLINE_PORTS=y
CONFIG_FSL_PPFE_UTIL_DISABLED=y
# CONFIG_FSL_QBMAN_DEBUG is not set
# CONFIG_FSL_QDMA is not set
+CONFIG_FSL_QIXIS=y
CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2
CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2
CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4
# CONFIG_INET_RAW_DIAG is not set
CONFIG_INET_TCP_DIAG=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INPHI_PHY=y
CONFIG_INPUT=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYBOARD=y
CONFIG_MDIO_BUS=y
CONFIG_MDIO_BUS_MUX=y
CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
CONFIG_MDIO_DEVICE=y
# CONFIG_MDIO_FSL_BACKPLANE is not set
# CONFIG_MDIO_GPIO is not set
CONFIG_MEMORY_BALLOON=y
CONFIG_MEMORY_ISOLATION=y
CONFIG_MEMTEST=y
+CONFIG_MFD_CORE=y
CONFIG_MFD_SYSCON=y
# CONFIG_MFD_VEXPRESS_SYSREG is not set
CONFIG_MICREL_PHY=y
# CONFIG_MTD_UBI_FASTMAP is not set
# CONFIG_MTD_UBI_GLUEBI is not set
CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MULTIPLEXER=y
CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_MUX_ADG792A is not set
+# CONFIG_MUX_GPIO is not set
+CONFIG_MUX_MMIO=y
CONFIG_MV_XOR_V2=y
CONFIG_NAMESPACES=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIE_DW=y
CONFIG_PCIE_DW_HOST=y
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+CONFIG_PCIE_MOBIVEIL=y
+CONFIG_PCIE_MOBIVEIL_HOST=y
CONFIG_PCIE_PME=y
CONFIG_PCI_ATS=y
CONFIG_PCI_BUS_ADDR_T_64BIT=y
CONFIG_PCI_IOV=y
CONFIG_PCI_LABEL=y
CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_LAYERSCAPE_GEN4=y
CONFIG_PCI_MSI=y
CONFIG_PCI_MSI_IRQ_DOMAIN=y
CONFIG_PERF_EVENTS=y
CONFIG_PROC_CHILDREN=y
CONFIG_PROFILING=y
CONFIG_PTP_1588_CLOCK=y
-# CONFIG_PTP_1588_CLOCK_DPAA is not set
-CONFIG_PTP_1588_CLOCK_DPAA2=y
+CONFIG_PTP_1588_CLOCK_QORIQ=y
CONFIG_QCOM_HIDMA=y
CONFIG_QCOM_HIDMA_MGMT=y
CONFIG_QCOM_QDF2400_ERRATUM_0065=y
CONFIG_SPI_FSL_DSPI=y
CONFIG_SPI_FSL_QUADSPI=y
CONFIG_SPI_MASTER=y
+CONFIG_SPI_NXP_FLEXSPI=y
CONFIG_SPI_PL022=y
CONFIG_SPMI=y
# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
-From 0bafdb711c1a61fbe5bb5b4d4bb5e32425d95a72 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 16 Nov 2018 15:36:03 +0800
+From 2a1351617985ef47581de825ae1bbf1d42bf3200 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 6 May 2019 17:29:32 +0800
Subject: [PATCH] config: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Signed-off-by: Alison Wang <alison.wang@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
+Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
Signed-off-by: Pankit Garg <pankit.garg@nxp.com>
Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
Signed-off-by: Shengzhou Liu <Shengzhou.Liu@nxp.com>
+Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
+ drivers/Makefile | 2 ++
drivers/irqchip/Makefile | 1 +
- drivers/net/ethernet/freescale/Kconfig | 14 ++++----
+ drivers/net/ethernet/freescale/Kconfig | 14 +++++----
drivers/net/ethernet/freescale/Makefile | 3 ++
- drivers/ptp/Kconfig | 29 +++++++++++++++++
+ drivers/ptp/Kconfig | 16 +++++-----
drivers/soc/Kconfig | 1 +
- drivers/soc/fsl/Kconfig | 11 +++++++
- drivers/soc/fsl/Kconfig.arm | 16 +++++++++
- drivers/soc/fsl/Makefile | 3 ++
+ drivers/soc/fsl/Kconfig | 22 +++++++++++++
+ drivers/soc/fsl/Kconfig.arm | 16 ++++++++++
+ drivers/soc/fsl/Makefile | 5 +++
drivers/soc/fsl/layerscape/Kconfig | 10 ++++++
drivers/soc/fsl/layerscape/Makefile | 1 +
drivers/staging/Kconfig | 4 +++
drivers/staging/Makefile | 2 ++
- drivers/staging/fsl-dpaa2/Kconfig | 43 ++++++++++++++++++++++++-
+ drivers/staging/fsl-dpaa2/Kconfig | 56 ++++++++++++++++++++++++++++++++-
drivers/staging/fsl-dpaa2/Makefile | 4 +++
- 14 files changed, 135 insertions(+), 7 deletions(-)
+ 15 files changed, 142 insertions(+), 15 deletions(-)
create mode 100644 drivers/soc/fsl/Kconfig.arm
create mode 100644 drivers/soc/fsl/layerscape/Kconfig
create mode 100644 drivers/soc/fsl/layerscape/Makefile
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -20,6 +20,8 @@ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
+ # PCI dwc controller drivers
+ obj-y += pci/dwc/
++# PCI mobiveil controller drivers
++obj-y += pci/mobiveil/
+
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -80,3 +80,4 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
-@@ -55,6 +55,35 @@ config PTP_1588_CLOCK_GIANFAR
+@@ -41,19 +41,19 @@ config PTP_1588_CLOCK_DTE
To compile this driver as a module, choose M here: the module
- will be called gianfar_ptp.
+ will be called ptp_dte.
+
+-config PTP_1588_CLOCK_GIANFAR
+- tristate "Freescale eTSEC as PTP clock"
+- depends on GIANFAR
++config PTP_1588_CLOCK_QORIQ
++ tristate "Freescale QorIQ 1588 timer as PTP clock"
++ depends on GIANFAR || FSL_DPAA_ETH || FSL_SDK_DPAA_ETH
+ depends on PTP_1588_CLOCK
+ default y
+ help
+- This driver adds support for using the eTSEC as a PTP
+- clock. This clock is only useful if your PTP programs are
+- getting hardware time stamps on the PTP Ethernet packets
+- using the SO_TIMESTAMPING API.
++ This driver adds support for using the Freescale QorIQ 1588
++ timer as a PTP clock. This clock is only useful if your PTP
++ programs are getting hardware time stamps on the PTP Ethernet
++ packets using the SO_TIMESTAMPING API.
+
+ To compile this driver as a module, choose M here: the module
+- will be called gianfar_ptp.
++ will be called ptp_qoriq.
-+config PTP_1588_CLOCK_DPAA
-+ tristate "Freescale DPAA as PTP clock"
-+ depends on FSL_SDK_DPAA_ETH
-+ select PTP_1588_CLOCK
-+ select FSL_DPAA_TS
-+ default n
-+ help
-+ This driver adds support for using the DPAA 1588 timer module
-+ as a PTP clock. This clock is only useful if your PTP programs are
-+ getting hardware time stamps on the PTP Ethernet packets
-+ using the SO_TIMESTAMPING API.
-+
-+ To compile this driver as a module, choose M here: the module
-+ will be called dpaa_ptp.
-+
-+config PTP_1588_CLOCK_DPAA2
-+ tristate "Freescale DPAA2 as PTP clock"
-+ depends on FSL_DPAA2_ETH
-+ select PTP_1588_CLOCK
-+ default y
-+ help
-+ This driver adds support for using the DPAA2 1588 timer module
-+ as a PTP clock. This clock is only useful if your PTP programs are
-+ getting hardware time stamps on the PTP Ethernet packets
-+ using the SO_TIMESTAMPING API.
-+
-+ To compile this driver as a module, choose M here: the module
-+ will be called dpaa2-rtc.
-+
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
- depends on IXP4XX_ETH
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -5,6 +5,7 @@ source "drivers/soc/amlogic/Kconfig"
source "drivers/soc/qcom/Kconfig"
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
-@@ -16,3 +16,14 @@ config FSL_GUTS
+@@ -16,3 +16,25 @@ config FSL_GUTS
Initially only reading SVR and registering soc device are supported.
Other guts accesses, such as reading RCW, should eventually be moved
into this driver as well.
+
++config FSL_QIXIS
++ tristate "QIXIS system controller driver"
++ depends on OF
++ select REGMAP_I2C
++ select REGMAP_MMIO
++ select MFD_CORE
++ default n
++ help
++ Say y here to enable QIXIS system controller api. The qixis driver
++ provides FPGA functions to control system.
++
+config FSL_SLEEP_FSM
+ bool
+ help
+endif
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
-@@ -6,3 +6,6 @@ obj-$(CONFIG_FSL_DPAA) +
+@@ -5,4 +5,9 @@
+ obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
++obj-$(CONFIG_FSL_QIXIS) += qixis_ctrl.o
obj-$(CONFIG_FSL_GUTS) += guts.o
+obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console/
++obj-$(CONFIG_SUSPEND) += rcpm.o
+obj-$(CONFIG_LS_SOC_DRIVERS) += layerscape/
+obj-$(CONFIG_FSL_SLEEP_FSM) += sleep_fsm.o
--- /dev/null
---help---
Build drivers for Freescale DataPath Acceleration
Architecture (DPAA2) family of SoCs.
-@@ -16,3 +16,44 @@ config FSL_DPAA2_ETH
+@@ -16,3 +16,57 @@ config FSL_DPAA2_ETH
---help---
Ethernet driver for Freescale DPAA2 SoCs, using the
Freescale MC bus driver
+ (PFC) in the driver.
+
+ If unsure, say N.
++
++config FSL_DPAA2_PTP_CLOCK
++ tristate "Freescale DPAA2 as PTP clock"
++ select PTP_1588_CLOCK
++ default y
++ help
++ This driver adds support for using the DPAA2 1588 timer module
++ as a PTP clock. This clock is only useful if your PTP programs are
++ getting hardware time stamps on the PTP Ethernet packets
++ using the SO_TIMESTAMPING API.
++
++ To compile this driver as a module, choose M here: the module
++ will be called dpaa2-rtc.
+endif
+
+source "drivers/staging/fsl-dpaa2/mac/Kconfig"
obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
+obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
-+obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += rtc/
++obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += rtc/
+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
-From 74243154052af635ee9ce9d07aab273ce219c855 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Thu, 13 Dec 2018 13:23:52 +0800
+From d2ef9f2f6d16d34d7eee74cb8efd269341fec5a1 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 6 May 2019 16:54:17 +0800
Subject: [PATCH] core-linux: support layerscape
-This is an integrated patch of core-linux for layerscape.
+This is an integrated patch of core-linux for layerscape
+Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Signed-off-by: Abhijit Ayarekar <abhijit.ayarekar@caviumnetworks.com>
Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
-Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
+Signed-off-by: pascal paillet <p.paillet@st.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
+Signed-off-by: Vivek Gautam <vivek.gautam@codeaurora.org>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: yinbo.zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- drivers/base/dma-mapping.c | 7 ++
- drivers/net/bonding/bond_main.c | 5 +-
- drivers/net/bonding/bond_options.c | 2 +-
- drivers/net/team/team.c | 3 +-
- drivers/net/vrf.c | 3 +-
- drivers/of/device.c | 13 +++-
- drivers/soc/fsl/guts.c | 3 +
- include/linux/fsl_devices.h | 2 +
- include/linux/netdevice.h | 13 +++-
- include/linux/skbuff.h | 2 +
- include/net/bonding.h | 3 +-
- net/batman-adv/soft-interface.c | 3 +-
- net/bridge/br_device.c | 3 +-
- net/core/dev.c | 81 ++++++++++++++---------
- net/core/rtnetlink.c | 10 +--
- net/core/skbuff.c | 29 +++++++-
- samples/bpf/Makefile | 12 +++-
- samples/bpf/map_perf_test_kern.c | 2 +-
- samples/bpf/map_perf_test_user.c | 2 +-
- tools/testing/selftests/bpf/bpf_helpers.h | 56 ++++++++++++++--
- 20 files changed, 193 insertions(+), 61 deletions(-)
+ drivers/base/core.c | 122 ++++++++++++++++++++++++++----
+ drivers/base/dma-mapping.c | 7 ++
+ drivers/gpu/ipu-v3/ipu-pre.c | 3 +-
+ drivers/gpu/ipu-v3/ipu-prg.c | 3 +-
+ drivers/iommu/dma-iommu.c | 3 +
+ drivers/mux/Kconfig | 12 +--
+ drivers/mux/mmio.c | 6 +-
+ drivers/of/device.c | 14 +++-
+ drivers/soc/imx/gpc.c | 2 +-
+ include/linux/device.h | 20 +++--
+ include/linux/fsl_devices.h | 2 +
+ include/linux/netdevice.h | 10 ++-
+ include/linux/skbuff.h | 2 +
+ lib/dma-noop.c | 19 +++++
+ mm/page_alloc.c | 10 ++-
+ net/core/dev.c | 81 ++++++++++++--------
+ net/core/skbuff.c | 29 ++++++-
+ samples/bpf/Makefile | 12 ++-
+ samples/bpf/map_perf_test_kern.c | 2 +-
+ samples/bpf/map_perf_test_user.c | 2 +-
+ tools/testing/selftests/bpf/bpf_helpers.h | 56 ++++++++++++--
+ 21 files changed, 337 insertions(+), 80 deletions(-)
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -161,10 +161,10 @@ static int device_reorder_to_tail(struct
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+- * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+- * when the consumer device driver unbinds from it. The combination of both
+- * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+- * to be returned.
++ * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
++ * automatically when the consumer device driver unbinds from it.
++ * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
++ * set is invalid and will cause NULL to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+@@ -181,7 +181,8 @@ struct device_link *device_link_add(stru
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+- ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
++ ((flags & DL_FLAG_STATELESS) &&
++ (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
+ return NULL;
+
+ device_links_write_lock();
+@@ -199,8 +200,10 @@ struct device_link *device_link_add(stru
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+- if (link->consumer == consumer)
++ if (link->consumer == consumer) {
++ kref_get(&link->kref);
+ goto out;
++ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+@@ -232,6 +235,7 @@ struct device_link *device_link_add(stru
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
++ kref_init(&link->kref);
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+@@ -302,8 +306,10 @@ static void __device_link_free_srcu(stru
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+ }
+
+-static void __device_link_del(struct device_link *link)
++static void __device_link_del(struct kref *kref)
+ {
++ struct device_link *link = container_of(kref, struct device_link, kref);
++
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+@@ -315,8 +321,10 @@ static void __device_link_del(struct dev
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+ }
+ #else /* !CONFIG_SRCU */
+-static void __device_link_del(struct device_link *link)
++static void __device_link_del(struct kref *kref)
+ {
++ struct device_link *link = container_of(kref, struct device_link, kref);
++
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+@@ -334,18 +342,50 @@ static void __device_link_del(struct dev
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+- * PM.
++ * PM. If the link was added multiple times, it needs to be deleted as often.
++ * Care is required for hotplugged devices: Their links are purged on removal
++ * and calling device_link_del() is then no longer allowed.
+ */
+ void device_link_del(struct device_link *link)
+ {
+ device_links_write_lock();
+ device_pm_lock();
+- __device_link_del(link);
++ kref_put(&link->kref, __device_link_del);
+ device_pm_unlock();
+ device_links_write_unlock();
+ }
+ EXPORT_SYMBOL_GPL(device_link_del);
+
++/**
++ * device_link_remove - remove a link between two devices.
++ * @consumer: Consumer end of the link.
++ * @supplier: Supplier end of the link.
++ *
++ * The caller must ensure proper synchronization of this function with runtime
++ * PM.
++ */
++void device_link_remove(void *consumer, struct device *supplier)
++{
++ struct device_link *link;
++
++ if (WARN_ON(consumer == supplier))
++ return;
++
++ device_links_write_lock();
++ device_pm_lock();
++
++ list_for_each_entry(link, &supplier->links.consumers, s_node) {
++ if (link->consumer == consumer) {
++ kref_put(&link->kref, __device_link_del);
++ break;
++ }
++ }
++
++ device_pm_unlock();
++ device_links_write_unlock();
++}
++EXPORT_SYMBOL_GPL(device_link_remove);
++
+ static void device_links_missing_supplier(struct device *dev)
+ {
+ struct device_link *link;
+@@ -453,8 +493,8 @@ static void __device_links_no_driver(str
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+- if (link->flags & DL_FLAG_AUTOREMOVE)
+- __device_link_del(link);
++ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
++ kref_put(&link->kref, __device_link_del);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+@@ -489,8 +529,18 @@ void device_links_driver_cleanup(struct
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
++ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
++
++ /*
++ * autoremove the links between this @dev and its consumer
++ * devices that are not active, i.e. where the link state
++ * has moved to DL_STATE_SUPPLIER_UNBIND.
++ */
++ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
++ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
++ kref_put(&link->kref, __device_link_del);
++
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+@@ -607,13 +657,13 @@ static void device_links_purge(struct de
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+- __device_link_del(link);
++ __device_link_del(&link->kref);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+- __device_link_del(link);
++ __device_link_del(&link->kref);
+ }
+
+ device_links_write_unlock();
+@@ -1035,6 +1085,34 @@ static ssize_t online_store(struct devic
+ }
+ static DEVICE_ATTR_RW(online);
+
++static ssize_t suppliers_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct device_link *link;
++ size_t count = 0;
++
++ list_for_each_entry(link, &dev->links.suppliers, c_node)
++ count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++ dev_name(link->supplier));
++
++ return count;
++}
++static DEVICE_ATTR_RO(suppliers);
++
++static ssize_t consumers_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct device_link *link;
++ size_t count = 0;
++
++ list_for_each_entry(link, &dev->links.consumers, s_node)
++ count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++ dev_name(link->consumer));
++
++ return count;
++}
++static DEVICE_ATTR_RO(consumers);
++
+ int device_add_groups(struct device *dev, const struct attribute_group **groups)
+ {
+ return sysfs_create_groups(&dev->kobj, groups);
+@@ -1206,8 +1284,20 @@ static int device_add_attrs(struct devic
+ goto err_remove_dev_groups;
+ }
+
++ error = device_create_file(dev, &dev_attr_suppliers);
++ if (error)
++ goto err_remove_online;
++
++ error = device_create_file(dev, &dev_attr_consumers);
++ if (error)
++ goto err_remove_suppliers;
++
+ return 0;
+
++ err_remove_suppliers:
++ device_remove_file(dev, &dev_attr_suppliers);
++ err_remove_online:
++ device_remove_file(dev, &dev_attr_online);
+ err_remove_dev_groups:
+ device_remove_groups(dev, dev->groups);
+ err_remove_type_groups:
+@@ -1225,6 +1315,8 @@ static void device_remove_attrs(struct d
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+
++ device_remove_file(dev, &dev_attr_consumers);
++ device_remove_file(dev, &dev_attr_suppliers);
+ device_remove_file(dev, &dev_attr_online);
+ device_remove_groups(dev, dev->groups);
+
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -335,6 +335,7 @@ void dma_common_free_remap(void *cpu_add
if (dma_dev->of_node) {
ret = of_dma_configure(dev, dma_dev->of_node);
} else if (has_acpi_companion(dma_dev)) {
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -1330,7 +1330,8 @@ void bond_lower_state_changed(struct sla
- }
-
- /* enslave device <slave> to bond device <master> */
--int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
-+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
-+ struct netlink_ext_ack *extack)
- {
- struct bonding *bond = netdev_priv(bond_dev);
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-@@ -3506,7 +3507,7 @@ static int bond_do_ioctl(struct net_devi
- switch (cmd) {
- case BOND_ENSLAVE_OLD:
- case SIOCBONDENSLAVE:
-- res = bond_enslave(bond_dev, slave_dev);
-+ res = bond_enslave(bond_dev, slave_dev, NULL);
- break;
- case BOND_RELEASE_OLD:
- case SIOCBONDRELEASE:
---- a/drivers/net/bonding/bond_options.c
-+++ b/drivers/net/bonding/bond_options.c
-@@ -1382,7 +1382,7 @@ static int bond_option_slaves_set(struct
- switch (command[0]) {
- case '+':
- netdev_dbg(bond->dev, "Adding slave %s\n", dev->name);
-- ret = bond_enslave(bond->dev, dev);
-+ ret = bond_enslave(bond->dev, dev, NULL);
- break;
-
- case '-':
---- a/drivers/net/team/team.c
-+++ b/drivers/net/team/team.c
-@@ -1953,7 +1953,8 @@ static int team_netpoll_setup(struct net
- }
- #endif
-
--static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
-+static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
-+ struct netlink_ext_ack *extack)
- {
- struct team *team = netdev_priv(dev);
- int err;
---- a/drivers/net/vrf.c
-+++ b/drivers/net/vrf.c
-@@ -791,7 +791,8 @@ err:
- return ret;
- }
-
--static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
-+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
-+ struct netlink_ext_ack *extack)
- {
- if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
- return -EINVAL;
+--- a/drivers/gpu/ipu-v3/ipu-pre.c
++++ b/drivers/gpu/ipu-v3/ipu-pre.c
+@@ -124,7 +124,8 @@ ipu_pre_lookup_by_phandle(struct device
+ list_for_each_entry(pre, &ipu_pre_list, list) {
+ if (pre_node == pre->dev->of_node) {
+ mutex_unlock(&ipu_pre_list_mutex);
+- device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
++ device_link_add(dev, pre->dev,
++ DL_FLAG_AUTOREMOVE_CONSUMER);
+ of_node_put(pre_node);
+ return pre;
+ }
+--- a/drivers/gpu/ipu-v3/ipu-prg.c
++++ b/drivers/gpu/ipu-v3/ipu-prg.c
+@@ -99,7 +99,8 @@ ipu_prg_lookup_by_phandle(struct device
+ list_for_each_entry(prg, &ipu_prg_list, list) {
+ if (prg_node == prg->dev->of_node) {
+ mutex_unlock(&ipu_prg_list_mutex);
+- device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
++ device_link_add(dev, prg->dev,
++ DL_FLAG_AUTOREMOVE_CONSUMER);
+ prg->id = ipu_id;
+ of_node_put(prg_node);
+ return prg;
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -381,6 +381,9 @@ static dma_addr_t iommu_dma_alloc_iova(s
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
+
++ if (dev->bus_dma_mask)
++ dma_limit &= dev->bus_dma_mask;
++
+ if (domain->geometry.force_aperture)
+ dma_limit = min(dma_limit, domain->geometry.aperture_end);
+
+--- a/drivers/mux/Kconfig
++++ b/drivers/mux/Kconfig
+@@ -35,14 +35,14 @@ config MUX_GPIO
+ be called mux-gpio.
+
+ config MUX_MMIO
+- tristate "MMIO register bitfield-controlled Multiplexer"
+- depends on (OF && MFD_SYSCON) || COMPILE_TEST
++ tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
++ depends on OF || COMPILE_TEST
+ help
+- MMIO register bitfield-controlled Multiplexer controller.
++ MMIO/Regmap register bitfield-controlled Multiplexer controller.
+
+- The driver builds multiplexer controllers for bitfields in a syscon
+- register. For N bit wide bitfields, there will be 2^N possible
+- multiplexer states.
++ The driver builds multiplexer controllers for bitfields in either
++ a syscon register or a driver regmap register. For N bit wide
++ bitfields, there will be 2^N possible multiplexer states.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-mmio.
+--- a/drivers/mux/mmio.c
++++ b/drivers/mux/mmio.c
+@@ -31,6 +31,7 @@ static const struct mux_control_ops mux_
+
+ static const struct of_device_id mux_mmio_dt_ids[] = {
+ { .compatible = "mmio-mux", },
++ { .compatible = "reg-mux", },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
+@@ -46,7 +47,10 @@ static int mux_mmio_probe(struct platfor
+ int ret;
+ int i;
+
+- regmap = syscon_node_to_regmap(np->parent);
++ if (of_device_is_compatible(np, "mmio-mux"))
++ regmap = syscon_node_to_regmap(np->parent);
++ else
++ regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to get regmap: %d\n", ret);
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -15,6 +15,9 @@
dev->bus != &platform_bus_type)
return ret == -ENODEV ? 0 : ret;
-@@ -155,7 +161,12 @@ int of_dma_configure(struct device *dev,
+@@ -152,10 +158,16 @@ int of_dma_configure(struct device *dev,
+ * set by the driver.
+ */
+ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
++ dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
---- a/drivers/soc/fsl/guts.c
-+++ b/drivers/soc/fsl/guts.c
-@@ -213,6 +213,9 @@ static const struct of_device_id fsl_gut
- { .compatible = "fsl,ls1021a-dcfg", },
- { .compatible = "fsl,ls1043a-dcfg", },
- { .compatible = "fsl,ls2080a-dcfg", },
-+ { .compatible = "fsl,ls1088a-dcfg", },
-+ { .compatible = "fsl,ls1012a-dcfg", },
-+ { .compatible = "fsl,ls1046a-dcfg", },
- {}
- };
- MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+--- a/drivers/soc/imx/gpc.c
++++ b/drivers/soc/imx/gpc.c
+@@ -209,7 +209,7 @@ static int imx_pgc_power_domain_probe(st
+ goto genpd_err;
+ }
+
+- device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
++ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ return 0;
+
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -55,6 +55,8 @@ struct bus_attribute {
+ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+ #define BUS_ATTR_RO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
++#define BUS_ATTR_WO(_name) \
++ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
+
+ extern int __must_check bus_create_file(struct bus_type *,
+ struct bus_attribute *);
+@@ -750,14 +752,16 @@ enum device_link_state {
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
++ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
++ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
+ */
+-#define DL_FLAG_STATELESS BIT(0)
+-#define DL_FLAG_AUTOREMOVE BIT(1)
+-#define DL_FLAG_PM_RUNTIME BIT(2)
+-#define DL_FLAG_RPM_ACTIVE BIT(3)
++#define DL_FLAG_STATELESS BIT(0)
++#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
++#define DL_FLAG_PM_RUNTIME BIT(2)
++#define DL_FLAG_RPM_ACTIVE BIT(3)
++#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
+
+ /**
+ * struct device_link - Device link representation.
+@@ -768,6 +772,7 @@ enum device_link_state {
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
++ * @kref: Count repeated addition of the same link.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+ struct device_link {
+@@ -778,6 +783,7 @@ struct device_link {
+ enum device_link_state status;
+ u32 flags;
+ bool rpm_active;
++ struct kref kref;
+ #ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+ #endif
+@@ -850,6 +856,8 @@ struct dev_links_info {
+ * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
+ * hardware supports 64-bit addresses for consistent allocations
+ * such descriptors.
++ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
++ * limit than the device itself supports.
+ * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+ * @dma_parms: A low level driver may set these to teach IOMMU code about
+ * segment limitations.
+@@ -929,6 +937,7 @@ struct device {
+ not all hardware supports
+ 64 bit addresses for consistent
+ allocations such descriptors. */
++ u64 bus_dma_mask; /* upstream dma_mask constraint */
+ unsigned long dma_pfn_offset;
+
+ struct device_dma_parameters *dma_parms;
+@@ -1267,6 +1276,7 @@ extern const char *dev_driver_string(con
+ struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags);
+ void device_link_del(struct device_link *link);
++void device_link_remove(void *consumer, struct device *supplier);
+
+ #ifdef CONFIG_PRINTK
+
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -99,7 +99,9 @@ struct fsl_usb2_platform_data {
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1260,7 +1260,8 @@ struct net_device_ops {
- u32 flow_id);
- #endif
- int (*ndo_add_slave)(struct net_device *dev,
-- struct net_device *slave_dev);
-+ struct net_device *slave_dev,
-+ struct netlink_ext_ack *extack);
- int (*ndo_del_slave)(struct net_device *dev,
- struct net_device *slave_dev);
- netdev_features_t (*ndo_fix_features)(struct net_device *dev,
-@@ -2344,7 +2345,8 @@ int register_netdevice_notifier(struct n
+@@ -2344,7 +2344,8 @@ int register_netdevice_notifier(struct n
int unregister_netdevice_notifier(struct notifier_block *nb);
struct netdev_notifier_info {
};
struct netdev_notifier_info_ext {
-@@ -2376,6 +2378,7 @@ static inline void netdev_notifier_info_
+@@ -2376,6 +2377,7 @@ static inline void netdev_notifier_info_
struct net_device *dev)
{
info->dev = dev;
}
static inline struct net_device *
-@@ -2384,6 +2387,12 @@ netdev_notifier_info_to_dev(const struct
+@@ -2384,6 +2386,12 @@ netdev_notifier_info_to_dev(const struct
return info->dev;
}
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
---- a/include/net/bonding.h
-+++ b/include/net/bonding.h
-@@ -592,7 +592,8 @@ void bond_destroy_sysfs(struct bond_net
- void bond_prepare_sysfs_group(struct bonding *bond);
- int bond_sysfs_slave_add(struct slave *slave);
- void bond_sysfs_slave_del(struct slave *slave);
--int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
-+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
-+ struct netlink_ext_ack *extack);
- int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
- u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
- int bond_set_carrier(struct bonding *bond);
---- a/net/batman-adv/soft-interface.c
-+++ b/net/batman-adv/soft-interface.c
-@@ -876,7 +876,8 @@ free_bat_counters:
- * Return: 0 if successful or error otherwise.
- */
- static int batadv_softif_slave_add(struct net_device *dev,
-- struct net_device *slave_dev)
-+ struct net_device *slave_dev,
-+ struct netlink_ext_ack *extack)
- {
- struct batadv_hard_iface *hard_iface;
- struct net *net = dev_net(dev);
---- a/net/bridge/br_device.c
-+++ b/net/bridge/br_device.c
-@@ -324,7 +324,8 @@ void br_netpoll_disable(struct net_bridg
-
- #endif
+--- a/lib/dma-noop.c
++++ b/lib/dma-noop.c
+@@ -58,11 +58,30 @@ static int dma_noop_map_sg(struct device
+ return nents;
+ }
--static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
-+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
-+ struct netlink_ext_ack *extack)
++static int dma_noop_supported(struct device *dev, u64 mask)
++{
++#ifdef CONFIG_ZONE_DMA
++ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
++ return 0;
++#else
++ /*
++ * Because 32-bit DMA masks are so common we expect every architecture
++ * to be able to satisfy them - either by not supporting more physical
++ * memory, or by providing a ZONE_DMA32. If neither is the case, the
++ * architecture needs to use an IOMMU instead of the direct mapping.
++ */
++ if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
++ return 0;
++#endif
++ return 1;
++}
++
+ const struct dma_map_ops dma_noop_ops = {
+ .alloc = dma_noop_alloc,
+ .free = dma_noop_free,
+ .map_page = dma_noop_map_page,
+ .map_sg = dma_noop_map_sg,
++ .dma_supported = dma_noop_supported,
+ };
+ EXPORT_SYMBOL(dma_noop_ops);
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4366,8 +4366,14 @@ void page_frag_free(void *addr)
{
- struct net_bridge *br = netdev_priv(dev);
+ struct page *page = virt_to_head_page(addr);
+
+- if (unlikely(put_page_testzero(page)))
+- __free_pages_ok(page, compound_order(page));
++ if (unlikely(put_page_testzero(page))) {
++ unsigned int order = compound_order(page);
++
++ if (order == 0) /* Via pcp? */
++ free_hot_cold_page(page, false);
++ else
++ __free_pages_ok(page, order);
++ }
+ }
+ EXPORT_SYMBOL(page_frag_free);
+
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -162,7 +162,6 @@ static struct list_head offload_base __r
}
}
---- a/net/core/rtnetlink.c
-+++ b/net/core/rtnetlink.c
-@@ -1912,7 +1912,8 @@ static int do_setvfinfo(struct net_devic
- return err;
- }
-
--static int do_set_master(struct net_device *dev, int ifindex)
-+static int do_set_master(struct net_device *dev, int ifindex,
-+ struct netlink_ext_ack *extack)
- {
- struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
- const struct net_device_ops *ops;
-@@ -1937,7 +1938,7 @@ static int do_set_master(struct net_devi
- return -EINVAL;
- ops = upper_dev->netdev_ops;
- if (ops->ndo_add_slave) {
-- err = ops->ndo_add_slave(upper_dev, dev);
-+ err = ops->ndo_add_slave(upper_dev, dev, extack);
- if (err)
- return err;
- } else {
-@@ -2074,7 +2075,7 @@ static int do_setlink(const struct sk_bu
- }
-
- if (tb[IFLA_MASTER]) {
-- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
-+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
- if (err)
- goto errout;
- status |= DO_SETLINK_MODIFIED;
-@@ -2723,7 +2724,8 @@ replay:
- goto out_unregister;
- }
- if (tb[IFLA_MASTER]) {
-- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
-+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
-+ extack);
- if (err)
- goto out_unregister;
- }
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -803,6 +803,32 @@ void napi_consume_skb(struct sk_buff *sk
-From 6eeff55fd4756f271ad09a914078c9aa45f8359d Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 16 Nov 2018 14:23:40 +0800
-Subject: [PATCH 04/39] arch: support layerscape
+From f29db0048a07384ee4cd962c676b750e13e5d6b0 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 6 May 2019 17:17:58 +0800
+Subject: [PATCH] arch: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of arch for layerscape
Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
Signed-off-by: Alison Wang <alison.wang@freescale.com>
Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Dave Liu <daveliu@freescale.com>
Signed-off-by: Guanhua <guanhua.gao@nxp.com>
Signed-off-by: Li Yang <leoli@freescale.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
Signed-off-by: Poonam Aggrwal <poonam.aggrwal@nxp.com>
Signed-off-by: Rajesh Bhagat <rajesh.bhagat@nxp.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
+Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
Signed-off-by: Wang Dongsheng <dongsheng.wang@freescale.com>
Signed-off-by: Xie Xiaobo <X.Xie@freescale.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- arch/arm/include/asm/delay.h | 16 ++++++++++++++
- arch/arm/include/asm/io.h | 31 +++++++++++++++++++++++++++
- arch/arm/include/asm/mach/map.h | 4 ++--
- arch/arm/include/asm/pgtable.h | 7 ++++++
- arch/arm/kernel/time.c | 3 +++
- arch/arm/mm/dma-mapping.c | 1 +
- arch/arm/mm/ioremap.c | 7 ++++++
- arch/arm/mm/mmu.c | 9 ++++++++
- arch/arm64/include/asm/cache.h | 2 +-
- arch/arm64/include/asm/io.h | 1 +
- arch/arm64/include/asm/pgtable-prot.h | 3 +++
- arch/arm64/include/asm/pgtable.h | 5 +++++
- arch/arm64/mm/dma-mapping.c | 1 +
- arch/arm64/mm/init.c | 12 +++++++----
- 14 files changed, 95 insertions(+), 7 deletions(-)
+ arch/arm/include/asm/delay.h | 16 ++++++
+ arch/arm/include/asm/io.h | 31 ++++++++++
+ arch/arm/include/asm/mach/map.h | 4 +-
+ arch/arm/include/asm/pgtable.h | 7 +++
+ arch/arm/kernel/time.c | 3 +
+ arch/arm/mm/dma-mapping.c | 1 +
+ arch/arm/mm/ioremap.c | 7 +++
+ arch/arm/mm/mmu.c | 9 +++
+ arch/arm64/include/asm/cache.h | 2 +-
+ arch/arm64/include/asm/io.h | 1 +
+ arch/arm64/include/asm/pgtable-prot.h | 3 +
+ arch/arm64/include/asm/pgtable.h | 5 ++
+ arch/arm64/mm/dma-mapping.c | 1 +
+ arch/arm64/mm/init.c | 12 ++--
+ drivers/soc/fsl/guts.c | 9 +++
+ drivers/soc/fsl/qixis_ctrl.c | 105 ++++++++++++++++++++++++++++++++++
+ 16 files changed, 209 insertions(+), 7 deletions(-)
+ create mode 100644 drivers/soc/fsl/qixis_ctrl.c
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);
+--- a/drivers/soc/fsl/guts.c
++++ b/drivers/soc/fsl/guts.c
+@@ -100,6 +100,11 @@ static const struct fsl_soc_die_attr fsl
+ .svr = 0x87000000,
+ .mask = 0xfff70000,
+ },
++ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
++ { .die = "LX2160A",
++ .svr = 0x87360000,
++ .mask = 0xff3f0000,
++ },
+ { },
+ };
+
+@@ -213,6 +218,10 @@ static const struct of_device_id fsl_gut
+ { .compatible = "fsl,ls1021a-dcfg", },
+ { .compatible = "fsl,ls1043a-dcfg", },
+ { .compatible = "fsl,ls2080a-dcfg", },
++ { .compatible = "fsl,ls1088a-dcfg", },
++ { .compatible = "fsl,ls1012a-dcfg", },
++ { .compatible = "fsl,ls1046a-dcfg", },
++ { .compatible = "fsl,lx2160a-dcfg", },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+--- /dev/null
++++ b/drivers/soc/fsl/qixis_ctrl.c
+@@ -0,0 +1,105 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++/* Freescale QIXIS system controller driver.
++ *
++ * Copyright 2015 Freescale Semiconductor, Inc.
++ * Copyright 2018-2019 NXP
++ */
++
++#include <linux/err.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++#include <linux/mfd/core.h>
++#include <linux/of.h>
++#include <linux/regmap.h>
++
++/* QIXIS MAP */
++struct fsl_qixis_regs {
++ u8 id; /* Identification Registers */
++ u8 version; /* Version Register */
++ u8 qixis_ver; /* QIXIS Version Register */
++ u8 reserved1[0x1f];
++};
++
++struct qixis_priv {
++ struct regmap *regmap;
++};
++
++static struct regmap_config qixis_regmap_config = {
++ .reg_bits = 8,
++ .val_bits = 8,
++};
++
++static const struct mfd_cell fsl_qixis_devs[] = {
++ {
++ .name = "reg-mux",
++ .of_compatible = "reg-mux",
++ },
++};
++
++static int fsl_qixis_i2c_probe(struct i2c_client *client)
++{
++ struct qixis_priv *priv;
++ int ret = 0;
++ u32 qver;
++
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -EOPNOTSUPP;
++
++ priv = devm_kzalloc(&client->dev, sizeof(struct qixis_priv),
++ GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ priv->regmap = regmap_init_i2c(client, &qixis_regmap_config);
++ regmap_read(priv->regmap, offsetof(struct fsl_qixis_regs, qixis_ver),
++ &qver);
++ pr_info("Freescale QIXIS Version: 0x%08x\n", qver);
++
++ i2c_set_clientdata(client, priv);
++
++ if (of_device_is_compatible(client->dev.of_node, "simple-mfd"))
++ ret = devm_mfd_add_devices(&client->dev, -1, fsl_qixis_devs,
++ ARRAY_SIZE(fsl_qixis_devs), NULL, 0,
++ NULL);
++ if (ret)
++ goto error;
++
++ return ret;
++error:
++ regmap_exit(priv->regmap);
++
++ return ret;
++}
++
++static int fsl_qixis_i2c_remove(struct i2c_client *client)
++{
++ struct qixis_priv *priv;
++
++ priv = i2c_get_clientdata(client);
++ regmap_exit(priv->regmap);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_qixis_i2c_of_match[] = {
++ { .compatible = "fsl,fpga-qixis-i2c" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, fsl_qixis_i2c_of_match);
++
++static struct i2c_driver fsl_qixis_i2c_driver = {
++ .driver = {
++ .name = "qixis_ctrl_i2c",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(fsl_qixis_i2c_of_match),
++ },
++ .probe_new = fsl_qixis_i2c_probe,
++ .remove = fsl_qixis_i2c_remove,
++};
++module_i2c_driver(fsl_qixis_i2c_driver);
++
++MODULE_AUTHOR("Wang Dongsheng <dongsheng.wang@freescale.com>");
++MODULE_DESCRIPTION("Freescale QIXIS system controller driver");
++MODULE_LICENSE("GPL");
++
-From caecd8632a257759735ed6dd9354091cae8a5746 Mon Sep 17 00:00:00 2001
+From cc1d1d1b68d18a31aeb8a572ca6b3929b083855c Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 16 Nov 2018 17:11:32 +0800
-Subject: [PATCH] dts: support layerscape This is an integrated patch of dts
- for layerscape
+Date: Wed, 17 Apr 2019 18:58:33 +0800
+Subject: [PATCH] dts: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is an integrated patch of dts for layerscape
Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Alison Wang <alison.wang@nxp.com>
Signed-off-by: Amrita Kumari <amrita.kumari@nxp.com>
Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
-Signed-off-by: Ashish Kumar <ashish.kumar@nxp.com>
Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
-Signed-off-by: Bao Xiaowei <xiaowei.bao@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
Signed-off-by: Bhupesh Sharma <bhupesh.sharma@freescale.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
+Signed-off-by: Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
Signed-off-by: Changming Huang <jerry.huang@nxp.com>
+Signed-off-by: Chuanhua Han <chuanhua.han@nxp.com>
Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Honghua Yin <Hong-Hua.Yin@freescale.com>
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Iordache Florinel-R70177 <florinel.iordache@nxp.com>
Signed-off-by: Jagdish Gediya <jagdish.gediya@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Li Yang <leoyang.li@nxp.com>
+Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
+Signed-off-by: Mathew McBride <matt@traverse.com.au>
+Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
+Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
+Signed-off-by: Peng Ma <peng.ma@nxp.com>
+Signed-off-by: Po Liu <po.liu@nxp.com>
+Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
+Signed-off-by: Pratiyush Mohan Srivastava <pratiyush.srivastava@nxp.com>
+Signed-off-by: Priyanka Jain <priyanka.jain@nxp.com>
+Signed-off-by: Raghav Dogra <raghav.dogra@nxp.com>
+Signed-off-by: Rai Harninder <harninder.rai@nxp.com>
+Signed-off-by: Ramneek Mehresh <ramneek.mehresh@nxp.com>
+Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Sakar Arora <Sakar.Arora@freescale.com>
+Signed-off-by: Santan Kumar <santan.kumar@nxp.com>
+Signed-off-by: Scott Wood <oss@buserror.net>
+Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Sriram Dash <sriram.dash@nxp.com>
+Signed-off-by: Sumit Garg <sumit.garg@nxp.com>
+Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
+Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
+Signed-off-by: Tao Yang <b31903@freescale.com>
+Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
+Signed-off-by: Vicentiu Galanopulo <vicentiu.galanopulo@nxp.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Wasim Khan <wasim.khan@nxp.com>
+Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
+Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
+Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
+Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
+Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
- arch/arm/boot/dts/ls1021a-qds.dts | 28 +
- arch/arm/boot/dts/ls1021a-twr.dts | 27 +
- arch/arm/boot/dts/ls1021a.dtsi | 105 +++-
- arch/arm64/boot/dts/freescale/Makefile | 9 +
- .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 123 +++++
- .../boot/dts/freescale/fsl-ls1012a-frdm.dts | 96 ++--
- .../boot/dts/freescale/fsl-ls1012a-frwy.dts | 177 +++++++
- .../boot/dts/freescale/fsl-ls1012a-qds.dts | 133 +++--
- .../boot/dts/freescale/fsl-ls1012a-rdb.dts | 100 ++--
- .../arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 179 +++++--
- .../boot/dts/freescale/fsl-ls1043-post.dtsi | 3 +-
- .../dts/freescale/fsl-ls1043a-qds-sdk.dts | 71 +++
- .../boot/dts/freescale/fsl-ls1043a-qds.dts | 201 ++++++--
- .../dts/freescale/fsl-ls1043a-rdb-sdk.dts | 71 +++
- .../dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++++
- .../boot/dts/freescale/fsl-ls1043a-rdb.dts | 75 ++-
- .../arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 196 +++++--
- .../boot/dts/freescale/fsl-ls1046-post.dtsi | 2 +-
- .../dts/freescale/fsl-ls1046a-qds-sdk.dts | 79 +++
- .../boot/dts/freescale/fsl-ls1046a-qds.dts | 189 +++++--
- .../dts/freescale/fsl-ls1046a-rdb-sdk.dts | 115 +++++
- .../dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 ++++
- .../boot/dts/freescale/fsl-ls1046a-rdb.dts | 44 +-
- .../arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 182 +++++--
- .../boot/dts/freescale/fsl-ls1088a-qds.dts | 88 ++--
- .../boot/dts/freescale/fsl-ls1088a-rdb.dts | 150 ++++--
- .../arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 486 ++++++++++++++++--
- .../boot/dts/freescale/fsl-ls2080a-qds.dts | 100 ++--
- .../boot/dts/freescale/fsl-ls2080a-rdb.dts | 118 +++--
- .../boot/dts/freescale/fsl-ls2080a-simu.dts | 38 +-
- .../arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 42 +-
- .../boot/dts/freescale/fsl-ls2081a-rdb.dts | 163 ++++++
- .../boot/dts/freescale/fsl-ls2088a-qds.dts | 158 ++++--
- .../boot/dts/freescale/fsl-ls2088a-rdb.dts | 118 +++--
- .../arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 44 +-
- .../boot/dts/freescale/fsl-ls208xa-qds.dtsi | 43 +-
- .../boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 60 +--
- .../arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 210 ++++++--
- .../dts/freescale/qoriq-bman-portals-sdk.dtsi | 55 ++
- .../dts/freescale/qoriq-bman-portals.dtsi | 8 +-
- .../boot/dts/freescale/qoriq-dpaa-eth.dtsi | 97 ++++
- .../dts/freescale/qoriq-fman3-0-10g-0.dtsi | 11 +-
- .../dts/freescale/qoriq-fman3-0-10g-1.dtsi | 11 +-
- .../dts/freescale/qoriq-fman3-0-1g-0.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-1.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-2.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-3.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-4.dtsi | 7 +-
- .../dts/freescale/qoriq-fman3-0-1g-5.dtsi | 7 +-
- .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 ++
- .../boot/dts/freescale/qoriq-fman3-0.dtsi | 54 +-
- .../dts/freescale/qoriq-qman-portals-sdk.dtsi | 38 ++
- .../dts/freescale/qoriq-qman-portals.dtsi | 9 +-
- .../boot/dts/fsl/qoriq-fman-0-10g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-0-1g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-0-1g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-0-1g-2.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-0-1g-3.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-0-1g-4.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-10g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-1g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-1g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-1g-2.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-1g-3.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman-1-1g-4.dtsi | 4 +-
- .../fsl/qoriq-fman3-0-10g-0-best-effort.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +-
- .../fsl/qoriq-fman3-0-10g-1-best-effort.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi | 4 +-
- .../boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi | 4 +-
- 83 files changed, 3742 insertions(+), 1000 deletions(-)
+ arch/arm/boot/dts/Makefile | 3 +-
+ arch/arm/boot/dts/imx25.dtsi | 4 +-
+ arch/arm/boot/dts/imx28.dtsi | 4 +-
+ arch/arm/boot/dts/imx35.dtsi | 4 +-
+ arch/arm/boot/dts/imx53.dtsi | 4 +-
+ arch/arm/boot/dts/ls1021a-iot.dts | 262 ++++
+ arch/arm/boot/dts/ls1021a-qds.dts | 32 +
+ arch/arm/boot/dts/ls1021a-twr.dts | 27 +
+ arch/arm/boot/dts/ls1021a.dtsi | 111 +-
+ arch/arm64/boot/dts/freescale/Makefile | 16 +-
+ .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 126 ++
+ .../boot/dts/freescale/fsl-ls1012a-frdm.dts | 97 +-
+ .../boot/dts/freescale/fsl-ls1012a-frwy.dts | 179 +++
+ .../boot/dts/freescale/fsl-ls1012a-qds.dts | 136 +-
+ .../boot/dts/freescale/fsl-ls1012a-rdb.dts | 100 +-
+ .../arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 210 ++-
+ .../boot/dts/freescale/fsl-ls1043-post.dtsi | 3 +-
+ .../dts/freescale/fsl-ls1043a-qds-sdk.dts | 263 ++++
+ .../boot/dts/freescale/fsl-ls1043a-qds.dts | 206 ++-
+ .../dts/freescale/fsl-ls1043a-rdb-sdk.dts | 262 ++++
+ .../dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 140 ++
+ .../boot/dts/freescale/fsl-ls1043a-rdb.dts | 76 +-
+ .../arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 382 +++--
+ .../boot/dts/freescale/fsl-ls1046-post.dtsi | 2 +-
+ .../dts/freescale/fsl-ls1046a-qds-sdk.dts | 268 ++++
+ .../boot/dts/freescale/fsl-ls1046a-qds.dts | 194 ++-
+ .../dts/freescale/fsl-ls1046a-rdb-sdk.dts | 307 ++++
+ .../dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 133 ++
+ .../boot/dts/freescale/fsl-ls1046a-rdb.dts | 48 +-
+ .../arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 386 +++--
+ .../boot/dts/freescale/fsl-ls1088a-qds.dts | 88 +-
+ .../boot/dts/freescale/fsl-ls1088a-rdb.dts | 150 +-
+ .../arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 546 ++++++-
+ .../boot/dts/freescale/fsl-ls2080a-qds.dts | 100 +-
+ .../boot/dts/freescale/fsl-ls2080a-rdb.dts | 118 +-
+ .../boot/dts/freescale/fsl-ls2080a-simu.dts | 38 +-
+ .../arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 50 +-
+ .../boot/dts/freescale/fsl-ls2081a-rdb.dts | 163 ++
+ .../boot/dts/freescale/fsl-ls2088a-qds.dts | 158 +-
+ .../boot/dts/freescale/fsl-ls2088a-rdb.dts | 118 +-
+ .../arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 52 +-
+ .../boot/dts/freescale/fsl-ls208xa-qds.dtsi | 43 +-
+ .../boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 60 +-
+ .../arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 299 ++--
+ .../boot/dts/freescale/fsl-lx2160a-qds.dts | 353 +++++
+ .../boot/dts/freescale/fsl-lx2160a-rdb.dts | 241 +++
+ .../arm64/boot/dts/freescale/fsl-lx2160a.dtsi | 1318 +++++++++++++++++
+ .../boot/dts/freescale/fsl-tmu-map1.dtsi | 99 ++
+ .../boot/dts/freescale/fsl-tmu-map2.dtsi | 99 ++
+ .../boot/dts/freescale/fsl-tmu-map3.dtsi | 99 ++
+ arch/arm64/boot/dts/freescale/fsl-tmu.dtsi | 251 ++++
+ .../dts/freescale/qoriq-bman-portals-sdk.dtsi | 55 +
+ .../dts/freescale/qoriq-bman-portals.dtsi | 8 +-
+ .../boot/dts/freescale/qoriq-dpaa-eth.dtsi | 97 ++
+ .../dts/freescale/qoriq-fman3-0-10g-0.dtsi | 11 +-
+ .../dts/freescale/qoriq-fman3-0-10g-1.dtsi | 11 +-
+ .../dts/freescale/qoriq-fman3-0-1g-0.dtsi | 7 +-
+ .../dts/freescale/qoriq-fman3-0-1g-1.dtsi | 7 +-
+ .../dts/freescale/qoriq-fman3-0-1g-2.dtsi | 7 +-
+ .../dts/freescale/qoriq-fman3-0-1g-3.dtsi | 7 +-
+ .../dts/freescale/qoriq-fman3-0-1g-4.dtsi | 7 +-
+ .../dts/freescale/qoriq-fman3-0-1g-5.dtsi | 7 +-
+ .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 +
+ .../boot/dts/freescale/qoriq-fman3-0.dtsi | 67 +-
+ .../dts/freescale/qoriq-qman-portals-sdk.dtsi | 38 +
+ .../dts/freescale/qoriq-qman-portals.dtsi | 9 +-
+ .../boot/dts/freescale/traverse-ls1043s.dts | 29 +
+ .../boot/dts/freescale/traverse-ls1043v.dts | 29 +
+ 68 files changed, 7660 insertions(+), 1211 deletions(-)
+ create mode 100644 arch/arm/boot/dts/ls1021a-iot.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map1.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map2.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu-map3.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-tmu.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -496,7 +496,8 @@ dtb-$(CONFIG_SOC_IMX7D) += \
+ imx7s-warp.dtb
+ dtb-$(CONFIG_SOC_LS1021A) += \
+ ls1021a-qds.dtb \
+- ls1021a-twr.dtb
++ ls1021a-twr.dtb \
++ ls1021a-iot.dtb
+ dtb-$(CONFIG_SOC_VF610) += \
+ vf500-colibri-eval-v3.dtb \
+ vf610-colibri-eval-v3.dtb \
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -122,7 +122,7 @@
+ };
+
+ can1: can@43f88000 {
+- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx25-flexcan";
+ reg = <0x43f88000 0x4000>;
+ interrupts = <43>;
+ clocks = <&clks 75>, <&clks 75>;
+@@ -131,7 +131,7 @@
+ };
+
+ can2: can@43f8c000 {
+- compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx25-flexcan";
+ reg = <0x43f8c000 0x4000>;
+ interrupts = <44>;
+ clocks = <&clks 76>, <&clks 76>;
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -1038,7 +1038,7 @@
+ };
+
+ can0: can@80032000 {
+- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx28-flexcan";
+ reg = <0x80032000 0x2000>;
+ interrupts = <8>;
+ clocks = <&clks 58>, <&clks 58>;
+@@ -1047,7 +1047,7 @@
+ };
+
+ can1: can@80034000 {
+- compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx28-flexcan";
+ reg = <0x80034000 0x2000>;
+ interrupts = <9>;
+ clocks = <&clks 59>, <&clks 59>;
+--- a/arch/arm/boot/dts/imx35.dtsi
++++ b/arch/arm/boot/dts/imx35.dtsi
+@@ -303,7 +303,7 @@
+ };
+
+ can1: can@53fe4000 {
+- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx35-flexcan";
+ reg = <0x53fe4000 0x1000>;
+ clocks = <&clks 33>, <&clks 33>;
+ clock-names = "ipg", "per";
+@@ -312,7 +312,7 @@
+ };
+
+ can2: can@53fe8000 {
+- compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx35-flexcan";
+ reg = <0x53fe8000 0x1000>;
+ clocks = <&clks 34>, <&clks 34>;
+ clock-names = "ipg", "per";
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -536,7 +536,7 @@
+ };
+
+ can1: can@53fc8000 {
+- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx53-flexcan";
+ reg = <0x53fc8000 0x4000>;
+ interrupts = <82>;
+ clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
+@@ -546,7 +546,7 @@
+ };
+
+ can2: can@53fcc000 {
+- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
++ compatible = "fsl,imx53-flexcan";
+ reg = <0x53fcc000 0x4000>;
+ interrupts = <83>;
+ clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
+--- /dev/null
++++ b/arch/arm/boot/dts/ls1021a-iot.dts
+@@ -0,0 +1,262 @@
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++/dts-v1/;
++#include "ls1021a.dtsi"
++
++/ {
++ model = "LS1021A IOT Board";
++
++ sys_mclk: clock-mclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24576000>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ reg_3p3v: regulator@0 {
++ compatible = "regulator-fixed";
++ reg = <0>;
++ regulator-name = "3P3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-always-on;
++ };
++
++ reg_2p5v: regulator@1 {
++ compatible = "regulator-fixed";
++ reg = <1>;
++ regulator-name = "2P5V";
++ regulator-min-microvolt = <2500000>;
++ regulator-max-microvolt = <2500000>;
++ regulator-always-on;
++ };
++ };
++
++ sound {
++ compatible = "simple-audio-card";
++ simple-audio-card,format = "i2s";
++ simple-audio-card,widgets =
++ "Microphone", "Microphone Jack",
++ "Headphone", "Headphone Jack",
++ "Speaker", "Speaker Ext",
++ "Line", "Line In Jack";
++ simple-audio-card,routing =
++ "MIC_IN", "Microphone Jack",
++ "Microphone Jack", "Mic Bias",
++ "LINE_IN", "Line In Jack",
++ "Headphone Jack", "HP_OUT",
++ "Speaker Ext", "LINE_OUT";
++
++ simple-audio-card,cpu {
++ sound-dai = <&sai2>;
++ frame-master;
++ bitclock-master;
++ };
++
++ simple-audio-card,codec {
++ sound-dai = <&codec>;
++ frame-master;
++ bitclock-master;
++ };
++ };
++
++ firmware {
++ optee {
++ compatible = "linaro,optee-tz";
++ method = "smc";
++ };
++ };
++};
++
++&enet0 {
++ tbi-handle = <&tbi1>;
++ phy-handle = <&phy1>;
++ phy-connection-type = "sgmii";
++ status = "okay";
++};
++
++&enet1 {
++ tbi-handle = <&tbi1>;
++ phy-handle = <&phy3>;
++ phy-connection-type = "sgmii";
++ status = "okay";
++};
++
++&enet2 {
++ fixed-link = <0 1 1000 0 0>;
++ phy-connection-type = "rgmii-id";
++ status = "okay";
++};
++
++&can0{
++ status = "disabled";
++};
++
++&can1{
++ status = "disabled";
++};
++
++&can2{
++ status = "disabled";
++};
++
++&can3{
++ status = "okay";
++};
++
++&esdhc{
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ max1239@35 {
++ compatible = "maxim,max1239";
++ reg = <0x35>;
++ #io-channel-cells = <1>;
++ };
++
++ codec: sgtl5000@2a {
++ #sound-dai-cells=<0x0>;
++ compatible = "fsl,sgtl5000";
++ reg = <0x2a>;
++ VDDA-supply = <&reg_3p3v>;
++ VDDIO-supply = <&reg_2p5v>;
++ clocks = <&sys_mclk 1>;
++ };
++
++ pca9555: pca9555@23 {
++ compatible = "nxp,pca9555";
++ /*pinctrl-names = "default";*/
++ /*interrupt-parent = <&gpio2>;
++ interrupts = <19 0x2>;*/
++ gpio-controller;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ reg = <0x23>;
++ };
++
++ ina220@44 {
++ compatible = "ti,ina220";
++ reg = <0x44>;
++ shunt-resistor = <1000>;
++ };
++
++ ina220@45 {
++ compatible = "ti,ina220";
++ reg = <0x45>;
++ shunt-resistor = <1000>;
++ };
++
++ lm75b@48 {
++ compatible = "nxp,lm75a";
++ reg = <0x48>;
++ };
++
++ adt7461a@4c {
++ compatible = "adt7461a";
++ reg = <0x4c>;
++ };
++
++ hdmi: sii9022a@39 {
++ compatible = "fsl,sii902x";
++ reg = <0x39>;
++ interrupts = <GIC_SPI 163 IRQ_TYPE_EDGE_RISING>;
++ };
++};
++
++&i2c1 {
++ status = "disabled";
++};
++
++&ifc {
++ status = "disabled";
++};
++
++&lpuart0 {
++ status = "okay";
++};
++
++&mdio0 {
++ phy0: ethernet-phy@0 {
++ reg = <0x0>;
++ };
++ phy1: ethernet-phy@1 {
++ reg = <0x1>;
++ };
++ phy2: ethernet-phy@2 {
++ reg = <0x2>;
++ };
++ phy3: ethernet-phy@3 {
++ reg = <0x3>;
++ };
++ tbi1: tbi-phy@1f {
++ reg = <0x1f>;
++ device_type = "tbi-phy";
++ };
++};
++
++&qspi {
++ num-cs = <2>;
++ status = "okay";
++
++ qflash0: s25fl128s@0 {
++ compatible = "spansion,s25fl129p1";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&sai2 {
++ status = "okay";
++};
++
++&uart0 {
++ status = "okay";
++};
++
++&uart1 {
++ status = "okay";
++};
++
++&dcu {
++ display = <&display>;
++ status = "okay";
++
++ display: display@0 {
++ bits-per-pixel = <24>;
++
++ display-timings {
++ native-mode = <&timing0>;
++
++ timing0: mode0 {
++ clock-frequency = <25000000>;
++ hactive = <640>;
++ vactive = <480>;
++ hback-porch = <80>;
++ hfront-porch = <80>;
++ vback-porch = <16>;
++ vfront-porch = <16>;
++ hsync-len = <12>;
++ vsync-len = <2>;
++ hsync-active = <1>;
++ vsync-active = <1>;
++ };
++ };
++ };
++};
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -124,6 +124,21 @@
fpga: board-control@3,0 {
#address-cells = <1>;
#size-cells = <1>;
-@@ -331,3 +351,11 @@
+@@ -316,6 +336,10 @@
+ };
+ };
+
++&esdhc {
++ status = "okay";
++};
++
+ &sai2 {
+ status = "okay";
+ };
+@@ -331,3 +355,11 @@
&uart1 {
status = "okay";
};
big-endian;
};
-@@ -334,25 +335,41 @@
+@@ -334,25 +335,44 @@
status = "disabled";
};
interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
clocks = <&clockgen 4 1>;
++ dma-names = "tx", "rx";
++ dmas = <&edma0 1 39>,
++ <&edma0 1 38>;
+ fsl-scl-gpio = <&gpio3 23 0>;
status = "disabled";
};
status = "disabled";
};
-@@ -497,6 +514,17 @@
+@@ -497,6 +517,17 @@
status = "disabled";
};
wdog0: watchdog@2ad0000 {
compatible = "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -550,6 +578,25 @@
+@@ -550,6 +581,25 @@
<&clockgen 4 1>;
};
dcu: dcu@2ce0000 {
compatible = "fsl,ls1021a-dcu";
reg = <0x0 0x2ce0000 0x0 0x10000>;
-@@ -684,6 +731,10 @@
+@@ -684,6 +734,11 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ usb3-lpm-capable;
+ snps,dis-u1u2-when-u3-quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
};
pcie@3400000 {
-@@ -691,7 +742,9 @@
+@@ -691,7 +746,9 @@
reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
fsl,pcie-scfg = <&scfg 0>;
#address-cells = <3>;
#size-cells = <2>;
-@@ -714,7 +767,9 @@
+@@ -707,6 +764,7 @@
+ <0000 0 0 2 &gic GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ pcie@3500000 {
+@@ -714,7 +772,9 @@
reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */
0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
fsl,pcie-scfg = <&scfg 1>;
#address-cells = <3>;
#size-cells = <2>;
-@@ -731,5 +786,45 @@
+@@ -730,6 +790,47 @@
+ <0000 0 0 2 &gic GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
- };
++ status = "disabled";
++ };
+
+ can0: can@2a70000 {
+ compatible = "fsl,ls1021ar2-flexcan";
+ clock-names = "ipg", "per";
+ big-endian;
+ status = "disabled";
-+ };
+ };
};
};
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
-@@ -1,15 +1,24 @@
+@@ -1,19 +1,33 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+-
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
++
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043v.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043s.dtb
++
+ always := $(dtb-y)
+ subdir-y := $(dts-dirs)
+ clean-files := *.dtb
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts
-@@ -0,0 +1,123 @@
+@@ -0,0 +1,126 @@
+/*
+ * Device Tree file for NXP LS1012A 2G5RDB Board.
+ *
+ #address-cells = <1>;
+ #size-cells = <0>;
+
-+ ethernet@0 {
++ pfe_mac0: ethernet@0 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x1>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii-2500";
-+ fsl,pfe-phy-if-flags = <0x0>;
-+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
-+ };
++ phy-handle = <&sgmii_phy1>;
+ };
+
-+ ethernet@1 {
++ pfe_mac1: ethernet@1 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = < 0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = < 0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii-2500";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&sgmii_phy2>;
++ };
++
++ mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
+
-+ mdio@0 {
-+ reg = <0x0>; /* enabled/disabled */
++ sgmii_phy1: ethernet-phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x1>;
++ };
++
++ sgmii_phy2: ethernet-phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x2>;
+ };
+ };
+};
sys_mclk: clock-mclk {
compatible = "fixed-clock";
#clock-cells = <0>;
-@@ -110,6 +79,44 @@
+@@ -110,6 +79,45 @@
};
};
+ #address-cells = <1>;
+ #size-cells = <0>;
+
-+ ethernet@0 {
++ pfe_mac0: ethernet@0 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
-+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
-+ };
++ phy-handle = <&sgmii_phy1>;
+ };
+
-+ ethernet@1 {
++ pfe_mac1: ethernet@1 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x1>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&sgmii_phy2>;
++ };
+
-+ mdio@0 {
-+ reg = <0x0>; /* enabled/disabled */
++ mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ sgmii_phy1: ethernet-phy@2 {
++ reg = <0x2>;
++ };
++
++ sgmii_phy2: ethernet-phy@1 {
++ reg = <0x1>;
+ };
+ };
+};
&sai2 {
status = "okay";
};
-@@ -117,3 +124,18 @@
+@@ -117,3 +125,18 @@
&sata {
status = "okay";
};
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
-@@ -0,0 +1,177 @@
+@@ -0,0 +1,179 @@
+/*
+ * Device Tree file for NXP LS1012A FRWY Board.
+ *
+ #address-cells = <1>;
+ #size-cells = <0>;
+
-+ ethernet@0 {
++ pfe_mac0: ethernet@0 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
-+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
-+ };
++ phy-handle = <&sgmii_phy1>;
+ };
+
-+ ethernet@1 {
++ pfe_mac1: ethernet@1 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x1>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&sgmii_phy2>;
++ };
++
++ mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ sgmii_phy1: ethernet-phy@2 {
++ reg = <0x2>;
++ };
+
-+ mdio@0 {
-+ reg = <0x0>; /* enabled/disabled */
++ sgmii_phy2: ethernet-phy@1 {
++ reg = <0x1>;
+ };
+ };
+};
&duart0 {
status = "okay";
};
-@@ -131,6 +137,44 @@
+@@ -131,6 +137,47 @@
};
};
+ #address-cells = <1>;
+ #size-cells = <0>;
+
-+ ethernet@0 {
++ pfe_mac0: ethernet@0 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x1>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x2>;
+ phy-mode = "sgmii-2500";
-+ fsl,pfe-phy-if-flags = <0x0>;
-+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
-+ };
++ phy-handle = <&sgmii_phy1>;
+ };
+
-+ ethernet@1 {
++ pfe_mac1: ethernet@1 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x3>;
+ phy-mode = "sgmii-2500";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&sgmii_phy2>;
++ };
++
++ mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
+
-+ mdio@0 {
-+ reg = <0x0>; /* enabled/disabled */
++ sgmii_phy1: ethernet-phy@1 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x1>;
++ };
++
++ sgmii_phy2: ethernet-phy@2 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x2>;
+ };
+ };
+};
&sai2 {
status = "okay";
};
-@@ -138,3 +182,18 @@
+@@ -138,3 +185,18 @@
&sata {
status = "okay";
};
+ #address-cells = <1>;
+ #size-cells = <0>;
+
-+ ethernet@0 {
++ pfe_mac0: ethernet@0 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
-+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
-+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
-+ };
++ phy-handle = <&sgmii_phy>;
+ };
+
-+ ethernet@1 {
++ pfe_mac1: ethernet@1 {
+ compatible = "fsl,pfe-gemac-port";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* GEM_ID */
-+ fsl,gemac-bus-id = < 0x1 >; /* BUS_ID */
-+ fsl,gemac-phy-id = < 0x1 >; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "rgmii-txid";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&rgmii_phy>;
++ };
++ mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ sgmii_phy: ethernet-phy@2 {
++ reg = <0x2>;
++ };
+
-+ mdio@0 {
-+ reg = <0x0>; /* enabled/disabled */
++ rgmii_phy: ethernet-phy@1 {
++ reg = <0x1>;
+ };
+ };
+};
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
-@@ -70,6 +34,24 @@
+@@ -64,12 +28,30 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
clocks = <&clockgen 1 0>;
#cooling-cells = <2>;
big-endian;
};
-@@ -335,13 +317,23 @@
- };
+@@ -304,44 +286,25 @@
+ #thermal-sensor-cells = <1>;
};
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+- thermal-sensors = <&tmu 0>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- cpu_crit: cpu-crit {
+- temperature = <95000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
++ #include "fsl-tmu.dtsi"
+
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+ ftm0: ftm0@29d0000 {
+ compatible = "fsl,ls1012a-ftm-alarm";
+ reg = <0x0 0x29d0000 0x0 0x10000>,
+ reg-names = "ftm", "pmctrl";
+ interrupts = <0 86 0x4>;
+ big-endian;
-+ };
-+
+ };
+
i2c0: i2c@2180000 {
- compatible = "fsl,vf610-i2c";
+ compatible = "fsl,vf610-i2c", "fsl,ls1012a-vf610-i2c";
interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 0>;
+ clocks = <&clockgen 4 3>;
-+ fsl-scl-gpio = <&gpio0 13 0>;
++ scl-gpios = <&gpio0 13 0>;
status = "disabled";
};
-@@ -351,7 +343,20 @@
+@@ -351,7 +314,20 @@
#size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
-@@ -400,6 +405,20 @@
+@@ -400,6 +376,20 @@
big-endian;
};
sai1: sai@2b50000 {
#sound-dai-cells = <0>;
compatible = "fsl,vf610-sai";
-@@ -451,6 +470,7 @@
+@@ -451,6 +441,8 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
};
sata: sata@3200000 {
-@@ -471,5 +491,84 @@
+@@ -471,5 +463,85 @@
dr_mode = "host";
phy_type = "ulpi";
};
+ clock-names = "pfe";
+
+ status = "okay";
-+ pfe_mac0: ethernet@0 {
-+ };
-+
-+ pfe_mac1: ethernet@1 {
-+ };
+ };
+
+ firmware {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
- };
++ };
++};
++
++&thermal_zones {
++ thermal-zone0 {
++ status = "okay";
+ };
};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
&soc {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
-@@ -0,0 +1,71 @@
+@@ -0,0 +1,263 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+};
+
+&soc {
++/delete-property/ dma-coherent;
++
+#include "qoriq-dpaa-eth.dtsi"
+#include "qoriq-fman3-0-6oh.dtsi"
++
++pcie@3400000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++pcie@3500000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++pcie@3600000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++/delete-node/ iommu@9000000;
+};
+
+&fman0 {
+ compatible = "fsl,fman", "simple-bus";
++ dma-coherent;
++};
++
++&clockgen {
++ dma-coherent;
++};
++
++&scfg {
++ dma-coherent;
++};
++
++&crypto {
++ dma-coherent;
++};
++
++&dcfg {
++ dma-coherent;
++};
++
++&ifc {
++ dma-coherent;
++};
++
++&qspi {
++ dma-coherent;
++};
++
++&esdhc {
++ dma-coherent;
++};
++
++&ddr {
++ dma-coherent;
++};
++
++&tmu {
++ dma-coherent;
++};
++
++&qman {
++ dma-coherent;
++};
++
++&bman {
++ dma-coherent;
++};
++
++&bportals {
++ dma-coherent;
++};
++
++&qportals {
++ dma-coherent;
++};
++
++&dspi0 {
++ dma-coherent;
++};
++
++&dspi1 {
++ dma-coherent;
++};
++
++&i2c0 {
++ dma-coherent;
++};
++
++&i2c1 {
++ dma-coherent;
++};
++
++&i2c2 {
++ dma-coherent;
++};
++
++&i2c3 {
++ dma-coherent;
++};
++
++&duart0 {
++ dma-coherent;
++};
++
++&duart1 {
++ dma-coherent;
++};
++
++&duart2 {
++ dma-coherent;
++};
++
++&duart3 {
++ dma-coherent;
++};
++
++&gpio1 {
++ dma-coherent;
++};
++
++&gpio2 {
++ dma-coherent;
++};
++
++&gpio3 {
++ dma-coherent;
++};
++
++&gpio4 {
++ dma-coherent;
++};
++
++&uqe {
++ dma-coherent;
++};
++
++&lpuart0 {
++ dma-coherent;
++};
++
++&lpuart1 {
++ dma-coherent;
++};
++
++&lpuart2 {
++ dma-coherent;
++};
++
++&lpuart3 {
++ dma-coherent;
++};
++
++&lpuart4 {
++ dma-coherent;
++};
++
++&lpuart5 {
++ dma-coherent;
++};
++
++&ftm0 {
++ dma-coherent;
++};
++
++&wdog0 {
++ dma-coherent;
++};
++
++&edma0 {
++ dma-coherent;
++};
++
++&qdma {
++ dma-coherent;
++};
++
++&msi1 {
++ dma-coherent;
++};
++
++&msi2 {
++ dma-coherent;
++};
++
++&msi3 {
++ dma-coherent;
++};
++
++&ptp_timer0 {
++ dma-coherent;
++};
++
++&fsldpaa {
++ dma-coherent;
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
};
chosen {
-@@ -179,7 +158,153 @@
+@@ -97,8 +76,11 @@
+ };
+
+ fpga: board-control@2,0 {
+- compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis";
++ compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis", "simple-bus";
+ reg = <0x2 0x0 0x0000100>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0 2 0 0x100>;
+ };
+ };
+
+@@ -179,7 +161,153 @@
#size-cells = <1>;
spi-max-frequency = <20000000>;
reg = <0>;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
-@@ -0,0 +1,71 @@
+@@ -0,0 +1,262 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+};
+
+&soc {
++/delete-property/ dma-coherent;
++
+#include "qoriq-dpaa-eth.dtsi"
+#include "qoriq-fman3-0-6oh.dtsi"
++
++pcie@3400000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++pcie@3500000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++pcie@3600000 {
++ /delete-property/ iommu-map;
++ dma-coherent;
++};
++
++/delete-node/ iommu@9000000;
+};
+
+&fman0 {
+ compatible = "fsl,fman", "simple-bus";
+};
++
++&clockgen {
++ dma-coherent;
++};
++
++&scfg {
++ dma-coherent;
++};
++
++&crypto {
++ dma-coherent;
++};
++
++&dcfg {
++ dma-coherent;
++};
++
++&ifc {
++ dma-coherent;
++};
++
++&qspi {
++ dma-coherent;
++};
++
++&esdhc {
++ dma-coherent;
++};
++
++&ddr {
++ dma-coherent;
++};
++
++&tmu {
++ dma-coherent;
++};
++
++&qman {
++ dma-coherent;
++};
++
++&bman {
++ dma-coherent;
++};
++
++&bportals {
++ dma-coherent;
++};
++
++&qportals {
++ dma-coherent;
++};
++
++&dspi0 {
++ dma-coherent;
++};
++
++&dspi1 {
++ dma-coherent;
++};
++
++&i2c0 {
++ dma-coherent;
++};
++
++&i2c1 {
++ dma-coherent;
++};
++
++&i2c2 {
++ dma-coherent;
++};
++
++&i2c3 {
++ dma-coherent;
++};
++
++&duart0 {
++ dma-coherent;
++};
++
++&duart1 {
++ dma-coherent;
++};
++
++&duart2 {
++ dma-coherent;
++};
++
++&duart3 {
++ dma-coherent;
++};
++
++&gpio1 {
++ dma-coherent;
++};
++
++&gpio2 {
++ dma-coherent;
++};
++
++&gpio3 {
++ dma-coherent;
++};
++
++&gpio4 {
++ dma-coherent;
++};
++
++&lpuart0 {
++ dma-coherent;
++};
++
++&lpuart1 {
++ dma-coherent;
++};
++
++&lpuart2 {
++ dma-coherent;
++};
++
++&lpuart3 {
++ dma-coherent;
++};
++
++&lpuart4 {
++ dma-coherent;
++};
++
++&lpuart5 {
++ dma-coherent;
++};
++
++&ftm0 {
++ dma-coherent;
++};
++
++&wdog0 {
++ dma-coherent;
++};
++
++&edma0 {
++ dma-coherent;
++};
++
++&qdma {
++ dma-coherent;
++};
++
++&msi1 {
++ dma-coherent;
++};
++
++&msi2 {
++ dma-coherent;
++};
++
++&msi3 {
++ dma-coherent;
++};
++
++&fman0 {
++ dma-coherent;
++};
++
++&ptp_timer0 {
++ dma-coherent;
++};
++
++&fsldpaa {
++ dma-coherent;
++};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts
-@@ -0,0 +1,117 @@
+@@ -0,0 +1,140 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+ fsl,bpid = <7>;
+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
++ dma-coherent;
+ };
+
+ bp8: buffer-pool@8 {
+ fsl,bpid = <8>;
+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ dma-coherent;
+ };
+
+ bp9: buffer-pool@9 {
+ fsl,bpid = <9>;
+ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ dma-coherent;
+ };
+
+ fsl,dpaa {
+ compatible = "fsl,ls1043a", "fsl,dpaa", "simple-bus";
++ dma-coherent;
+
+ ethernet@0 {
+ compatible = "fsl,dpa-ethernet-init";
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ };
++
++ pcie@3400000 {
++ /delete-property/ iommu-map;
++ };
++
++ pcie@3500000 {
++ /delete-property/ iommu-map;
++ };
++
++ pcie@3600000 {
++ /delete-property/ iommu-map;
++ };
++
++ /delete-node/ iommu@9000000;
+};
+/ {
+ reserved-memory {
+ #size-cells = <2>;
+ ranges;
+
++ /* For legacy USDPAA-based use cases, update the size and
++ alignment parameters. For example, to allocate 256 MB of memory:
++ size = <0 0x10000000>;
++ alignment = <0 0x10000000>;
++ */
+ usdpaa_mem: usdpaa_mem {
+ compatible = "fsl,usdpaa-mem";
+ alloc-ranges = <0 0 0x10000 0>;
-+ size = <0 0x10000000>;
-+ alignment = <0 0x10000000>;
++ size = <0 0x1000>;
++ alignment = <0 0x1000>;
+ };
+ };
+};
*/
/dts-v1/;
-@@ -86,6 +49,10 @@
+@@ -51,7 +14,6 @@
+ model = "LS1043A RDB Board";
+
+ aliases {
+- crypto = &crypto;
+ serial0 = &duart0;
+ serial1 = &duart1;
+ serial2 = &duart2;
+@@ -86,6 +48,10 @@
compatible = "pericom,pt7c4338";
reg = <0x68>;
};
};
&ifc {
-@@ -130,6 +97,38 @@
+@@ -130,6 +96,38 @@
reg = <0>;
spi-max-frequency = <1000000>; /* input clock */
};
*/
#include <dt-bindings/thermal/thermal.h>
-@@ -81,6 +44,7 @@
+@@ -54,6 +17,7 @@
+ #size-cells = <2>;
+
+ aliases {
++ crypto = &crypto;
+ fman0 = &fman0;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+@@ -74,13 +38,14 @@
+ *
+ * Currently supported enable-method is psci v0.2
+ */
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
#cooling-cells = <2>;
};
cpu1: cpu@1 {
-@@ -89,6 +53,7 @@
+@@ -89,6 +54,7 @@
reg = <0x1>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
cpu2: cpu@2 {
-@@ -97,6 +62,7 @@
+@@ -97,6 +63,7 @@
reg = <0x2>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
cpu3: cpu@3 {
-@@ -105,6 +71,7 @@
+@@ -105,6 +72,7 @@
reg = <0x3>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
l2: l2-cache {
-@@ -112,6 +79,23 @@
+@@ -112,6 +80,23 @@
};
};
memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0 0x80000000>;
-@@ -255,7 +239,7 @@
+@@ -196,6 +181,8 @@
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
++ dma-coherent;
+
+ clockgen: clocking@1ee1000 {
+ compatible = "fsl,ls1043a-clockgen";
+@@ -204,6 +191,49 @@
+ clocks = <&sysclk>;
+ };
+
++ smmu: iommu@9000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x9000000 0 0x400000>;
++ dma-coherent;
++ stream-match-mask = <0x7f00>;
++ #global-interrupts = <2>;
++ #iommu-cells = <1>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
+ scfg: scfg@1570000 {
+ compatible = "fsl,ls1043a-scfg", "syscon";
+ reg = <0x0 0x1570000 0x0 0x10000>;
+@@ -255,7 +285,7 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1043a-dcfg", "syscon";
big-endian;
};
-@@ -422,7 +406,7 @@
+@@ -342,36 +372,7 @@
+ #thermal-sensor-cells = <1>;
+ };
+
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+-
+- thermal-sensors = <&tmu 3>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+- cpu_crit: cpu-crit {
+- temperature = <95000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+- };
++ #include "fsl-tmu.dtsi"
+
+ qman: qman@1880000 {
+ compatible = "fsl,qman";
+@@ -422,7 +423,7 @@
};
i2c0: i2c@2180000 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
-@@ -432,6 +416,7 @@
+@@ -432,6 +433,7 @@
dmas = <&edma0 1 39>,
<&edma0 1 38>;
dma-names = "tx", "rx";
-+ fsl-scl-gpio = <&gpio4 12 0>;
++ scl-gpios = <&gpio4 12 0>;
status = "disabled";
};
-@@ -536,6 +521,72 @@
+@@ -536,6 +538,72 @@
#interrupt-cells = <2>;
};
lpuart0: serial@2950000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
-@@ -590,6 +641,16 @@
+@@ -590,6 +658,16 @@
status = "disabled";
};
wdog0: wdog@2ad0000 {
compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -622,6 +683,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- usb1: usb3@3000000 {
-@@ -631,6 +695,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- usb2: usb3@3100000 {
-@@ -640,6 +707,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- sata: sata@3200000 {
-@@ -652,6 +722,27 @@
- dma-coherent;
+@@ -615,41 +693,81 @@
+ <&clockgen 4 0>;
};
+- usb0: usb3@2f00000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x2f00000 0x0 0x10000>;
+- interrupts = <0 60 0x4>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- usb1: usb3@3000000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3000000 0x0 0x10000>;
+- interrupts = <0 61 0x4>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- usb2: usb3@3100000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <0 63 0x4>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- sata: sata@3200000 {
+- compatible = "fsl,ls1043a-ahci";
+- reg = <0x0 0x3200000 0x0 0x10000>,
+- <0x0 0x20140520 0x0 0x4>;
+- reg-names = "ahci", "sata-ecc";
+- interrupts = <0 69 0x4>;
+- clocks = <&clockgen 4 0>;
+- dma-coherent;
++ aux_bus: aux_bus {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ compatible = "simple-bus";
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++ usb0: usb3@2f00000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x2f00000 0x0 0x10000>;
++ interrupts = <0 60 0x4>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ usb1: usb3@3000000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3000000 0x0 0x10000>;
++ interrupts = <0 61 0x4>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ usb2: usb3@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <0 63 0x4>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ sata: sata@3200000 {
++ compatible = "fsl,ls1043a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x0 0x20140520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
++ interrupts = <0 69 0x4>;
++ clocks = <&clockgen 4 0>;
++ };
++ };
++
+ qdma: qdma@8380000 {
+ compatible = "fsl,ls1021a-qdma", "fsl,ls1043a-qdma";
+ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+ status-sizes = <64>;
+ queue-sizes = <64 64>;
+ big-endian;
-+ };
-+
+ };
+
msi1: msi-controller1@1571000 {
- compatible = "fsl,ls1043a-msi";
- reg = <0x0 0x1571000 0x0 0x8>;
-@@ -678,9 +769,9 @@
+@@ -678,13 +796,13 @@
reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -703,9 +794,9 @@
+- dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+@@ -696,6 +814,7 @@
+ <0000 0 0 2 &gic 0 111 0x4>,
+ <0000 0 0 3 &gic 0 112 0x4>,
+ <0000 0 0 4 &gic 0 113 0x4>;
++ status = "disabled";
+ };
+
+ pcie@3500000 {
+@@ -703,13 +822,13 @@
reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -728,9 +819,9 @@
+- dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <2>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
+@@ -721,6 +840,7 @@
+ <0000 0 0 2 &gic 0 121 0x4>,
+ <0000 0 0 3 &gic 0 122 0x4>,
+ <0000 0 0 4 &gic 0 123 0x4>;
++ status = "disabled";
+ };
+
+ pcie@3600000 {
+@@ -728,13 +848,13 @@
reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -749,6 +840,13 @@
- };
- };
-
+- dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <2>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
+@@ -746,6 +866,14 @@
+ <0000 0 0 2 &gic 0 155 0x4>,
+ <0000 0 0 3 &gic 0 156 0x4>,
+ <0000 0 0 4 &gic 0 157 0x4>;
++ status = "disabled";
++ };
++ };
++
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
-+ };
-+ };
-+
- };
+ };
+ };
+
+@@ -753,3 +881,29 @@
#include "qoriq-qman-portals.dtsi"
+ #include "qoriq-bman-portals.dtsi"
++
++&thermal_zones {
++ thermal-zone0 {
++ status = "okay";
++ };
++
++ thermal-zone1 {
++ status = "okay";
++ };
++
++ thermal-zone2 {
++ status = "okay";
++ };
++
++ thermal-zone3 {
++ status = "okay";
++ };
++
++ thermal-zone4 {
++ status = "okay";
++ };
++
++ thermal-zone5 {
++ status = "okay";
++ };
++};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
@@ -1,9 +1,9 @@
&soc {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
-@@ -0,0 +1,79 @@
+@@ -0,0 +1,268 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+};
+
+&soc {
++/delete-property/ dma-coherent;
++
+#include "qoriq-dpaa-eth.dtsi"
+#include "qoriq-fman3-0-6oh.dtsi"
++
++pcie@3400000 {
++ /delete-property/ iommu-map;
++};
++
++pcie@3500000 {
++ /delete-property/ iommu-map;
++};
++
++pcie@3600000 {
++ /delete-property/ iommu-map;
++};
++
++/delete-node/ iommu@9000000;
+};
+
+&fsldpaa {
+
+&fman0 {
+ compatible = "fsl,fman", "simple-bus";
++ dma-coherent;
++};
++
++&clockgen {
++ dma-coherent;
++};
++
++&scfg {
++ dma-coherent;
++};
++
++&crypto {
++ dma-coherent;
++};
++
++&dcfg {
++ dma-coherent;
++};
++
++&ifc {
++ dma-coherent;
++};
++
++&qspi {
++ dma-coherent;
++};
++
++&esdhc {
++ dma-coherent;
++};
++
++&ddr {
++ dma-coherent;
++};
++
++&tmu {
++ dma-coherent;
++};
++
++&qman {
++ dma-coherent;
++};
++
++&bman {
++ dma-coherent;
++};
++
++&bportals {
++ dma-coherent;
++};
++
++&qportals {
++ dma-coherent;
++};
++
++&dspi {
++ dma-coherent;
++};
++
++&i2c0 {
++ dma-coherent;
++};
++
++&i2c1 {
++ dma-coherent;
++};
++
++&i2c2 {
++ dma-coherent;
++};
++
++&i2c3 {
++ dma-coherent;
++};
++
++&duart0 {
++ dma-coherent;
++};
++
++&duart1 {
++ dma-coherent;
++};
++
++&duart2 {
++ dma-coherent;
++};
++
++&duart3 {
++ dma-coherent;
++};
++
++&gpio0 {
++ dma-coherent;
++};
++
++&gpio1 {
++ dma-coherent;
++};
++
++&gpio2 {
++ dma-coherent;
++};
++
++&gpio3 {
++ dma-coherent;
++};
++
++&lpuart0 {
++ dma-coherent;
++};
++
++&lpuart1 {
++ dma-coherent;
++};
++
++&lpuart2 {
++ dma-coherent;
++};
++
++&lpuart3 {
++ dma-coherent;
++};
++
++&lpuart4 {
++ dma-coherent;
++};
++
++&lpuart5 {
++ dma-coherent;
++};
++
++&ftm0 {
++ dma-coherent;
++};
++
++&wdog0 {
++ dma-coherent;
++};
++
++&edma0 {
++ dma-coherent;
++};
++
++&sata {
++ dma-coherent;
++};
++
++&qdma {
++ dma-coherent;
++};
++
++&msi1 {
++ dma-coherent;
++};
++
++&msi2 {
++ dma-coherent;
++};
++
++&msi3 {
++ dma-coherent;
++};
++
++&ptp_timer0 {
++ dma-coherent;
++};
++
++&serdes1 {
++ dma-coherent;
++};
++
++&fsldpaa {
++ dma-coherent;
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
};
chosen {
-@@ -208,7 +185,143 @@
+@@ -188,8 +165,9 @@
+ };
+
+ fpga: board-control@2,0 {
+- compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis";
++ compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis", "simple-bus";
+ reg = <0x2 0x0 0x0000100>;
++ ranges = <0 2 0 0x100>;
+ };
+ };
+
+@@ -206,9 +184,145 @@
+ compatible = "spansion,m25p80";
+ #address-cells = <1>;
#size-cells = <1>;
- spi-max-frequency = <20000000>;
+- spi-max-frequency = <20000000>;
++ spi-max-frequency = <50000000>;
reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
-@@ -0,0 +1,115 @@
+@@ -0,0 +1,307 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+};
+
+&soc {
++/delete-property/ dma-coherent;
++
+#include "qoriq-dpaa-eth.dtsi"
+#include "qoriq-fman3-0-6oh.dtsi"
++
++pcie@3400000 {
++ /delete-property/ iommu-map;
++};
++
++pcie@3500000 {
++ /delete-property/ iommu-map;
++};
++
++pcie@3600000 {
++ /delete-property/ iommu-map;
++};
++
++/delete-node/ iommu@9000000;
+};
+
+&fsldpaa {
+ * phy-handle = <&pcsphy7>;
+ *};
+*/
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
-@@ -0,0 +1,110 @@
-+/*
-+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
-+ *
-+ * Copyright (C) 2016, Freescale Semiconductor
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
+
-+#include "fsl-ls1046a-rdb-sdk.dts"
++&clockgen {
++ dma-coherent;
++};
+
-+&soc {
-+ bp7: buffer-pool@7 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <7>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
-+ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
-+ };
++&scfg {
++ dma-coherent;
++};
+
-+ bp8: buffer-pool@8 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <8>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ };
++&crypto {
++ dma-coherent;
++};
+
-+ bp9: buffer-pool@9 {
-+ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
-+ fsl,bpid = <9>;
-+ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
-+ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
-+ };
++&dcfg {
++ dma-coherent;
++};
+
-+ fsl,dpaa {
-+ compatible = "fsl,ls1046a", "fsl,dpaa", "simple-bus";
++&ifc {
++ dma-coherent;
++};
+
-+ ethernet@2 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
-+ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
-+ };
++&qspi {
++ dma-coherent;
++};
+
-+ ethernet@3 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
-+ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
-+ };
++&esdhc {
++ dma-coherent;
++};
+
-+ ethernet@4 {
-+ compatible = "fsl,dpa-ethernet-init";
-+ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
-+ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
-+ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
++&ddr {
++ dma-coherent;
++};
++
++&tmu {
++ dma-coherent;
++};
++
++&qman {
++ dma-coherent;
++};
++
++&bman {
++ dma-coherent;
++};
++
++&bportals {
++ dma-coherent;
++};
++
++&qportals {
++ dma-coherent;
++};
++
++&dspi {
++ dma-coherent;
++};
++
++&i2c0 {
++ dma-coherent;
++};
++
++&i2c1 {
++ dma-coherent;
++};
++
++&i2c2 {
++ dma-coherent;
++};
++
++&i2c3 {
++ dma-coherent;
++};
++
++&duart0 {
++ dma-coherent;
++};
++
++&duart1 {
++ dma-coherent;
++};
++
++&duart2 {
++ dma-coherent;
++};
++
++&duart3 {
++ dma-coherent;
++};
++
++&gpio0 {
++ dma-coherent;
++};
++
++&gpio1 {
++ dma-coherent;
++};
++
++&gpio2 {
++ dma-coherent;
++};
++
++&gpio3 {
++ dma-coherent;
++};
++
++&lpuart0 {
++ dma-coherent;
++};
++
++&lpuart1 {
++ dma-coherent;
++};
++
++&lpuart2 {
++ dma-coherent;
++};
++
++&lpuart3 {
++ dma-coherent;
++};
++
++&lpuart4 {
++ dma-coherent;
++};
++
++&lpuart5 {
++ dma-coherent;
++};
++
++&ftm0 {
++ dma-coherent;
++};
++
++&wdog0 {
++ dma-coherent;
++};
++
++&edma0 {
++ dma-coherent;
++};
++
++&sata {
++ dma-coherent;
++};
++
++&qdma {
++ dma-coherent;
++};
++
++&msi1 {
++ dma-coherent;
++};
++
++&msi2 {
++ dma-coherent;
++};
++
++&msi3 {
++ dma-coherent;
++};
++
++&fman0 {
++ dma-coherent;
++};
++
++&ptp_timer0 {
++ dma-coherent;
++};
++
++&serdes1 {
++ dma-coherent;
++};
++
++&fsldpaa {
++ dma-coherent;
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts
+@@ -0,0 +1,133 @@
++/*
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
++ *
++ * Copyright (C) 2016, Freescale Semiconductor
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include "fsl-ls1046a-rdb-sdk.dts"
++
++&soc {
++ bp7: buffer-pool@7 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <7>;
++ fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>;
++ fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>;
++ dma-coherent;
++ };
++
++ bp8: buffer-pool@8 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <8>;
++ fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ dma-coherent;
++ };
++
++ bp9: buffer-pool@9 {
++ compatible = "fsl,ls1046a-bpool", "fsl,bpool";
++ fsl,bpid = <9>;
++ fsl,bpool-ethernet-cfg = <0 0 0 2048 0 0xfeedabba>;
++ fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>;
++ dma-coherent;
++ };
++
++ fsl,dpaa {
++ compatible = "fsl,ls1046a", "fsl,dpaa", "simple-bus";
++ dma-coherent;
++
++ ethernet@2 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x54 1 0x55 1>;
++ fsl,qman-frame-queues-tx = <0x74 1 0x75 1>;
++ };
++
++ ethernet@3 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x56 1 0x57 1>;
++ fsl,qman-frame-queues-tx = <0x76 1 0x77 1>;
++ };
++
++ ethernet@4 {
++ compatible = "fsl,dpa-ethernet-init";
++ fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>;
++ fsl,qman-frame-queues-rx = <0x58 1 0x59 1>;
++ fsl,qman-frame-queues-tx = <0x78 1 0x79 1>;
+ };
+
+ ethernet@5 {
+ fsl,fman-oh-port = <&fman0_oh2>;
+ };
+ };
++
++ pcie@3400000 {
++ /delete-property/ iommu-map;
++ };
++
++ pcie@3500000 {
++ /delete-property/ iommu-map;
++ };
++
++ pcie@3600000 {
++ /delete-property/ iommu-map;
++ };
++
++ /delete-node/ iommu@9000000;
+};
+/ {
+ reserved-memory {
+ #size-cells = <2>;
+ ranges;
+
++ /* For legacy usdpaa based use-cases, update the size and
++ alignment parameters. e.g. to allocate 256 MB memory:
++ size = <0 0x10000000>;
++ alignment = <0 0x10000000>;
++ */
+ usdpaa_mem: usdpaa_mem {
+ compatible = "fsl,usdpaa-mem";
+ alloc-ranges = <0 0 0x10000 0>;
-+ size = <0 0x10000000>;
-+ alignment = <0 0x10000000>;
++ size = <0 0x1000>;
++ alignment = <0 0x1000>;
+ };
+ };
+};
*/
/dts-v1/;
-@@ -139,6 +102,7 @@
+@@ -139,21 +102,26 @@
num-cs = <2>;
bus-num = <0>;
status = "okay";
qflash0: s25fs512s@0 {
compatible = "spansion,m25p80";
-@@ -146,6 +110,8 @@
+ #address-cells = <1>;
#size-cells = <1>;
- spi-max-frequency = <20000000>;
+- spi-max-frequency = <20000000>;
++ spi-max-frequency = <50000000>;
reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
};
qflash1: s25fs512s@1 {
-@@ -154,6 +120,8 @@
+ compatible = "spansion,m25p80";
+ #address-cells = <1>;
#size-cells = <1>;
- spi-max-frequency = <20000000>;
+- spi-max-frequency = <20000000>;
++ spi-max-frequency = <50000000>;
reg = <1>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+@@ -70,7 +33,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x0>;
@@ -122,7 +85,7 @@
CPU_PH20: cpu-ph20 {
compatible = "arm,idle-state";
entry-latency-us = <1000>;
exit-latency-us = <1000>;
min-residency-us = <3000>;
-@@ -214,7 +177,6 @@
+@@ -188,6 +151,8 @@
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
++ dma-coherent;
+
+ ddr: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+@@ -214,7 +179,6 @@
clock-names = "qspi_en", "qspi";
clocks = <&clockgen 4 1>, <&clockgen 4 1>;
big-endian;
status = "disabled";
};
-@@ -304,7 +266,7 @@
+@@ -229,6 +193,49 @@
+ bus-width = <4>;
+ };
+
++ smmu: iommu@9000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x9000000 0 0x400000>;
++ dma-coherent;
++ stream-match-mask = <0x7f00>;
++ #global-interrupts = <2>;
++ #iommu-cells = <1>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
+ scfg: scfg@1570000 {
+ compatible = "fsl,ls1046a-scfg", "syscon";
+ reg = <0x0 0x1570000 0x0 0x10000>;
+@@ -304,7 +311,7 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1046a-dcfg", "syscon";
big-endian;
};
-@@ -407,7 +369,7 @@
+@@ -362,36 +369,7 @@
+ #thermal-sensor-cells = <1>;
+ };
+
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+- thermal-sensors = <&tmu 3>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- cpu_crit: cpu-crit {
+- temperature = <95000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+- };
++ #include "fsl-tmu.dtsi"
+
+ dspi: dspi@2100000 {
+ compatible = "fsl,ls1021a-v1.0-dspi";
+@@ -407,7 +385,7 @@
};
i2c0: i2c@2180000 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
-@@ -416,6 +378,7 @@
+@@ -416,6 +394,7 @@
dmas = <&edma0 1 39>,
<&edma0 1 38>;
dma-names = "tx", "rx";
-+ fsl-scl-gpio = <&gpio3 12 0>;
++ scl-gpios = <&gpio3 12 0>;
status = "disabled";
};
-@@ -440,12 +403,13 @@
+@@ -440,12 +419,13 @@
};
i2c3: i2c@21b0000 {
reg = <0x0 0x21b0000 0x0 0x10000>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
-+ fsl-scl-gpio = <&gpio3 12 0>;
++ scl-gpios = <&gpio3 12 0>;
status = "disabled";
};
-@@ -571,6 +535,15 @@
+@@ -571,6 +551,15 @@
status = "disabled";
};
wdog0: watchdog@2ad0000 {
compatible = "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -602,6 +575,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- usb1: usb@3000000 {
-@@ -611,6 +587,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- usb2: usb@3100000 {
-@@ -620,6 +599,9 @@
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
-+ usb3-lpm-capable;
-+ snps,dis-u1u2-when-u3-quirk;
-+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
- };
-
- sata: sata@3200000 {
-@@ -631,6 +613,27 @@
- clocks = <&clockgen 4 1>;
+@@ -595,40 +584,81 @@
+ <&clockgen 4 1>;
};
+- usb0: usb@2f00000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x2f00000 0x0 0x10000>;
+- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- usb1: usb@3000000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3000000 0x0 0x10000>;
+- interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- usb2: usb@3100000 {
+- compatible = "snps,dwc3";
+- reg = <0x0 0x3100000 0x0 0x10000>;
+- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+- dr_mode = "host";
+- snps,quirk-frame-length-adjustment = <0x20>;
+- snps,dis_rxdet_inp3_quirk;
+- };
+-
+- sata: sata@3200000 {
+- compatible = "fsl,ls1046a-ahci";
+- reg = <0x0 0x3200000 0x0 0x10000>,
+- <0x0 0x20140520 0x0 0x4>;
+- reg-names = "ahci", "sata-ecc";
+- interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&clockgen 4 1>;
++ aux_bus: aux_bus {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ compatible = "simple-bus";
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;
++
++ usb0: usb@2f00000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x2f00000 0x0 0x10000>;
++ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ usb1: usb@3000000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3000000 0x0 0x10000>;
++ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ usb2: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ };
++
++ sata: sata@3200000 {
++ compatible = "fsl,ls1046a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>,
++ <0x0 0x20140520 0x0 0x4>;
++ reg-names = "ahci", "sata-ecc";
++ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 1>;
++ };
++ };
++
+ qdma: qdma@8380000 {
+ compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma";
+ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+ status-sizes = <64>;
+ queue-sizes = <64 64>;
+ big-endian;
-+ };
-+
+ };
+
msi1: msi-controller@1580000 {
- compatible = "fsl,ls1046a-msi";
- msi-controller;
-@@ -661,6 +664,92 @@
+@@ -661,6 +691,125 @@
<GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
};
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <4>;
-+ num-ib-windows = <6>;
-+ num-ob-windows = <6>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ <0000 0 0 2 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3400000 {
++ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
++ reg = <0x00 0x03400000 0x0 0x00100000
++ 0x40 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ib-windows = <6>;
++ num-ob-windows = <8>;
++ num-lanes = <2>;
++ status = "disabled";
+ };
+
+ pcie@3500000 {
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <2>;
-+ num-ib-windows = <6>;
-+ num-ob-windows = <6>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ <0000 0 0 2 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3500000 {
++ compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep";
++ reg = <0x00 0x03500000 0x0 0x00100000
++ 0x48 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ib-windows = <6>;
++ num-ob-windows = <8>;
++ num-lanes = <2>;
++ status = "disabled";
+ };
+
+ pcie@3600000 {
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
++ iommu-map = <0 &smmu 0 1>;
+ num-lanes = <2>;
-+ num-ib-windows = <6>;
-+ num-ob-windows = <6>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ <0000 0 0 2 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3600000 {
++ compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep";
++ reg = <0x00 0x03600000 0x0 0x00100000
++ 0x50 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ib-windows = <6>;
++ num-ob-windows = <8>;
++ num-lanes = <2>;
++ status = "disabled";
+ };
+
+ serdes1: serdes@1ea0000 {
};
reserved-memory {
-@@ -689,6 +778,13 @@
+@@ -689,7 +838,36 @@
no-map;
};
};
};
#include "qoriq-qman-portals.dtsi"
+ #include "qoriq-bman-portals.dtsi"
++
++&thermal_zones {
++ thermal-zone0 {
++ status = "okay";
++ };
++
++ thermal-zone1 {
++ status = "okay";
++ };
++
++ thermal-zone2 {
++ status = "okay";
++ };
++
++ thermal-zone3 {
++ status = "okay";
++ };
++
++ thermal-zone4 {
++ status = "okay";
++ };
++};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -1,3 +1,4 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>
-@@ -130,7 +94,7 @@
- CPU_PH20: cpu-ph20 {
+@@ -61,7 +25,7 @@
+ #size-cells = <0>;
+
+ /* We have 2 clusters having 4 Cortex-A53 cores each */
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+@@ -94,7 +58,7 @@
+ cpu-idle-states = <&CPU_PH20>;
+ };
+
+- cpu4: cpu@100 {
++ cooling_map1: cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x100>;
+@@ -130,7 +94,7 @@
+ CPU_PH20: cpu-ph20 {
compatible = "arm,idle-state";
idle-state-name = "PH20";
- arm,psci-suspend-param = <0x00010000>;
clockgen: clocking@1300000 {
compatible = "fsl,ls1088a-clockgen";
-@@ -283,6 +276,62 @@
+@@ -229,43 +222,7 @@
+ #thermal-sensor-cells = <1>;
+ };
+
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+- thermal-sensors = <&tmu 0>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+-
+- cpu_crit: cpu-crit {
+- temperature = <95000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+-
+- map1 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu4 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+- };
++ #include "fsl-tmu.dtsi"
+
+ duart0: serial@21c0500 {
+ compatible = "fsl,ns16550", "ns16550a";
+@@ -283,6 +240,62 @@
status = "disabled";
};
gpio0: gpio@2300000 {
compatible = "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
-@@ -323,6 +372,72 @@
+@@ -323,6 +336,72 @@
#interrupt-cells = <2>;
};
ifc: ifc@2240000 {
compatible = "fsl,ifc", "simple-bus";
reg = <0x0 0x2240000 0x0 0x20000>;
-@@ -333,13 +448,22 @@
+@@ -333,13 +412,22 @@
status = "disabled";
};
interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen 4 7>;
-+ fsl-scl-gpio = <&gpio3 30 0>;
++ scl-gpios = <&gpio3 30 0>;
status = "disabled";
};
-@@ -349,7 +473,7 @@
+@@ -349,7 +437,7 @@
#size-cells = <0>;
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
-@@ -359,7 +483,7 @@
+@@ -359,7 +447,7 @@
#size-cells = <0>;
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
-@@ -369,7 +493,7 @@
+@@ -369,7 +457,7 @@
#size-cells = <0>;
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
-@@ -385,6 +509,26 @@
+@@ -385,6 +473,28 @@
status = "disabled";
};
+ configure-gfladj;
+ snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
+ };
+
+ usb1: usb3@3110000 {
+ configure-gfladj;
+ snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
+ };
+
sata: sata@3200000 {
compatible = "fsl,ls1088a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>,
-@@ -395,6 +539,17 @@
+@@ -395,6 +505,17 @@
dma-coherent;
status = "disabled";
};
crypto: crypto@8000000 {
compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
-@@ -434,6 +589,251 @@
+@@ -434,6 +555,267 @@
interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
};
};
+ <0000 0 0 2 &gic 0 0 0 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 0 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 0 112 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ pcie@3500000 {
+ <0000 0 0 2 &gic 0 0 0 115 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 0 116 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ pcie@3600000 {
+ <0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
+ };
+
+ fsl_mc: fsl-mc@80c000000 {
+ };
+
+ serdes1: serdes@1ea0000 {
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
+ compatible = "fsl,serdes-10g";
++ reg = <0x0 0x1ea0000 0 0x00002000>;
++ little-endian;
+ };
};
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
++ };
++};
++
++#include "fsl-tmu-map1.dtsi"
++
++&thermal_zones {
++ thermal-zone0 {
++ status = "okay";
++ };
++
++ thermal-zone1 {
++ status = "okay";
+ };
};
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
/*
* Device Tree Include file for Freescale Layerscape-2080A family SoC.
*
-@@ -6,43 +7,6 @@
+@@ -6,49 +7,12 @@
* Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
*/
#include "fsl-ls208xa.dtsi"
+
+ &cpu {
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0>;
+@@ -67,7 +31,7 @@
+ next-level-cache = <&cluster0_l2>;
+ };
+
+- cpu2: cpu@100 {
++ cooling_map1: cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x100>;
+@@ -86,7 +50,7 @@
+ next-level-cache = <&cluster1_l2>;
+ };
+
+- cpu4: cpu@200 {
++ cooling_map2: cpu4: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x200>;
+@@ -105,7 +69,7 @@
+ next-level-cache = <&cluster2_l2>;
+ };
+
+- cpu6: cpu@300 {
++ cooling_map3: cpu6: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x300>;
@@ -150,6 +114,10 @@
};
};
/*
* Device Tree Include file for Freescale Layerscape-2088A family SoC.
*
-@@ -6,43 +7,6 @@
+@@ -6,49 +7,12 @@
*
* Abhimanyu Saini <abhimanyu.saini@nxp.com>
*
*/
#include "fsl-ls208xa.dtsi"
+
+ &cpu {
+- cpu0: cpu@0 {
++ cooling_map0: cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x0>;
+@@ -67,7 +31,7 @@
+ next-level-cache = <&cluster0_l2>;
+ };
+
+- cpu2: cpu@100 {
++ cooling_map1: cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x100>;
+@@ -86,7 +50,7 @@
+ next-level-cache = <&cluster1_l2>;
+ };
+
+- cpu4: cpu@200 {
++ cooling_map2: cpu4: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x200>;
+@@ -105,7 +69,7 @@
+ next-level-cache = <&cluster2_l2>;
+ };
+
+- cpu6: cpu@300 {
++ cooling_map3: cpu6: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x300>;
@@ -143,7 +107,7 @@
CPU_PW20: cpu-pw20 {
compatible = "arm,idle-state";
clockgen: clocking@1300000 {
compatible = "fsl,ls2080a-clockgen";
-@@ -357,6 +321,8 @@
+@@ -194,54 +158,7 @@
+ #thermal-sensor-cells = <1>;
+ };
+
+- thermal-zones {
+- cpu_thermal: cpu-thermal {
+- polling-delay-passive = <1000>;
+- polling-delay = <5000>;
+-
+- thermal-sensors = <&tmu 4>;
+-
+- trips {
+- cpu_alert: cpu-alert {
+- temperature = <75000>;
+- hysteresis = <2000>;
+- type = "passive";
+- };
+- cpu_crit: cpu-crit {
+- temperature = <85000>;
+- hysteresis = <2000>;
+- type = "critical";
+- };
+- };
+-
+- cooling-maps {
+- map0 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu0 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- map1 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu2 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- map2 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu4 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- map3 {
+- trip = <&cpu_alert>;
+- cooling-device =
+- <&cpu6 THERMAL_NO_LIMIT
+- THERMAL_NO_LIMIT>;
+- };
+- };
+- };
+- };
++ #include "fsl-tmu.dtsi"
+
+ serial0: serial@21c0500 {
+ compatible = "fsl,ns16550", "ns16550a";
+@@ -357,6 +274,8 @@
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
msi-parent = <&its>;
#address-cells = <3>;
#size-cells = <1>;
-@@ -460,6 +426,8 @@
+@@ -460,6 +379,8 @@
compatible = "arm,mmu-500";
reg = <0 0x5000000 0 0x800000>;
#global-interrupts = <12>;
interrupts = <0 13 4>, /* global secure fault */
<0 14 4>, /* combined secure interrupt */
<0 15 4>, /* global non-secure fault */
-@@ -502,7 +470,6 @@
+@@ -502,7 +423,6 @@
<0 204 4>, <0 205 4>,
<0 206 4>, <0 207 4>,
<0 208 4>, <0 209 4>;
};
dspi: dspi@2100000 {
-@@ -574,15 +541,126 @@
+@@ -574,15 +494,126 @@
#interrupt-cells = <2>;
};
clock-names = "i2c";
- clocks = <&clockgen 4 3>;
+ clocks = <&clockgen 4 1>;
-+ fsl-scl-gpio = <&gpio3 10 0>;
++ scl-gpios = <&gpio3 10 0>;
};
i2c1: i2c@2010000 {
-@@ -593,7 +671,7 @@
+@@ -593,7 +624,7 @@
reg = <0x0 0x2010000 0x0 0x10000>;
interrupts = <0 34 0x4>; /* Level high type */
clock-names = "i2c";
};
i2c2: i2c@2020000 {
-@@ -604,7 +682,7 @@
+@@ -604,7 +635,7 @@
reg = <0x0 0x2020000 0x0 0x10000>;
interrupts = <0 35 0x4>; /* Level high type */
clock-names = "i2c";
};
i2c3: i2c@2030000 {
-@@ -615,7 +693,7 @@
+@@ -615,7 +646,7 @@
reg = <0x0 0x2030000 0x0 0x10000>;
interrupts = <0 35 0x4>; /* Level high type */
clock-names = "i2c";
};
ifc: ifc@2240000 {
-@@ -648,8 +726,8 @@
+@@ -648,8 +679,8 @@
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
"snps,dw-pcie";
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -657,6 +735,7 @@
+@@ -657,20 +688,22 @@
num-lanes = <4>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
-@@ -669,8 +748,8 @@
+ <0000 0 0 2 &gic 0 0 0 110 4>,
+ <0000 0 0 3 &gic 0 0 0 111 4>,
+ <0000 0 0 4 &gic 0 0 0 112 4>;
++ status = "disabled";
+ };
+
+ pcie2: pcie@3500000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
"snps,dw-pcie";
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -678,6 +757,7 @@
+@@ -678,20 +711,22 @@
num-lanes = <4>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
-@@ -690,8 +770,8 @@
+ <0000 0 0 2 &gic 0 0 0 115 4>,
+ <0000 0 0 3 &gic 0 0 0 116 4>,
+ <0000 0 0 4 &gic 0 0 0 117 4>;
++ status = "disabled";
+ };
+
+ pcie3: pcie@3600000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
"snps,dw-pcie";
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -699,6 +779,7 @@
+@@ -699,20 +734,22 @@
num-lanes = <8>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
-@@ -711,8 +792,8 @@
+ <0000 0 0 2 &gic 0 0 0 120 4>,
+ <0000 0 0 3 &gic 0 0 0 121 4>,
+ <0000 0 0 4 &gic 0 0 0 122 4>;
++ status = "disabled";
+ };
+
+ pcie4: pcie@3700000 {
compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
"snps,dw-pcie";
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -720,6 +801,7 @@
+@@ -720,12 +757,14 @@
num-lanes = <4>;
bus-range = <0x0 0xff>;
msi-parent = <&its>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
-@@ -754,6 +836,7 @@
+ <0000 0 0 2 &gic 0 0 0 125 4>,
+ <0000 0 0 3 &gic 0 0 0 126 4>,
+ <0000 0 0 4 &gic 0 0 0 127 4>;
++ status = "disabled";
+ };
+
+ sata0: sata@3200000 {
+@@ -754,6 +793,8 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
};
usb1: usb3@3110000 {
-@@ -764,6 +847,12 @@
+@@ -764,6 +805,14 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
+ };
+
+ serdes1: serdes@1ea0000 {
-+ reg = <0x0 0x1ea0000 0 0x00002000>;
+ compatible = "fsl,serdes-10g";
++ reg = <0x0 0x1ea0000 0 0x00002000>;
++ little-endian;
};
ccn@4000000 {
-@@ -771,6 +860,14 @@
+@@ -771,6 +820,14 @@
reg = <0x0 0x04000000 0x0 0x01000000>;
interrupts = <0 12 4>;
};
};
ddr1: memory-controller@1080000 {
-@@ -786,4 +883,11 @@
+@@ -786,4 +843,44 @@
interrupts = <0 18 0x4>;
little-endian;
};
+ method = "smc";
+ };
+ };
- };
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
-@@ -0,0 +1,55 @@
-+/*
-+ * QorIQ BMan SDK Portals device tree nodes
-+ *
-+ * Copyright 2011-2016 Freescale Semiconductor Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-+ */
++};
+
-+&bportals {
-+ bman-portal@0 {
-+ cell-index = <0>;
++#include "fsl-tmu-map1.dtsi"
++#include "fsl-tmu-map2.dtsi"
++#include "fsl-tmu-map3.dtsi"
++&thermal_zones {
++ thermal-zone1 {
++ status = "okay";
+ };
+
-+ bman-portal@10000 {
-+ cell-index = <1>;
++ thermal-zone2 {
++ status = "okay";
+ };
+
-+ bman-portal@20000 {
-+ cell-index = <2>;
++ thermal-zone3 {
++ status = "okay";
+ };
+
-+ bman-portal@30000 {
-+ cell-index = <3>;
++ thermal-zone4 {
++ status = "okay";
+ };
+
-+ bman-portal@40000 {
-+ cell-index = <4>;
++ thermal-zone5 {
++ status = "okay";
+ };
+
-+ bman-portal@50000 {
-+ cell-index = <5>;
++ thermal-zone6 {
++ status = "okay";
+ };
+
-+ bman-portal@60000 {
-+ cell-index = <6>;
++ thermal-zone7 {
++ status = "okay";
+ };
+ };
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
+@@ -0,0 +1,353 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++//
++// Device Tree file for LX2160AQDS
++//
++// Copyright 2018 NXP
+
-+ bman-portal@70000 {
-+ cell-index = <7>;
++/dts-v1/;
++
++#include "fsl-lx2160a.dtsi"
++
++/ {
++ model = "NXP Layerscape LX2160AQDS";
++ compatible = "fsl,lx2160a-qds", "fsl,lx2160a";
++
++ aliases {
++ crypto = &crypto;
++ serial0 = &uart0;
+ };
+
-+ bman-portal@80000 {
-+ cell-index = <8>;
++ chosen {
++ stdout-path = "serial0:115200n8";
+ };
+
-+ bman-portal@90000 {
-+ cell-index = <9>;
++ sb_3v3: regulator-sb3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "MC34717-3.3VSB";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
+ };
+
-+ bman-bpids@0 {
-+ compatible = "fsl,bpid-range";
-+ fsl,bpid-range = <32 32>;
++ mdio-mux-1 {
++ compatible = "mdio-mux-multiplexer";
++ mux-controls = <&mux 0>;
++ mdio-parent-bus = <&emdio1>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ mdio@0 { /* On-board PHY #1 RGMII1 */
++ reg = <0x00>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@8 { /* On-board PHY #2 RGMII2 */
++ reg = <0x8>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@18 { /* Slot #1 */
++ reg = <0x18>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@19 { /* Slot #2 */
++ reg = <0x19>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1a { /* Slot #3 */
++ reg = <0x1a>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1b { /* Slot #4 */
++ reg = <0x1b>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1c { /* Slot #5 */
++ reg = <0x1c>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1d { /* Slot #6 */
++ reg = <0x1d>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1e { /* Slot #7 */
++ reg = <0x1e>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1f { /* Slot #8 */
++ reg = <0x1f>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++ };
++
++ mdio-mux-2 {
++ compatible = "mdio-mux-multiplexer";
++ mux-controls = <&mux 1>;
++ mdio-parent-bus = <&emdio2>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ mdio@0 { /* Slot #1 (secondary EMI) */
++ reg = <0x00>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@1 { /* Slot #2 (secondary EMI) */
++ reg = <0x01>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@2 { /* Slot #3 (secondary EMI) */
++ reg = <0x02>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@3 { /* Slot #4 (secondary EMI) */
++ reg = <0x03>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@4 { /* Slot #5 (secondary EMI) */
++ reg = <0x04>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@5 { /* Slot #6 (secondary EMI) */
++ reg = <0x05>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@6 { /* Slot #7 (secondary EMI) */
++ reg = <0x06>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ mdio@7 { /* Slot #8 (secondary EMI) */
++ reg = <0x07>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
+ };
+};
---- a/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ BMan Portals device tree
- *
- * Copyright 2011-2016 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- &bportals {
-@@ -68,4 +68,10 @@
- reg = <0x80000 0x4000>, <0x4080000 0x4000>;
- interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
- };
+
-+ bman-portal@90000 {
-+ compatible = "fsl,bman-portal";
-+ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
-+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
++&crypto {
++ status = "okay";
++};
++
++&dspi0 {
++ status = "okay";
++
++ dflash0: flash@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "jedec,spi-nor";
++ reg = <0>;
++ spi-max-frequency = <1000000>;
+ };
- };
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
-@@ -0,0 +1,97 @@
-+/*
-+ * QorIQ FMan v3 10g port #1 device tree stub [ controller @ offset 0x400000 ]
-+ *
-+ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++};
+
-+fsldpaa: fsl,dpaa {
-+ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa";
-+ ethernet@0 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet0>;
-+ dma-coherent;
++&dspi1 {
++ status = "okay";
++
++ dflash1: flash@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "jedec,spi-nor";
++ reg = <0>;
++ spi-max-frequency = <1000000>;
+ };
-+ ethernet@1 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet1>;
-+ dma-coherent;
++};
++
++&dspi2 {
++ status = "okay";
++
++ dflash2: flash@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "jedec,spi-nor";
++ reg = <0>;
++ spi-max-frequency = <1000000>;
+ };
-+ ethernet@2 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet2>;
-+ dma-coherent;
++};
++
++&emdio1 {
++ status = "okay";
++};
++
++&emdio2 {
++ status = "okay";
++};
++
++&esdhc0 {
++ status = "okay";
++};
++
++&esdhc1 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ fpga@66 {
++ compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c",
++ "simple-mfd";
++ reg = <0x66>;
++
++ mux: mux-controller {
++ compatible = "reg-mux";
++ #mux-control-cells = <1>;
++ mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */
++ <0x54 0x07>; /* 1: reg 0x54, bit 2:0 */
++ };
+ };
-+ ethernet@3 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet3>;
-+ dma-coherent;
++
++ i2c-mux@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ power-monitor@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <500>;
++ };
++
++ power-monitor@41 {
++ compatible = "ti,ina220";
++ reg = <0x41>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ temperature-sensor@4c {
++ compatible = "nxp,sa56004";
++ reg = <0x4c>;
++ vcc-supply = <&sb_3v3>;
++ };
++
++ temperature-sensor@4d {
++ compatible = "nxp,sa56004";
++ reg = <0x4d>;
++ vcc-supply = <&sb_3v3>;
++ };
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ };
++ };
+ };
-+ ethernet@4 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet4>;
-+ dma-coherent;
++};
++
++&uart0 {
++ status = "okay";
++};
++
++&uart1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&pcs_mdio1 {
++ pcs_phy1: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ backplane-mode = "40gbase-kr";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++ fsl,lane-reg = <0xF00 0xE00 0xD00 0xC00>; /* lanes H, G, F, E */
+ };
-+ ethernet@5 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet5>;
-+ dma-coherent;
++};
++
++&pcs_mdio2 {
++ pcs_phy2: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ backplane-mode = "40gbase-kr";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++ fsl,lane-reg = <0xB00 0xA00 0x900 0x800>; /* lanes D, C, B, A */
+ };
-+ ethernet@8 {
-+ compatible = "fsl,dpa-ethernet";
-+ fsl,fman-mac = <&enet6>;
-+ dma-coherent;
++};
++
++&pcs_mdio3 {
++ pcs_phy3: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ backplane-mode = "10gbase-kr";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++ fsl,lane-reg = <0xF00 0x100>; /* lane H */
+ };
-+ ethernet@6 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet2>;
-+ dma-coherent;
-+ fpmevt-sel = <0>;
++};
++
++&pcs_mdio4 {
++ pcs_phy4: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c45";
++ backplane-mode = "10gbase-kr";
++ reg = <0x0>;
++ fsl,lane-handle = <&serdes1>;
++ fsl,lane-reg = <0xE00 0x100>; /* lane G */
+ };
-+ ethernet@7 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet3>;
-+ dma-coherent;
-+ fpmevt-sel = <1>;
++};
++
++/* Update DPMAC connections to 40G backplane PHYs
++ * &dpmac1 {
++ * phy-handle = <&pcs_phy1>;
++ * };
++ *
++ * &dpmac2 {
++ * phy-handle = <&pcs_phy2>;
++ * };
++ */
++
++/* Update DPMAC connections to 10G backplane PHYs
++ * &dpmac3 {
++ * phy-handle = <&pcs_phy3>;
++ * };
++ *
++ * &dpmac4 {
++ * phy-handle = <&pcs_phy4>;
++ * };
++ */
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+@@ -0,0 +1,233 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++//
++// Device Tree file for LX2160ARDB
++//
++// Copyright 2018 NXP
++
++/dts-v1/;
++
++#include "fsl-lx2160a.dtsi"
++
++/ {
++ model = "NXP Layerscape LX2160ARDB";
++ compatible = "fsl,lx2160a-rdb", "fsl,lx2160a";
++
++ aliases {
++ crypto = &crypto;
++ serial0 = &uart0;
+ };
-+ ethernet@10 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet4>;
-+ dma-coherent;
-+ fpmevt-sel = <2>;
++
++ chosen {
++ stdout-path = "serial0:115200n8";
+ };
-+ ethernet@11 {
-+ compatible = "fsl,im-ethernet";
-+ fsl,fman-mac = <&enet5>;
-+ dma-coherent;
-+ fpmevt-sel = <3>;
++
++ sb_3v3: regulator-sb3v3 {
++ compatible = "regulator-fixed";
++ regulator-name = "MC34717-3.3VSB";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
+ };
+};
+
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
-@@ -1,27 +1,28 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 10g port #0 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x10: port@90000 {
- cell-index = <0x10>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
- reg = <0x90000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman0_tx_0x30: port@b0000 {
- cell-index = <0x30>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
- reg = <0xb0000 0x1000>;
- fsl,fman-10g-port;
-+ fsl,qman-channel-id = <0x800>;
- };
-
-- ethernet@f0000 {
-+ mac9: ethernet@f0000 {
- cell-index = <0x8>;
- compatible = "fsl,fman-memac";
- reg = <0xf0000 0x1000>;
-@@ -29,7 +30,7 @@ fman@1a00000 {
- pcsphy-handle = <&pcsphy6>;
- };
-
-- mdio@f1000 {
-+ mdio9: mdio@f1000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
-@@ -1,27 +1,28 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 10g port #1 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x11: port@91000 {
- cell-index = <0x11>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-10g-rx";
- reg = <0x91000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman0_tx_0x31: port@b1000 {
- cell-index = <0x31>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
- reg = <0xb1000 0x1000>;
- fsl,fman-10g-port;
-+ fsl,qman-channel-id = <0x801>;
- };
-
-- ethernet@f2000 {
-+ mac10: ethernet@f2000 {
- cell-index = <0x9>;
- compatible = "fsl,fman-memac";
- reg = <0xf2000 0x1000>;
-@@ -29,7 +30,7 @@ fman@1a00000 {
- pcsphy-handle = <&pcsphy7>;
- };
-
-- mdio@f3000 {
-+ mdio10: mdio@f3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #0 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x88000 0x1000>;
- };
-
- fman0_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xa8000 0x1000>;
-+ fsl,qman-channel-id = <0x802>;
- };
-
- ethernet@e0000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #1 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x89000 0x1000>;
- };
-
- fman0_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xa9000 0x1000>;
-+ fsl,qman-channel-id = <0x803>;
- };
-
- ethernet@e2000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #2 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0a: port@8a000 {
- cell-index = <0xa>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8a000 0x1000>;
- };
-
- fman0_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xaa000 0x1000>;
-+ fsl,qman-channel-id = <0x804>;
- };
-
- ethernet@e4000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #3 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0b: port@8b000 {
- cell-index = <0xb>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8b000 0x1000>;
- };
-
- fman0_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xab000 0x1000>;
-+ fsl,qman-channel-id = <0x805>;
- };
-
- ethernet@e6000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #4 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0c: port@8c000 {
- cell-index = <0xc>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8c000 0x1000>;
- };
-
- fman0_tx_0x2c: port@ac000 {
- cell-index = <0x2c>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xac000 0x1000>;
-+ fsl,qman-channel-id = <0x806>;
- };
-
- ethernet@e8000 {
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
-@@ -1,22 +1,23 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 1g port #5 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman@1a00000 {
- fman0_rx_0x0d: port@8d000 {
- cell-index = <0xd>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
- reg = <0x8d000 0x1000>;
- };
-
- fman0_tx_0x2d: port@ad000 {
- cell-index = <0x2d>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
- reg = <0xad000 0x1000>;
-+ fsl,qman-channel-id = <0x807>;
- };
-
- ethernet@ea000 {
++&crypto {
++ status = "okay";
++};
++
++&emdio1 {
++ status = "okay";
++};
++
++&emdio2 {
++ status = "okay";
++};
++
++&esdhc0 {
++ sd-uhs-sdr104;
++ sd-uhs-sdr50;
++ sd-uhs-sdr25;
++ sd-uhs-sdr12;
++ status = "okay";
++};
++
++&esdhc1 {
++ mmc-hs200-1_8v;
++ mmc-hs400-1_8v;
++ bus-width = <8>;
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ i2c-mux@77 {
++ compatible = "nxp,pca9547";
++ reg = <0x77>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x2>;
++
++ power-monitor@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <1000>;
++ };
++ };
++
++ i2c@3 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x3>;
++
++ temperature-sensor@4c {
++ compatible = "nxp,sa56004";
++ reg = <0x4c>;
++ vcc-supply = <&sb_3v3>;
++ };
++
++ temperature-sensor@4d {
++ compatible = "nxp,sa56004";
++ reg = <0x4d>;
++ vcc-supply = <&sb_3v3>;
++ };
++ };
++ };
++};
++
++&i2c4 {
++ status = "okay";
++
++ rtc@51 {
++ compatible = "nxp,pcf2129";
++ reg = <0x51>;
++ // IRQ10_B
++ interrupts = <0 150 0x4>;
++ };
++};
++
++&fspi {
++ status = "okay";
++ nxp,fspi-has-second-chip;
++ flash0: mt35xu512aba@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "micron,m25p80";
++ m25p,fast-read;
++ spi-max-frequency = <50000000>;
++ reg = <0>;
++ /* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
++ spi-rx-bus-width = <8>;
++ spi-tx-bus-width = <1>;
++ };
++
++ flash1: mt35xu512aba@1 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "micron,m25p80";
++ m25p,fast-read;
++ spi-max-frequency = <50000000>;
++ reg = <1>;
++ /* The following setting enables 1-1-8 (CMD-ADDR-DATA) mode */
++ spi-rx-bus-width = <8>;
++ spi-tx-bus-width = <1>;
++ };
++};
++
++&uart0 {
++ status = "okay";
++};
++
++&uart1 {
++ status = "okay";
++};
++
++&usb0 {
++ status = "okay";
++};
++
++&usb1 {
++ status = "okay";
++};
++
++&emdio1 {
++ rgmii_phy1: ethernet-phy@1 {
++ /* AR8035 PHY - "compatible" property not strictly needed */
++ compatible = "ethernet-phy-id004d.d072";
++ reg = <0x1>;
++ /* Poll mode - no "interrupts" property defined */
++ };
++ rgmii_phy2: ethernet-phy@2 {
++ /* AR8035 PHY - "compatible" property not strictly needed */
++ compatible = "ethernet-phy-id004d.d072";
++ reg = <0x2>;
++ /* Poll mode - no "interrupts" property defined */
++ };
++ aquantia_phy1: ethernet-phy@4 {
++ /* AQR107 PHY - "compatible" property not strictly needed */
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x4>;
++ /* Poll mode - no "interrupts" property defined */
++ };
++ aquantia_phy2: ethernet-phy@5 {
++ /* AQR107 PHY - "compatible" property not strictly needed */
++ compatible = "ethernet-phy-ieee802.3-c45";
++ reg = <0x5>;
++ /* Poll mode - no "interrupts" property defined */
++ };
++};
++
++&emdio2 {
++ inphi_phy: ethernet-phy@0 {
++ compatible = "ethernet-phy-id0210.7440";
++ reg = <0x0>;
++ };
++};
++
++&dpmac3 {
++ phy-handle = <&aquantia_phy1>;
++ phy-connection-type = "xgmii";
++};
++
++&dpmac4 {
++ phy-handle = <&aquantia_phy2>;
++ phy-connection-type = "xgmii";
++};
++
++&dpmac5 {
++ phy-handle = <&inphi_phy>;
++};
++
++&dpmac6 {
++ phy-handle = <&inphi_phy>;
++};
++
++&dpmac17 {
++ phy-handle = <&rgmii_phy1>;
++ phy-connection-type = "rgmii-id";
++};
++
++&dpmac18 {
++ phy-handle = <&rgmii_phy2>;
++ phy-connection-type = "rgmii-id";
++};
++
++&sata0 {
++ status = "okay";
++};
++
++&sata1 {
++ status = "okay";
++};
++
++&sata2 {
++ status = "okay";
++};
++
++&sata3 {
++ status = "okay";
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+@@ -0,0 +1,1318 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++//
++// Device Tree Include file for Layerscape-LX2160A family SoC.
++//
++// Copyright 2018 NXP
++
++#include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++
++/memreserve/ 0x80000000 0x00010000;
++
++/ {
++ compatible = "fsl,lx2160a";
++ interrupt-parent = <&gic>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ // 8 clusters having 2 Cortex-A72 cores each
++ cpu@0 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x0>;
++ clocks = <&clockgen 1 0>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster0_l2>;
++ };
++
++ cpu@1 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x1>;
++ clocks = <&clockgen 1 0>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster0_l2>;
++ };
++
++ cpu@100 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x100>;
++ clocks = <&clockgen 1 1>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster1_l2>;
++ };
++
++ cpu@101 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x101>;
++ clocks = <&clockgen 1 1>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster1_l2>;
++ };
++
++ cpu@200 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x200>;
++ clocks = <&clockgen 1 2>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster2_l2>;
++ };
++
++ cpu@201 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x201>;
++ clocks = <&clockgen 1 2>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster2_l2>;
++ };
++
++ cpu@300 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x300>;
++ clocks = <&clockgen 1 3>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster3_l2>;
++ };
++
++ cpu@301 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x301>;
++ clocks = <&clockgen 1 3>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster3_l2>;
++ };
++
++ cpu@400 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x400>;
++ clocks = <&clockgen 1 4>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster4_l2>;
++ };
++
++ cpu@401 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x401>;
++ clocks = <&clockgen 1 4>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster4_l2>;
++ };
++
++ cpu@500 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x500>;
++ clocks = <&clockgen 1 5>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster5_l2>;
++ };
++
++ cpu@501 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x501>;
++ clocks = <&clockgen 1 5>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster5_l2>;
++ };
++
++ cpu@600 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x600>;
++ clocks = <&clockgen 1 6>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster6_l2>;
++ };
++
++ cpu@601 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x601>;
++ clocks = <&clockgen 1 6>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster6_l2>;
++ };
++
++ cpu@700 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x700>;
++ clocks = <&clockgen 1 7>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster7_l2>;
++ };
++
++ cpu@701 {
++ device_type = "cpu";
++ compatible = "arm,cortex-a72";
++ enable-method = "psci";
++ reg = <0x701>;
++ clocks = <&clockgen 1 7>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0xC000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <192>;
++ next-level-cache = <&cluster7_l2>;
++ };
++
++ cluster0_l2: l2-cache0 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster1_l2: l2-cache1 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster2_l2: l2-cache2 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster3_l2: l2-cache3 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster4_l2: l2-cache4 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster5_l2: l2-cache5 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster6_l2: l2-cache6 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++
++ cluster7_l2: l2-cache7 {
++ compatible = "cache";
++ cache-size = <0x100000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++ };
++
++ gic: interrupt-controller@6000000 {
++ compatible = "arm,gic-v3";
++ reg = <0x0 0x06000000 0 0x10000>, // GIC Dist
++ <0x0 0x06200000 0 0x200000>, // GICR (RD_base +
++ // SGI_base)
++ <0x0 0x0c0c0000 0 0x2000>, // GICC
++ <0x0 0x0c0d0000 0 0x1000>, // GICH
++ <0x0 0x0c0e0000 0 0x20000>; // GICV
++ #interrupt-cells = <3>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ interrupt-controller;
++ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
++
++ its: gic-its@6020000 {
++ compatible = "arm,gic-v3-its";
++ msi-controller;
++ reg = <0x0 0x6020000 0 0x20000>;
++ };
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ pmu {
++ compatible = "arm,cortex-a72-pmu";
++ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
++ };
++
++ psci {
++ compatible = "arm,psci-0.2";
++ method = "smc";
++ };
++
++ memory@80000000 {
++ // DRAM space - 1, size : 2 GB DRAM
++ device_type = "memory";
++ reg = <0x00000000 0x80000000 0 0x80000000>;
++ };
++
++ ddr1: memory-controller@1080000 {
++ compatible = "fsl,qoriq-memory-controller";
++ reg = <0x0 0x1080000 0x0 0x1000>;
++ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++ little-endian;
++ };
++
++ ddr2: memory-controller@1090000 {
++ compatible = "fsl,qoriq-memory-controller";
++ reg = <0x0 0x1090000 0x0 0x1000>;
++ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
++ little-endian;
++ };
++
++ sysclk: sysclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <100000000>;
++ clock-output-names = "sysclk";
++ };
++
++ soc {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
++
++ crypto: crypto@8000000 {
++ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
++ fsl,sec-era = <10>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x00 0x8000000 0x100000>;
++ reg = <0x00 0x8000000 0x0 0x100000>;
++ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
++ status = "disabled";
++
++ sec_jr0: jr@10000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x10000 0x10000>;
++ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr@20000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x20000 0x10000>;
++ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr2: jr@30000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x30000 0x10000>;
++ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr3: jr@40000 {
++ compatible = "fsl,sec-v5.0-job-ring",
++ "fsl,sec-v4.0-job-ring";
++ reg = <0x40000 0x10000>;
++ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++
++ clockgen: clock-controller@1300000 {
++ compatible = "fsl,lx2160a-clockgen";
++ reg = <0 0x1300000 0 0xa0000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
++
++ dcfg: syscon@1e00000 {
++ compatible = "fsl,lx2160a-dcfg", "syscon";
++ reg = <0x0 0x1e00000 0x0 0x10000>;
++ little-endian;
++ };
++
++ /* WRIOP0: 0x8b8_0000, E-MDIO1: 0x1_6000 */
++ emdio1: mdio@8b96000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8b96000 0x0 0x1000>;
++ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ little-endian; /* force the driver in LE mode */
++ status = "disabled";
++ };
++
++ /* WRIOP0: 0x8b8_0000, E-MDIO2: 0x1_7000 */
++ emdio2: mdio@8b97000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8b97000 0x0 0x1000>;
++ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ little-endian; /* force the driver in LE mode */
++ status = "disabled";
++ };
++
++ pcs_mdio1: mdio@8c07000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c07000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio2: mdio@8c0b000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c0b000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio3: mdio@8c0f000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c0f000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio4: mdio@8c13000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c13000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio5: mdio@8c17000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c17000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio6: mdio@8c1b000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c1b000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio7: mdio@8c1f000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c1f000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ pcs_mdio8: mdio@8c23000 {
++ compatible = "fsl,fman-memac-mdio";
++ reg = <0x0 0x8c23000 0x0 0x1000>;
++ device_type = "mdio";
++ little-endian;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
++ serdes1: serdes@1ea0000 {
++ compatible = "fsl,serdes-28g";
++ reg = <0x0 0x1ea0000 0 0x00002000>;
++ little-endian;
++ };
++
++ i2c0: i2c@2000000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2000000 0x0 0x10000>;
++ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
++ status = "disabled";
++ };
++
++ i2c1: i2c@2010000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2010000 0x0 0x10000>;
++ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ i2c2: i2c@2020000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2020000 0x0 0x10000>;
++ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ i2c3: i2c@2030000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2030000 0x0 0x10000>;
++ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ i2c4: i2c@2040000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2040000 0x0 0x10000>;
++ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
++ status = "disabled";
++ };
++
++ i2c5: i2c@2050000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2050000 0x0 0x10000>;
++ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ i2c6: i2c@2060000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2060000 0x0 0x10000>;
++ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ i2c7: i2c@2070000 {
++ compatible = "fsl,vf610-i2c";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2070000 0x0 0x10000>;
++ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ clock-names = "i2c";
++ clocks = <&clockgen 4 7>;
++ status = "disabled";
++ };
++
++ dspi0: spi@2100000 {
++ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2100000 0x0 0x10000>;
++ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 7>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <0>;
++ status = "disabled";
++ };
++
++ dspi1: spi@2110000 {
++ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2110000 0x0 0x10000>;
++ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 7>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <1>;
++ status = "disabled";
++ };
++
++ dspi2: spi@2120000 {
++ compatible = "fsl,lx2160a-dspi", "fsl,ls2085a-dspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2120000 0x0 0x10000>;
++ interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 7>;
++ clock-names = "dspi";
++ spi-num-chipselects = <5>;
++ bus-num = <2>;
++ status = "disabled";
++ };
++
++ esdhc0: esdhc@2140000 {
++ compatible = "fsl,esdhc";
++ reg = <0x0 0x2140000 0x0 0x10000>;
++ interrupts = <0 28 0x4>; /* Level high type */
++ clocks = <&clockgen 4 1>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ little-endian;
++ bus-width = <4>;
++ status = "disabled";
++ };
++
++ esdhc1: esdhc@2150000 {
++ compatible = "fsl,esdhc";
++ reg = <0x0 0x2150000 0x0 0x10000>;
++ interrupts = <0 63 0x4>; /* Level high type */
++ clocks = <&clockgen 4 1>;
++ voltage-ranges = <1800 1800 3300 3300>;
++ sdhci,auto-cmd12;
++ broken-cd;
++ little-endian;
++ bus-width = <4>;
++ status = "disabled";
++ };
++
++ uart0: serial@21c0000 {
++ compatible = "arm,sbsa-uart","arm,pl011";
++ reg = <0x0 0x21c0000 0x0 0x1000>;
++ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
++ current-speed = <115200>;
++ status = "disabled";
++ };
++
++ uart1: serial@21d0000 {
++ compatible = "arm,sbsa-uart","arm,pl011";
++ reg = <0x0 0x21d0000 0x0 0x1000>;
++ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
++ current-speed = <115200>;
++ status = "disabled";
++ };
++
++ uart2: serial@21e0000 {
++ compatible = "arm,sbsa-uart","arm,pl011";
++ reg = <0x0 0x21e0000 0x0 0x1000>;
++ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
++ current-speed = <115200>;
++ status = "disabled";
++ };
++
++ uart3: serial@21f0000 {
++ compatible = "arm,sbsa-uart","arm,pl011";
++ reg = <0x0 0x21f0000 0x0 0x1000>;
++ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
++ current-speed = <115200>;
++ status = "disabled";
++ };
++
++ gpio0: gpio@2300000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2300000 0x0 0x10000>;
++ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio1: gpio@2310000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2310000 0x0 0x10000>;
++ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio2: gpio@2320000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2320000 0x0 0x10000>;
++ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ gpio3: gpio@2330000 {
++ compatible = "fsl,qoriq-gpio";
++ reg = <0x0 0x2330000 0x0 0x10000>;
++ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-controller;
++ little-endian;
++ #gpio-cells = <2>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ watchdog@23a0000 {
++ compatible = "arm,sbsa-gwdt";
++ reg = <0x0 0x23a0000 0 0x1000>,
++ <0x0 0x2390000 0 0x1000>;
++ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
++ timeout-sec = <30>;
++ };
++
++ ftm0: ftm0@2800000 {
++ compatible = "fsl,lx2160a-ftm-alarm", "fsl,ftm-alarm";
++ reg = <0x0 0x2800000 0x0 0x10000>,
++ <0x0 0x1e34050 0x0 0x4>;
++ reg-names = "ftm", "FlexTimer1";
++ interrupts = <0 44 0x4>;
++ status = "okay";
++ };
++
++ usb0: usb@3100000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3100000 0x0 0x10000>;
++ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ status = "disabled";
++ };
++
++ usb1: usb@3110000 {
++ compatible = "snps,dwc3";
++ reg = <0x0 0x3110000 0x0 0x10000>;
++ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
++ dr_mode = "host";
++ snps,quirk-frame-length-adjustment = <0x20>;
++ snps,dis_rxdet_inp3_quirk;
++ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
++ snps,host-vbus-glitches;
++ status = "disabled";
++ };
++
++ smmu: iommu@5000000 {
++ compatible = "arm,mmu-500";
++ reg = <0 0x5000000 0 0x800000>;
++ #iommu-cells = <1>;
++ #global-interrupts = <14>;
++ // global secure fault
++ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
++ // combined secure
++ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
++ // global non-secure fault
++ <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
++ // combined non-secure
++ <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
++ // performance counter interrupts 0-9
++ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
++ // per context interrupt, 64 interrupts
++ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
++ dma-coherent;
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ reg = <0x00000008 0x0c000000 0 0x40>,
++ <0x00000000 0x08340000 0 0x40000>;
++ msi-parent = <&its>;
++ /* iommu-map property is fixed up by u-boot */
++ iommu-map = <0 &smmu 0 0>;
++ dma-coherent;
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ /*
++ * Define the maximum number of MACs present on the SoC.
++ */
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x1>;
++ };
++
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x2>;
++ };
++
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x3>;
++ };
++
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x4>;
++ };
++
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x5>;
++ };
++
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x6>;
++ };
++
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x7>;
++ };
++
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x8>;
++ };
++
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x9>;
++ };
++
++ dpmac10: dpmac@a {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++
++ dpmac11: dpmac@b {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xb>;
++ };
++
++ dpmac12: dpmac@c {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xc>;
++ };
++
++ dpmac13: dpmac@d {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xd>;
++ };
++
++ dpmac14: dpmac@e {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xe>;
++ };
++
++ dpmac15: dpmac@f {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xf>;
++ };
++
++ dpmac16: dpmac@10 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x10>;
++ };
++
++ dpmac17: dpmac@11 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x11>;
++ };
++
++ dpmac18: dpmac@12 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0x12>;
++ };
++ };
++ };
++
++ fspi: flexspi@20c0000 {
++ status = "disabled";
++ compatible = "nxp,lx2160a-fspi";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x20c0000 0x0 0x10000>,
++ <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "FSPI", "FSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "fspi_en", "fspi";
++ };
++
++ sata0: sata@3200000 {
++ status = "disabled";
++ compatible = "fsl,lx2160a-ahci";
++ reg = <0x0 0x3200000 0x0 0x10000>;
++ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ sata1: sata@3210000 {
++ status = "disabled";
++ compatible = "fsl,lx2160a-ahci";
++ reg = <0x0 0x3210000 0x0 0x10000>;
++ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ sata2: sata@3220000 {
++ status = "disabled";
++ compatible = "fsl,lx2160a-ahci";
++ reg = <0x0 0x3220000 0x0 0x10000>;
++ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ sata3: sata@3230000 {
++ status = "disabled";
++ compatible = "fsl,lx2160a-ahci";
++ reg = <0x0 0x3230000 0x0 0x10000>;
++ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 4 3>;
++ dma-coherent;
++ };
++
++ pcie@3400000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
++ 0x80 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0x80 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3400000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03400000 0x0 0x00100000
++ 0x80 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ status = "disabled";
++ };
++
++ pcie@3500000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
++ 0x88 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0x88 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3500000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03500000 0x0 0x00100000
++ 0x88 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ status = "disabled";
++ };
++
++ pcie@3600000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x90 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0x90 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3600000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03600000 0x0 0x00100000
++ 0x90 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ max-functions = <2>;
++ status = "disabled";
++ };
++
++ pcie@3700000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
++ 0x98 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0x98 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3700000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03700000 0x0 0x00100000
++ 0x98 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ status = "disabled";
++ };
++
++ pcie@3800000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03800000 0x0 0x00100000 /* controller registers */
++ 0xa0 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0xa0 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3800000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03800000 0x0 0x00100000
++ 0xa0 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ max-functions = <2>;
++ status = "disabled";
++ };
++
++ pcie@3900000 {
++ compatible = "fsl,lx2160a-pcie";
++ reg = <0x00 0x03900000 0x0 0x00100000 /* controller registers */
++ 0xa8 0x00000000 0x0 0x00001000>; /* configuration space */
++ reg-names = "csr_axi_slave", "config_axi_slave";
++ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* AER interrupt */
++ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, /* PME interrupt */
++ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
++ interrupt-names = "aer", "pme", "intr";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ apio-wins = <8>;
++ ppio-wins = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x82000000 0x0 0x40000000 0xa8 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_ep@3900000 {
++ compatible = "fsl,lx2160a-pcie-ep";
++ reg = <0x00 0x03900000 0x0 0x00100000
++ 0xa8 0x00000000 0x8 0x00000000>;
++ reg-names = "regs", "addr_space";
++ num-ob-windows = <256>;
++ status = "disabled";
++ };
++
++ };
++
++ firmware {
++ optee {
++ compatible = "linaro,optee-tz";
++ method = "smc";
++ };
++ };
++};
--- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
-@@ -0,0 +1,47 @@
++++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map1.dtsi
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Device Tree Include file for Thermal Monitor Unit.
++ *
++ * Copyright 2018 NXP
++ *
++ * Tang Yuantian <andy.tang@nxp.com>
++ *
++ */
++
++&thermal_zones {
++ thermal-zone0 {
++ cooling-maps {
++ map1 {
++ trip = <&alert0>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone1 {
++ cooling-maps {
++ map1 {
++ trip = <&alert1>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone2 {
++ cooling-maps {
++ map1 {
++ trip = <&alert2>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone3 {
++ cooling-maps {
++ map1 {
++ trip = <&alert3>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone4 {
++ cooling-maps {
++ map1 {
++ trip = <&alert4>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone5 {
++ cooling-maps {
++ map1 {
++ trip = <&alert5>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone6 {
++ cooling-maps {
++ map1 {
++ trip = <&alert6>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone7 {
++ cooling-maps {
++ map1 {
++ trip = <&alert7>;
++ cooling-device =
++ <&cooling_map1 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map2.dtsi
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Device Tree Include file for Thermal Monitor Unit.
++ *
++ * Copyright 2018 NXP
++ *
++ * Tang Yuantian <andy.tang@nxp.com>
++ *
++ */
++
++&thermal_zones {
++ thermal-zone0 {
++ cooling-maps {
++ map2 {
++ trip = <&alert0>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone1 {
++ cooling-maps {
++ map2 {
++ trip = <&alert1>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone2 {
++ cooling-maps {
++ map2 {
++ trip = <&alert2>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone3 {
++ cooling-maps {
++ map2 {
++ trip = <&alert3>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone4 {
++ cooling-maps {
++ map2 {
++ trip = <&alert4>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone5 {
++ cooling-maps {
++ map2 {
++ trip = <&alert5>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone6 {
++ cooling-maps {
++ map2 {
++ trip = <&alert6>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++
++ thermal-zone7 {
++ cooling-maps {
++ map2 {
++ trip = <&alert7>;
++ cooling-device =
++ <&cooling_map2 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-tmu-map3.dtsi
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
-+ * QorIQ FMan v3 OH ports device tree
++ * Device Tree Include file for Thermal Monitor Unit.
+ *
-+ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ * Copyright 2018 NXP
++ *
++ * Tang Yuantian <andy.tang@nxp.com>
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
-+fman@1a00000 {
-+
-+ fman0_oh1: port@82000 {
-+ cell-index = <0>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x82000 0x1000>;
++&thermal_zones {
++ thermal-zone0 {
++ cooling-maps {
++ map3 {
++ trip = <&alert0>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fman0_oh2: port@83000 {
-+ cell-index = <1>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x83000 0x1000>;
++ thermal-zone1 {
++ cooling-maps {
++ map3 {
++ trip = <&alert1>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fman0_oh3: port@84000 {
-+ cell-index = <2>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x84000 0x1000>;
++ thermal-zone2 {
++ cooling-maps {
++ map3 {
++ trip = <&alert2>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fman0_oh4: port@85000 {
-+ cell-index = <3>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x85000 0x1000>;
++ thermal-zone3 {
++ cooling-maps {
++ map3 {
++ trip = <&alert3>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fman0_oh5: port@86000 {
-+ cell-index = <4>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x86000 0x1000>;
++ thermal-zone4 {
++ cooling-maps {
++ map3 {
++ trip = <&alert4>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fman0_oh6: port@87000 {
-+ cell-index = <5>;
-+ compatible = "fsl,fman-port-oh";
-+ reg = <0x87000 0x1000>;
++ thermal-zone5 {
++ cooling-maps {
++ map3 {
++ trip = <&alert5>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+};
---- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
-@@ -1,9 +1,9 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- /*
- * QorIQ FMan v3 device tree
- *
- * Copyright 2012-2015 Freescale Semiconductor Inc.
- *
-- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
- */
-
- fman0: fman@1a00000 {
-@@ -19,45 +19,95 @@ fman0: fman@1a00000 {
- clock-names = "fmanclk";
- fsl,qman-channel-range = <0x800 0x10>;
-
-+ cc {
-+ compatible = "fsl,fman-cc";
++ thermal-zone6 {
++ cooling-maps {
++ map3 {
++ trip = <&alert6>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
- muram@0 {
- compatible = "fsl,fman-muram";
- reg = <0x0 0x60000>;
- };
-
-+ bmi@80000 {
-+ compatible = "fsl,fman-bmi";
-+ reg = <0x80000 0x400>;
++ thermal-zone7 {
++ cooling-maps {
++ map3 {
++ trip = <&alert7>;
++ cooling-device =
++ <&cooling_map3 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-tmu.dtsi
+@@ -0,0 +1,251 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Device Tree Include file for Thermal Monitor Unit.
++ *
++ * Copyright 2018 NXP
++ *
++ * Tang Yuantian <andy.tang@nxp.com>
++ *
++ */
+
-+ qmi@80400 {
-+ compatible = "fsl,fman-qmi";
-+ reg = <0x80400 0x400>;
++thermal_zones: thermal-zones {
++ thermal_zone0: thermal-zone0 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 0>;
++ status = "disabled";
++
++ trips {
++ alert0: alert0 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit0: crit0 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert0>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
- fman0_oh_0x2: port@82000 {
- cell-index = <0x2>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x82000 0x1000>;
-+ fsl,qman-channel-id = <0x809>;
- };
-
- fman0_oh_0x3: port@83000 {
- cell-index = <0x3>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x83000 0x1000>;
-+ fsl,qman-channel-id = <0x80a>;
- };
-
- fman0_oh_0x4: port@84000 {
- cell-index = <0x4>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x84000 0x1000>;
-+ fsl,qman-channel-id = <0x80b>;
- };
-
- fman0_oh_0x5: port@85000 {
- cell-index = <0x5>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x85000 0x1000>;
-+ fsl,qman-channel-id = <0x80c>;
- };
-
- fman0_oh_0x6: port@86000 {
- cell-index = <0x6>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x86000 0x1000>;
-+ fsl,qman-channel-id = <0x80d>;
- };
-
- fman0_oh_0x7: port@87000 {
- cell-index = <0x7>;
- compatible = "fsl,fman-v3-port-oh";
- reg = <0x87000 0x1000>;
-+ fsl,qman-channel-id = <0x80e>;
++ thermal-zone1 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 1>;
++ status = "disabled";
++
++ trips {
++ alert1: alert1 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit1: crit1 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert1>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ policer@c0000 {
-+ compatible = "fsl,fman-policer";
-+ reg = <0xc0000 0x1000>;
++ thermal-zone2 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 2>;
++ status = "disabled";
++
++ trips {
++ alert2: alert2 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit2: crit2 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert2>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ keygen@c1000 {
-+ compatible = "fsl,fman-keygen";
-+ reg = <0xc1000 0x1000>;
++ thermal-zone3 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 3>;
++ status = "disabled";
++
++ trips {
++ alert3: alert3 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit3: crit3 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert3>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ dma@c2000 {
-+ compatible = "fsl,fman-dma";
-+ reg = <0xc2000 0x1000>;
++ thermal-zone4 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 4>;
++ status = "disabled";
++
++ trips {
++ alert4: alert4 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit4: crit4 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert4>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ fpm@c3000 {
-+ compatible = "fsl,fman-fpm";
-+ reg = <0xc3000 0x1000>;
++ thermal-zone5 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 5>;
++ status = "disabled";
++
++ trips {
++ alert5: alert5 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit5: crit5 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert5>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ parser@c7000 {
-+ compatible = "fsl,fman-parser";
-+ reg = <0xc7000 0x1000>;
++ thermal-zone6 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 6>;
++ status = "disabled";
++
++ trips {
++ alert6: alert6 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit6: crit6 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert6>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
+
-+ vsps@dc000 {
-+ compatible = "fsl,fman-vsps";
-+ reg = <0xdc000 0x1000>;
- };
-
- mdio0: mdio@fc000 {
-@@ -75,7 +125,7 @@ fman0: fman@1a00000 {
- };
-
- ptp_timer0: ptp-timer@fe000 {
-- compatible = "fsl,fman-ptp-timer";
-+ compatible = "fsl,fman-ptp-timer", "fsl,fman-rtc";
- reg = <0xfe000 0x1000>;
- };
- };
++ thermal-zone7 {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++ thermal-sensors = <&tmu 7>;
++ status = "disabled";
++
++ trips {
++ alert7: alert7 {
++ temperature = <75000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++
++ crit7: crit7 {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&alert7>;
++ cooling-device =
++ <&cooling_map0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
++ };
++};
--- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
-@@ -0,0 +1,38 @@
++++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
+@@ -0,0 +1,55 @@
+/*
-+ * QorIQ QMan SDK Portals device tree nodes
++ * QorIQ BMan SDK Portals device tree nodes
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
-+&qportals {
-+ qman-fqids@0 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <256 256>;
++&bportals {
++ bman-portal@0 {
++ cell-index = <0>;
++ };
++
++ bman-portal@10000 {
++ cell-index = <1>;
++ };
++
++ bman-portal@20000 {
++ cell-index = <2>;
++ };
++
++ bman-portal@30000 {
++ cell-index = <3>;
++ };
++
++ bman-portal@40000 {
++ cell-index = <4>;
++ };
++
++ bman-portal@50000 {
++ cell-index = <5>;
++ };
++
++ bman-portal@60000 {
++ cell-index = <6>;
+ };
+
-+ qman-fqids@1 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <32768 32768>;
++ bman-portal@70000 {
++ cell-index = <7>;
+ };
+
-+ qman-pools@0 {
-+ compatible = "fsl,pool-channel-range";
-+ fsl,pool-channel-range = <0x401 0xf>;
++ bman-portal@80000 {
++ cell-index = <8>;
+ };
+
-+ qman-cgrids@0 {
-+ compatible = "fsl,cgrid-range";
-+ fsl,cgrid-range = <0 256>;
++ bman-portal@90000 {
++ cell-index = <9>;
+ };
+
-+ qman-ceetm@0 {
-+ compatible = "fsl,qman-ceetm";
-+ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
-+ fsl,ceetm-sp-range = <0 16>;
-+ fsl,ceetm-lni-range = <0 8>;
-+ fsl,ceetm-channel-range = <0 32>;
++ bman-bpids@0 {
++ compatible = "fsl,bpid-range";
++ fsl,bpid-range = <32 32>;
+ };
+};
---- a/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
-+++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
+--- a/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
- * QorIQ QMan Portals device tree
+ * QorIQ BMan Portals device tree
*
* Copyright 2011-2016 Freescale Semiconductor Inc.
*
- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
*/
- &qportals {
-@@ -77,4 +77,11 @@
- interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
- cell-index = <8>;
+ &bportals {
+@@ -68,4 +68,10 @@
+ reg = <0x80000 0x4000>, <0x4080000 0x4000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
};
+
-+ qportal9: qman-portal@90000 {
-+ compatible = "fsl,qman-portal";
++ bman-portal@90000 {
++ compatible = "fsl,bman-portal";
+ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
-+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
-+ cell-index = <9>;
++ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-10g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-10g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x10: port@90000 {
- cell-index = <0x10>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-10g-rx";
- reg = <0x90000 0x1000>;
- };
-
- fman0_tx_0x30: port@b0000 {
- cell-index = <0x30>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-10g-tx";
- reg = <0xb0000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x88000 0x1000>;
- };
-
- fman0_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa8000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-1.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x89000 0x1000>;
- };
-
- fman0_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa9000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-2.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-2.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x0a: port@8a000 {
- cell-index = <0xa>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8a000 0x1000>;
- };
-
- fman0_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xaa000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-3.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-3.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x0b: port@8b000 {
- cell-index = <0xb>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8b000 0x1000>;
- };
-
- fman0_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xab000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-4.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-0-1g-4.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
- fman0_rx_0x0c: port@8c000 {
- cell-index = <0xc>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8c000 0x1000>;
- };
-
- fman0_tx_0x2c: port@ac000 {
- cell-index = <0x2c>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xac000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-10g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-10g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x10: port@90000 {
- cell-index = <0x10>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-10g-rx";
- reg = <0x90000 0x1000>;
- };
-
- fman1_tx_0x30: port@b0000 {
- cell-index = <0x30>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-10g-tx";
- reg = <0xb0000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x88000 0x1000>;
- };
-
- fman1_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa8000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-1.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x89000 0x1000>;
- };
-
- fman1_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa9000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-2.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-2.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0a: port@8a000 {
- cell-index = <0xa>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8a000 0x1000>;
- };
-
- fman1_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xaa000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-3.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-3.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0b: port@8b000 {
- cell-index = <0xb>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8b000 0x1000>;
- };
-
- fman1_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xab000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-4.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman-1-1g-4.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0c: port@8c000 {
- cell-index = <0xc>;
-- compatible = "fsl,fman-v2-port-rx";
-+ compatible = "fsl,fman-v2-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8c000 0x1000>;
- };
-
- fman1_tx_0x2c: port@ac000 {
- cell-index = <0x2c>;
-- compatible = "fsl,fman-v2-port-tx";
-+ compatible = "fsl,fman-v2-port-tx","fsl,fman-port-1g-tx";
- reg = <0xac000 0x1000>;
- };
-
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
-@@ -35,7 +35,7 @@
- fman@400000 {
- fman0_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-10g-rx";
- reg = <0x88000 0x1000>;
- fsl,fman-10g-port;
- fsl,fman-best-effort-port;
-@@ -43,7 +43,7 @@ fman@400000 {
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
+@@ -0,0 +1,97 @@
++/*
++ * QorIQ DPAA Ethernet device tree stub (fsl,dpa-ethernet nodes bound to FMan MACs)
++ *
++ * Copyright 2012 - 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++fsldpaa: fsl,dpaa {
++ compatible = "fsl,ls1043a-dpaa", "simple-bus", "fsl,dpaa";
++ ethernet@0 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet0>;
++ dma-coherent;
++ };
++ ethernet@1 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet1>;
++ dma-coherent;
++ };
++ ethernet@2 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet2>;
++ dma-coherent;
++ };
++ ethernet@3 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet3>;
++ dma-coherent;
++ };
++ ethernet@4 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet4>;
++ dma-coherent;
++ };
++ ethernet@5 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet5>;
++ dma-coherent;
++ };
++ ethernet@8 {
++ compatible = "fsl,dpa-ethernet";
++ fsl,fman-mac = <&enet6>;
++ dma-coherent;
++ };
++ ethernet@6 {
++ compatible = "fsl,im-ethernet";
++ fsl,fman-mac = <&enet2>;
++ dma-coherent;
++ fpmevt-sel = <0>;
++ };
++ ethernet@7 {
++ compatible = "fsl,im-ethernet";
++ fsl,fman-mac = <&enet3>;
++ dma-coherent;
++ fpmevt-sel = <1>;
++ };
++ ethernet@10 {
++ compatible = "fsl,im-ethernet";
++ fsl,fman-mac = <&enet4>;
++ dma-coherent;
++ fpmevt-sel = <2>;
++ };
++ ethernet@11 {
++ compatible = "fsl,im-ethernet";
++ fsl,fman-mac = <&enet5>;
++ dma-coherent;
++ fpmevt-sel = <3>;
++ };
++};
++
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
+@@ -1,27 +1,28 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 10g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
- fman0_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-10g-tx";
- reg = <0xa8000 0x1000>;
- fsl,fman-10g-port;
- fsl,fman-best-effort-port;
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
-@@ -35,14 +35,14 @@
- fman@400000 {
+ fman@1a00000 {
fman0_rx_0x10: port@90000 {
cell-index = <0x10>;
- compatible = "fsl,fman-v3-port-rx";
+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
reg = <0xb0000 0x1000>;
fsl,fman-10g-port;
++ fsl,qman-channel-id = <0x800>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
-@@ -35,7 +35,7 @@
- fman@400000 {
- fman0_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-10g-rx";
- reg = <0x89000 0x1000>;
- fsl,fman-10g-port;
- fsl,fman-best-effort-port;
-@@ -43,7 +43,7 @@ fman@400000 {
- fman0_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-10g-tx";
- reg = <0xa9000 0x1000>;
- fsl,fman-10g-port;
- fsl,fman-best-effort-port;
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
-@@ -35,14 +35,14 @@
- fman@400000 {
+- ethernet@f0000 {
++ mac9: ethernet@f0000 {
+ cell-index = <0x8>;
+ compatible = "fsl,fman-memac";
+ reg = <0xf0000 0x1000>;
+@@ -29,7 +30,7 @@ fman@1a00000 {
+ pcsphy-handle = <&pcsphy6>;
+ };
+
+- mdio@f1000 {
++ mdio9: mdio@f1000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
+@@ -1,27 +1,28 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 10g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x11: port@91000 {
cell-index = <0x11>;
- compatible = "fsl,fman-v3-port-rx";
+ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-10g-tx";
reg = <0xb1000 0x1000>;
fsl,fman-10g-port;
++ fsl,qman-channel-id = <0x801>;
+ };
+
+- ethernet@f2000 {
++ mac10: ethernet@f2000 {
+ cell-index = <0x9>;
+ compatible = "fsl,fman-memac";
+ reg = <0xf2000 0x1000>;
+@@ -29,7 +30,7 @@ fman@1a00000 {
+ pcsphy-handle = <&pcsphy7>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+
+- mdio@f3000 {
++ mdio10: mdio@f3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x08: port@88000 {
cell-index = <0x8>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x88000 0x1000>;
};
fman0_tx_0x28: port@a8000 {
cell-index = <0x28>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xa8000 0x1000>;
++ fsl,qman-channel-id = <0x802>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+ ethernet@e0000 {
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x09: port@89000 {
cell-index = <0x9>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x89000 0x1000>;
};
fman0_tx_0x29: port@a9000 {
cell-index = <0x29>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xa9000 0x1000>;
++ fsl,qman-channel-id = <0x803>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+ ethernet@e2000 {
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #2 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x0a: port@8a000 {
cell-index = <0xa>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x8a000 0x1000>;
};
fman0_tx_0x2a: port@aa000 {
cell-index = <0x2a>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xaa000 0x1000>;
++ fsl,qman-channel-id = <0x804>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+ ethernet@e4000 {
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x0b: port@8b000 {
cell-index = <0xb>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x8b000 0x1000>;
};
fman0_tx_0x2b: port@ab000 {
cell-index = <0x2b>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xab000 0x1000>;
++ fsl,qman-channel-id = <0x805>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+ ethernet@e6000 {
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #4 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x0c: port@8c000 {
cell-index = <0xc>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x8c000 0x1000>;
};
fman0_tx_0x2c: port@ac000 {
cell-index = <0x2c>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xac000 0x1000>;
++ fsl,qman-channel-id = <0x806>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
-@@ -35,13 +35,13 @@
- fman@400000 {
+ ethernet@e8000 {
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
+@@ -1,22 +1,23 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 1g port #5 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+ fman@1a00000 {
fman0_rx_0x0d: port@8d000 {
cell-index = <0xd>;
- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
++ compatible = "fsl,fman-v3-port-rx", "fsl,fman-port-1g-rx";
reg = <0x8d000 0x1000>;
};
fman0_tx_0x2d: port@ad000 {
cell-index = <0x2d>;
- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
++ compatible = "fsl,fman-v3-port-tx", "fsl,fman-port-1g-tx";
reg = <0xad000 0x1000>;
++ fsl,qman-channel-id = <0x807>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
-@@ -35,14 +35,14 @@
- fman@500000 {
- fman1_rx_0x10: port@90000 {
- cell-index = <0x10>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-10g-rx";
- reg = <0x90000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman1_tx_0x30: port@b0000 {
- cell-index = <0x30>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-10g-tx";
- reg = <0xb0000 0x1000>;
- fsl,fman-10g-port;
- };
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
-@@ -35,14 +35,14 @@
- fman@500000 {
- fman1_rx_0x11: port@91000 {
- cell-index = <0x11>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-10g-rx";
- reg = <0x91000 0x1000>;
- fsl,fman-10g-port;
- };
-
- fman1_tx_0x31: port@b1000 {
- cell-index = <0x31>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-10g-tx";
- reg = <0xb1000 0x1000>;
- fsl,fman-10g-port;
- };
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x08: port@88000 {
- cell-index = <0x8>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x88000 0x1000>;
- };
+ ethernet@ea000 {
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
+@@ -0,0 +1,47 @@
++/*
++ * QorIQ FMan v3 OH ports device tree
++ *
++ * Copyright 2012-2015 Freescale Semiconductor Inc.
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++fman@1a00000 {
++
++ fman0_oh1: port@82000 {
++ cell-index = <0>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x82000 0x1000>;
++ };
++
++ fman0_oh2: port@83000 {
++ cell-index = <1>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x83000 0x1000>;
++ };
++
++ fman0_oh3: port@84000 {
++ cell-index = <2>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x84000 0x1000>;
++ };
++
++ fman0_oh4: port@85000 {
++ cell-index = <3>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x85000 0x1000>;
++ };
++
++ fman0_oh5: port@86000 {
++ cell-index = <4>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x86000 0x1000>;
++ };
++
++ fman0_oh6: port@87000 {
++ cell-index = <5>;
++ compatible = "fsl,fman-port-oh";
++ reg = <0x87000 0x1000>;
++ };
++
++};
+--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+@@ -1,9 +1,9 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ FMan v3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
- fman1_tx_0x28: port@a8000 {
- cell-index = <0x28>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa8000 0x1000>;
- };
+ fman0: fman@1a00000 {
+@@ -11,53 +11,104 @@ fman0: fman@1a00000 {
+ #size-cells = <1>;
+ cell-index = <0>;
+ compatible = "fsl,fman";
+- ranges = <0x0 0x0 0x1a00000 0x100000>;
+- reg = <0x0 0x1a00000 0x0 0x100000>;
++ ranges = <0x0 0x0 0x1a00000 0xfe000>;
++ reg = <0x0 0x1a00000 0x0 0xfe000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 3 0>;
+ clock-names = "fmanclk";
+ fsl,qman-channel-range = <0x800 0x10>;
++ ptimer-handle = <&ptp_timer0>;
++
++ cc {
++ compatible = "fsl,fman-cc";
++ };
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x09: port@89000 {
- cell-index = <0x9>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x89000 0x1000>;
+ muram@0 {
+ compatible = "fsl,fman-muram";
+ reg = <0x0 0x60000>;
};
- fman1_tx_0x29: port@a9000 {
- cell-index = <0x29>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xa9000 0x1000>;
++ bmi@80000 {
++ compatible = "fsl,fman-bmi";
++ reg = <0x80000 0x400>;
++ };
++
++ qmi@80400 {
++ compatible = "fsl,fman-qmi";
++ reg = <0x80400 0x400>;
++ };
++
+ fman0_oh_0x2: port@82000 {
+ cell-index = <0x2>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x82000 0x1000>;
++ fsl,qman-channel-id = <0x809>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0a: port@8a000 {
- cell-index = <0xa>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8a000 0x1000>;
+ fman0_oh_0x3: port@83000 {
+ cell-index = <0x3>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x83000 0x1000>;
++ fsl,qman-channel-id = <0x80a>;
};
- fman1_tx_0x2a: port@aa000 {
- cell-index = <0x2a>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xaa000 0x1000>;
+ fman0_oh_0x4: port@84000 {
+ cell-index = <0x4>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x84000 0x1000>;
++ fsl,qman-channel-id = <0x80b>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0b: port@8b000 {
- cell-index = <0xb>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8b000 0x1000>;
+ fman0_oh_0x5: port@85000 {
+ cell-index = <0x5>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x85000 0x1000>;
++ fsl,qman-channel-id = <0x80c>;
};
- fman1_tx_0x2b: port@ab000 {
- cell-index = <0x2b>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xab000 0x1000>;
+ fman0_oh_0x6: port@86000 {
+ cell-index = <0x6>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x86000 0x1000>;
++ fsl,qman-channel-id = <0x80d>;
};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0c: port@8c000 {
- cell-index = <0xc>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8c000 0x1000>;
+ fman0_oh_0x7: port@87000 {
+ cell-index = <0x7>;
+ compatible = "fsl,fman-v3-port-oh";
+ reg = <0x87000 0x1000>;
++ fsl,qman-channel-id = <0x80e>;
++ };
++
++ policer@c0000 {
++ compatible = "fsl,fman-policer";
++ reg = <0xc0000 0x1000>;
++ };
++
++ keygen@c1000 {
++ compatible = "fsl,fman-keygen";
++ reg = <0xc1000 0x1000>;
++ };
++
++ dma@c2000 {
++ compatible = "fsl,fman-dma";
++ reg = <0xc2000 0x1000>;
++ };
++
++ fpm@c3000 {
++ compatible = "fsl,fman-fpm";
++ reg = <0xc3000 0x1000>;
++ };
++
++ parser@c7000 {
++ compatible = "fsl,fman-parser";
++ reg = <0xc7000 0x1000>;
++ };
++
++ vsps@dc000 {
++ compatible = "fsl,fman-vsps";
++ reg = <0xdc000 0x1000>;
};
- fman1_tx_0x2c: port@ac000 {
- cell-index = <0x2c>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xac000 0x1000>;
+ mdio0: mdio@fc000 {
+@@ -73,9 +124,11 @@ fman0: fman@1a00000 {
+ compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+ reg = <0xfd000 0x1000>;
};
++};
---- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
-+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
-@@ -35,13 +35,13 @@
- fman@500000 {
- fman1_rx_0x0d: port@8d000 {
- cell-index = <0xd>;
-- compatible = "fsl,fman-v3-port-rx";
-+ compatible = "fsl,fman-v3-port-rx","fsl,fman-port-1g-rx";
- reg = <0x8d000 0x1000>;
- };
+- ptp_timer0: ptp-timer@fe000 {
+- compatible = "fsl,fman-ptp-timer";
+- reg = <0xfe000 0x1000>;
+- };
++ptp_timer0: ptp-timer@1afe000 {
++ compatible = "fsl,fman-ptp-timer";
++ reg = <0x0 0x1afe000 0x0 0x1000>;
++ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clockgen 3 0>;
+ };
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
+@@ -0,0 +1,38 @@
++/*
++ * QorIQ QMan SDK Portals device tree nodes
++ *
++ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++ */
++
++&qportals {
++ qman-fqids@0 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <256 256>;
++ };
++
++ qman-fqids@1 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <32768 32768>;
++ };
++
++ qman-pools@0 {
++ compatible = "fsl,pool-channel-range";
++ fsl,pool-channel-range = <0x401 0xf>;
++ };
++
++ qman-cgrids@0 {
++ compatible = "fsl,cgrid-range";
++ fsl,cgrid-range = <0 256>;
++ };
++
++ qman-ceetm@0 {
++ compatible = "fsl,qman-ceetm";
++ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
++ fsl,ceetm-sp-range = <0 16>;
++ fsl,ceetm-lni-range = <0 8>;
++ fsl,ceetm-channel-range = <0 32>;
++ };
++};
+--- a/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
++++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
+@@ -1,9 +1,9 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /*
+ * QorIQ QMan Portals device tree
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ *
+- * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
- fman1_tx_0x2d: port@ad000 {
- cell-index = <0x2d>;
-- compatible = "fsl,fman-v3-port-tx";
-+ compatible = "fsl,fman-v3-port-tx","fsl,fman-port-1g-tx";
- reg = <0xad000 0x1000>;
+ &qportals {
+@@ -77,4 +77,11 @@
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <8>;
};
-
++
++ qportal9: qman-portal@90000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
++ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
++ cell-index = <9>;
++ };
+ };
+--- a/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
++++ b/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
+@@ -330,3 +330,32 @@
+ &sata {
+ status = "disabled";
+ };
++
++/* Additions for Layerscape SDK (4.4/4.9) Kernel only
++ * These kernels need additional setup for FMan/QMan DMA shared memory
++ */
++
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+--- a/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
++++ b/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
+@@ -251,3 +251,32 @@
+ &sata {
+ status = "disabled";
+ };
++
++/* Additions for Layerscape SDK (4.4/4.9) Kernel only
++ * These kernels need additional setup for FMan/QMan DMA shared memory
++ */
++
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+++ /dev/null
-From 263092cd68368ac6f030b847a1d5b0069bc2cef3 Mon Sep 17 00:00:00 2001
-From: Mathew McBride <matt@traverse.com.au>
-Date: Tue, 17 Apr 2018 10:01:03 +1000
-Subject: [PATCH 05/40] add DTS for Traverse LS1043 Boards
-
-Signed-off-by: Mathew McBride <matt@traverse.com.au>
----
- arch/arm64/boot/dts/freescale/Makefile | 5 +++-
- .../boot/dts/freescale/traverse-ls1043s.dts | 29 +++++++++++++++++++
- .../boot/dts/freescale/traverse-ls1043v.dts | 29 +++++++++++++++++++
- 3 files changed, 62 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/boot/dts/freescale/Makefile
-+++ b/arch/arm64/boot/dts/freescale/Makefile
-@@ -22,7 +22,10 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
--
-+
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043v.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043s.dtb
-+
- always := $(dtb-y)
- subdir-y := $(dts-dirs)
- clean-files := *.dtb
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-@@ -330,3 +330,32 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-@@ -251,3 +251,32 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+#include "qoriq-qman-portals-sdk.dtsi"
-+#include "qoriq-bman-portals-sdk.dtsi"
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
+++ /dev/null
-From bb1a53f1bcb3f4c5983955a1d419c0e4e2531043 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 26 Oct 2018 16:00:37 +0800
-Subject: [PATCH 06/40] arm: dts: ls1021a: Add LS1021A-IOT board support
-
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
----
- arch/arm/boot/dts/Makefile | 3 +-
- arch/arm/boot/dts/ls1021a-iot.dts | 262 ++++++++++++++++++++++++++++++
- 2 files changed, 264 insertions(+), 1 deletion(-)
- create mode 100644 arch/arm/boot/dts/ls1021a-iot.dts
-
---- a/arch/arm/boot/dts/Makefile
-+++ b/arch/arm/boot/dts/Makefile
-@@ -496,7 +496,8 @@ dtb-$(CONFIG_SOC_IMX7D) += \
- imx7s-warp.dtb
- dtb-$(CONFIG_SOC_LS1021A) += \
- ls1021a-qds.dtb \
-- ls1021a-twr.dtb
-+ ls1021a-twr.dtb \
-+ ls1021a-iot.dtb
- dtb-$(CONFIG_SOC_VF610) += \
- vf500-colibri-eval-v3.dtb \
- vf610-colibri-eval-v3.dtb \
---- /dev/null
-+++ b/arch/arm/boot/dts/ls1021a-iot.dts
-@@ -0,0 +1,262 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+
-+/dts-v1/;
-+#include "ls1021a.dtsi"
-+
-+/ {
-+ model = "LS1021A IOT Board";
-+
-+ sys_mclk: clock-mclk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ clock-frequency = <24576000>;
-+ };
-+
-+ regulators {
-+ compatible = "simple-bus";
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ reg_3p3v: regulator@0 {
-+ compatible = "regulator-fixed";
-+ reg = <0>;
-+ regulator-name = "3P3V";
-+ regulator-min-microvolt = <3300000>;
-+ regulator-max-microvolt = <3300000>;
-+ regulator-always-on;
-+ };
-+
-+ reg_2p5v: regulator@1 {
-+ compatible = "regulator-fixed";
-+ reg = <1>;
-+ regulator-name = "2P5V";
-+ regulator-min-microvolt = <2500000>;
-+ regulator-max-microvolt = <2500000>;
-+ regulator-always-on;
-+ };
-+ };
-+
-+ sound {
-+ compatible = "simple-audio-card";
-+ simple-audio-card,format = "i2s";
-+ simple-audio-card,widgets =
-+ "Microphone", "Microphone Jack",
-+ "Headphone", "Headphone Jack",
-+ "Speaker", "Speaker Ext",
-+ "Line", "Line In Jack";
-+ simple-audio-card,routing =
-+ "MIC_IN", "Microphone Jack",
-+ "Microphone Jack", "Mic Bias",
-+ "LINE_IN", "Line In Jack",
-+ "Headphone Jack", "HP_OUT",
-+ "Speaker Ext", "LINE_OUT";
-+
-+ simple-audio-card,cpu {
-+ sound-dai = <&sai2>;
-+ frame-master;
-+ bitclock-master;
-+ };
-+
-+ simple-audio-card,codec {
-+ sound-dai = <&codec>;
-+ frame-master;
-+ bitclock-master;
-+ };
-+ };
-+
-+ firmware {
-+ optee {
-+ compatible = "linaro,optee-tz";
-+ method = "smc";
-+ };
-+ };
-+};
-+
-+&enet0 {
-+ tbi-handle = <&tbi1>;
-+ phy-handle = <&phy1>;
-+ phy-connection-type = "sgmii";
-+ status = "okay";
-+};
-+
-+&enet1 {
-+ tbi-handle = <&tbi1>;
-+ phy-handle = <&phy3>;
-+ phy-connection-type = "sgmii";
-+ status = "okay";
-+};
-+
-+&enet2 {
-+ fixed-link = <0 1 1000 0 0>;
-+ phy-connection-type = "rgmii-id";
-+ status = "okay";
-+};
-+
-+&can0{
-+ status = "disabled";
-+};
-+
-+&can1{
-+ status = "disabled";
-+};
-+
-+&can2{
-+ status = "disabled";
-+};
-+
-+&can3{
-+ status = "okay";
-+};
-+
-+&esdhc{
-+ status = "okay";
-+};
-+
-+&i2c0 {
-+ status = "okay";
-+
-+ max1239@35 {
-+ compatible = "maxim,max1239";
-+ reg = <0x35>;
-+ #io-channel-cells = <1>;
-+ };
-+
-+ codec: sgtl5000@2a {
-+ #sound-dai-cells=<0x0>;
-+ compatible = "fsl,sgtl5000";
-+ reg = <0x2a>;
-+ VDDA-supply = <®_3p3v>;
-+ VDDIO-supply = <®_2p5v>;
-+ clocks = <&sys_mclk 1>;
-+ };
-+
-+ pca9555: pca9555@23 {
-+ compatible = "nxp,pca9555";
-+ /*pinctrl-names = "default";*/
-+ /*interrupt-parent = <&gpio2>;
-+ interrupts = <19 0x2>;*/
-+ gpio-controller;
-+ #gpio-cells = <2>;
-+ interrupt-controller;
-+ #interrupt-cells = <2>;
-+ reg = <0x23>;
-+ };
-+
-+ ina220@44 {
-+ compatible = "ti,ina220";
-+ reg = <0x44>;
-+ shunt-resistor = <1000>;
-+ };
-+
-+ ina220@45 {
-+ compatible = "ti,ina220";
-+ reg = <0x45>;
-+ shunt-resistor = <1000>;
-+ };
-+
-+ lm75b@48 {
-+ compatible = "nxp,lm75a";
-+ reg = <0x48>;
-+ };
-+
-+ adt7461a@4c {
-+ compatible = "adt7461a";
-+ reg = <0x4c>;
-+ };
-+
-+ hdmi: sii9022a@39 {
-+ compatible = "fsl,sii902x";
-+ reg = <0x39>;
-+ interrupts = <GIC_SPI 163 IRQ_TYPE_EDGE_RISING>;
-+ };
-+};
-+
-+&i2c1 {
-+ status = "disabled";
-+};
-+
-+&ifc {
-+ status = "disabled";
-+};
-+
-+&lpuart0 {
-+ status = "okay";
-+};
-+
-+&mdio0 {
-+ phy0: ethernet-phy@0 {
-+ reg = <0x0>;
-+ };
-+ phy1: ethernet-phy@1 {
-+ reg = <0x1>;
-+ };
-+ phy2: ethernet-phy@2 {
-+ reg = <0x2>;
-+ };
-+ phy3: ethernet-phy@3 {
-+ reg = <0x3>;
-+ };
-+ tbi1: tbi-phy@1f {
-+ reg = <0x1f>;
-+ device_type = "tbi-phy";
-+ };
-+};
-+
-+&qspi {
-+ num-cs = <2>;
-+ status = "okay";
-+
-+ qflash0: s25fl128s@0 {
-+ compatible = "spansion,s25fl129p1";
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ };
-+};
-+
-+&sai2 {
-+ status = "okay";
-+};
-+
-+&uart0 {
-+ status = "okay";
-+};
-+
-+&uart1 {
-+ status = "okay";
-+};
-+
-+&dcu {
-+ display = <&display>;
-+ status = "okay";
-+
-+ display: display@0 {
-+ bits-per-pixel = <24>;
-+
-+ display-timings {
-+ native-mode = <&timing0>;
-+
-+ timing0: mode0 {
-+ clock-frequency = <25000000>;
-+ hactive = <640>;
-+ vactive = <480>;
-+ hback-porch = <80>;
-+ hfront-porch = <80>;
-+ vback-porch = <16>;
-+ vfront-porch = <16>;
-+ hsync-len = <12>;
-+ vsync-len = <2>;
-+ hsync-active = <1>;
-+ vsync-active = <1>;
-+ };
-+ };
-+ };
-+};
-From ede8d823f0e1b2c5e14cbac13839b818ed1c18cf Mon Sep 17 00:00:00 2001
+From 80df9e62536d7cac5c03a4fcb494c6ddf0723633 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:10 +0800
-Subject: [PATCH 07/40] apaa2-dpio:support layerscape
+Date: Wed, 17 Apr 2019 18:58:27 +0800
+Subject: [PATCH] dpaa2-dpio: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of dpaa2-dpio for layerscape
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Haiying Wang <Haiying.Wang@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Li Yang <leoyang.li@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Youri Querry <youri.querry_1@nxp.com>
---
drivers/staging/fsl-mc/Kconfig | 1 +
drivers/staging/fsl-mc/Makefile | 1 +
drivers/staging/fsl-mc/bus/dpcon.c | 32 +-
drivers/staging/fsl-mc/bus/dpio/Makefile | 3 +-
drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h | 29 +-
- drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 53 ++--
- .../staging/fsl-mc/bus/dpio/dpio-service.c | 258 +++++++++++++---
- drivers/staging/fsl-mc/bus/dpio/dpio.c | 51 ++--
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 99 ++--
+ .../staging/fsl-mc/bus/dpio/dpio-service.c | 295 +++++++++---
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 51 +--
drivers/staging/fsl-mc/bus/dpio/dpio.h | 32 +-
- .../staging/fsl-mc/bus/dpio/qbman-portal.c | 217 ++++++++++---
- .../staging/fsl-mc/bus/dpio/qbman-portal.h | 112 ++++---
+ .../staging/fsl-mc/bus/dpio/qbman-portal.c | 421 ++++++++++++++----
+ .../staging/fsl-mc/bus/dpio/qbman-portal.h | 134 ++++--
drivers/staging/fsl-mc/bus/dpmcp.c | 28 +-
drivers/staging/fsl-mc/bus/dprc-driver.c | 4 +-
drivers/staging/fsl-mc/bus/dprc.c | 28 +-
.../fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 4 +-
drivers/staging/fsl-mc/bus/mc-io.c | 28 +-
drivers/staging/fsl-mc/bus/mc-sys.c | 28 +-
- drivers/staging/fsl-mc/include/dpaa2-fd.h | 288 ++++++++++++++++--
+ drivers/staging/fsl-mc/include/dpaa2-fd.h | 288 ++++++++++--
drivers/staging/fsl-mc/include/dpaa2-global.h | 27 +-
- drivers/staging/fsl-mc/include/dpaa2-io.h | 97 ++++--
+ drivers/staging/fsl-mc/include/dpaa2-io.h | 110 +++--
drivers/staging/fsl-mc/include/dpbp.h | 29 +-
drivers/staging/fsl-mc/include/dpcon.h | 32 +-
- drivers/staging/fsl-mc/include/dpopr.h | 110 +++++++
+ drivers/staging/fsl-mc/include/dpopr.h | 110 +++++
drivers/staging/fsl-mc/include/mc.h | 4 +-
- 33 files changed, 970 insertions(+), 634 deletions(-)
+ 33 files changed, 1233 insertions(+), 693 deletions(-)
create mode 100644 drivers/staging/fsl-mc/include/dpopr.h
--- a/drivers/staging/fsl-mc/Kconfig
*/
#include <linux/types.h>
-@@ -114,6 +89,7 @@ static int dpaa2_dpio_probe(struct fsl_m
+@@ -38,6 +13,7 @@
+ #include <linux/msi.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/delay.h>
++#include <linux/io.h>
+
+ #include "../../include/mc.h"
+ #include "../../include/dpaa2-io.h"
+@@ -54,6 +30,8 @@ struct dpio_priv {
+ struct dpaa2_io *io;
+ };
+
++static cpumask_var_t cpus_unused_mask;
++
+ static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+ {
+ struct device *dev = (struct device *)arg;
+@@ -113,7 +91,7 @@ static int dpaa2_dpio_probe(struct fsl_m
+ struct dpio_priv *priv;
int err = -ENOMEM;
struct device *dev = &dpio_dev->dev;
- static int next_cpu = -1;
+- static int next_cpu = -1;
+ int possible_next_cpu;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
-@@ -135,6 +111,12 @@ static int dpaa2_dpio_probe(struct fsl_m
+@@ -135,6 +113,12 @@ static int dpaa2_dpio_probe(struct fsl_m
goto err_open;
}
err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
&dpio_attrs);
if (err) {
-@@ -156,23 +138,23 @@ static int dpaa2_dpio_probe(struct fsl_m
+@@ -155,26 +139,35 @@ static int dpaa2_dpio_probe(struct fsl_m
+ desc.dpio_id = dpio_dev->obj_desc.id;
/* get the cpu to use for the affinity hint */
- if (next_cpu == -1)
+- if (next_cpu == -1)
- next_cpu = cpumask_first(cpu_online_mask);
-+ possible_next_cpu = cpumask_first(cpu_online_mask);
- else
+- else
- next_cpu = cpumask_next(next_cpu, cpu_online_mask);
-+ possible_next_cpu = cpumask_next(next_cpu, cpu_online_mask);
-
+-
- if (!cpu_possible(next_cpu)) {
++ possible_next_cpu = cpumask_first(cpus_unused_mask);
+ if (possible_next_cpu >= nr_cpu_ids) {
dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
err = -ERANGE;
goto err_allocate_irqs;
}
- desc.cpu = next_cpu;
-+ desc.cpu = next_cpu = possible_next_cpu;
++ desc.cpu = possible_next_cpu;
++ cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
- /*
+- /*
- * Set the CENA regs to be the cache inhibited area of the portal to
- * avoid coherency issues if a user migrates to another core.
-+ * Set the CENA regs to be the cache enabled area of the portal to
-+ * achieve the best performance.
- */
+- */
- desc.regs_cena = ioremap_wc(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
-+ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
-+ resource_size(&dpio_dev->regions[0]));
- desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]));
+- desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
+- resource_size(&dpio_dev->regions[1]));
++ if (dpio_dev->obj_desc.region_count < 3) {
++ /* No support for DDR backed portals, use classic mapping */
++ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
++ resource_size(&dpio_dev->regions[0]));
++ } else {
++ desc.regs_cena = memremap(dpio_dev->regions[2].start,
++ resource_size(&dpio_dev->regions[2]),
++ MEMREMAP_WB);
++ }
++ if (IS_ERR(desc.regs_cena)) {
++ dev_err(dev, "ioremap_cache_ns failed\n");
++ goto err_allocate_irqs;
++ }
++
++ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
++ resource_size(&dpio_dev->regions[1]));
++ if (!desc.regs_cinh) {
++ dev_err(dev, "devm_ioremap failed\n");
++ goto err_allocate_irqs;
++ }
+
+ err = fsl_mc_allocate_irqs(dpio_dev);
+ if (err) {
+@@ -186,7 +179,7 @@ static int dpaa2_dpio_probe(struct fsl_m
+ if (err)
+ goto err_register_dpio_irq;
+
+- priv->io = dpaa2_io_create(&desc);
++ priv->io = dpaa2_io_create(&desc, dev);
+ if (!priv->io) {
+ dev_err(dev, "dpaa2_io_create failed\n");
+ goto err_dpaa2_io_create;
+@@ -196,7 +189,6 @@ static int dpaa2_dpio_probe(struct fsl_m
+ dev_dbg(dev, " receives_notifications = %d\n",
+ desc.receives_notifications);
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+- fsl_mc_portal_free(dpio_dev->mc_io);
-@@ -207,6 +189,7 @@ err_register_dpio_irq:
+ return 0;
+
+@@ -207,6 +199,7 @@ err_register_dpio_irq:
err_allocate_irqs:
dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_get_attr:
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
+@@ -227,7 +220,7 @@ static int dpaa2_dpio_remove(struct fsl_
+ {
+ struct device *dev;
+ struct dpio_priv *priv;
+- int err;
++ int err = 0, cpu;
+
+ dev = &dpio_dev->dev;
+ priv = dev_get_drvdata(dev);
+@@ -236,11 +229,8 @@ static int dpaa2_dpio_remove(struct fsl_
+
+ dpio_teardown_irqs(dpio_dev);
+
+- err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+- if (err) {
+- dev_err(dev, "MC portal allocation failed\n");
+- goto err_mcportal;
+- }
++ cpu = dpaa2_io_get_cpu(priv->io);
++ cpumask_set_cpu(cpu, cpus_unused_mask);
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+@@ -261,7 +251,7 @@ static int dpaa2_dpio_remove(struct fsl_
+
+ err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+-err_mcportal:
++
+ return err;
+ }
+
+@@ -285,11 +275,16 @@ static struct fsl_mc_driver dpaa2_dpio_d
+
+ static int dpio_driver_init(void)
+ {
++ if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
++ return -ENOMEM;
++ cpumask_copy(cpus_unused_mask, cpu_online_mask);
++
+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
+ }
+
+ static void dpio_driver_exit(void)
+ {
++ free_cpumask_var(cpus_unused_mask);
+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+ }
+ module_init(dpio_driver_init);
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -1,33 +1,8 @@
struct dpaa2_io_desc dpio_desc;
struct qbman_swp_desc swp_desc;
struct qbman_swp *swp;
-@@ -83,7 +57,7 @@ static inline struct dpaa2_io *service_s
+@@ -53,6 +27,7 @@ struct dpaa2_io {
+ /* protect notifications list */
+ spinlock_t lock_notifications;
+ struct list_head notifications;
++ struct device *dev;
+ };
+
+ struct dpaa2_io_store {
+@@ -83,7 +58,7 @@ static inline struct dpaa2_io *service_s
* If cpu == -1, choose the current cpu, with no guarantees about
* potentially being migrated away.
*/
cpu = smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
-@@ -95,6 +69,10 @@ static inline struct dpaa2_io *service_s
+@@ -95,6 +70,10 @@ static inline struct dpaa2_io *service_s
if (d)
return d;
spin_lock(&dpio_list_lock);
d = list_entry(dpio_list.next, struct dpaa2_io, node);
list_del(&d->node);
-@@ -105,6 +83,23 @@ static inline struct dpaa2_io *service_s
+@@ -105,15 +84,34 @@ static inline struct dpaa2_io *service_s
}
/**
+/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
++ * @dev: the actual DPIO device
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
*
-@@ -126,7 +121,6 @@ struct dpaa2_io *dpaa2_io_create(const s
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
++ struct device *dev)
+ {
+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+
+@@ -126,7 +124,6 @@ struct dpaa2_io *dpaa2_io_create(const s
return NULL;
}
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
-@@ -158,7 +152,6 @@ struct dpaa2_io *dpaa2_io_create(const s
+@@ -156,9 +153,10 @@ struct dpaa2_io *dpaa2_io_create(const s
+ dpio_by_cpu[desc->cpu] = obj;
+ spin_unlock(&dpio_list_lock);
++ obj->dev = dev;
++
return obj;
}
-EXPORT_SYMBOL(dpaa2_io_create);
/**
* dpaa2_io_down() - release the dpaa2_io object.
-@@ -171,11 +164,8 @@ EXPORT_SYMBOL(dpaa2_io_create);
+@@ -171,11 +169,8 @@ EXPORT_SYMBOL(dpaa2_io_create);
*/
void dpaa2_io_down(struct dpaa2_io *d)
{
#define DPAA_POLL_MAX 32
-@@ -206,7 +196,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
+@@ -206,7 +201,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
u64 q64;
q64 = qbman_result_SCN_ctx(dq);
ctx->cb(ctx);
} else {
pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
-@@ -222,7 +212,6 @@ done:
+@@ -222,13 +217,19 @@ done:
qbman_swp_interrupt_set_inhibit(swp, 0);
return IRQ_HANDLED;
}
-EXPORT_SYMBOL(dpaa2_io_irq);
++
++int dpaa2_io_get_cpu(struct dpaa2_io *d)
++{
++ return d->dpio_desc.cpu;
++}
++EXPORT_SYMBOL(dpaa2_io_get_cpu);
/**
* dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
-@@ -252,7 +241,7 @@ int dpaa2_io_service_register(struct dpa
+ * notifications on the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
++ * @dev: the device that requests the register
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully. In that way:
+@@ -243,7 +244,8 @@ EXPORT_SYMBOL(dpaa2_io_irq);
+ * Return 0 for success, or -ENODEV for failure.
+ */
+ int dpaa2_io_service_register(struct dpaa2_io *d,
+- struct dpaa2_io_notification_ctx *ctx)
++ struct dpaa2_io_notification_ctx *ctx,
++ struct device *dev)
+ {
+ unsigned long irqflags;
+
+@@ -251,8 +253,10 @@ int dpaa2_io_service_register(struct dpa
+ if (!d)
return -ENODEV;
++ device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
++
ctx->dpio_id = d->dpio_desc.dpio_id;
- ctx->qman64 = (u64)ctx;
+ ctx->qman64 = (u64)(uintptr_t)ctx;
ctx->dpio_private = d;
spin_lock_irqsave(&d->lock_notifications, irqflags);
list_add(&ctx->node, &d->notifications);
-@@ -265,7 +254,7 @@ int dpaa2_io_service_register(struct dpa
+@@ -263,20 +267,23 @@ int dpaa2_io_service_register(struct dpa
+ return qbman_swp_CDAN_set_context_enable(d->swp,
+ (u16)ctx->id,
ctx->qman64);
++
return 0;
}
-EXPORT_SYMBOL(dpaa2_io_service_register);
/**
* dpaa2_io_service_deregister - The opposite of 'register'.
-@@ -288,7 +277,7 @@ void dpaa2_io_service_deregister(struct
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
++ * @dev: the device that requests to be deregistered
+ *
+ * This function should be called only after sending the MC command to
+ * to detach the notification-producing device from the DPIO.
+ */
+ void dpaa2_io_service_deregister(struct dpaa2_io *service,
+- struct dpaa2_io_notification_ctx *ctx)
++ struct dpaa2_io_notification_ctx *ctx,
++ struct device *dev)
+ {
+ struct dpaa2_io *d = ctx->dpio_private;
+ unsigned long irqflags;
+@@ -287,8 +294,10 @@ void dpaa2_io_service_deregister(struct
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
list_del(&ctx->node);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++
++ device_link_remove(dev, d->dev);
}
-EXPORT_SYMBOL(dpaa2_io_service_deregister);
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
/**
* dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
-@@ -322,7 +311,7 @@ int dpaa2_io_service_rearm(struct dpaa2_
+@@ -322,7 +331,7 @@ int dpaa2_io_service_rearm(struct dpaa2_
return err;
}
/**
* dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
-@@ -385,7 +374,7 @@ int dpaa2_io_service_pull_channel(struct
+@@ -385,7 +394,7 @@ int dpaa2_io_service_pull_channel(struct
return err;
}
/**
* dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
-@@ -441,7 +430,7 @@ int dpaa2_io_service_enqueue_qd(struct d
+@@ -441,7 +450,7 @@ int dpaa2_io_service_enqueue_qd(struct d
return qbman_swp_enqueue(d->swp, &ed, fd);
}
/**
* dpaa2_io_service_release() - Release buffers to a buffer pool.
-@@ -453,7 +442,7 @@ EXPORT_SYMBOL(dpaa2_io_service_enqueue_q
+@@ -453,7 +462,7 @@ EXPORT_SYMBOL(dpaa2_io_service_enqueue_q
* Return 0 for success, and negative error code for failure.
*/
int dpaa2_io_service_release(struct dpaa2_io *d,
const u64 *buffers,
unsigned int num_buffers)
{
-@@ -468,7 +457,7 @@ int dpaa2_io_service_release(struct dpaa
+@@ -468,7 +477,7 @@ int dpaa2_io_service_release(struct dpaa
return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
/**
* dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
-@@ -482,7 +471,7 @@ EXPORT_SYMBOL(dpaa2_io_service_release);
+@@ -482,7 +491,7 @@ EXPORT_SYMBOL(dpaa2_io_service_release);
* Eg. if the buffer pool is empty, this will return zero.
*/
int dpaa2_io_service_acquire(struct dpaa2_io *d,
u64 *buffers,
unsigned int num_buffers)
{
-@@ -499,7 +488,7 @@ int dpaa2_io_service_acquire(struct dpaa
+@@ -499,7 +508,7 @@ int dpaa2_io_service_acquire(struct dpaa
return err;
}
/*
* 'Stores' are reusable memory blocks for holding dequeue results, and to
-@@ -553,7 +542,7 @@ struct dpaa2_io_store *dpaa2_io_store_cr
+@@ -553,7 +562,7 @@ struct dpaa2_io_store *dpaa2_io_store_cr
return ret;
}
/**
* dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
-@@ -567,7 +556,7 @@ void dpaa2_io_store_destroy(struct dpaa2
+@@ -567,7 +576,7 @@ void dpaa2_io_store_destroy(struct dpaa2
kfree(s->alloced_addr);
kfree(s);
}
/**
* dpaa2_io_store_next() - Determine when the next dequeue result is available.
-@@ -615,4 +604,177 @@ struct dpaa2_dq *dpaa2_io_store_next(str
+@@ -610,9 +619,193 @@ struct dpaa2_dq *dpaa2_io_store_next(str
+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+ ret = NULL;
+ } else {
++ prefetch(&s->vaddr[s->idx]);
+ *is_last = 0;
+ }
return ret;
}
+{
+ struct qbman_eq_desc ed;
+ struct dpaa2_fd fd;
++ unsigned long irqflags;
++ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
++
++ if ((d->swp->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_orp_drop(d->swp, orpid, seqnum);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ return ret;
++ }
++
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_orp_hole(&ed, orpid, seqnum);
+ return qbman_swp_enqueue(d->swp, &ed, &fd);
*/
#include <asm/cacheflush.h>
-@@ -99,6 +74,14 @@ enum qbman_sdqcr_fc {
+@@ -37,23 +12,26 @@
+
+ #include "qbman-portal.h"
+
+-#define QMAN_REV_4000 0x04000000
+-#define QMAN_REV_4100 0x04010000
+-#define QMAN_REV_4101 0x04010001
+-#define QMAN_REV_MASK 0xffff0000
+-
+ /* All QBMan command and result structures use this "valid bit" encoding */
+ #define QB_VALID_BIT ((u32)0x80)
+
+ /* QBMan portal management command codes */
+ #define QBMAN_MC_ACQUIRE 0x30
+ #define QBMAN_WQCHAN_CONFIGURE 0x46
++#define QBMAN_MC_ORP 0x63
+
+ /* CINH register offsets */
++#define QBMAN_CINH_SWP_EQCR_PI 0x800
+ #define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_CR_RT 0x900
++#define QBMAN_CINH_SWP_VDQCR_RT 0x940
++#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
++#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
+ #define QBMAN_CINH_SWP_DQPI 0xa00
+ #define QBMAN_CINH_SWP_DCAP 0xac0
+ #define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
++#define QBMAN_CINH_SWP_RCR_PI 0xc00
+ #define QBMAN_CINH_SWP_RAR 0xcc0
+ #define QBMAN_CINH_SWP_ISR 0xe00
+ #define QBMAN_CINH_SWP_IER 0xe40
+@@ -68,6 +46,13 @@
+ #define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
+ #define QBMAN_CENA_SWP_VDQCR 0x780
+
++/* CENA register offsets in memory-backed mode */
++#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_CR_MEM 0x1600
++#define QBMAN_CENA_SWP_RR_MEM 0x1680
++#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
++
+ /* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+ #define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+@@ -99,6 +84,14 @@ enum qbman_sdqcr_fc {
qbman_sdqcr_fc_up_to_3 = 1
};
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
-@@ -189,7 +172,7 @@ struct qbman_swp *qbman_swp_init(const s
+@@ -121,10 +114,13 @@ static inline void *qbman_get_cmd(struct
+
+ #define SWP_CFG_DQRR_MF_SHIFT 20
+ #define SWP_CFG_EST_SHIFT 16
++#define SWP_CFG_CPBS_SHIFT 15
+ #define SWP_CFG_WN_SHIFT 14
+ #define SWP_CFG_RPM_SHIFT 12
+ #define SWP_CFG_DCM_SHIFT 10
+ #define SWP_CFG_EPM_SHIFT 8
++#define SWP_CFG_VPM_SHIFT 7
++#define SWP_CFG_CPM_SHIFT 6
+ #define SWP_CFG_SD_SHIFT 5
+ #define SWP_CFG_SP_SHIFT 4
+ #define SWP_CFG_SE_SHIFT 3
+@@ -150,6 +146,8 @@ static inline u32 qbman_set_swp_cfg(u8 m
+ ep << SWP_CFG_EP_SHIFT);
+ }
+
++#define QMAN_RT_MODE 0x00000100
++
+ /**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+@@ -171,6 +169,8 @@ struct qbman_swp *qbman_swp_init(const s
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
++ p->mr.valid_bit = QB_VALID_BIT;
+
+ atomic_set(&p->vdq.available, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+@@ -188,8 +188,11 @@ struct qbman_swp *qbman_swp_init(const s
+ p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
++ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
++ memset(p->addr_cena, 0, 64 * 1024);
++
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
+ 0, /* Writes cacheable */
0, /* EQCR_CI stashing threshold */
3, /* RPM: Valid bit mode, RCR in array mode */
2, /* DCM: Discrete consumption ack mode */
-@@ -315,6 +298,7 @@ void qbman_swp_mc_submit(struct qbman_sw
+@@ -200,6 +203,10 @@ struct qbman_swp *qbman_swp_init(const s
+ 1, /* dequeue stashing priority == TRUE */
+ 0, /* dequeue stashing enable == FALSE */
+ 0); /* EQCR_CI stashing priority == FALSE */
++ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
++ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
++ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
++ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+
+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+@@ -208,6 +215,10 @@ struct qbman_swp *qbman_swp_init(const s
+ return NULL;
+ }
- dma_wmb();
- *v = cmd_verb | p->mc.valid_bit;
-+ dccvac(cmd);
++ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
++ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
++ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
++ }
+ /*
+ * SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+@@ -302,7 +313,10 @@ void qbman_swp_interrupt_set_inhibit(str
+ */
+ void *qbman_swp_mc_start(struct qbman_swp *p)
+ {
+- return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
++ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++ else
++ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
+ }
+
+ /*
+@@ -313,8 +327,15 @@ void qbman_swp_mc_submit(struct qbman_sw
+ {
+ u8 *v = cmd;
+
+- dma_wmb();
+- *v = cmd_verb | p->mc.valid_bit;
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
++ dma_wmb();
++ *v = cmd_verb | p->mc.valid_bit;
++ dccvac(cmd);
++ } else {
++ *v = cmd_verb | p->mc.valid_bit;
++ dma_wmb();
++ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
++ }
}
/*
-@@ -325,6 +309,7 @@ void *qbman_swp_mc_result(struct qbman_s
+@@ -325,13 +346,28 @@ void *qbman_swp_mc_result(struct qbman_s
{
u32 *ret, verb;
-+ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
- ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+- ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
++ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ /* Remove the valid-bit - command completed if the rest
++ * is non-zero.
++ */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ } else {
++ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
++ /* Command completed if the valid bit is toggled */
++ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
++ return NULL;
++ /* Command completed if the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++ p->mr.valid_bit ^= QB_VALID_BIT;
++ }
- /* Remove the valid-bit - command completed if the rest is non-zero */
-@@ -370,6 +355,43 @@ void qbman_eq_desc_set_no_orp(struct qbm
+- /* Remove the valid-bit - command completed if the rest is non-zero */
+- verb = ret[0] & ~QB_VALID_BIT;
+- if (!verb)
+- return NULL;
+- p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+ }
+
+@@ -370,6 +406,43 @@ void qbman_eq_desc_set_no_orp(struct qbm
d->verb |= enqueue_rejects_to_fq;
}
/*
* Exactly one of the following descriptor "targets" should be set. (Calling any
* one of these will replace the effect of any prior call to one of these.)
-@@ -429,12 +451,23 @@ int qbman_swp_enqueue(struct qbman_swp *
+@@ -408,6 +481,18 @@ void qbman_eq_desc_set_qd(struct qbman_e
+ #define EQAR_VB(eqar) ((eqar) & 0x80)
+ #define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
++static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
++ u8 idx)
++{
++ if (idx < 16)
++ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
++ QMAN_RT_MODE);
++ else
++ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
++ (idx - 16) * 4,
++ QMAN_RT_MODE);
++}
++
+ /**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+@@ -429,12 +514,29 @@ int qbman_swp_enqueue(struct qbman_swp *
return -EBUSY;
p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p->tgtid, &d->tgtid, 24);
memcpy(&p->fd, fd, sizeof(*fd));
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
-+ dccvac(p);
+- /* Set the verb byte, have to substitute in the valid-bit */
+- dma_wmb();
+- p->verb = d->verb | EQAR_VB(eqar);
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
++ /* Set the verb byte, have to substitute in the valid-bit */
++ dma_wmb();
++ p->verb = d->verb | EQAR_VB(eqar);
++ dccvac(p);
++ } else {
++ p->verb = d->verb | EQAR_VB(eqar);
++ dma_wmb();
++ qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
++ }
return 0;
}
-@@ -522,7 +555,7 @@ void qbman_pull_desc_set_storage(struct
+@@ -522,7 +624,7 @@ void qbman_pull_desc_set_storage(struct
int stash)
{
/* save the virtual address */
if (!storage) {
d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
-@@ -615,7 +648,7 @@ int qbman_swp_pull(struct qbman_swp *s,
+@@ -615,18 +717,28 @@ int qbman_swp_pull(struct qbman_swp *s,
atomic_inc(&s->vdq.available);
return -EBUSY;
}
- s->vdq.storage = (void *)d->rsp_addr_virt;
+- p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++ else
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
p->numf = d->numf;
p->tok = QMAN_DQ_TOKEN_VALID;
-@@ -627,6 +660,7 @@ int qbman_swp_pull(struct qbman_swp *s,
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
-+ dccvac(p);
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+- dma_wmb();
+-
+- /* Set the verb byte, have to substitute in the valid-bit */
+- p->verb = d->verb | s->vdq.valid_bit;
+- s->vdq.valid_bit ^= QB_VALID_BIT;
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
++ dma_wmb();
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p->verb = d->verb | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ dccvac(p);
++ } else {
++ p->verb = d->verb | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ dma_wmb();
++ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
++ }
return 0;
}
-@@ -680,8 +714,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
+@@ -680,11 +792,13 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-@@ -696,8 +729,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
+- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ else
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+@@ -696,8 +810,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
* knew from reading PI.
*/
if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
return NULL;
}
/*
-@@ -720,7 +752,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
+@@ -720,7 +833,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
return p;
}
-@@ -848,6 +880,7 @@ int qbman_swp_release(struct qbman_swp *
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
-+ dccvac(p);
+@@ -836,18 +949,29 @@ int qbman_swp_release(struct qbman_swp *
+ return -EBUSY;
+
+ /* Start the release command */
+- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ else
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+- /*
+- * Set the verb byte, have to substitute in the valid-bit and the number
+- * of buffers.
+- */
+- dma_wmb();
+- p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
++ /*
++ * Set the verb byte, have to substitute in the valid-bit
++ * and the number of buffers.
++ */
++ dma_wmb();
++ p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ dccvac(p);
++ } else {
++ p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ dma_wmb();
++ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
++ RAR_IDX(rar) * 4, QMAN_RT_MODE);
++ }
return 0;
}
-@@ -855,7 +888,7 @@ int qbman_swp_release(struct qbman_swp *
+@@ -855,7 +979,7 @@ int qbman_swp_release(struct qbman_swp *
struct qbman_acquire_desc {
u8 verb;
u8 reserved;
u8 num;
u8 reserved2[59];
};
-@@ -863,10 +896,10 @@ struct qbman_acquire_desc {
+@@ -863,10 +987,10 @@ struct qbman_acquire_desc {
struct qbman_acquire_rslt {
u8 verb;
u8 rslt;
};
/**
-@@ -929,7 +962,7 @@ int qbman_swp_acquire(struct qbman_swp *
+@@ -929,7 +1053,7 @@ int qbman_swp_acquire(struct qbman_swp *
struct qbman_alt_fq_state_desc {
u8 verb;
u8 reserved[3];
u8 reserved2[56];
};
-@@ -952,7 +985,7 @@ int qbman_swp_alt_fq_state(struct qbman_
+@@ -952,7 +1076,7 @@ int qbman_swp_alt_fq_state(struct qbman_
if (!p)
return -EBUSY;
/* Complete the management command */
r = qbman_swp_mc_complete(s, p, alt_fq_verb);
-@@ -978,11 +1011,11 @@ int qbman_swp_alt_fq_state(struct qbman_
+@@ -978,11 +1102,11 @@ int qbman_swp_alt_fq_state(struct qbman_
struct qbman_cdan_ctrl_desc {
u8 verb;
u8 reserved;
u8 reserved3[48];
};
-@@ -990,7 +1023,7 @@ struct qbman_cdan_ctrl_desc {
+@@ -990,7 +1114,7 @@ struct qbman_cdan_ctrl_desc {
struct qbman_cdan_ctrl_rslt {
u8 verb;
u8 rslt;
u8 reserved[60];
};
-@@ -1033,3 +1066,99 @@ int qbman_swp_CDAN_set(struct qbman_swp
+@@ -1031,5 +1155,152 @@ int qbman_swp_CDAN_set(struct qbman_swp
+ return -EIO;
+ }
- return 0;
- }
++ return 0;
++}
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+#define QBMAN_FQ_QUERY_NP 0x45
+{
+ return le32_to_cpu(a->fill);
+}
++
++struct qbman_orp_cmd_desc {
++ u8 verb;
++ u8 reserved;
++ u8 cid;
++ u8 reserved2;
++ u16 orpid;
++ u16 seqnum;
++ u8 reserved3[56];
++};
++
++struct qbman_orp_cmd_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 cid;
++ u8 reserved1[61];
++};
++
++int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum)
++{
++ struct qbman_orp_cmd_desc *p;
++ struct qbman_orp_cmd_rslt *r;
++ void *resp;
++
++ p = (struct qbman_orp_cmd_desc *)qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ p->cid = 0x7;
++ p->orpid = cpu_to_le16(orpid);
++ p->seqnum = cpu_to_le16(seqnum);
++
++ resp = qbman_swp_mc_complete(s, p, QBMAN_MC_ORP);
++ if (!resp) {
++ pr_err("qbman: Drop sequence num %d orpid 0x%x failed, no response\n",
++ seqnum, orpid);
++ return -EIO;
++ }
++ r = (struct qbman_orp_cmd_rslt *)resp;
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ORP);
++
++ /* Determine success or failure */
++ if (r->rslt != QBMAN_MC_RSLT_OK) {
++ pr_err("Drop seqnum %d of prpid 0x%x failed, code=0x%02x\n",
++ seqnum, orpid, r->rslt);
++ return -EIO;
++ }
++
+ return 0;
+ }
--- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
-@@ -1,33 +1,8 @@
+@@ -1,46 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+- * Copyright 2016 NXP
++ * Copyright 2016-2019 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
*/
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H
-@@ -57,8 +32,8 @@ struct qbman_pull_desc {
+
+ #include "../../include/dpaa2-fd.h"
+
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++#define QMAN_REV_5000 0x05000000
++
++#define QMAN_REV_MASK 0xffff0000
++
+ struct dpaa2_dq;
+ struct qbman_swp;
+
+ /* qbman software portal descriptor structure */
+ struct qbman_swp_desc {
+ void *cena_bar; /* Cache-enabled portal base address */
+- void *cinh_bar; /* Cache-inhibited portal base address */
++ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
+ u32 qman_version;
+ };
+
+@@ -57,8 +39,8 @@ struct qbman_pull_desc {
u8 numf;
u8 tok;
u8 reserved;
u64 rsp_addr_virt;
u8 padding[40];
};
-@@ -95,17 +70,17 @@ enum qbman_pull_type_e {
+@@ -95,17 +77,17 @@ enum qbman_pull_type_e {
struct qbman_eq_desc {
u8 verb;
u8 dca;
u8 fd[32];
};
-@@ -113,9 +88,9 @@ struct qbman_eq_desc {
+@@ -113,9 +95,9 @@ struct qbman_eq_desc {
struct qbman_release_desc {
u8 verb;
u8 reserved;
};
/* Management command result codes */
-@@ -187,6 +162,9 @@ int qbman_result_has_new_result(struct q
+@@ -127,7 +109,7 @@ struct qbman_release_desc {
+ /* portal data structure */
+ struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+- void __iomem *addr_cena;
++ void *addr_cena;
+ void __iomem *addr_cinh;
+
+ /* Management commands */
+@@ -135,6 +117,11 @@ struct qbman_swp {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
++ /* Management response */
++ struct {
++ u32 valid_bit; /* 0x00 or 0x80 */
++ } mr;
++
+ /* Push dequeues */
+ u32 sdq;
+
+@@ -187,6 +174,9 @@ int qbman_result_has_new_result(struct q
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
-@@ -466,4 +444,62 @@ static inline void *qbman_swp_mc_complet
+@@ -195,6 +185,8 @@ void qbman_eq_desc_set_qd(struct qbman_e
+ int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+
++int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum);
++
+ void qbman_release_desc_clear(struct qbman_release_desc *d);
+ void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+@@ -453,7 +445,7 @@ static inline int qbman_swp_CDAN_set_con
+ static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ u8 cmd_verb)
+ {
+- int loopvar = 1000;
++ int loopvar = 2000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+@@ -466,4 +458,62 @@ static inline void *qbman_swp_mc_complet
return cmd;
}
*/
#ifndef __FSL_DPAA2_IO_H
#define __FSL_DPAA2_IO_H
-@@ -88,6 +63,8 @@ void dpaa2_io_down(struct dpaa2_io *d);
+@@ -77,17 +52,20 @@ struct dpaa2_io_desc {
+ int has_8prio;
+ int cpu;
+ void *regs_cena;
+- void *regs_cinh;
++ void __iomem *regs_cinh;
+ int dpio_id;
+ u32 qman_version;
+ };
+
+-struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
++ struct device *dev);
+
+ void dpaa2_io_down(struct dpaa2_io *d);
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
/**
* struct dpaa2_io_notification_ctx - The DPIO notification context structure
* @cb: The callback to be invoked when the notification arrives
-@@ -103,7 +80,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
+@@ -103,7 +81,7 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io
* Used when a FQDAN/CDAN registration is made by drivers.
*/
struct dpaa2_io_notification_ctx {
int is_cdan;
u32 id;
int desired_cpu;
-@@ -129,9 +106,9 @@ int dpaa2_io_service_enqueue_fq(struct d
+@@ -113,10 +91,14 @@ struct dpaa2_io_notification_ctx {
+ void *dpio_private;
+ };
+
++int dpaa2_io_get_cpu(struct dpaa2_io *d);
++
+ int dpaa2_io_service_register(struct dpaa2_io *service,
+- struct dpaa2_io_notification_ctx *ctx);
++ struct dpaa2_io_notification_ctx *ctx,
++ struct device *dev);
+ void dpaa2_io_service_deregister(struct dpaa2_io *service,
+- struct dpaa2_io_notification_ctx *ctx);
++ struct dpaa2_io_notification_ctx *ctx,
++ struct device *dev);
+ int dpaa2_io_service_rearm(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+
+@@ -129,9 +111,9 @@ int dpaa2_io_service_enqueue_fq(struct d
const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
u64 *buffers, unsigned int num_buffers);
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
-@@ -139,4 +116,64 @@ struct dpaa2_io_store *dpaa2_io_store_cr
+@@ -139,4 +121,64 @@ struct dpaa2_io_store *dpaa2_io_store_cr
void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
-From 6ec4d0cf0b0e5e41abc91012db4ebff7d415a92b Mon Sep 17 00:00:00 2001
+From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:13 +0800
-Subject: [PATCH 08/40] dpaa2-ethernet: support layerscape
-This is an integrated patch of dpaa2-ethernet for
- layerscape
+Date: Wed, 17 Apr 2019 18:58:28 +0800
+Subject: [PATCH] dpaa2-ethernet: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+This is an integrated patch of dpaa2-ethernet for layerscape
+
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
+Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
drivers/staging/fsl-dpaa2/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/ethernet/Makefile | 2 +
- .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1240 +++++++++
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 3 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1187 ++++++++
.../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 183 ++
- .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 357 +++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 356 +++
.../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2335 +++++++++++++----
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 388 ++-
- .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 625 ++++-
- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 192 +-
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 604 ++++-
- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 344 ++-
- 12 files changed, 5723 insertions(+), 614 deletions(-)
+ .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 29 +-
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2509 +++++++++++++----
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 394 ++-
+ .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 716 ++++-
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 380 ++-
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 255 +-
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 704 ++++-
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 401 ++-
+ drivers/staging/fsl-dpaa2/ethernet/net.h | 30 +-
+ 15 files changed, 6315 insertions(+), 899 deletions(-)
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
bool "Enable Rx error queue"
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
-@@ -5,6 +5,8 @@
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Makefile for the Freescale DPAA2 Ethernet controller
+ #
+@@ -5,6 +6,8 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
CFLAGS_dpaa2-eth.o := -I$(src)
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
-@@ -0,0 +1,1240 @@
+@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
-+ * Copyright 2017 NXP
++ * Copyright 2017-2019 NXP
+ *
+ */
+
+ enum update_tx_prio type)
+{
+ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
-+ struct dpni_congestion_notification_cfg notif_cfg = {0};
+ struct dpni_tx_schedule_cfg *sched_cfg;
+ struct dpni_taildrop td = {0};
+ u8 ch_id = 0, tc_id = 0;
+
+ switch (type) {
+ case DPAA2_ETH_ADD_CQ:
-+ /* Disable congestion notifications */
-+ notif_cfg.threshold_entry = 0;
-+ notif_cfg.threshold_exit = 0;
-+ err = dpni_set_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_TX, tc_id,
-+ ¬if_cfg);
-+ if (err) {
-+ netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
-+ err);
-+ return err;
-+ }
+ /* Enable taildrop */
+ td.enable = 1;
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ err);
+ return err;
+ }
-+ /* Enable congestion notifications */
-+ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
-+ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
-+ notif_cfg.message_ctx = (u64)priv;
-+ notif_cfg.message_iova = priv->cscn_dma;
-+ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
-+ DPNI_CONG_OPT_COHERENT_WRITE;
-+ err = dpni_set_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_TX, tc_id,
-+ ¬if_cfg);
-+ if (err) {
-+ netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
-+ err);
-+ return err;
-+ }
+ break;
+ }
+
+ struct netdev_queue *dev_queue;
+ unsigned int i, parent_id;
+ struct Qdisc *qdisc;
-+ int err;
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ /* Validate inputs */
+ if (sch->parent != TC_H_ROOT) {
-+ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
-+ tcf_block_put(priv->block);
-+ qdisc_class_hash_destroy(&priv->clhash);
++ pr_err("CEETM: a root ceetm qdisc must be root\n");
+ return -EINVAL;
+ }
+
+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
+ sizeof(priv->root.qdiscs[0]),
+ GFP_KERNEL);
-+ if (!priv->root.qdiscs) {
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
++ if (!priv->root.qdiscs)
++ return -ENOMEM;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ dev_queue = netdev_get_tx_queue(dev, i);
+
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ parent_id);
-+ if (!qdisc) {
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
++ if (!qdisc)
++ return -ENOMEM;
+
+ priv->root.qdiscs[i] = qdisc;
+ qdisc->flags |= TCQ_F_ONETXQUEUE;
+ if (!priv->root.qstats) {
+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_root;
++ return -ENOMEM;
+ }
+
+ dpaa2_eth_ceetm_enable(priv_eth);
+ return 0;
-+
-+err_init_root:
-+ dpaa2_ceetm_destroy(sch);
-+ return err;
+}
+
+/* Configure a prio ceetm qdisc */
+ struct net_device *dev = qdisc_dev(sch);
+ struct dpaa2_ceetm_class *parent_cl;
+ struct Qdisc *parent_qdisc;
-+ int err;
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
+ if (sch->parent == TC_H_ROOT) {
+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+ if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ /* Obtain the parent root ceetm_class */
+
+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
+ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ priv->prio.parent = parent_cl;
+ parent_cl->child = sch;
+
-+ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
-+
-+ return 0;
-+
-+err_init_prio:
-+ dpaa2_ceetm_destroy(sch);
-+ return err;
++ return dpaa2_ceetm_change_prio(sch, priv, qopt);
+}
+
+/* Configure a generic ceetm qdisc */
+ break;
+ default:
+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ dpaa2_ceetm_destroy(sch);
++ /* Note: dpaa2_ceetm_destroy() will be called by our caller */
+ err = -EINVAL;
+ }
+
+#endif
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
-@@ -0,0 +1,357 @@
+@@ -0,0 +1,356 @@
+
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ int i, err;
+
+ seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
++ seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
+ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
-+ "Pending frames", "Congestion");
++ "Pending frames");
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (!fq->stats.frames && !fcnt)
+ continue;
+
-+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
++ seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
+ fq->fqid,
+ fq->target_cpu,
+ fq->tc,
+ fq_type_to_str(fq),
+ fq->stats.frames,
-+ fcnt,
-+ fq->stats.congestion_entry);
++ fcnt);
+ }
+
+ return 0;
+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
+
+#endif /* DPAA2_ETH_DEBUGFS_H */
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+@@ -1,32 +1,5 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2014-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Freescale Semiconductor nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ #undef TRACE_SYSTEM
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -38,9 +38,14 @@
+@@ -1,33 +1,6 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Freescale Semiconductor nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+ #include <linux/init.h>
+ #include <linux/module.h>
+@@ -38,9 +11,14 @@
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
* using trace events only need to #include <trace/events/sched.h>
-@@ -104,13 +109,15 @@ static void free_rx_fd(struct dpaa2_eth_
+@@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Freescale Semiconductor, Inc");
+ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+-const char dpaa2_eth_drv_version[] = "0.1";
+-
+ static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+ {
+@@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
/* We don't support any other format */
return;
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-+ DMA_BIDIRECTIONAL);
++ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
- skb_free_frag(sg_vaddr);
+- skb_free_frag(sg_vaddr);
++ free_pages((unsigned long)sg_vaddr, 0);
if (dpaa2_sg_is_final(&sgt[i]))
-@@ -133,8 +140,7 @@ static struct sk_buff *build_linear_skb(
+ break;
+ }
+
+ free_buf:
+- skb_free_frag(vaddr);
++ free_pages((unsigned long)vaddr, 0);
+ }
+
+ /* Build a linear skb based on a single-buffer frame descriptor */
+-static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
+- struct dpaa2_eth_channel *ch,
++static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+ {
+@@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
ch->buf_count--;
- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
++ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb))
return NULL;
-@@ -170,15 +176,19 @@ static struct sk_buff *build_frag_skb(st
+@@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
+ /* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-+ DMA_BIDIRECTIONAL);
++ dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
/* We build the skb around the first data buffer */
- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
++ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
-+ skb_free_frag(sg_vaddr);
++ free_pages((unsigned long)sg_vaddr, 0);
+
/* We still need to subtract the buffers used
* by this FD from our software counter
*/
-@@ -213,17 +223,173 @@ static struct sk_buff *build_frag_skb(st
+@@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
break;
}
+
+ fq = &priv->fq[queue_id];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-+ priv->tx_qdid, 0,
-+ fq->tx_qdbin, fd);
++ err = priv->enqueue(priv, fq, fd, 0);
+ if (err != -EBUSY)
+ break;
+ }
+ return err;
+}
+
++/* Free buffers acquired from the buffer pool or which were meant to
++ * be released in the pool
++ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < count; i++) {
-+ /* Same logic as on regular Rx path */
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
-+ skb_free_frag(vaddr);
++ dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi)
-+ struct napi_struct *napi,
-+ u16 queue_id)
++ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
-@@ -235,14 +401,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
+@@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
struct dpaa2_fas *fas;
void *buf_data;
u32 status = 0;
prefetch(fas);
buf_data = vaddr + dpaa2_fd_get_offset(fd);
prefetch(buf_data);
-@@ -251,22 +419,41 @@ static void dpaa2_eth_rx(struct dpaa2_et
+@@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
+- skb = build_linear_skb(priv, ch, fd, vaddr);
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
-+ queue_id, vaddr);
++ fq->flowid, vaddr);
+ if (xdp_act != XDP_PASS)
+ return;
+
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_BIDIRECTIONAL);
- skb = build_linear_skb(priv, ch, fd, vaddr);
++ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
- skb_free_frag(vaddr);
+- skb_free_frag(vaddr);
++ free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
} else {
+ /* Get the timestamp value */
+ if (priv->ts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-+ u64 *ns = dpaa2_get_ts(vaddr, false);
++ __le64 *ts = dpaa2_get_ts(vaddr, false);
++ u64 ns;
+
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
++
++ ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
++ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
/* Check if we need to validate the L4 csum */
if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
-@@ -275,6 +462,12 @@ static void dpaa2_eth_rx(struct dpaa2_et
+@@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
+ }
skb->protocol = eth_type_trans(skb, priv->net_dev);
++ skb_record_rx_queue(skb, fq->flowid);
-+ /* Record Rx queue - this will be used when picking a Tx queue to
-+ * forward the frames. We're keeping flow affinity through the
-+ * network stack.
-+ */
-+ skb_record_rx_queue(skb, queue_id);
-+
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
-@@ -282,22 +475,74 @@ static void dpaa2_eth_rx(struct dpaa2_et
+- napi_gro_receive(napi, skb);
++ napi_gro_receive(&ch->napi, skb);
return;
* make sure we don't accidentally issue another volatile dequeue which would
* overwrite (leak) frames already in the store.
*
-+ * The number of frames is returned using the last 2 output arguments,
-+ * separately for Rx and Tx confirmations.
-+ *
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch)
-+static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
-+ int *tx_conf_cleaned)
++static int consume_frames(struct dpaa2_eth_channel *ch,
++ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
- struct dpaa2_eth_fq *fq;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
int cleaned = 0;
-@@ -315,14 +560,60 @@ static int consume_frames(struct dpaa2_e
+@@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
}
fd = dpaa2_dq_fd(dq);
-- fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-- fq->stats.frames++;
+ prefetch(fd);
++
+ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+- fq->stats.frames++;
- fq->consume(priv, ch, fd, &ch->napi);
-+ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
-+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
++ fq->consume(priv, ch, fd, fq);
cleaned++;
} while (!is_last);
-- return cleaned;
+ if (!cleaned)
-+ return false;
-+
-+ /* All frames brought in store by a volatile dequeue
-+ * come from the same queue
-+ */
-+ if (fq->type == DPAA2_TX_CONF_FQ) {
-+ *tx_conf_cleaned += cleaned;
-+ } else {
-+ *rx_cleaned += cleaned;
-+ /* If we processed XDP_REDIRECT frames, flush them now */
-+ /* FIXME: Since we don't actually do anything inside
-+ * ndo_xdp_flush, we call it here simply for compliance
-+ * reasons
-+ */
-+ if (ch->flush) {
-+ xdp_do_flush_map();
-+ ch->flush = false;
-+ }
-+ }
++ return 0;
+
+ fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
+
-+ return true;
-+}
++ /* A dequeue operation only pulls frames from a single queue
++ * into the store. Return the frame queue as an out param.
++ */
++ if (src)
++ *src = fq;
+
+ return cleaned;
+ }
+
+/* Configure the egress frame annotation for timestamp update */
+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+{
+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
+ faead = dpaa2_get_faead(buf_start, true);
+ faead->ctrl = cpu_to_le32(ctrl);
- }
-
++}
++
/* Create a frame descriptor based on a fragmented skb */
-@@ -341,7 +632,6 @@ static int build_sg_fd(struct dpaa2_eth_
+ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+@@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
int num_sg;
int num_dma_bufs;
struct dpaa2_eth_swa *swa;
/* Create and map scatterlist.
* We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
-@@ -365,21 +655,14 @@ static int build_sg_fd(struct dpaa2_eth_
+@@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
-@@ -402,10 +685,11 @@ static int build_sg_fd(struct dpaa2_eth_
+@@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
* all of them on Tx Conf.
*/
swa = (struct dpaa2_eth_swa *)sgt_buf;
/* Separately map the SGT buffer */
addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
-@@ -417,13 +701,15 @@ static int build_sg_fd(struct dpaa2_eth_
+@@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
-@@ -437,29 +723,27 @@ static int build_single_fd(struct dpaa2_
+@@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
struct dpaa2_fd *fd)
{
struct device *dev = priv->net_dev->dev.parent;
addr = dma_map_single(dev, buffer_start,
skb_tail_pointer(skb) - buffer_start,
-@@ -471,8 +755,10 @@ static int build_single_fd(struct dpaa2_
+@@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
return 0;
}
-@@ -486,92 +772,128 @@ static int build_single_fd(struct dpaa2_
- * Optionally, return the frame annotation status word (FAS), which needs
- * to be checked if we're on the confirmation path.
+@@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+- * Optionally, return the frame annotation status word (FAS), which needs
+- * to be checked if we're on the confirmation path.
*/
--static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-+static void free_tx_fd(struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd,
+ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+- const struct dpaa2_fd *fd,
- u32 *status)
-+ bool in_napi)
++ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr;
+ /* Get the timestamp value */
+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
-+ u64 *ns;
++ __le64 *ts = dpaa2_get_ts(buffer_start, true);
++ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
-+ ns = dpaa2_get_ts(buffer_start, true);
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
-+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
++ ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
++ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
}
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
- {
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
- struct dpaa2_fd fd;
+@@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
++ struct netdev_queue *nq;
u16 queue_mapping;
- int err, i;
+ unsigned int needed_headroom;
++ u32 fd_len;
+ u8 prio;
+ int err, i, ch_id = 0;
+
+ queue_mapping = skb_get_queue_mapping(skb);
+ prio = netdev_txq_to_tc(net_dev, queue_mapping);
-+
+ /* Hardware interprets priority level 0 as being the highest,
+ * so we need to do a reverse mapping to the netdev tc index
+ */
+ if (net_dev->num_tc)
+ prio = net_dev->num_tc - prio - 1;
-+
++
+ queue_mapping %= dpaa2_eth_queue_count(priv);
+ fq = &priv->fq[queue_mapping];
-+
-+ /* If we're congested, stop this tx queue; transmission of
-+ * the current skb happens regardless of congestion state
-+ */
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
-+ netif_stop_subqueue(net_dev, queue_mapping);
-+ fq->stats.congestion_entry++;
-+ }
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
-+ /* For non-linear skb we don't need a minimum headroom */
+ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ if (skb_headroom(skb) < needed_headroom) {
struct sk_buff *ns;
goto err_alloc_headroom;
}
+ percpu_extras->tx_reallocs++;
++
+ if (skb->sk)
+ skb_set_owner_w(ns, skb->sk);
++
dev_kfree_skb(skb);
skb = ns;
}
-@@ -605,13 +927,15 @@ static netdev_tx_t dpaa2_eth_tx(struct s
- /* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+@@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
+ goto err_build_fd;
+ }
-- /* TxConf FQ selection primarily based on cpu affinity; this is
-- * non-migratable context, so it's safe to call smp_processor_id().
-- */
-- queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
-- fq = &priv->fq[queue_mapping];
+ if (dpaa2_eth_ceetm_is_enabled(priv)) {
+ err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
+ if (err)
+ goto err_ceetm_classify;
+ }
+
+ /* Tracing point */
+ trace_dpaa2_tx_fd(net_dev, &fd);
+
+- /* TxConf FQ selection primarily based on cpu affinity; this is
+- * non-migratable context, so it's safe to call smp_processor_id().
++ fd_len = dpaa2_fd_get_len(&fd);
++ nq = netdev_get_tx_queue(net_dev, queue_mapping);
++ netdev_tx_sent_queue(nq, fd_len);
++
++ /* Everything that happens after this enqueues might race with
++ * the Tx confirmation callback for this frame
+ */
+- queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
+- fq = &priv->fq[queue_mapping];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-+ priv->tx_qdid, prio,
- fq->tx_qdbin, &fd);
+- fq->tx_qdbin, &fd);
++ err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
break;
-@@ -620,7 +944,7 @@ static netdev_tx_t dpaa2_eth_tx(struct s
+ }
+@@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, &fd, NULL);
+ free_tx_fd(priv, &fd, false);
++ netdev_tx_completed_queue(nq, 1, fd_len);
} else {
percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
-@@ -628,6 +952,8 @@ static netdev_tx_t dpaa2_eth_tx(struct s
+- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
++ percpu_stats->tx_bytes += fd_len;
+ }
return NETDEV_TX_OK;
err_build_fd:
err_alloc_headroom:
dev_kfree_skb(skb);
-@@ -639,13 +965,13 @@ err_alloc_headroom:
+@@ -637,48 +909,39 @@ err_alloc_headroom:
+
+ /* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+- struct dpaa2_eth_channel *ch,
++ struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused)
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id)
++ struct dpaa2_eth_fq *fq)
{
-+ struct device *dev = priv->net_dev->dev.parent;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
- u32 status = 0;
++ u32 fd_len = dpaa2_fd_get_len(fd);
u32 fd_errors;
- bool has_fas_errors = false;
/* Tracing point */
trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
-@@ -654,31 +980,28 @@ static void dpaa2_eth_tx_conf(struct dpa
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
++ percpu_extras->tx_conf_bytes += fd_len;
++
++ fq->dq_frames++;
++ fq->dq_bytes += fd_len;
-- /* Check frame errors in the FD field */
-- fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+ /* Check frame errors in the FD field */
+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- if (unlikely(fd_errors)) {
- /* We only check error bits in the FAS field if corresponding
- * FAERR bit is set in FD and the FAS field is marked as valid
- if (net_ratelimit())
- netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
- fd_errors);
-+ /* Check congestion state and wake all queues if necessary */
-+ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
-+ netif_tx_wake_all_queues(priv->net_dev);
- }
-
+- }
+-
- free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
-+ /* Check frame errors in the FD field */
-+ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+ free_tx_fd(priv, fd, true);
if (likely(!fd_errors))
}
static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
-@@ -728,26 +1051,27 @@ static int set_tx_csum(struct dpaa2_eth_
+@@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *buf;
+- void *buf;
++ struct page *page;
dma_addr_t addr;
- int i;
+ int i, err;
* alignment padding
*/
- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
-+ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
- if (unlikely(!buf))
+- if (unlikely(!buf))
++ /* allocate one page for each Rx buffer. WRIOP sees
++ * the entire page except for a tailroom reserved for
++ * skb shared info
++ */
++ page = dev_alloc_pages(0);
++ if (!page)
goto err_alloc;
- buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
-+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+-
+- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-+ DMA_BIDIRECTIONAL);
++ addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
-@@ -755,28 +1079,31 @@ static int add_bufs(struct dpaa2_eth_pri
+@@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
- buf, DPAA2_ETH_BUF_RAW_SIZE,
-+ buf, dpaa2_eth_buf_raw_size(priv),
++ page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE,
bpid);
}
+ buf_array, i)) == -EBUSY)
cpu_relax();
+
-+ /* If release command failed, clean up and bail out; not much
-+ * else we can do about it
++ /* If release command failed, clean up and bail out;
++ * not much else we can do about it
+ */
+ if (err) {
+ free_bufs(priv, buf_array, i);
return i;
err_map:
- skb_free_frag(buf);
+- skb_free_frag(buf);
++ __free_pages(page, 0);
err_alloc:
-+ /* If we managed to allocate at least some buffers, release them */
++ /* If we managed to allocate at least some buffers,
++ * release them to hardware
++ */
if (i)
goto release_bufs;
-@@ -796,9 +1123,10 @@ static int seed_pool(struct dpaa2_eth_pr
+@@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
*/
preempt_disable();
for (j = 0; j < priv->num_channels; j++) {
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-@@ -818,10 +1146,8 @@ static int seed_pool(struct dpaa2_eth_pr
+@@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
*/
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-@@ -830,27 +1156,16 @@ static void drain_bufs(struct dpaa2_eth_
+@@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
}
/* Function is called from softirq context only, so we don't need to guard
-@@ -862,19 +1177,19 @@ static int refill_pool(struct dpaa2_eth_
+@@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
{
int new_count;
return -ENOMEM;
return 0;
-@@ -887,7 +1202,8 @@ static int pull_channel(struct dpaa2_eth
+@@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
/* Retry while portal is busy */
do {
dequeues++;
cpu_relax();
} while (err == -EBUSY);
-@@ -902,20 +1218,21 @@ static int pull_channel(struct dpaa2_eth
- /* NAPI poll routine
- *
- * Frames are dequeued from the QMan channel associated with this NAPI context.
-- * Rx, Tx confirmation and (if configured) Rx error frames all count
-- * towards the NAPI budget.
-+ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
-+ * confirmation frames are limited by a threshold per NAPI poll cycle.
- */
+@@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
struct dpaa2_eth_channel *ch;
- int cleaned = 0, store_cleaned;
-+ int rx_cleaned = 0, tx_conf_cleaned = 0;
-+ bool store_cleaned;
struct dpaa2_eth_priv *priv;
++ int rx_cleaned = 0, txconf_cleaned = 0;
++ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
++ struct netdev_queue *nq;
++ int store_cleaned, work_done;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
err = pull_channel(ch);
if (unlikely(err))
break;
-@@ -923,29 +1240,29 @@ static int dpaa2_eth_poll(struct napi_st
+@@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch);
- cleaned += store_cleaned;
-+ store_cleaned = consume_frames(ch, &rx_cleaned,
-+ &tx_conf_cleaned);
++ store_cleaned = consume_frames(ch, &fq);
++ if (!store_cleaned)
++ break;
++ if (fq->type == DPAA2_RX_FQ) {
++ rx_cleaned += store_cleaned;
++ /* If these are XDP_REDIRECT frames, flush them now */
++ /* TODO: Do we need this? */
++ if (ch->flush) {
++ xdp_do_flush_map();
++ ch->flush = false;
++ }
++ } else {
++ txconf_cleaned += store_cleaned;
++ /* We have a single Tx conf FQ on this channel */
++ txc_fq = fq;
++ }
- /* If we have enough budget left for a full store,
- * try a new pull dequeue, otherwise we're done here
-+ /* If we've either consumed the budget with Rx frames,
-+ * or reached the Tx conf threshold, we're done.
++ /* If we either consumed the whole NAPI budget with Rx frames
++ * or we reached the Tx confirmations threshold, we're done.
*/
- if (store_cleaned == 0 ||
- cleaned > budget - DPAA2_ETH_STORE_SIZE)
- break;
- }
--
++ if (rx_cleaned >= budget ||
++ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
++ work_done = budget;
++ goto out;
++ }
++ } while (store_cleaned);
+
- if (cleaned < budget) {
- napi_complete_done(napi, cleaned);
- /* Re-enable data available notifications */
- cpu_relax();
- } while (err == -EBUSY);
- }
-+ if (rx_cleaned >= budget ||
-+ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
-+ return budget;
-+ } while (store_cleaned);
-
-- ch->stats.frames += cleaned;
-+ /* We didn't consume the entire budget, finish napi and
++ /* We didn't consume the entire budget, so finish napi and
+ * re-enable data availability notifications
+ */
-+ napi_complete(napi);
++ napi_complete_done(napi, rx_cleaned);
+ do {
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
-+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
-+ ch->nctx.desired_cpu);
++ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
++ ch->nctx.desired_cpu);
+
+- ch->stats.frames += cleaned;
++ work_done = max(rx_cleaned, 1);
- return cleaned;
-+ return max(rx_cleaned, 1);
++out:
++ if (txc_fq) {
++ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
++ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
++ txc_fq->dq_bytes);
++ txc_fq->dq_frames = 0;
++ txc_fq->dq_bytes = 0;
++ }
++
++ return work_done;
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
-@@ -1006,28 +1323,30 @@ static int dpaa2_eth_open(struct net_dev
+@@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
+ }
+ }
+
++static void update_tx_fqids(struct dpaa2_eth_priv *priv);
++
++static void update_pf(struct dpaa2_eth_priv *priv,
++ struct dpni_link_state *state)
++{
++ bool pause_frames;
++
++ pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
++ if (priv->tx_pause_frames != pause_frames) {
++ priv->tx_pause_frames = pause_frames;
++ set_rx_taildrop(priv);
++ }
++}
++
+ static int link_state_update(struct dpaa2_eth_priv *priv)
+ {
+- struct dpni_link_state state;
++ struct dpni_link_state state = {0};
+ int err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+@@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
+
+ priv->link_state = state;
+ if (state.up) {
++ update_tx_fqids(priv);
++ update_pf(priv, &state);
+ netif_carrier_on(priv->net_dev);
+ netif_tx_start_all_queues(priv->net_dev);
+ } else {
+@@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
netdev_err(net_dev, "dpni_enable() failed\n");
-@@ -1047,48 +1366,17 @@ static int dpaa2_eth_open(struct net_dev
+@@ -1047,51 +1355,20 @@ static int dpaa2_eth_open(struct net_dev
link_state_err:
enable_err:
- */
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch)
--{
++static int dpaa2_eth_stop(struct net_device *net_dev)
+ {
- u32 drained = 0, total = 0;
-
- do {
-
- return total;
-}
--
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int dpni_enabled = 0;
++ int retries = 10, i;
++ int err = 0;
+
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
-{
- struct dpaa2_eth_channel *ch;
- return drained;
-}
-
- static int dpaa2_eth_stop(struct net_device *net_dev)
- {
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int dpni_enabled;
+-static int dpaa2_eth_stop(struct net_device *net_dev)
+-{
+- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+- int dpni_enabled;
- int retries = 10;
- u32 drained;
-+ int retries = 10, i;
-+ int err = 0;
-
- netif_tx_stop_all_queues(net_dev);
- netif_carrier_off(net_dev);
-@@ -1105,56 +1393,24 @@ static int dpaa2_eth_stop(struct net_dev
+-
+- netif_tx_stop_all_queues(net_dev);
+- netif_carrier_off(net_dev);
++ netif_tx_stop_all_queues(net_dev);
++ netif_carrier_off(net_dev);
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+@@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
} while (dpni_enabled && --retries);
if (!retries) {
netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
}
static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
-@@ -1200,25 +1456,6 @@ static void dpaa2_eth_get_stats(struct n
+@@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
}
}
/* Copy mac unicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-@@ -1380,16 +1617,363 @@ static int dpaa2_eth_set_features(struct
+@@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
return 0;
}
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_buffer_layout buf_layout = {0};
++ u16 rx_buf_align;
+ int err;
+
+ /* We need to check for WRIOP version 1.0.0, but depending on the MC
+ */
+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
-+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
++ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ else
-+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
++ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
+ /* tx buffer */
-+ buf_layout.pass_timestamp = true;
+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ buf_layout.pass_timestamp = true;
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &buf_layout);
+ if (err) {
+ /* rx buffer */
+ buf_layout.pass_frame_status = true;
+ buf_layout.pass_parser_result = true;
-+ buf_layout.data_align = priv->rx_buf_align;
-+ buf_layout.private_data_size = 0;
++ buf_layout.data_align = rx_buf_align;
+ buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
++ buf_layout.private_data_size = 0;
+ /* If XDP program is attached, reserve extra space for
+ * potential header expansions
+ */
+ return 0;
+}
+
++#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
++#define DPNI_ENQUEUE_FQID_VER_MINOR 9
++
++static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq,
++ struct dpaa2_fd *fd, u8 prio)
++{
++ return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, prio,
++ fq->tx_qdbin, fd);
++}
++
++static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq,
++ struct dpaa2_fd *fd,
++ u8 prio __always_unused)
++{
++ return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
++ fq->tx_fqid, fd);
++}
++
++static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
++{
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
++ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
++ priv->enqueue = dpaa2_eth_enqueue_qd;
++ else
++ priv->enqueue = dpaa2_eth_enqueue_fq;
++}
++
++static void update_tx_fqids(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_fq *fq;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid = {0};
++ int i, err;
++
++ /* We only use Tx FQIDs for FQID-based enqueue, so check
++ * if DPNI version supports it before updating FQIDs
++ */
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
++ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
++ return;
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ if (fq->type != DPAA2_TX_CONF_FQ)
++ continue;
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0, fq->flowid,
++ &queue, &qid);
++ if (err)
++ goto out_err;
++
++ fq->tx_fqid = qid.fqid;
++ if (fq->tx_fqid == 0)
++ goto out_err;
++ }
++
++ return;
++
++out_err:
++ netdev_info(priv->net_dev,
++ "Error reading Tx FQID, fallback to QDID-based enqueue");
++ priv->enqueue = dpaa2_eth_enqueue_qd;
++}
++
+static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ * so we do the actual frame enqueue in ndo_xdp_xmit
+ */
+}
-+
+static int dpaa2_eth_update_xps(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
-@@ -1422,34 +2006,32 @@ static struct fsl_mc_device *setup_dpcon
+@@ -1422,34 +2062,32 @@ static struct fsl_mc_device *setup_dpcon
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_open() failed\n");
fsl_mc_object_free(dpcon);
return NULL;
-@@ -1502,7 +2084,14 @@ err_setup:
+@@ -1502,7 +2140,14 @@ err_setup:
static void free_channel(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *channel)
{
kfree(channel);
}
-@@ -1546,7 +2135,8 @@ static int setup_dpio(struct dpaa2_eth_p
+@@ -1546,7 +2191,8 @@ static int setup_dpio(struct dpaa2_eth_p
nctx->desired_cpu = i;
/* Register the new context */
- err = dpaa2_io_service_register(NULL, nctx);
+ channel->dpio = dpaa2_io_service_select(i);
-+ err = dpaa2_io_service_register(channel->dpio, nctx);
++ err = dpaa2_io_service_register(channel->dpio, nctx, dev);
if (err) {
dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
/* If no affine DPIO for this core, there's probably
-@@ -1586,7 +2176,7 @@ static int setup_dpio(struct dpaa2_eth_p
+@@ -1579,14 +2225,14 @@ static int setup_dpio(struct dpaa2_eth_p
+ /* Stop if we already have enough channels to accommodate all
+ * RX and TX conf queues
+ */
+- if (priv->num_channels == dpaa2_eth_queue_count(priv))
++ if (priv->num_channels == priv->dpni_attrs.num_queues)
+ break;
+ }
+
return 0;
err_set_cdan:
- dpaa2_io_service_deregister(NULL, nctx);
-+ dpaa2_io_service_deregister(channel->dpio, nctx);
++ dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
-@@ -1609,7 +2199,7 @@ static void free_dpio(struct dpaa2_eth_p
+@@ -1603,13 +2249,14 @@ err_alloc_ch:
+
+ static void free_dpio(struct dpaa2_eth_priv *priv)
+ {
+- int i;
++ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_channel *ch;
++ int i;
+
/* deregister CDAN notifications and free channels */
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- dpaa2_io_service_deregister(NULL, &ch->nctx);
-+ dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
++ dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
free_channel(priv, ch);
}
}
-@@ -1636,8 +2226,7 @@ static void set_fq_affinity(struct dpaa2
+@@ -1636,8 +2283,7 @@ static void set_fq_affinity(struct dpaa2
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_fq *fq;
/* For each FQ, pick one channel/CPU to deliver frames to.
* This may well change at runtime, either through irqbalance or
-@@ -1649,6 +2238,7 @@ static void set_fq_affinity(struct dpaa2
+@@ -1649,6 +2295,7 @@ static void set_fq_affinity(struct dpaa2
fq = &priv->fq[i];
switch (fq->type) {
case DPAA2_RX_FQ:
fq->target_cpu = rx_cpu;
rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
if (rx_cpu >= nr_cpu_ids)
-@@ -1656,6 +2246,7 @@ static void set_fq_affinity(struct dpaa2
- break;
- case DPAA2_TX_CONF_FQ:
- fq->target_cpu = txc_cpu;
-+
- txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
- if (txc_cpu >= nr_cpu_ids)
- txc_cpu = cpumask_first(&priv->dpio_cpumask);
-@@ -1665,11 +2256,13 @@ static void set_fq_affinity(struct dpaa2
+@@ -1665,11 +2312,13 @@ static void set_fq_affinity(struct dpaa2
}
fq->channel = get_affine_channel(priv, fq->target_cpu);
}
/* We have one TxConf FQ per Tx flow.
* The number of Tx and Rx queues is the same.
-@@ -1681,11 +2274,19 @@ static void setup_fqs(struct dpaa2_eth_p
+@@ -1681,11 +2330,19 @@ static void setup_fqs(struct dpaa2_eth_p
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
/* For each FQ, decide on which core to process incoming frames */
set_fq_affinity(priv);
-@@ -1735,6 +2336,9 @@ static int setup_dpbp(struct dpaa2_eth_p
+@@ -1735,6 +2392,9 @@ static int setup_dpbp(struct dpaa2_eth_p
}
priv->bpid = dpbp_attrs.bpid;
return 0;
err_get_attr:
-@@ -1756,13 +2360,59 @@ static void free_dpbp(struct dpaa2_eth_p
- fsl_mc_object_free(priv->dpbp_dev);
- }
-
-+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
-+{
-+ struct dpni_congestion_notification_cfg notif_cfg = {0};
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
-+
-+ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
-+ GFP_KERNEL);
-+
-+ if (!priv->cscn_unaligned)
-+ return -ENOMEM;
-+
-+ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
-+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, priv->cscn_dma)) {
-+ dev_err(dev, "Error mapping CSCN memory area\n");
-+ err = -ENOMEM;
-+ goto err_dma_map;
-+ }
-+
-+ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
-+ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
-+ notif_cfg.message_ctx = (u64)priv;
-+ notif_cfg.message_iova = priv->cscn_dma;
-+ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
-+ DPNI_CONG_OPT_COHERENT_WRITE;
-+ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0, ¬if_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_congestion_notification failed\n");
-+ goto err_set_cong;
-+ }
-+
-+ return 0;
-+
-+err_set_cong:
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+err_dma_map:
-+ kfree(priv->cscn_unaligned);
-+
-+ return err;
-+}
-+
- /* Configure the DPNI object this interface is associated with */
- static int setup_dpni(struct fsl_mc_device *ls_dev)
- {
+@@ -1762,7 +2422,7 @@ static int setup_dpni(struct fsl_mc_devi
struct device *dev = &ls_dev->dev;
struct dpaa2_eth_priv *priv;
struct net_device *net_dev;
int err;
net_dev = dev_get_drvdata(dev);
-@@ -1772,7 +2422,22 @@ static int setup_dpni(struct fsl_mc_devi
+@@ -1772,7 +2432,22 @@ static int setup_dpni(struct fsl_mc_devi
err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
if (err) {
dev_err(dev, "dpni_open() failed\n");
}
ls_dev->mc_io = priv->mc_io;
-@@ -1781,82 +2446,53 @@ static int setup_dpni(struct fsl_mc_devi
+@@ -1781,77 +2456,41 @@ static int setup_dpni(struct fsl_mc_devi
err = dpni_reset(priv->mc_io, 0, priv->mc_token);
if (err) {
dev_err(dev, "dpni_reset() failed\n");
- dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
- goto err_buf_layout;
- }
-+ /* Enable congestion notifications for Tx queues */
-+ err = setup_tx_congestion(priv);
-+ if (err)
-+ goto close;
++ set_enqueue_mode(priv);
- /* tx-confirm buffer */
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
- dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
- goto err_buf_layout;
- }
-+ /* allocate classification rule space */
-+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
-+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
++ priv->cls_rule = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
++ dpaa2_eth_fs_count(priv), GFP_KERNEL);
+ if (!priv->cls_rule)
+ goto close;
- dev_err(dev, "dpni_get_tx_data_offset() failed\n");
- goto err_data_offset;
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
-+ goto cls_free;
++ goto close;
}
- if ((priv->tx_data_offset % 64) != 0)
-err_buf_layout:
-err_get_attr:
-err_reset:
-+cls_free:
-+ kfree(priv->cls_rule);
+close:
dpni_close(priv->mc_io, 0, priv->mc_token);
-err_open:
return err;
}
- static void free_dpni(struct dpaa2_eth_priv *priv)
- {
-+ struct device *dev = priv->net_dev->dev.parent;
- int err;
-
- err = dpni_reset(priv->mc_io, 0, priv->mc_token);
-@@ -1865,6 +2501,11 @@ static void free_dpni(struct dpaa2_eth_p
+@@ -1865,6 +2504,7 @@ static void free_dpni(struct dpaa2_eth_p
err);
dpni_close(priv->mc_io, 0, priv->mc_token);
+
-+ kfree(priv->cls_rule);
-+
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ kfree(priv->cscn_unaligned);
}
static int setup_rx_flow(struct dpaa2_eth_priv *priv,
-@@ -1873,11 +2514,10 @@ static int setup_rx_flow(struct dpaa2_et
+@@ -1873,11 +2513,10 @@ static int setup_rx_flow(struct dpaa2_et
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
struct dpni_queue_id qid;
if (err) {
dev_err(dev, "dpni_get_queue(RX) failed\n");
return err;
-@@ -1890,7 +2530,7 @@ static int setup_rx_flow(struct dpaa2_et
+@@ -1889,24 +2528,136 @@ static int setup_rx_flow(struct dpaa2_et
+ queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
++ queue.flc.stash_control = 1;
++ queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
++ /* 01 01 00 - data, annotation, flow context */
++ queue.flc.value |= 0x14;
++
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, 0, fq->flowid,
+- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ DPNI_QUEUE_RX, fq->tc, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
++ DPNI_QUEUE_OPT_FLC,
&queue);
if (err) {
-@@ -1898,15 +2538,121 @@ static int setup_rx_flow(struct dpaa2_et
+ dev_err(dev, "dpni_set_queue(RX) failed\n");
return err;
}
return 0;
}
-@@ -1953,23 +2699,88 @@ static int setup_tx_flow(struct dpaa2_et
+@@ -1926,6 +2677,7 @@ static int setup_tx_flow(struct dpaa2_et
+ }
+
+ fq->tx_qdbin = qid.qdbin;
++ fq->tx_fqid = qid.fqid;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+@@ -1953,23 +2705,88 @@ static int setup_tx_flow(struct dpaa2_et
return 0;
}
+}
+#endif
+
-+/* default hash key fields */
-+static struct dpaa2_eth_dist_fields default_dist_fields[] = {
++/* Supported header fields for Rx hash distribution key */
++static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
.size = 1,
}, {
/* Using UDP ports, this is functionally equivalent to raw
-@@ -1978,90 +2789,182 @@ static const struct dpaa2_eth_hash_field
+@@ -1978,41 +2795,170 @@ static const struct dpaa2_eth_hash_field
.rxnfc_field = RXH_L4_B_0_1,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_SRC,
};
-/* Set RX hash options
-- * flags is a combination of RXH_ bits
-- */
--static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
-+static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
-+ dma_addr_t key_iova)
- {
-- struct device *dev = net_dev->dev.parent;
-- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-- struct dpkg_profile_cfg cls_cfg;
++/* Configure the Rx hash key using the legacy API */
++static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
++{
+ struct device *dev = priv->net_dev->dev.parent;
- struct dpni_rx_tc_dist_cfg dist_cfg;
-- u8 *dma_mem;
-- int i;
-- int err = 0;
-+ int i, err;
-
-- if (!dpaa2_eth_hash_enabled(priv)) {
-- dev_dbg(dev, "Hashing support is not enabled\n");
-- return 0;
-+ /* In legacy mode, we can't configure flow steering independently */
-+ if (!dpaa2_eth_hash_enabled(priv))
-+ return -EOPNOTSUPP;
++ struct dpni_rx_tc_dist_cfg dist_cfg;
++ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
-+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
-+ } else {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
- }
-
-- memset(&cls_cfg, 0, sizeof(cls_cfg));
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
-+ &dist_cfg);
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
++ i, &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
-+ return err;
++ break;
+ }
+ }
+
-+ return 0;
++ return err;
+}
+
-+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++/* Configure the Rx hash key using the new API */
++static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
-+ int i, err;
-
-- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
-- struct dpkg_extract *key =
-- &cls_cfg.extracts[cls_cfg.num_extracts];
-+ if (!dpaa2_eth_hash_enabled(priv))
-+ return -EOPNOTSUPP;
-
-- if (!(flags & hash_fields[i].rxnfc_field))
-- continue;
++ int i, err = 0;
++
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ dist_cfg.enable = true;
++ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
-+
-+ err = dpni_set_rx_hash_dist(priv->mc_io, 0,
-+ priv->mc_token, &dist_cfg);
++ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
++ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
-+ return err;
++ break;
+ }
+ }
+
-+ return 0;
++ return err;
+}
+
-+static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++/* Configure the Rx flow classification key */
++static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
-+ int i, err;
-
-- if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
-- dev_err(dev, "error adding key extraction rule, too many rules?\n");
-- return -E2BIG;
-+ if (!dpaa2_eth_fs_enabled(priv))
-+ return -EOPNOTSUPP;
++ int i, err = 0;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ dist_cfg.enable = true;
++ dist_cfg.enable = 1;
+
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ dist_cfg.tc = i;
-+
-+ err = dpni_set_rx_fs_dist(priv->mc_io, 0,
-+ priv->mc_token, &dist_cfg);
++ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
++ &dist_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
-+ return err;
- }
++ break;
++ }
+ }
+
-+ return 0;
++ return err;
+}
-
-+int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
-+ enum dpaa2_eth_rx_dist type, u32 key_fields)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg cls_cfg;
-+ struct dpkg_extract *key;
-+ u32 hash_fields = 0;
-+ dma_addr_t key_iova;
-+ u8 *key_mem;
-+ int i, err;
+
-+ memset(&cls_cfg, 0, sizeof(cls_cfg));
++/* Size of the Rx flow classification key */
++int dpaa2_eth_cls_key_size(u64 fields)
++{
++ int i, size = 0;
+
-+ for (i = 0; i < priv->num_dist_fields; i++) {
-+ if (!(key_fields & priv->dist_fields[i].id))
++ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
++ if (!(fields & dist_fields[i].id))
+ continue;
++ size += dist_fields[i].size;
++ }
++
++ return size;
++}
++
++/* Offset of header field in Rx classification key */
++int dpaa2_eth_cls_fld_off(int prot, int field)
++{
++ int i, off = 0;
++
++ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
++ if (dist_fields[i].cls_prot == prot &&
++ dist_fields[i].cls_field == field)
++ return off;
++ off += dist_fields[i].size;
++ }
++
++ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
++ return 0;
++}
++
++/* Prune unused fields from the classification rule.
++ * Used when masking is not supported
++ */
++void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
++{
++ int off = 0, new_off = 0;
++ int i, size;
++
++ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
++ size = dist_fields[i].size;
++ if (dist_fields[i].id & fields) {
++ memcpy(key_mem + new_off, key_mem + off, size);
++ new_off += size;
++ }
++ off += size;
++ }
++}
+
-+ key = &cls_cfg.extracts[cls_cfg.num_extracts];
++/* Set Rx distribution (hash or flow classification) key
+ * flags is a combination of RXH_ bits
+ */
+-static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
++static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
++ enum dpaa2_eth_rx_dist type, u64 flags)
+ {
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpkg_profile_cfg cls_cfg;
+- struct dpni_rx_tc_dist_cfg dist_cfg;
++ u32 rx_hash_fields = 0;
++ dma_addr_t key_iova;
+ u8 *dma_mem;
+ int i;
+ int err = 0;
+
+- if (!dpaa2_eth_hash_enabled(priv)) {
+- dev_dbg(dev, "Hashing support is not enabled\n");
+- return 0;
+- }
+-
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
++ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ struct dpkg_extract *key =
+ &cls_cfg.extracts[cls_cfg.num_extracts];
+
+- if (!(flags & hash_fields[i].rxnfc_field))
++ /* For both Rx hashing and classification keys
++ * we set only the selected fields.
++ */
++ if (!(flags & dist_fields[i].id))
+ continue;
++ if (type == DPAA2_ETH_RX_DIST_HASH)
++ rx_hash_fields |= dist_fields[i].rxnfc_field;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ dev_err(dev, "error adding key extraction rule, too many rules?\n");
+@@ -2020,49 +2966,107 @@ static int dpaa2_eth_set_hash(struct net
+ }
+
key->type = DPKG_EXTRACT_FROM_HDR;
- key->extract.from_hdr.prot = hash_fields[i].cls_prot;
-+ key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
++ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
key->extract.from_hdr.type = DPKG_FULL_FIELD;
- key->extract.from_hdr.field = hash_fields[i].cls_field;
-+ key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
++ key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
++ }
++
++ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
++ if (err) {
++ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
++ goto free_key;
++ }
++
++ /* Prepare for setting the rx dist */
++ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_iova)) {
++ dev_err(dev, "DMA mapping failed\n");
++ err = -ENOMEM;
++ goto free_key;
++ }
++
++ if (type == DPAA2_ETH_RX_DIST_HASH) {
++ if (dpaa2_eth_has_legacy_dist(priv))
++ err = config_legacy_hash_key(priv, key_iova);
++ else
++ err = config_hash_key(priv, key_iova);
++ } else {
++ err = config_cls_key(priv, key_iova);
++ }
++
++ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
++ priv->rx_hash_fields = rx_hash_fields;
++
++free_key:
++ kfree(dma_mem);
++ return err;
++}
++
++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u64 key = 0;
++ int i;
++
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
++
++ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
++ if (dist_fields[i].rxnfc_field & flags)
++ key |= dist_fields[i].id;
++
++ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
++}
++
++int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
++{
++ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
++}
++
++static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
- priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
-+ hash_fields |= priv->dist_fields[i].rxnfc_field;
++ /* Check if we actually support Rx flow classification */
++ if (dpaa2_eth_has_legacy_dist(priv)) {
++ dev_dbg(dev, "Rx cls not supported by current MC version\n");
++ return -EOPNOTSUPP;
}
- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
- if (!dma_mem)
-+ key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-+ if (!key_mem)
- return -ENOMEM;
-
+- return -ENOMEM;
+-
- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
-+ err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
- if (err) {
- dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+- if (err) {
+- dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
- goto err_prep_key;
-+ goto free_key;
++ if (!dpaa2_eth_fs_enabled(priv)) {
++ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
++ return -EOPNOTSUPP;
}
- memset(&dist_cfg, 0, sizeof(dist_cfg));
- DPAA2_CLASSIFIER_DMA_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
-+ key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_iova)) {
- dev_err(dev, "DMA mapping failed\n");
- err = -ENOMEM;
+- dev_err(dev, "DMA mapping failed\n");
+- err = -ENOMEM;
- goto err_dma_map;
-+ goto free_key;
++ if (!dpaa2_eth_hash_enabled(priv)) {
++ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
++ return -EOPNOTSUPP;
}
- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
-+ switch (type) {
-+ case DPAA2_ETH_RX_DIST_LEGACY:
-+ err = legacy_config_dist_key(priv, key_iova);
-+ break;
-+ case DPAA2_ETH_RX_DIST_HASH:
-+ err = config_hash_key(priv, key_iova);
-+ break;
-+ case DPAA2_ETH_RX_DIST_FS:
-+ err = config_fs_key(priv, key_iova);
-+ break;
-+ default:
-+ err = -EINVAL;
-+ break;
-+ }
++ /* If there is no support for masking in the classification table,
++ * we don't set a default key, as it will depend on the rules
++ * added by the user at runtime.
++ */
++ if (!dpaa2_eth_fs_mask_enabled(priv))
++ goto out;
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-- if (err)
++ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
-+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (err) {
-+ if (err != -EOPNOTSUPP)
-+ dev_err(dev, "Distribution key config failed\n");
-+ goto free_key;
-+ }
++ return err;
-err_dma_map:
-err_prep_key:
- kfree(dma_mem);
-+ if (type != DPAA2_ETH_RX_DIST_FS)
-+ priv->rx_hash_fields = hash_fields;
+- return err;
++out:
++ priv->rx_cls_enabled = 1;
+
-+free_key:
-+ kfree(key_mem);
- return err;
++ return 0;
}
-@@ -2080,6 +2983,7 @@ static int bind_dpni(struct dpaa2_eth_pr
+ /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+@@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
-@@ -2087,17 +2991,36 @@ static int bind_dpni(struct dpaa2_eth_pr
+@@ -2087,17 +3092,28 @@ static int bind_dpni(struct dpaa2_eth_pr
return err;
}
- /* have the interface implicitly distribute traffic based on supported
- * header fields
-+ /* Verify classification options and disable hashing and/or
-+ * flow steering support in case of invalid configuration values
++ /* have the interface implicitly distribute traffic based on
++ * the default hash key
*/
- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
- if (err)
- netdev_err(net_dev, "Failed to configure hashing\n");
-+ priv->dist_fields = default_dist_fields;
-+ priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
-+ check_cls_support(priv);
++ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
++ if (err && err != -EOPNOTSUPP)
++ dev_err(dev, "Failed to configure hashing\n");
+
-+ /* have the interface implicitly distribute traffic based on
-+ * a static hash key. Also configure flow steering key, if supported.
-+ * Errors here are not blocking, so just let the called function
-+ * print its error message and move along.
++ /* Configure the flow classification key; it includes all
++ * supported header fields and cannot be modified at runtime
+ */
-+ if (dpaa2_eth_has_legacy_dist(priv)) {
-+ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
-+ DPAA2_ETH_DIST_ALL);
-+ } else {
-+ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
-+ DPAA2_ETH_DIST_DEFAULT_HASH);
-+ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
-+ DPAA2_ETH_DIST_ALL);
-+ }
++ err = dpaa2_eth_set_default_cls(priv);
++ if (err && err != -EOPNOTSUPP)
++ dev_err(dev, "Failed to configure Rx classification key\n");
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
&err_cfg);
if (err) {
-@@ -2114,6 +3037,11 @@ static int bind_dpni(struct dpaa2_eth_pr
+@@ -2114,6 +3130,11 @@ static int bind_dpni(struct dpaa2_eth_pr
case DPAA2_TX_CONF_FQ:
err = setup_tx_flow(priv, &priv->fq[i]);
break;
default:
dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
return -EINVAL;
-@@ -2237,11 +3165,14 @@ static int netdev_init(struct net_device
+@@ -2237,11 +3258,14 @@ static int netdev_init(struct net_device
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
err = set_mac_addr(priv);
if (err)
-@@ -2255,14 +3186,14 @@ static int netdev_init(struct net_device
+@@ -2255,14 +3279,14 @@ static int netdev_init(struct net_device
return err;
}
-
- /* Set MTU limits */
- net_dev->min_mtu = 68;
-+ /* Set MTU upper limit; lower limit is default (68B) */
++ /* Set MTU upper limit; lower limit is 68B (default value) */
net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-+ (u16)DPAA2_ETH_MFL);
++ DPAA2_ETH_MFL);
+ if (err) {
+ dev_err(dev, "dpni_set_max_frame_length() failed\n");
+ return err;
/* Set actual number of queues in the net device */
num_queues = dpaa2_eth_queue_count(priv);
-@@ -2277,12 +3208,23 @@ static int netdev_init(struct net_device
+@@ -2277,12 +3301,23 @@ static int netdev_init(struct net_device
return err;
}
return 0;
}
-@@ -2303,14 +3245,9 @@ static int poll_link_state(void *arg)
+@@ -2303,14 +3338,9 @@ static int poll_link_state(void *arg)
return 0;
}
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
-@@ -2320,18 +3257,12 @@ static irqreturn_t dpni_irq0_handler_thr
+@@ -2320,18 +3350,12 @@ static irqreturn_t dpni_irq0_handler_thr
DPNI_IRQ_INDEX, &status);
if (unlikely(err)) {
netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
return IRQ_HANDLED;
}
-@@ -2348,8 +3279,7 @@ static int setup_irqs(struct fsl_mc_devi
+@@ -2348,8 +3372,7 @@ static int setup_irqs(struct fsl_mc_devi
irq = ls_dev->irqs[0];
err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
if (err < 0) {
-@@ -2405,6 +3335,393 @@ static void del_ch_napi(struct dpaa2_eth
+@@ -2405,6 +3428,393 @@ static void del_ch_napi(struct dpaa2_eth
}
}
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
-@@ -2415,7 +3732,7 @@ static int dpaa2_eth_probe(struct fsl_mc
+@@ -2415,7 +3825,7 @@ static int dpaa2_eth_probe(struct fsl_mc
dev = &dpni_dev->dev;
/* Net device */
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
-@@ -2433,7 +3750,10 @@ static int dpaa2_eth_probe(struct fsl_mc
+@@ -2433,7 +3843,10 @@ static int dpaa2_eth_probe(struct fsl_mc
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
if (err) {
goto err_portal_alloc;
}
-@@ -2456,9 +3776,6 @@ static int dpaa2_eth_probe(struct fsl_mc
+@@ -2456,9 +3869,6 @@ static int dpaa2_eth_probe(struct fsl_mc
if (err)
goto err_bind;
/* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
if (!priv->percpu_stats) {
-@@ -2491,7 +3808,14 @@ static int dpaa2_eth_probe(struct fsl_mc
+@@ -2491,7 +3901,14 @@ static int dpaa2_eth_probe(struct fsl_mc
if (err)
goto err_alloc_rings;
err = setup_irqs(dpni_dev);
if (err) {
-@@ -2499,25 +3823,41 @@ static int dpaa2_eth_probe(struct fsl_mc
+@@ -2499,25 +3916,41 @@ static int dpaa2_eth_probe(struct fsl_mc
priv->poll_thread = kthread_run(poll_link_state, priv,
"%s_poll_link", net_dev->name);
if (IS_ERR(priv->poll_thread)) {
del_ch_napi(priv);
err_bind:
free_dpbp(priv);
-@@ -2544,8 +3884,15 @@ static int dpaa2_eth_remove(struct fsl_m
+@@ -2544,8 +3977,15 @@ static int dpaa2_eth_remove(struct fsl_m
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
-@@ -2555,8 +3902,6 @@ static int dpaa2_eth_remove(struct fsl_m
+@@ -2555,17 +3995,16 @@ static int dpaa2_eth_remove(struct fsl_m
free_rings(priv);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
free_dpbp(priv);
free_dpio(priv);
free_dpni(priv);
-@@ -2566,6 +3911,8 @@ static int dpaa2_eth_remove(struct fsl_m
- dev_set_drvdata(dev, NULL);
+
+ fsl_mc_portal_free(priv->mc_io);
+
+- dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
+ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
return 0;
}
-@@ -2588,4 +3935,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
+@@ -2588,4 +4027,34 @@ static struct fsl_mc_driver dpaa2_eth_dr
.match_id_table = dpaa2_eth_match_id_table
};
+module_exit(dpaa2_eth_driver_exit);
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -33,6 +33,7 @@
+@@ -1,40 +1,15 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Freescale Semiconductor nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H
+#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
++#include <linux/filter.h>
-@@ -44,9 +45,17 @@
+ #include "../../fsl-mc/include/dpaa2-io.h"
+ #include "../../fsl-mc/include/dpaa2-fd.h"
+@@ -44,6 +19,9 @@
#include "dpni-cmd.h"
#include "dpaa2-eth-trace.h"
#define DPAA2_ETH_STORE_SIZE 16
-+/* We set a max threshold for how many Tx confirmations we should process
-+ * on a NAPI poll call, they take less processing time.
-+ */
-+#define TX_CONF_PER_NAPI_POLL 256
-+
- /* Maximum number of scatter-gather entries in an ingress frame,
- * considering the maximum receive frame size is 64K
- */
-@@ -60,6 +69,14 @@
+@@ -60,43 +38,59 @@
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
+-/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
+- * frames in the Rx queues (length of the current frame is not
+- * taken into account when making the taildrop decision)
+- */
+-#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+-
+-/* Buffer quota per queue. Must be large enough such that for minimum sized
+- * frames taildrop kicks in before the bpool gets depleted, so we compute
+- * how many 64B frames fit inside the taildrop threshold and add a margin
+- * to accommodate the buffer refill delay.
+- */
+-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
+-#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+-#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
+
+ /* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+ #define DPAA2_ETH_BUFS_PER_CMD 7
+
+-/* Hardware requires alignment for ingress/egress buffer addresses
+- * and ingress buffer lengths.
++/* Set the taildrop threshold to 1MB to allow the enqueue of a sufficiently
++ * large number of jumbo frames in the Rx queues (length of the current frame
++ * is not taken into account when making the taildrop decision)
++ */
++#define DPAA2_ETH_TAILDROP_THRESH (1024 * 1024)
+
-+/* Maximum number of buffers that can be acquired/released through a single
-+ * QBMan command
++/* Maximum number of Tx confirmation frames to be processed
++ * in a single NAPI call
+ */
-+#define DPAA2_ETH_BUFS_PER_CMD 7
++#define DPAA2_ETH_TXCONF_PER_NAPI 256
+
- /* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
- * frames in the Rx queues (length of the current frame is not
- * taken into account when making the taildrop decision)
-@@ -72,31 +89,32 @@
- * to accommodate the buffer refill delay.
++/* Buffer quota per channel.
++ * We want to keep in check number of ingress frames in flight: for small
++ * sized frames, buffer pool depletion will kick in first; for large sizes,
++ * Rx FQ taildrop threshold will ensure only a reasonable number of frames
++ * will be pending at any given time.
*/
- #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
--#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
--#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
-+#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+-#define DPAA2_ETH_RX_BUF_SIZE 2048
++#define DPAA2_ETH_NUM_BUFS_PER_CH 1024
+#define DPAA2_ETH_REFILL_THRESH(priv) \
+ ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
-
--/* Maximum number of buffers that can be acquired/released through a single
-- * QBMan command
-- */
--#define DPAA2_ETH_BUFS_PER_CMD 7
++
+/* Global buffer quota in case flow control is enabled */
+#define DPAA2_ETH_NUM_BUFS_FC 256
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
-+#define DPAA2_ETH_TX_BUF_ALIGN 64
-
--/* Hardware requires alignment for ingress/egress buffer addresses
-- * and ingress buffer lengths.
-+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
-+ * to 256B. For newer revisions, the requirement is only for 64B alignment
- */
-+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
-+#define DPAA2_ETH_RX_BUF_ALIGN 64
-+
- #define DPAA2_ETH_RX_BUF_SIZE 2048
--#define DPAA2_ETH_TX_BUF_ALIGN 64
+ #define DPAA2_ETH_TX_BUF_ALIGN 64
-#define DPAA2_ETH_RX_BUF_ALIGN 256
-#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
- ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
- (DPAA2_ETH_RX_BUF_SIZE + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
- DPAA2_ETH_RX_BUF_ALIGN)
-+#define DPAA2_ETH_SKB_SIZE \
-+ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
-+/* PTP nominal frequency 1GHz */
-+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
++#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
++#define DPAA2_ETH_RX_BUF_TAILROOM \
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
++#define DPAA2_ETH_RX_BUF_SIZE \
++ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
+
+/* Hardware annotation area in RX/TX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE 64
+#define DPAA2_ETH_TX_HWA_SIZE 128
++
++/* PTP nominal frequency 1GHz */
++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
++
++/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
++ * to 256B. For newer revisions, the requirement is only for 64B alignment
++ */
++#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
++#define DPAA2_ETH_RX_BUF_ALIGN 64
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
-@@ -104,12 +122,32 @@
+@@ -104,12 +98,32 @@
*/
#define DPAA2_ETH_SWA_SIZE 64
};
/* Annotation valid bits in FD FRC */
-@@ -120,23 +158,14 @@ struct dpaa2_eth_swa {
- #define DPAA2_FD_FRC_FASWOV 0x0800
+@@ -121,22 +135,14 @@ struct dpaa2_eth_swa {
#define DPAA2_FD_FRC_FAICFDV 0x0400
--/* Error bits in FD CTRL */
+ /* Error bits in FD CTRL */
-#define DPAA2_FD_CTRL_UFD 0x00000004
-#define DPAA2_FD_CTRL_SBE 0x00000008
-#define DPAA2_FD_CTRL_FSE 0x00000020
+ FD_CTRL_FAERR)
/* Annotation bits in FD CTRL */
- #define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+-#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
-#define DPAA2_FD_CTRL_PTA 0x00800000
-#define DPAA2_FD_CTRL_PTV1 0x00400000
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */
/* Frame annotation status */
struct dpaa2_fas {
-@@ -144,7 +173,7 @@ struct dpaa2_fas {
+@@ -144,7 +150,7 @@ struct dpaa2_fas {
u8 ppid;
__le16 ifpid;
__le32 status;
/* Frame annotation status word is located in the first 8 bytes
* of the buffer's hardware annoatation area
-@@ -152,11 +181,45 @@ struct dpaa2_fas {
+@@ -152,11 +158,45 @@ struct dpaa2_fas {
#define DPAA2_FAS_OFFSET 0
#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
+}
+
-+static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
++static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
/* Error and status bits in the frame annotation status word */
/* Debug frame, otherwise supposed to be discarded */
-@@ -203,11 +266,6 @@ struct dpaa2_fas {
+@@ -203,11 +243,6 @@ struct dpaa2_fas {
DPAA2_FAS_BLE | \
DPAA2_FAS_L3CE | \
DPAA2_FAS_L4CE)
/* Time in milliseconds between link state updates */
#define DPAA2_ETH_LINK_STATE_REFRESH 1000
-@@ -218,6 +276,14 @@ struct dpaa2_fas {
- */
- #define DPAA2_ETH_ENQUEUE_RETRIES 10
-
-+/* Tx congestion entry & exit thresholds, in number of bytes.
-+ * We allow a maximum of 512KB worth of frames pending processing on the Tx
-+ * queues of an interface
-+ */
-+#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
-+#define DPAA2_ETH_TX_CONG_EXIT_THRESH \
-+ (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
-+
- /* Driver statistics, other than those in struct rtnl_link_stats64.
- * These are usually collected per-CPU and aggregated by ethtool.
- */
-@@ -226,6 +292,7 @@ struct dpaa2_eth_drv_stats {
+@@ -226,6 +261,7 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Enqueues retried due to portal busy */
-@@ -236,6 +303,8 @@ struct dpaa2_eth_drv_stats {
- struct dpaa2_eth_fq_stats {
- /* Number of frames received on this queue */
- __u64 frames;
-+ /* Number of times this queue entered congestion */
-+ __u64 congestion_entry;
- };
-
- /* Per-channel statistics */
-@@ -250,17 +319,23 @@ struct dpaa2_eth_ch_stats {
+@@ -250,17 +286,23 @@ struct dpaa2_eth_ch_stats {
__u64 pull_err;
};
};
struct dpaa2_eth_priv;
-@@ -269,6 +344,7 @@ struct dpaa2_eth_fq {
+@@ -268,15 +310,19 @@ struct dpaa2_eth_priv;
+ struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
++ u32 tx_fqid;
u16 flowid;
+ u8 tc;
int target_cpu;
++ u32 dq_frames;
++ u32 dq_bytes;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
-@@ -276,7 +352,8 @@ struct dpaa2_eth_fq {
- void (*consume)(struct dpaa2_eth_priv *,
- struct dpaa2_eth_channel *,
- const struct dpaa2_fd *,
+
+- void (*consume)(struct dpaa2_eth_priv *,
+- struct dpaa2_eth_channel *,
+- const struct dpaa2_fd *,
- struct napi_struct *);
-+ struct napi_struct *,
-+ u16 queue_id);
++ void (*consume)(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
};
-@@ -285,24 +362,53 @@ struct dpaa2_eth_channel {
+@@ -285,19 +331,29 @@ struct dpaa2_eth_channel {
struct fsl_mc_device *dpcon;
int dpcon_id;
int ch_id;
+ u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ u8 rel_buf_cnt;
+ bool flush;
-+};
-+
-+struct dpaa2_eth_cls_rule {
-+ struct ethtool_rx_flow_spec fs;
-+ bool in_use;
};
-struct dpaa2_eth_hash_fields {
u64 rxnfc_field;
enum net_prot cls_prot;
int cls_field;
-+ int offset;
int size;
-+ u32 id;
++ u64 id;
++};
++
++struct dpaa2_eth_cls_rule {
++ struct ethtool_rx_flow_spec fs;
++ u8 in_use;
};
/* Driver private data */
- struct dpaa2_eth_priv {
- struct net_device *net_dev;
-+ /* Standard statistics */
-+ struct rtnl_link_stats64 __percpu *percpu_stats;
-+ /* Extra stats, in addition to the ones known by the kernel */
-+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
-+ u16 tx_data_offset;
-+ u16 bpid;
-+ u16 tx_qdid;
-+ u16 rx_buf_align;
-+ struct iommu_domain *iommu_domain;
-+ int max_bufs_per_ch;
-+ int refill_thresh;
-+ bool has_xdp_prog;
-+
-+ void *cscn_mem; /* Tx congestion notifications are written here */
-+ void *cscn_unaligned;
-+ dma_addr_t cscn_dma;
+@@ -306,17 +362,29 @@ struct dpaa2_eth_priv {
u8 num_fqs;
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
-@@ -311,51 +417,193 @@ struct dpaa2_eth_priv {
++ int (*enqueue)(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq,
++ struct dpaa2_fd *fd, u8 prio);
+
+ u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++ int max_bufs_per_ch;
++ int refill_thresh;
++
++ bool has_xdp_prog;
struct dpni_attr dpni_attrs;
-- u16 tx_data_offset;
--
+ u16 dpni_ver_major;
+ u16 dpni_ver_minor;
+ u16 tx_data_offset;
+
struct fsl_mc_device *dpbp_dev;
-- u16 bpid;
-- struct iommu_domain *iommu_domain;
+ u16 bpid;
+ struct iommu_domain *iommu_domain;
-- u16 tx_qdid;
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++
+ u16 tx_qdid;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
- * This is the cpu set on which Rx and Tx conf frames are processed
- */
- struct cpumask dpio_cpumask;
+@@ -337,13 +405,30 @@ struct dpaa2_eth_priv {
-- /* Standard statistics */
-- struct rtnl_link_stats64 __percpu *percpu_stats;
-- /* Extra stats, in addition to the ones known by the kernel */
-- struct dpaa2_eth_drv_stats __percpu *percpu_extras;
--
- u16 mc_token;
-
- struct dpni_link_state link_state;
- bool do_link_poll;
- struct task_struct *poll_thread;
-
-+ /* Rx distribution (hash and flow steering) header fields
-+ * supported by the driver
-+ */
-+ struct dpaa2_eth_dist_fields *dist_fields;
-+ u8 num_dist_fields;
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
++ u64 rx_cls_fields;
++ struct dpaa2_eth_cls_rule *cls_rule;
++ u8 rx_cls_enabled;
+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
+ struct dpaa2_debugfs dbg;
+#endif
-+ /* array of classification rules */
-+ struct dpaa2_eth_cls_rule *cls_rule;
+ struct dpni_tx_shaping_cfg shaping_cfg;
+
+ u8 dcbx_mode;
+ bool tx_pause_frames;
+
+ bool ceetm_en;
-+};
-+
-+enum dpaa2_eth_rx_dist {
-+ DPAA2_ETH_RX_DIST_HASH,
-+ DPAA2_ETH_RX_DIST_FS,
-+ DPAA2_ETH_RX_DIST_LEGACY
};
-/* default Rx hash options, set during probing */
--#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
-- | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
-- | RXH_L4_B_2_3)
-+/* Supported Rx distribution field ids */
-+#define DPAA2_ETH_DIST_ETHSRC BIT(0)
-+#define DPAA2_ETH_DIST_ETHDST BIT(1)
-+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
-+#define DPAA2_ETH_DIST_VLAN BIT(3)
-+#define DPAA2_ETH_DIST_IPSRC BIT(4)
-+#define DPAA2_ETH_DIST_IPDST BIT(5)
-+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
-+#define DPAA2_ETH_DIST_L4SRC BIT(7)
-+#define DPAA2_ETH_DIST_L4DST BIT(8)
-+#define DPAA2_ETH_DIST_ALL (~0U)
-+
-+/* Default Rx hash key */
-+#define DPAA2_ETH_DIST_DEFAULT_HASH \
-+ (DPAA2_ETH_DIST_IPPROTO | \
-+ DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
-+ DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
+ #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+ | RXH_L4_B_2_3)
++/* default Rx hash options, set during probing */
++#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
++ RXH_L4_B_0_1 | RXH_L4_B_2_3)
++
#define dpaa2_eth_hash_enabled(priv) \
((priv)->dpni_attrs.num_queues > 1)
-+#define dpaa2_eth_fs_enabled(priv) \
-+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
-+
-+#define dpaa2_eth_fs_mask_enabled(priv) \
-+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
-+
-+#define dpaa2_eth_fs_count(priv) \
-+ ((priv)->dpni_attrs.fs_entries)
-+
- /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
- #define DPAA2_CLASSIFIER_DMA_SIZE 256
+@@ -352,10 +437,127 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern const char dpaa2_eth_drv_version[];
-
--static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
++extern int dpaa2_phc_index;
++
+static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
+ u16 ver_major, u16 ver_minor)
+{
+	return priv->dpni_ver_major == ver_major ? priv->dpni_ver_minor - ver_minor : priv->dpni_ver_major - ver_major;
+}
+
-+#define DPNI_DIST_KEY_VER_MAJOR 7
-+#define DPNI_DIST_KEY_VER_MINOR 5
++/* Minimum firmware version that supports a more flexible API
++ * for configuring the Rx flow hash key
++ */
++#define DPNI_RX_DIST_KEY_VER_MAJOR 7
++#define DPNI_RX_DIST_KEY_VER_MINOR 5
+
-+static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
-+{
-+ return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
-+ DPNI_DIST_KEY_VER_MINOR) < 0);
-+}
++#define dpaa2_eth_has_legacy_dist(priv) \
++ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
++ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
-+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
-+ * the buffer also needs space for its shared info struct, and we need
-+ * to allocate enough to accommodate hardware alignment restrictions
-+ */
-+static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
-+{
-+ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
-+}
++#define dpaa2_eth_fs_enabled(priv) \
++ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
-+/* Total headroom needed by the hardware in Tx frame buffers */
-+static inline unsigned int
-+dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
++#define dpaa2_eth_fs_mask_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++
++#define dpaa2_eth_fs_count(priv) \
++ ((priv)->dpni_attrs.fs_entries)
++
++#define dpaa2_eth_queue_count(priv) \
++ ((priv)->num_channels)
++
++#define dpaa2_eth_tc_count(priv) \
++ ((priv)->dpni_attrs.num_tcs)
++
++enum dpaa2_eth_rx_dist {
++ DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_RX_DIST_CLS
++};
++
++/* Unique IDs for the supported Rx classification header fields */
++#define DPAA2_ETH_DIST_ETHDST BIT(0)
++#define DPAA2_ETH_DIST_ETHSRC BIT(1)
++#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
++#define DPAA2_ETH_DIST_VLAN BIT(3)
++#define DPAA2_ETH_DIST_IPSRC BIT(4)
++#define DPAA2_ETH_DIST_IPDST BIT(5)
++#define DPAA2_ETH_DIST_IPPROTO BIT(6)
++#define DPAA2_ETH_DIST_L4SRC BIT(7)
++#define DPAA2_ETH_DIST_L4DST BIT(8)
++#define DPAA2_ETH_DIST_ALL (~0U)
++
++static inline
++unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb)
+{
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
+ */
+ if (!skb)
+ return headroom;
-+
+
+-static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+ /* For non-linear skbs we have no headroom requirement, as we build a
+ * SG frame with a newly allocated SGT buffer
+ */
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
-+ * no realloc'ing in forwarding scenarios. We need to reserve enough space
-+ * such that we can accommodate the maximum required Tx offset and alignment
-+ * in the ingress frame buffer
++ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
+{
-+ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
-+ DPAA2_ETH_RX_HWA_SIZE;
-+}
-+
-+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
- {
- return priv->dpni_attrs.num_queues;
- }
-
-+static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
-+{
-+ return priv->dpni_attrs.num_tcs;
++ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
+}
+
+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
+}
+
+static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
-+{
+ {
+- return priv->dpni_attrs.num_queues;
+ return 1;
-+}
-+
-+void check_cls_support(struct dpaa2_eth_priv *priv);
+ }
+
++int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
++int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
++int dpaa2_eth_cls_key_size(u64 key);
++int dpaa2_eth_cls_fld_off(int prot, int field);
++void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
+
+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
-+
-+int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
-+ enum dpaa2_eth_rx_dist type, u32 key_fields);
+
#endif /* __DPAA2_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-@@ -1,5 +1,5 @@
+@@ -1,35 +1,10 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Freescale Semiconductor nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -62,6 +62,7 @@ static char dpaa2_ethtool_extras[][ETH_G
+ */
+
++#include <linux/net_tstamp.h>
++
+ #include "dpni.h" /* DPNI_LINK_OPT_* */
+ #include "dpaa2-eth.h"
+
+@@ -52,6 +27,10 @@ static char dpaa2_ethtool_stats[][ETH_GS
+ "[hw] rx nobuffer discards",
+ "[hw] tx discarded frames",
+ "[hw] tx confirmed frames",
++ "[hw] tx dequeued bytes",
++ "[hw] tx dequeued frames",
++ "[hw] tx rejected bytes",
++ "[hw] tx rejected frames",
+ };
+
+ #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+@@ -62,6 +41,7 @@ static char dpaa2_ethtool_extras[][ETH_G
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] enqueue portal busy",
-@@ -69,6 +70,15 @@ static char dpaa2_ethtool_extras[][ETH_G
+@@ -69,6 +49,12 @@ static char dpaa2_ethtool_extras[][ETH_G
"[drv] dequeue portal busy",
"[drv] channel pull errors",
"[drv] cdan",
-+ "[drv] tx congestion state",
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
+ /* FQ stats */
+ "rx pending frames",
+ "rx pending bytes",
+ "tx conf pending frames",
+ "tx conf pending bytes",
+ "buffer count"
-+#endif
};
#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
-@@ -76,10 +86,15 @@ static char dpaa2_ethtool_extras[][ETH_G
+@@ -76,14 +62,55 @@ static char dpaa2_ethtool_extras[][ETH_G
static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, dpaa2_eth_drv_version,
- sizeof(drvinfo->version));
+- strlcpy(drvinfo->version, dpaa2_eth_drv_version,
+- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
-@@ -113,25 +128,37 @@ out:
+
++#define DPNI_LINK_AUTONEG_VER_MAJOR 7
++#define DPNI_LINK_AUTONEG_VER_MINOR 8
++
++struct dpaa2_eth_link_mode_map {
++ u64 dpni_lm;
++ u64 ethtool_lm;
++};
++
++static const struct dpaa2_eth_link_mode_map dpaa2_eth_lm_map[] = {
++ {DPNI_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
++ {DPNI_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
++ {DPNI_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
++ {DPNI_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
++ {DPNI_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
++ {DPNI_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
++};
++
++static void link_mode_dpni2ethtool(u64 dpni_lm, unsigned long *ethtool_lm)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
++ if (dpni_lm & dpaa2_eth_lm_map[i].dpni_lm)
++ __set_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm);
++ }
++}
++
++static void link_mode_ethtool2dpni(const unsigned long *ethtool_lm,
++ u64 *dpni_lm)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_lm_map); i++) {
++ if (test_bit(dpaa2_eth_lm_map[i].ethtool_lm, ethtool_lm))
++ *dpni_lm |= dpaa2_eth_lm_map[i].dpni_lm;
++ }
++}
++
+ static int
+ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *link_settings)
+@@ -92,17 +119,27 @@ dpaa2_eth_get_link_ksettings(struct net_
+ int err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+- if (err) {
+- netdev_err(net_dev, "ERROR %d getting link state\n", err);
+- goto out;
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
++ DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token,
++ &state);
++ if (err) {
++ netdev_err(net_dev, "dpni_get_link_state failed\n");
++ goto out;
++ }
++ } else {
++ err = dpni_get_link_state_v2(priv->mc_io, 0, priv->mc_token,
++ &state);
++ if (err) {
++ netdev_err(net_dev, "dpni_get_link_state_v2 failed\n");
++ goto out;
++ }
++ link_mode_dpni2ethtool(state.supported,
++ link_settings->link_modes.supported);
++ link_mode_dpni2ethtool(state.advertising,
++ link_settings->link_modes.advertising);
+ }
+
+- /* At the moment, we have no way of interrogating the DPMAC
+- * from the DPNI side - and for that matter there may exist
+- * no DPMAC at all. So for now we just don't report anything
+- * beyond the DPNI attributes.
+- */
+ if (state.options & DPNI_LINK_OPT_AUTONEG)
+ link_settings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+@@ -113,25 +150,37 @@ out:
return err;
}
cfg.rate = link_settings->base.speed;
if (link_settings->base.autoneg == AUTONEG_ENABLE)
cfg.options |= DPNI_LINK_OPT_AUTONEG;
-@@ -149,6 +176,81 @@ dpaa2_eth_set_link_ksettings(struct net_
- */
- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+@@ -142,13 +191,92 @@ dpaa2_eth_set_link_ksettings(struct net_
+ else
+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_LINK_AUTONEG_VER_MAJOR,
++				   DPNI_LINK_AUTONEG_VER_MINOR) < 0) {
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ } else {
++ link_mode_ethtool2dpni(link_settings->link_modes.advertising,
++ &cfg.advertising);
++		err = dpni_set_link_cfg_v2(priv->mc_io, 0, priv->mc_token, &cfg);
++ }
++ if (err)
++ netdev_err(net_dev, "dpni_set_link_cfg failed");
++
+out:
+ return err;
+}
+ else
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err) {
+ netdev_dbg(net_dev, "Error setting link\n");
+ goto out;
+
+ priv->tx_pause_frames = pause->tx_pause;
+ err = set_rx_taildrop(priv);
-+ if (err)
+ if (err)
+- /* ethtool will be loud enough if we return an error; no point
+- * in putting our own error message on the console by default
+- */
+- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+ netdev_dbg(net_dev, "Error configuring taildrop\n");
-+
+
+out:
return err;
}
-@@ -192,6 +294,13 @@ static void dpaa2_eth_get_ethtool_stats(
+@@ -192,6 +320,10 @@ static void dpaa2_eth_get_ethtool_stats(
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
-+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
+ u32 fcnt, bcnt;
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ u32 buf_cnt;
-+#endif
u64 cdan = 0;
u64 portal_busy = 0, pull_err = 0;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-@@ -204,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(
+@@ -202,9 +334,9 @@ static void dpaa2_eth_get_ethtool_stats(
+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
/* Print standard counters, from DPNI statistics */
- for (j = 0; j <= 2; j++) {
+- for (j = 0; j <= 2; j++) {
++ for (j = 0; j <= 3; j++) {
err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
- j, &dpni_stats);
+ j, 0, &dpni_stats);
if (err != 0)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
switch (j) {
-@@ -240,12 +349,474 @@ static void dpaa2_eth_get_ethtool_stats(
+@@ -217,6 +349,9 @@ static void dpaa2_eth_get_ethtool_stats(
+ case 2:
+ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
+ break;
++ case 3:
++ num_cnt = sizeof(dpni_stats.page_3) / sizeof(u64);
++ break;
+ }
+ for (k = 0; k < num_cnt; k++)
+ *(data + i++) = dpni_stats.raw.counter[k];
+@@ -240,12 +375,410 @@ static void dpaa2_eth_get_ethtool_stats(
*(data + i++) = portal_busy;
*(data + i++) = pull_err;
*(data + i++) = cdan;
+
-+ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
-+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Print FQ instantaneous counts */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+ return;
+ }
+ *(data + i++) = buf_cnt;
-+#endif
+}
+
-+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
++static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
++ void *key, void *mask, u64 *fields)
+{
-+ int i, off = 0;
++ int off;
+
-+ for (i = 0; i < priv->num_dist_fields; i++) {
-+ if (priv->dist_fields[i].cls_prot == prot &&
-+ priv->dist_fields[i].cls_field == field)
-+ return off;
-+ off += priv->dist_fields[i].size;
++ if (eth_mask->h_proto) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(__be16 *)(key + off) = eth_value->h_proto;
++ *(__be16 *)(mask + off) = eth_mask->h_proto;
++ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+ }
+
-+ return -1;
-+}
-+
-+static u8 cls_key_size(struct dpaa2_eth_priv *priv)
-+{
-+ u8 i, size = 0;
-+
-+ for (i = 0; i < priv->num_dist_fields; i++)
-+ size += priv->dist_fields[i].size;
-+
-+ return size;
-+}
-+
-+void check_cls_support(struct dpaa2_eth_priv *priv)
-+{
-+ u8 key_size = cls_key_size(priv);
-+ struct device *dev = priv->net_dev->dev.parent;
-+
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ if (priv->dpni_attrs.fs_key_size < key_size) {
-+ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
-+ priv->dpni_attrs.fs_key_size,
-+ key_size);
-+ goto disable_fs;
-+ }
-+ if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
-+ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
-+ DPKG_MAX_NUM_OF_EXTRACTS);
-+ goto disable_fs;
-+ }
++ if (!is_zero_ether_addr(eth_mask->h_source)) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
++ ether_addr_copy(key + off, eth_value->h_source);
++ ether_addr_copy(mask + off, eth_mask->h_source);
++ *fields |= DPAA2_ETH_DIST_ETHSRC;
+ }
+
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ if (!dpaa2_eth_hash_enabled(priv)) {
-+ dev_info(dev, "Insufficient queues. Steering is disabled\n");
-+ goto disable_fs;
-+ }
-+
-+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
-+ dev_info(dev, "Key masks not supported. Steering is disabled\n");
-+ goto disable_fs;
-+ }
++ if (!is_zero_ether_addr(eth_mask->h_dest)) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + off, eth_value->h_dest);
++ ether_addr_copy(mask + off, eth_mask->h_dest);
++ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
-+ return;
-+
-+disable_fs:
-+ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
-+ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
++ return 0;
+}
+
-+static int prep_l4_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_tcpip4_spec *l4_value,
-+ struct ethtool_tcpip4_spec *l4_mask,
-+ void *key, void *mask, u8 l4_proto)
++static int prep_user_ip_rule(struct ethtool_usrip4_spec *uip_value,
++ struct ethtool_usrip4_spec *uip_mask,
++ void *key, void *mask, u64 *fields)
+{
-+ int offset;
++ int off;
++ u32 tmp_value, tmp_mask;
+
-+ if (l4_mask->tos) {
-+ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
++ if (uip_mask->tos || uip_mask->ip_ver)
+ return -EOPNOTSUPP;
-+ }
-+
-+ if (l4_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = l4_value->ip4src;
-+ *(u32 *)(mask + offset) = l4_mask->ip4src;
-+ }
+
-+ if (l4_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = l4_value->ip4dst;
-+ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++ if (uip_mask->ip4src) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
++ *(__be32 *)(key + off) = uip_value->ip4src;
++ *(__be32 *)(mask + off) = uip_mask->ip4src;
++ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
-+ if (l4_mask->psrc) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u32 *)(key + offset) = l4_value->psrc;
-+ *(u32 *)(mask + offset) = l4_mask->psrc;
++ if (uip_mask->ip4dst) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
++ *(__be32 *)(key + off) = uip_value->ip4dst;
++ *(__be32 *)(mask + off) = uip_mask->ip4dst;
++ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
-+ if (l4_mask->pdst) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u32 *)(key + offset) = l4_value->pdst;
-+ *(u32 *)(mask + offset) = l4_mask->pdst;
++ if (uip_mask->proto) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + off) = uip_value->proto;
++ *(u8 *)(mask + off) = uip_mask->proto;
++ *fields |= DPAA2_ETH_DIST_IPPROTO;
+ }
+
-+ /* Only apply the rule for the user-specified L4 protocol
-+ * and if ethertype matches IPv4
-+ */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
-+
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u8 *)(key + offset) = l4_proto;
-+ *(u8 *)(mask + offset) = 0xFF;
-+
-+ /* TODO: check IP version */
-+
-+ return 0;
-+}
-+
-+static int prep_eth_rule(struct dpaa2_eth_priv *priv,
-+ struct ethhdr *eth_value, struct ethhdr *eth_mask,
-+ void *key, void *mask)
-+{
-+ int offset;
++ if (uip_mask->l4_4_bytes) {
++ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
++ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
-+ if (eth_mask->h_proto) {
-+ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
-+ return -EOPNOTSUPP;
-+ }
++ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(__be16 *)(key + off) = htons(tmp_value >> 16);
++ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
++ *fields |= DPAA2_ETH_DIST_L4SRC;
+
-+ if (!is_zero_ether_addr(eth_mask->h_source)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
-+ ether_addr_copy(key + offset, eth_value->h_source);
-+ ether_addr_copy(mask + offset, eth_mask->h_source);
++ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
++ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
++ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
-+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, eth_value->h_dest);
-+ ether_addr_copy(mask + offset, eth_mask->h_dest);
-+ }
++ /* Only apply the rule for IPv4 frames */
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(__be16 *)(key + off) = htons(ETH_P_IP);
++ *(__be16 *)(mask + off) = htons(0xFFFF);
++ *fields |= DPAA2_ETH_DIST_ETHTYPE;
+
+ return 0;
+}
+
-+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_usrip4_spec *uip_value,
-+ struct ethtool_usrip4_spec *uip_mask,
-+ void *key, void *mask)
++static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
++ struct ethtool_tcpip4_spec *l4_mask,
++ void *key, void *mask, u8 l4_proto, u64 *fields)
+{
-+ int offset;
++ int off;
+
-+ if (uip_mask->tos)
++ if (l4_mask->tos)
+ return -EOPNOTSUPP;
-+
-+ if (uip_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = uip_value->ip4src;
-+ *(u32 *)(mask + offset) = uip_mask->ip4src;
++ if (l4_mask->ip4src) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
++ *(__be32 *)(key + off) = l4_value->ip4src;
++ *(__be32 *)(mask + off) = l4_mask->ip4src;
++ *fields |= DPAA2_ETH_DIST_IPSRC;
+ }
+
-+ if (uip_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = uip_value->ip4dst;
-+ *(u32 *)(mask + offset) = uip_mask->ip4dst;
++ if (l4_mask->ip4dst) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
++ *(__be32 *)(key + off) = l4_value->ip4dst;
++ *(__be32 *)(mask + off) = l4_mask->ip4dst;
++ *fields |= DPAA2_ETH_DIST_IPDST;
+ }
+
-+ if (uip_mask->proto) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u32 *)(key + offset) = uip_value->proto;
-+ *(u32 *)(mask + offset) = uip_mask->proto;
++ if (l4_mask->psrc) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(__be16 *)(key + off) = l4_value->psrc;
++ *(__be16 *)(mask + off) = l4_mask->psrc;
++ *fields |= DPAA2_ETH_DIST_L4SRC;
+ }
-+ if (uip_mask->l4_4_bytes) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
+
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
++ if (l4_mask->pdst) {
++ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(__be16 *)(key + off) = l4_value->pdst;
++ *(__be16 *)(mask + off) = l4_mask->pdst;
++ *fields |= DPAA2_ETH_DIST_L4DST;
+ }
+
-+ /* Ethertype must be IP */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
++ /* Only apply the rule for the user-specified L4 protocol
++ * and if ethertype matches IPv4
++ */
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(__be16 *)(key + off) = htons(ETH_P_IP);
++ *(__be16 *)(mask + off) = htons(0xFFFF);
++ *fields |= DPAA2_ETH_DIST_ETHTYPE;
++
++ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + off) = l4_proto;
++ *(u8 *)(mask + off) = 0xFF;
++ *fields |= DPAA2_ETH_DIST_IPPROTO;
+
+ return 0;
+}
+
-+static int prep_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
++static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
++ void *key, void *mask, u64 *fields)
+{
-+ int offset;
++ int off;
+
+ if (ext_mask->vlan_etype)
+ return -EOPNOTSUPP;
+
+ if (ext_mask->vlan_tci) {
-+ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
-+ *(u16 *)(key + offset) = ext_value->vlan_tci;
-+ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
++ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
++ *(__be16 *)(key + off) = ext_value->vlan_tci;
++ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
++ *fields |= DPAA2_ETH_DIST_VLAN;
+ }
+
+ return 0;
+}
+
-+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
++static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
++ void *key, void *mask, u64 *fields)
+{
-+ int offset;
++ int off;
+
+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, ext_value->h_dest);
-+ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + off, ext_value->h_dest);
++ ether_addr_copy(mask + off, ext_mask->h_dest);
++ *fields |= DPAA2_ETH_DIST_ETHDST;
+ }
+
+ return 0;
+}
+
-+static int prep_cls_rule(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ void *key)
++static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
++ u64 *fields)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const u8 key_size = cls_key_size(priv);
-+ void *msk = key + key_size;
+ int err;
+
-+ memset(key, 0, key_size * 2);
-+
-+ switch (fs->flow_type & 0xff) {
++ switch (fs->flow_type & 0xFF) {
++ case ETHER_FLOW:
++ err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
++ key, mask, fields);
++ break;
++ case IP_USER_FLOW:
++ err = prep_user_ip_rule(&fs->h_u.usr_ip4_spec,
++ &fs->m_u.usr_ip4_spec, key, mask, fields);
++ break;
+ case TCP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
-+ &fs->m_u.tcp_ip4_spec, key, msk,
-+ IPPROTO_TCP);
++ err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
++ key, mask, IPPROTO_TCP, fields);
+ break;
+ case UDP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
-+ &fs->m_u.udp_ip4_spec, key, msk,
-+ IPPROTO_UDP);
++ err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
++ key, mask, IPPROTO_UDP, fields);
+ break;
+ case SCTP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
-+ &fs->m_u.sctp_ip4_spec, key, msk,
-+ IPPROTO_SCTP);
-+ break;
-+ case ETHER_FLOW:
-+ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
-+ &fs->m_u.ether_spec, key, msk);
-+ break;
-+ case IP_USER_FLOW:
-+ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
-+ &fs->m_u.usr_ip4_spec, key, msk);
++ err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec,
++ key, mask, IPPROTO_SCTP, fields);
+ break;
+ default:
-+ /* TODO: AH, ESP */
+ return -EOPNOTSUPP;
+ }
++
+ if (err)
+ return err;
+
+ if (fs->flow_type & FLOW_EXT) {
-+ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ if (err)
+ return err;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
-+ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
++ fields);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
-+static int del_cls(struct net_device *net_dev, int location);
-+
-+static int do_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ bool add)
++static int do_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ struct dpni_rule_cfg rule_cfg;
++ struct dpni_rule_cfg rule_cfg = { 0 };
+ struct dpni_fs_action_cfg fs_act = { 0 };
-+ void *dma_mem;
-+ int err = 0, tc;
-+
-+ if (!dpaa2_eth_fs_enabled(priv)) {
-+ netdev_err(net_dev, "dev does not support steering!\n");
-+ /* dev doesn't support steering */
-+ return -EOPNOTSUPP;
-+ }
++ dma_addr_t key_iova;
++ u64 fields = 0;
++ void *key_buf;
++ int i, err = 0;
+
-+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
-+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
-+ fs->location >= rule_cnt)
++ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
++ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+ return -EINVAL;
+
-+ /* When adding a new rule, check if location if available
-+ * and if not, free the existing table entry before inserting
-+ * the new one
-+ */
-+ if (add && (priv->cls_rule[fs->location].in_use == true))
-+ del_cls(net_dev, fs->location);
-+
-+ memset(&rule_cfg, 0, sizeof(rule_cfg));
-+ rule_cfg.key_size = cls_key_size(priv);
++ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
+
+ /* allocate twice the key size, for the actual key and for mask */
-+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
++ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
++ if (!key_buf)
+ return -ENOMEM;
+
-+ err = prep_cls_rule(net_dev, fs, dma_mem);
++ /* Fill the key and mask memory areas */
++ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ if (err)
-+ goto err_free_mem;
++ goto free_mem;
++
++ if (!dpaa2_eth_fs_mask_enabled(priv)) {
++ /* Masking allows us to configure a maximal key during init and
++ * use it for all flow steering rules. Without it, we include
++ * in the key only the fields actually used, so we need to
++ * extract the others from the final key buffer.
++ *
++ * Program the FS key if needed, or return error if previously
++ * set key can't be used for the current rule. User needs to
++ * delete existing rules in this case to allow for the new one.
++ */
++ if (!priv->rx_cls_fields) {
++ err = dpaa2_eth_set_cls(net_dev, fields);
++ if (err)
++ goto free_mem;
++
++ priv->rx_cls_fields = fields;
++ } else if (priv->rx_cls_fields != fields) {
++ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
++ err = -EOPNOTSUPP;
++ goto free_mem;
++ }
+
-+ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
-+ rule_cfg.key_size * 2,
-+ DMA_TO_DEVICE);
++ dpaa2_eth_cls_trim_rule(key_buf, fields);
++ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
++ }
+
-+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_iova)) {
++ err = -ENOMEM;
++ goto free_mem;
++ }
+
-+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
-+ fs_act.options |= DPNI_FS_OPT_DISCARD;
-+ else
-+ fs_act.flow_id = fs->ring_cookie;
++ rule_cfg.key_iova = key_iova;
++ if (dpaa2_eth_fs_mask_enabled(priv))
++ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
-+ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
++ if (add) {
++ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
++ fs_act.options |= DPNI_FS_OPT_DISCARD;
++ else
++ fs_act.flow_id = fs->ring_cookie;
++ }
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ if (add)
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
-+ tc, fs->location, &rule_cfg,
++ i, fs->location, &rule_cfg,
+ &fs_act);
+ else
+ err = dpni_remove_fs_entry(priv->mc_io, 0,
-+ priv->mc_token, tc,
++ priv->mc_token, i,
+ &rule_cfg);
-+
+ if (err)
+ break;
+ }
+
-+ dma_unmap_single(dev, rule_cfg.key_iova,
-+ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
-+ if (err)
-+ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
-+
-+err_free_mem:
-+ kfree(dma_mem);
++free_mem:
++ kfree(key_buf);
+
+ return err;
+}
+
-+static int add_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs)
++static int num_rules(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ err = do_cls(net_dev, fs, true);
-+ if (err)
-+ return err;
++ int i, rules = 0;
+
-+ priv->cls_rule[fs->location].in_use = true;
-+ priv->cls_rule[fs->location].fs = *fs;
++ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
++ if (priv->cls_rule[i].in_use)
++ rules++;
+
-+ return 0;
++ return rules;
+}
+
-+static int del_cls(struct net_device *net_dev, int location)
++static int update_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *new_fs,
++ int location)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
++ struct dpaa2_eth_cls_rule *rule;
++ int err = -EINVAL;
+
-+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
-+ if (err)
-+ return err;
++ if (!priv->rx_cls_enabled)
++ return -EOPNOTSUPP;
+
-+ priv->cls_rule[location].in_use = false;
++ if (location >= dpaa2_eth_fs_count(priv))
++ return -EINVAL;
+
-+ return 0;
-+}
++ rule = &priv->cls_rule[location];
+
-+static int set_hash(struct net_device *net_dev, u64 data)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u32 key = 0;
-+ int i;
++ /* If a rule is present at the specified location, delete it. */
++ if (rule->in_use) {
++ err = do_cls_rule(net_dev, &rule->fs, false);
++ if (err)
++ return err;
+
-+ if (data & RXH_DISCARD)
-+ return -EOPNOTSUPP;
++ rule->in_use = 0;
+
-+ for (i = 0; i < priv->num_dist_fields; i++)
-+ if (priv->dist_fields[i].rxnfc_field & data)
-+ key |= priv->dist_fields[i].id;
++ if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
++ priv->rx_cls_fields = 0;
++ }
+
-+ return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
-+}
++ /* If no new entry to add, return here */
++ if (!new_fs)
++ return err;
+
-+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc)
-+{
-+ int err = 0;
++ err = do_cls_rule(net_dev, new_fs, true);
++ if (err)
++ return err;
+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_SRXCLSRLINS:
-+ err = add_cls(net_dev, &rxnfc->fs);
-+ break;
-+ case ETHTOOL_SRXCLSRLDEL:
-+ err = del_cls(net_dev, rxnfc->fs.location);
-+ break;
-+ case ETHTOOL_SRXFH:
-+ err = set_hash(net_dev, rxnfc->data);
-+ break;
-+ default:
-+ err = -EOPNOTSUPP;
-+ }
++ rule->in_use = 1;
++ rule->fs = *new_fs;
+
-+ return err;
++ return 0;
}
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ int i, j;
++ int rule_cnt = dpaa2_eth_fs_count(priv);
++ int i, j = 0;
switch (rxnfc->cmd) {
case ETHTOOL_GRXFH:
-@@ -258,6 +829,33 @@ static int dpaa2_eth_get_rxnfc(struct ne
+@@ -258,6 +791,29 @@ static int dpaa2_eth_get_rxnfc(struct ne
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
-+
+ case ETHTOOL_GRXCLSRLCNT:
-+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
-+ if (priv->cls_rule[i].in_use)
-+ rxnfc->rule_cnt++;
++ rxnfc->rule_cnt = 0;
++ rxnfc->rule_cnt = num_rules(priv);
+ rxnfc->data = rule_cnt;
+ break;
-+
+ case ETHTOOL_GRXCLSRULE:
++ if (rxnfc->fs.location >= rule_cnt)
++ return -EINVAL;
+ if (!priv->cls_rule[rxnfc->fs.location].in_use)
+ return -EINVAL;
-+
+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
+ break;
-+
+ case ETHTOOL_GRXCLSRLALL:
-+ for (i = 0, j = 0; i < rule_cnt; i++) {
++ for (i = 0; i < rule_cnt; i++) {
+ if (!priv->cls_rule[i].in_use)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ rxnfc->rule_cnt = j;
+ rxnfc->data = rule_cnt;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+@@ -265,13 +821,61 @@ static int dpaa2_eth_get_rxnfc(struct ne
+ return 0;
+ }
+
++int dpaa2_phc_index = -1;
++EXPORT_SYMBOL(dpaa2_phc_index);
++
++static int dpaa2_eth_get_ts_info(struct net_device *dev,
++ struct ethtool_ts_info *info)
++{
++ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++
++ info->phc_index = dpaa2_phc_index;
++
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
++ (1 << HWTSTAMP_TX_ON);
++
++ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_ALL);
++ return 0;
++}
++
++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
++{
++ int err = 0;
++
++ switch (rxnfc->cmd) {
++ case ETHTOOL_SRXFH:
++ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
++ return -EOPNOTSUPP;
++ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
++ break;
++ case ETHTOOL_SRXCLSRLINS:
++ err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
++ break;
++ case ETHTOOL_SRXCLSRLDEL:
++ err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ }
+
- default:
- return -EOPNOTSUPP;
- }
-@@ -270,8 +868,11 @@ const struct ethtool_ops dpaa2_ethtool_o
++ return err;
++}
++
+ const struct ethtool_ops dpaa2_ethtool_ops = {
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
.set_link_ksettings = dpaa2_eth_set_link_ksettings,
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
+ .set_rxnfc = dpaa2_eth_set_rxnfc,
++ .get_ts_info = dpaa2_eth_get_ts_info,
+ };
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -1,39 +1,10 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+ */
+ #ifndef __FSL_DPKG_H_
+ #define __FSL_DPKG_H_
+
+ #include <linux/types.h>
+-#include "net.h"
+
+ /* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+@@ -86,6 +57,355 @@ struct dpkg_mask {
+ u8 offset;
};
+
++/* Protocol fields */
++
++/* Ethernet fields */
++#define NH_FLD_ETH_DA BIT(0)
++#define NH_FLD_ETH_SA BIT(1)
++#define NH_FLD_ETH_LENGTH BIT(2)
++#define NH_FLD_ETH_TYPE BIT(3)
++#define NH_FLD_ETH_FINAL_CKSUM BIT(4)
++#define NH_FLD_ETH_PADDING BIT(5)
++#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1)
++
++/* VLAN fields */
++#define NH_FLD_VLAN_VPRI BIT(0)
++#define NH_FLD_VLAN_CFI BIT(1)
++#define NH_FLD_VLAN_VID BIT(2)
++#define NH_FLD_VLAN_LENGTH BIT(3)
++#define NH_FLD_VLAN_TYPE BIT(4)
++#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1)
++
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
++
++/* IP (generic) fields */
++#define NH_FLD_IP_VER BIT(0)
++#define NH_FLD_IP_DSCP BIT(2)
++#define NH_FLD_IP_ECN BIT(3)
++#define NH_FLD_IP_PROTO BIT(4)
++#define NH_FLD_IP_SRC BIT(5)
++#define NH_FLD_IP_DST BIT(6)
++#define NH_FLD_IP_TOS_TC BIT(7)
++#define NH_FLD_IP_ID BIT(8)
++#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1)
++
++/* IPV4 fields */
++#define NH_FLD_IPV4_VER BIT(0)
++#define NH_FLD_IPV4_HDR_LEN BIT(1)
++#define NH_FLD_IPV4_TOS BIT(2)
++#define NH_FLD_IPV4_TOTAL_LEN BIT(3)
++#define NH_FLD_IPV4_ID BIT(4)
++#define NH_FLD_IPV4_FLAG_D BIT(5)
++#define NH_FLD_IPV4_FLAG_M BIT(6)
++#define NH_FLD_IPV4_OFFSET BIT(7)
++#define NH_FLD_IPV4_TTL BIT(8)
++#define NH_FLD_IPV4_PROTO BIT(9)
++#define NH_FLD_IPV4_CKSUM BIT(10)
++#define NH_FLD_IPV4_SRC_IP BIT(11)
++#define NH_FLD_IPV4_DST_IP BIT(12)
++#define NH_FLD_IPV4_OPTS BIT(13)
++#define NH_FLD_IPV4_OPTS_COUNT BIT(14)
++#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1)
++
++/* IPV6 fields */
++#define NH_FLD_IPV6_VER BIT(0)
++#define NH_FLD_IPV6_TC BIT(1)
++#define NH_FLD_IPV6_SRC_IP BIT(2)
++#define NH_FLD_IPV6_DST_IP BIT(3)
++#define NH_FLD_IPV6_NEXT_HDR BIT(4)
++#define NH_FLD_IPV6_FL BIT(5)
++#define NH_FLD_IPV6_HOP_LIMIT BIT(6)
++#define NH_FLD_IPV6_ID BIT(7)
++#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1)
++
++/* ICMP fields */
++#define NH_FLD_ICMP_TYPE BIT(0)
++#define NH_FLD_ICMP_CODE BIT(1)
++#define NH_FLD_ICMP_CKSUM BIT(2)
++#define NH_FLD_ICMP_ID BIT(3)
++#define NH_FLD_ICMP_SQ_NUM BIT(4)
++#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1)
++
++/* IGMP fields */
++#define NH_FLD_IGMP_VERSION BIT(0)
++#define NH_FLD_IGMP_TYPE BIT(1)
++#define NH_FLD_IGMP_CKSUM BIT(2)
++#define NH_FLD_IGMP_DATA BIT(3)
++#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1)
++
++/* TCP fields */
++#define NH_FLD_TCP_PORT_SRC BIT(0)
++#define NH_FLD_TCP_PORT_DST BIT(1)
++#define NH_FLD_TCP_SEQ BIT(2)
++#define NH_FLD_TCP_ACK BIT(3)
++#define NH_FLD_TCP_OFFSET BIT(4)
++#define NH_FLD_TCP_FLAGS BIT(5)
++#define NH_FLD_TCP_WINDOW BIT(6)
++#define NH_FLD_TCP_CKSUM BIT(7)
++#define NH_FLD_TCP_URGPTR BIT(8)
++#define NH_FLD_TCP_OPTS BIT(9)
++#define NH_FLD_TCP_OPTS_COUNT BIT(10)
++#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1)
++
++/* UDP fields */
++#define NH_FLD_UDP_PORT_SRC BIT(0)
++#define NH_FLD_UDP_PORT_DST BIT(1)
++#define NH_FLD_UDP_LEN BIT(2)
++#define NH_FLD_UDP_CKSUM BIT(3)
++#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1)
++
++/* UDP-lite fields */
++#define NH_FLD_UDP_LITE_PORT_SRC BIT(0)
++#define NH_FLD_UDP_LITE_PORT_DST BIT(1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1)
++
++/* UDP-encap-ESP fields */
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1)
++#define NH_FLD_UDP_ENC_ESP_LEN BIT(2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3)
++#define NH_FLD_UDP_ENC_ESP_SPI BIT(4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1)
++
++/* SCTP fields */
++#define NH_FLD_SCTP_PORT_SRC BIT(0)
++#define NH_FLD_SCTP_PORT_DST BIT(1)
++#define NH_FLD_SCTP_VER_TAG BIT(2)
++#define NH_FLD_SCTP_CKSUM BIT(3)
++#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1)
++
++/* DCCP fields */
++#define NH_FLD_DCCP_PORT_SRC BIT(0)
++#define NH_FLD_DCCP_PORT_DST BIT(1)
++#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1)
++
++/* IPHC fields */
++#define NH_FLD_IPHC_CID BIT(0)
++#define NH_FLD_IPHC_CID_TYPE BIT(1)
++#define NH_FLD_IPHC_HCINDEX BIT(2)
++#define NH_FLD_IPHC_GEN BIT(3)
++#define NH_FLD_IPHC_D_BIT BIT(4)
++#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1)
++
++/* SCTP fields */
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8)
++#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1)
++
++/* L2TPV2 fields */
++#define NH_FLD_L2TPV2_TYPE_BIT BIT(0)
++#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2)
++#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4)
++#define NH_FLD_L2TPV2_VERSION BIT(5)
++#define NH_FLD_L2TPV2_LEN BIT(6)
++#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7)
++#define NH_FLD_L2TPV2_SESSION_ID BIT(8)
++#define NH_FLD_L2TPV2_NS BIT(9)
++#define NH_FLD_L2TPV2_NR BIT(10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11)
++#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12)
++#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1)
++
++/* L2TPV3 fields */
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2)
++#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5)
++#define NH_FLD_L2TPV3_CTRL_SENT BIT(6)
++#define NH_FLD_L2TPV3_CTRL_RECV BIT(7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1)
++
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0)
++#define NH_FLD_L2TPV3_SESS_VERSION BIT(1)
++#define NH_FLD_L2TPV3_SESS_ID BIT(2)
++#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1)
++
++/* PPP fields */
++#define NH_FLD_PPP_PID BIT(0)
++#define NH_FLD_PPP_COMPRESSED BIT(1)
++#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1)
++
++/* PPPoE fields */
++#define NH_FLD_PPPOE_VER BIT(0)
++#define NH_FLD_PPPOE_TYPE BIT(1)
++#define NH_FLD_PPPOE_CODE BIT(2)
++#define NH_FLD_PPPOE_SID BIT(3)
++#define NH_FLD_PPPOE_LEN BIT(4)
++#define NH_FLD_PPPOE_SESSION BIT(5)
++#define NH_FLD_PPPOE_PID BIT(6)
++#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1)
++
++/* PPP-Mux fields */
++#define NH_FLD_PPPMUX_PID BIT(0)
++#define NH_FLD_PPPMUX_CKSUM BIT(1)
++#define NH_FLD_PPPMUX_COMPRESSED BIT(2)
++#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1)
++
++/* PPP-Mux sub-frame fields */
++#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0)
++#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2)
++#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1)
++
++/* LLC fields */
++#define NH_FLD_LLC_DSAP BIT(0)
++#define NH_FLD_LLC_SSAP BIT(1)
++#define NH_FLD_LLC_CTRL BIT(2)
++#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1)
++
++/* NLPID fields */
++#define NH_FLD_NLPID_NLPID BIT(0)
++#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1)
++
++/* SNAP fields */
++#define NH_FLD_SNAP_OUI BIT(0)
++#define NH_FLD_SNAP_PID BIT(1)
++#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1)
++
++/* LLC SNAP fields */
++#define NH_FLD_LLC_SNAP_TYPE BIT(0)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1)
++
++/* ARP fields */
++#define NH_FLD_ARP_HTYPE BIT(0)
++#define NH_FLD_ARP_PTYPE BIT(1)
++#define NH_FLD_ARP_HLEN BIT(2)
++#define NH_FLD_ARP_PLEN BIT(3)
++#define NH_FLD_ARP_OPER BIT(4)
++#define NH_FLD_ARP_SHA BIT(5)
++#define NH_FLD_ARP_SPA BIT(6)
++#define NH_FLD_ARP_THA BIT(7)
++#define NH_FLD_ARP_TPA BIT(8)
++#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1)
++
++/* RFC2684 fields */
++#define NH_FLD_RFC2684_LLC BIT(0)
++#define NH_FLD_RFC2684_NLPID BIT(1)
++#define NH_FLD_RFC2684_OUI BIT(2)
++#define NH_FLD_RFC2684_PID BIT(3)
++#define NH_FLD_RFC2684_VPN_OUI BIT(4)
++#define NH_FLD_RFC2684_VPN_IDX BIT(5)
++#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1)
++
++/* User defined fields */
++#define NH_FLD_USER_DEFINED_SRCPORT BIT(0)
++#define NH_FLD_USER_DEFINED_PCDID BIT(1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1)
++
++/* Payload fields */
++#define NH_FLD_PAYLOAD_BUFFER BIT(0)
++#define NH_FLD_PAYLOAD_SIZE BIT(1)
++#define NH_FLD_MAX_FRM_SIZE BIT(2)
++#define NH_FLD_MIN_FRM_SIZE BIT(3)
++#define NH_FLD_PAYLOAD_TYPE BIT(4)
++#define NH_FLD_FRAME_SIZE BIT(5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1)
++
++/* GRE fields */
++#define NH_FLD_GRE_TYPE BIT(0)
++#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1)
++
++/* MINENCAP fields */
++#define NH_FLD_MINENCAP_SRC_IP BIT(0)
++#define NH_FLD_MINENCAP_DST_IP BIT(1)
++#define NH_FLD_MINENCAP_TYPE BIT(2)
++#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1)
++
++/* IPSEC AH fields */
++#define NH_FLD_IPSEC_AH_SPI BIT(0)
++#define NH_FLD_IPSEC_AH_NH BIT(1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1)
++
++/* IPSEC ESP fields */
++#define NH_FLD_IPSEC_ESP_SPI BIT(0)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1)
++
++/* MPLS fields */
++#define NH_FLD_MPLS_LABEL_STACK BIT(0)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1)
++
++/* MACSEC fields */
++#define NH_FLD_MACSEC_SECTAG BIT(0)
++#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1)
++
++/* GTP fields */
++#define NH_FLD_GTP_TEID BIT(0)
++
++/* Supported protocols */
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
++
++ NET_PROT_DUMMY_LAST
++};
++
+ /**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-@@ -39,9 +39,11 @@
+@@ -1,34 +1,6 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+ */
+ #ifndef _FSL_DPNI_CMD_H
+ #define _FSL_DPNI_CMD_H
+@@ -39,9 +11,11 @@
#define DPNI_VER_MAJOR 7
#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-@@ -64,7 +66,7 @@
+@@ -64,16 +38,18 @@
#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
-@@ -73,7 +75,7 @@
+ #define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+ #define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
++#define DPNI_CMDID_GET_LINK_STATE_V2 DPNI_CMD_V2(0x215)
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
++#define DPNI_CMDID_SET_LINK_CFG_V2 DPNI_CMD_V2(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
-@@ -87,11 +89,16 @@
+@@ -87,11 +63,16 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
-@@ -110,6 +117,9 @@
+@@ -110,6 +91,9 @@
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
-@@ -126,13 +136,14 @@ struct dpni_cmd_open {
+@@ -126,13 +110,14 @@ struct dpni_cmd_open {
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
struct dpni_cmd_set_pools {
__le16 buffer_size[DPNI_MAX_DPBP];
};
-@@ -303,6 +314,7 @@ struct dpni_rsp_get_tx_data_offset {
+@@ -303,6 +288,7 @@ struct dpni_rsp_get_tx_data_offset {
struct dpni_cmd_get_statistics {
u8 page_number;
};
struct dpni_rsp_get_statistics {
-@@ -335,6 +347,22 @@ struct dpni_rsp_get_link_state {
+@@ -319,8 +305,22 @@ struct dpni_cmd_set_link_cfg {
+ __le64 options;
+ };
+
++struct dpni_cmd_set_link_cfg_v2 {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++ /* cmd word 3 */
++ __le64 advertising;
++};
++
+ #define DPNI_LINK_STATE_SHIFT 0
+ #define DPNI_LINK_STATE_SIZE 1
++#define DPNI_STATE_VALID_SHIFT 1
++#define DPNI_STATE_VALID_SIZE 1
+
+ struct dpni_rsp_get_link_state {
+ /* response word 0 */
+@@ -335,6 +335,39 @@ struct dpni_rsp_get_link_state {
__le64 options;
};
++struct dpni_rsp_get_link_state_v2 {
++ /* response word 0 */
++ __le32 pad0;
++ /* from LSB: up:1, valid:1 */
++ u8 flags;
++ u8 pad1[3];
++ /* response word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* response word 2 */
++ __le64 options;
++ /* cmd word 3 */
++ __le64 supported;
++ /* cmd word 4 */
++ __le64 advertising;
++};
++
+#define DPNI_COUPLED_SHIFT 0
+#define DPNI_COUPLED_SIZE 1
+
struct dpni_cmd_set_max_frame_length {
__le16 max_frame_length;
};
-@@ -394,6 +422,24 @@ struct dpni_cmd_clear_mac_filters {
+@@ -394,6 +427,24 @@ struct dpni_cmd_clear_mac_filters {
u8 flags;
};
#define DPNI_DIST_MODE_SHIFT 0
#define DPNI_DIST_MODE_SIZE 4
#define DPNI_MISS_ACTION_SHIFT 4
-@@ -503,6 +549,63 @@ struct dpni_cmd_set_queue {
+@@ -503,6 +554,63 @@ struct dpni_cmd_set_queue {
__le64 user_context;
};
struct dpni_cmd_set_taildrop {
/* cmd word 0 */
u8 congestion_point;
-@@ -538,4 +641,79 @@ struct dpni_rsp_get_taildrop {
+@@ -538,4 +646,79 @@ struct dpni_rsp_get_taildrop {
__le32 threshold;
};
#endif /* _FSL_DPNI_CMD_H */
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -122,7 +122,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
+@@ -1,34 +1,6 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ /* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+ */
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -122,7 +94,7 @@ int dpni_open(struct fsl_mc_io *mc_io,
int dpni_id,
u16 *token)
{
struct dpni_cmd_open *cmd_params;
int err;
-@@ -160,7 +160,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
+@@ -160,7 +132,7 @@ int dpni_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
-@@ -188,7 +188,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
+@@ -188,7 +160,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_
u16 token,
const struct dpni_pools_cfg *cfg)
{
struct dpni_cmd_set_pools *cmd_params;
int i;
-@@ -199,7 +199,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
+@@ -199,7 +171,10 @@ int dpni_set_pools(struct fsl_mc_io *mc_
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
cmd_params->buffer_size[i] =
cpu_to_le16(cfg->pools[i].buffer_size);
cmd_params->backup_pool_mask |=
-@@ -222,7 +225,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
+@@ -222,7 +197,7 @@ int dpni_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
-@@ -245,7 +248,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
+@@ -245,7 +220,7 @@ int dpni_disable(struct fsl_mc_io *mc_io
u32 cmd_flags,
u16 token)
{
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
-@@ -270,7 +273,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
+@@ -270,7 +245,7 @@ int dpni_is_enabled(struct fsl_mc_io *mc
u16 token,
int *en)
{
struct dpni_rsp_is_enabled *rsp_params;
int err;
-@@ -303,7 +306,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
+@@ -303,7 +278,7 @@ int dpni_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
-@@ -335,7 +338,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
+@@ -335,7 +310,7 @@ int dpni_set_irq_enable(struct fsl_mc_io
u8 irq_index,
u8 en)
{
struct dpni_cmd_set_irq_enable *cmd_params;
/* prepare command */
-@@ -366,7 +369,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
+@@ -366,7 +341,7 @@ int dpni_get_irq_enable(struct fsl_mc_io
u8 irq_index,
u8 *en)
{
struct dpni_cmd_get_irq_enable *cmd_params;
struct dpni_rsp_get_irq_enable *rsp_params;
-@@ -413,7 +416,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
+@@ -413,7 +388,7 @@ int dpni_set_irq_mask(struct fsl_mc_io *
u8 irq_index,
u32 mask)
{
struct dpni_cmd_set_irq_mask *cmd_params;
/* prepare command */
-@@ -447,7 +450,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
+@@ -447,7 +422,7 @@ int dpni_get_irq_mask(struct fsl_mc_io *
u8 irq_index,
u32 *mask)
{
struct dpni_cmd_get_irq_mask *cmd_params;
struct dpni_rsp_get_irq_mask *rsp_params;
int err;
-@@ -489,7 +492,7 @@ int dpni_get_irq_status(struct fsl_mc_io
+@@ -489,7 +464,7 @@ int dpni_get_irq_status(struct fsl_mc_io
u8 irq_index,
u32 *status)
{
struct dpni_cmd_get_irq_status *cmd_params;
struct dpni_rsp_get_irq_status *rsp_params;
int err;
-@@ -532,7 +535,7 @@ int dpni_clear_irq_status(struct fsl_mc_
+@@ -532,7 +507,7 @@ int dpni_clear_irq_status(struct fsl_mc_
u8 irq_index,
u32 status)
{
struct dpni_cmd_clear_irq_status *cmd_params;
/* prepare command */
-@@ -561,7 +564,7 @@ int dpni_get_attributes(struct fsl_mc_io
+@@ -561,7 +536,7 @@ int dpni_get_attributes(struct fsl_mc_io
u16 token,
struct dpni_attr *attr)
{
struct dpni_rsp_get_attr *rsp_params;
int err;
-@@ -609,7 +612,7 @@ int dpni_set_errors_behavior(struct fsl_
+@@ -609,7 +584,7 @@ int dpni_set_errors_behavior(struct fsl_
u16 token,
struct dpni_error_cfg *cfg)
{
struct dpni_cmd_set_errors_behavior *cmd_params;
/* prepare command */
-@@ -641,7 +644,7 @@ int dpni_get_buffer_layout(struct fsl_mc
+@@ -641,7 +616,7 @@ int dpni_get_buffer_layout(struct fsl_mc
enum dpni_queue_type qtype,
struct dpni_buffer_layout *layout)
{
struct dpni_cmd_get_buffer_layout *cmd_params;
struct dpni_rsp_get_buffer_layout *rsp_params;
int err;
-@@ -689,7 +692,7 @@ int dpni_set_buffer_layout(struct fsl_mc
+@@ -689,7 +664,7 @@ int dpni_set_buffer_layout(struct fsl_mc
enum dpni_queue_type qtype,
const struct dpni_buffer_layout *layout)
{
struct dpni_cmd_set_buffer_layout *cmd_params;
/* prepare command */
-@@ -731,7 +734,7 @@ int dpni_set_offload(struct fsl_mc_io *m
+@@ -731,7 +706,7 @@ int dpni_set_offload(struct fsl_mc_io *m
enum dpni_offload type,
u32 config)
{
struct dpni_cmd_set_offload *cmd_params;
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
-@@ -750,7 +753,7 @@ int dpni_get_offload(struct fsl_mc_io *m
+@@ -750,7 +725,7 @@ int dpni_get_offload(struct fsl_mc_io *m
enum dpni_offload type,
u32 *config)
{
struct dpni_cmd_get_offload *cmd_params;
struct dpni_rsp_get_offload *rsp_params;
int err;
-@@ -792,7 +795,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
+@@ -792,7 +767,7 @@ int dpni_get_qdid(struct fsl_mc_io *mc_i
enum dpni_queue_type qtype,
u16 *qdid)
{
struct dpni_cmd_get_qdid *cmd_params;
struct dpni_rsp_get_qdid *rsp_params;
int err;
-@@ -830,7 +833,7 @@ int dpni_get_tx_data_offset(struct fsl_m
+@@ -830,7 +805,7 @@ int dpni_get_tx_data_offset(struct fsl_m
u16 token,
u16 *data_offset)
{
struct dpni_rsp_get_tx_data_offset *rsp_params;
int err;
-@@ -865,7 +868,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
+@@ -865,7 +840,7 @@ int dpni_set_link_cfg(struct fsl_mc_io *
u16 token,
const struct dpni_link_cfg *cfg)
{
struct dpni_cmd_set_link_cfg *cmd_params;
/* prepare command */
-@@ -894,7 +897,7 @@ int dpni_get_link_state(struct fsl_mc_io
+@@ -881,6 +856,36 @@ int dpni_set_link_cfg(struct fsl_mc_io *
+ }
+
+ /**
++ * dpni_set_link_cfg_v2() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_link_cfg_v2 *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG_V2,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_link_cfg_v2 *)cmd.params;
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
++ cmd_params->advertising = cpu_to_le64(cfg->advertising);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+@@ -894,7 +899,7 @@ int dpni_get_link_state(struct fsl_mc_io
u16 token,
struct dpni_link_state *state)
{
struct dpni_rsp_get_link_state *rsp_params;
int err;
-@@ -918,6 +921,44 @@ int dpni_get_link_state(struct fsl_mc_io
+@@ -918,6 +923,84 @@ int dpni_get_link_state(struct fsl_mc_io
}
/**
++ * dpni_get_link_state_v2() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_link_state_v2 *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE_V2,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_link_state_v2 *)cmd.params;
++ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
++ state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID);
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++ state->supported = le64_to_cpu(rsp_params->supported);
++ state->advertising = le64_to_cpu(rsp_params->advertising);
++
++ return 0;
++}
++
++/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* dpni_set_max_frame_length() - Set the maximum received frame length.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -933,7 +974,7 @@ int dpni_set_max_frame_length(struct fsl
+@@ -933,7 +1016,7 @@ int dpni_set_max_frame_length(struct fsl
u16 token,
u16 max_frame_length)
{
struct dpni_cmd_set_max_frame_length *cmd_params;
/* prepare command */
-@@ -963,7 +1004,7 @@ int dpni_get_max_frame_length(struct fsl
+@@ -963,7 +1046,7 @@ int dpni_get_max_frame_length(struct fsl
u16 token,
u16 *max_frame_length)
{
struct dpni_rsp_get_max_frame_length *rsp_params;
int err;
-@@ -998,7 +1039,7 @@ int dpni_set_multicast_promisc(struct fs
+@@ -998,7 +1081,7 @@ int dpni_set_multicast_promisc(struct fs
u16 token,
int en)
{
struct dpni_cmd_set_multicast_promisc *cmd_params;
/* prepare command */
-@@ -1026,7 +1067,7 @@ int dpni_get_multicast_promisc(struct fs
+@@ -1026,7 +1109,7 @@ int dpni_get_multicast_promisc(struct fs
u16 token,
int *en)
{
struct dpni_rsp_get_multicast_promisc *rsp_params;
int err;
-@@ -1061,7 +1102,7 @@ int dpni_set_unicast_promisc(struct fsl_
+@@ -1061,7 +1144,7 @@ int dpni_set_unicast_promisc(struct fsl_
u16 token,
int en)
{
struct dpni_cmd_set_unicast_promisc *cmd_params;
/* prepare command */
-@@ -1089,7 +1130,7 @@ int dpni_get_unicast_promisc(struct fsl_
+@@ -1089,7 +1172,7 @@ int dpni_get_unicast_promisc(struct fsl_
u16 token,
int *en)
{
struct dpni_rsp_get_unicast_promisc *rsp_params;
int err;
-@@ -1124,7 +1165,7 @@ int dpni_set_primary_mac_addr(struct fsl
+@@ -1124,7 +1207,7 @@ int dpni_set_primary_mac_addr(struct fsl
u16 token,
const u8 mac_addr[6])
{
struct dpni_cmd_set_primary_mac_addr *cmd_params;
int i;
-@@ -1154,7 +1195,7 @@ int dpni_get_primary_mac_addr(struct fsl
+@@ -1154,7 +1237,7 @@ int dpni_get_primary_mac_addr(struct fsl
u16 token,
u8 mac_addr[6])
{
struct dpni_rsp_get_primary_mac_addr *rsp_params;
int i, err;
-@@ -1193,7 +1234,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
+@@ -1193,7 +1276,7 @@ int dpni_get_port_mac_addr(struct fsl_mc
u16 token,
u8 mac_addr[6])
{
struct dpni_rsp_get_port_mac_addr *rsp_params;
int i, err;
-@@ -1229,7 +1270,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
+@@ -1229,7 +1312,7 @@ int dpni_add_mac_addr(struct fsl_mc_io *
u16 token,
const u8 mac_addr[6])
{
struct dpni_cmd_add_mac_addr *cmd_params;
int i;
-@@ -1259,7 +1300,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
+@@ -1259,7 +1342,7 @@ int dpni_remove_mac_addr(struct fsl_mc_i
u16 token,
const u8 mac_addr[6])
{
struct dpni_cmd_remove_mac_addr *cmd_params;
int i;
-@@ -1293,7 +1334,7 @@ int dpni_clear_mac_filters(struct fsl_mc
+@@ -1293,7 +1376,7 @@ int dpni_clear_mac_filters(struct fsl_mc
int unicast,
int multicast)
{
struct dpni_cmd_clear_mac_filters *cmd_params;
/* prepare command */
-@@ -1309,6 +1350,55 @@ int dpni_clear_mac_filters(struct fsl_mc
+@@ -1309,6 +1392,55 @@ int dpni_clear_mac_filters(struct fsl_mc
}
/**
* dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1327,7 +1417,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
+@@ -1327,7 +1459,7 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
u8 tc_id,
const struct dpni_rx_tc_dist_cfg *cfg)
{
struct dpni_cmd_set_rx_tc_dist *cmd_params;
/* prepare command */
-@@ -1346,6 +1436,293 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
+@@ -1346,6 +1478,215 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io
return mc_send_command(mc_io, &cmd);
}
+}
+
+/**
-+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
-+ * (to select a flow ID)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the QoS table where to insert the entry.
-+ * Only relevant if MASKING is enabled for QoS
-+ * classification on this DPNI, it is ignored for exact match.
-+ * @cfg: Flow steering rule to add
-+ * @action: Action to be taken as result of a classification hit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action)
-+{
-+ struct dpni_cmd_add_fs_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+ cmd_params->options = cpu_to_le16(action->options);
-+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
-+ cmd_params->flc = cpu_to_le64(action->flc);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg)
-+{
-+ struct dpni_cmd_remove_fs_entry *cmd_params;
-+ struct fsl_mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
+ * dpni_set_congestion_notification() - Set traffic class congestion
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
/**
* dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object
-@@ -1371,7 +1748,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
+@@ -1371,7 +1712,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_
u8 options,
const struct dpni_queue *queue)
{
struct dpni_cmd_set_queue *cmd_params;
/* prepare command */
-@@ -1419,7 +1796,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
+@@ -1419,7 +1760,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_
struct dpni_queue *queue,
struct dpni_queue_id *qid)
{
struct dpni_cmd_get_queue *cmd_params;
struct dpni_rsp_get_queue *rsp_params;
int err;
-@@ -1463,6 +1840,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
+@@ -1463,6 +1804,8 @@ int dpni_get_queue(struct fsl_mc_io *mc_
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
* DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
* @stat: Structure containing the statistics
*
* Return: '0' on Success; Error code otherwise.
-@@ -1471,9 +1850,10 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -1471,9 +1814,10 @@ int dpni_get_statistics(struct fsl_mc_io
u32 cmd_flags,
u16 token,
u8 page,
struct dpni_cmd_get_statistics *cmd_params;
struct dpni_rsp_get_statistics *rsp_params;
int i, err;
-@@ -1484,6 +1864,7 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -1484,6 +1828,7 @@ int dpni_get_statistics(struct fsl_mc_io
token);
cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
cmd_params->page_number = page;
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
-@@ -1499,6 +1880,29 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -1499,6 +1844,29 @@ int dpni_get_statistics(struct fsl_mc_io
}
/**
* dpni_set_taildrop() - Set taildrop per queue or TC
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1506,7 +1910,10 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -1506,7 +1874,10 @@ int dpni_get_statistics(struct fsl_mc_io
* @cg_point: Congestion point
* @q_type: Queue type on which the taildrop is configured.
* Only Rx queues are supported for now
* @q_index: Index of the queue if the DPNI supports multiple queues for
* traffic distribution. Ignored if CONGESTION_POINT is not 0.
* @taildrop: Taildrop structure
-@@ -1522,7 +1929,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
+@@ -1522,7 +1893,7 @@ int dpni_set_taildrop(struct fsl_mc_io *
u8 index,
struct dpni_taildrop *taildrop)
{
struct dpni_cmd_set_taildrop *cmd_params;
/* prepare command */
-@@ -1550,7 +1957,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
+@@ -1550,7 +1921,10 @@ int dpni_set_taildrop(struct fsl_mc_io *
* @cg_point: Congestion point
* @q_type: Queue type on which the taildrop is configured.
* Only Rx queues are supported for now
* @q_index: Index of the queue if the DPNI supports multiple queues for
* traffic distribution. Ignored if CONGESTION_POINT is not 0.
* @taildrop: Taildrop structure
-@@ -1566,7 +1976,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
+@@ -1566,7 +1940,7 @@ int dpni_get_taildrop(struct fsl_mc_io *
u8 index,
struct dpni_taildrop *taildrop)
{
struct dpni_cmd_get_taildrop *cmd_params;
struct dpni_rsp_get_taildrop *rsp_params;
int err;
-@@ -1594,3 +2004,109 @@ int dpni_get_taildrop(struct fsl_mc_io *
+@@ -1594,3 +1968,187 @@ int dpni_get_taildrop(struct fsl_mc_io *
return 0;
}
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
-+ cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
-+ cmd_params->miss_flow_id = le16_to_cpu(cfg->fs_miss_flow_id);
-+ cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
++ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
-+ cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
-+ cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the QoS table where to insert the entry.
++ * Only relevant if MASKING is enabled for QoS
++ * classification on this DPNI, it is ignored for exact match.
++ * @cfg: Flow steering rule to add
++ * @action: Action to be taken as result of a classification hit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action)
++{
++ struct dpni_cmd_add_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ cmd_params->options = cpu_to_le16(action->options);
++ cmd_params->flow_id = cpu_to_le16(action->flow_id);
++ cmd_params->flc = cpu_to_le64(action->flc);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct dpni_cmd_remove_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-@@ -52,6 +52,14 @@ struct fsl_mc_io;
+@@ -1,34 +1,6 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+ */
+ #ifndef __FSL_DPNI_H
+ #define __FSL_DPNI_H
+@@ -52,6 +24,14 @@ struct fsl_mc_io;
* Maximum number of buffer pools per DPNI
*/
#define DPNI_MAX_DPBP 8
/**
* All traffic classes considered; see dpni_set_queue()
-@@ -123,13 +131,15 @@ struct dpni_pools_cfg {
+@@ -123,13 +103,15 @@ struct dpni_pools_cfg {
/**
* struct pools - Buffer pools parameters
* @dpbp_id: DPBP object ID
} pools[DPNI_MAX_DPBP];
};
-@@ -476,6 +486,24 @@ union dpni_statistics {
+@@ -476,6 +458,24 @@ union dpni_statistics {
u64 egress_confirmed_frames;
} page_2;
/**
* struct raw - raw statistics structure
*/
struct {
-@@ -487,8 +515,13 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -487,8 +487,13 @@ int dpni_get_statistics(struct fsl_mc_io
u32 cmd_flags,
u16 token,
u8 page,
/**
* Enable auto-negotiation
*/
-@@ -505,6 +538,10 @@ int dpni_get_statistics(struct fsl_mc_io
+@@ -505,6 +510,23 @@ int dpni_get_statistics(struct fsl_mc_io
* Enable a-symmetric pause frames
*/
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
++/**
++ * Advertised link speeds
++ */
++#define DPNI_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
++#define DPNI_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
++#define DPNI_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
++#define DPNI_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
++#define DPNI_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
++
++/**
++ * Advertise auto-negotiation enabled
++ */
++#define DPNI_ADVERTISED_AUTONEG 0x0000000000000008ULL
/**
* struct - Structure representing DPNI link configuration
-@@ -538,6 +575,23 @@ int dpni_get_link_state(struct fsl_mc_io
+@@ -514,6 +536,7 @@ int dpni_get_statistics(struct fsl_mc_io
+ struct dpni_link_cfg {
+ u32 rate;
+ u64 options;
++ u64 advertising;
+ };
+
+ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+@@ -521,6 +544,11 @@ int dpni_set_link_cfg(struct fsl_mc_io
+ u16 token,
+ const struct dpni_link_cfg *cfg);
+
++int dpni_set_link_cfg_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg);
++
+ /**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+@@ -530,7 +558,10 @@ int dpni_set_link_cfg(struct fsl_mc_io
+ struct dpni_link_state {
+ u32 rate;
+ u64 options;
++ u64 supported;
++ u64 advertising;
+ int up;
++ int state_valid;
+ };
+
+ int dpni_get_link_state(struct fsl_mc_io *mc_io,
+@@ -538,6 +569,28 @@ int dpni_get_link_state(struct fsl_mc_io
u16 token,
struct dpni_link_state *state);
++int dpni_get_link_state_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state);
++
+/**
+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
+ * @rate_limit: rate in Mbps
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
-@@ -639,6 +693,70 @@ int dpni_prepare_key_cfg(const struct dp
+@@ -639,6 +692,70 @@ int dpni_prepare_key_cfg(const struct dp
u8 *key_cfg_buf);
/**
* struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
* @dist_size: Set the distribution size;
* supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-@@ -784,6 +902,108 @@ enum dpni_congestion_point {
+@@ -784,6 +901,108 @@ enum dpni_congestion_point {
};
/**
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
* @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
-@@ -829,4 +1049,124 @@ struct dpni_rule_cfg {
+@@ -829,4 +1048,124 @@ struct dpni_rule_cfg {
u8 key_size;
};
+ const struct dpni_rx_dist_cfg *cfg);
+
#endif /* __FSL_DPNI_H */
+--- a/drivers/staging/fsl-dpaa2/ethernet/net.h
++++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
+@@ -1,33 +1,5 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+ /* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+ */
+ #ifndef __FSL_NET_H
+ #define __FSL_NET_H
-From bdb1d42c9398eb14e997e026bd46602543a7ed03 Mon Sep 17 00:00:00 2001
+From 1c96e22d28e1b18c41c71e7d0948378561a6526f Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:16 +0800
-Subject: [PATCH 09/40] dpaa2-l2switch: support layerscape
-This is an integrated patch of dpaa2-l2switch for
- layerscape
+Date: Wed, 17 Apr 2019 18:58:29 +0800
+Subject: [PATCH] dpaa2-l2switch: support layerscape
+This is an integrated patch of dpaa2-l2switch for layerscape
+
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
drivers/staging/fsl-dpaa2/ethsw/README | 106 ++
-From 92cf25fe454dd42136e717ba679a9dba740db0e7 Mon Sep 17 00:00:00 2001
+From dd0cc8d0739a72ee5d85039a9ba7812383e8f555 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:18 +0800
-Subject: [PATCH 10/40] dpaa2-mac-phy: support layerscape
-This is an integrated patch of dpaa2-mac-phy for
- layerscape
+Date: Wed, 17 Apr 2019 18:58:30 +0800
+Subject: [PATCH] dpaa2-mac-phy: support layerscape
+
+This is an integrated patch of dpaa2-mac-phy for layerscape
Signed-off-by: Alex Marginean <alexandru.marginean@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@nxp.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
+Signed-off-by: Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
+Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Itai Katz <itai.katz@freescale.com>
Signed-off-by: J. German Rivera <German.Rivera@freescale.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
---
drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 619 ++++++++++++++++++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++++++++++
- drivers/staging/fsl-dpaa2/mac/mac.c | 672 ++++++++++++++++++++++
- 6 files changed, 1838 insertions(+)
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 196 ++++++
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 689 ++++++++++++++++++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 374 ++++++++++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 817 ++++++++++++++++++++++
+ 6 files changed, 2109 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
-@@ -0,0 +1,172 @@
+@@ -0,0 +1,196 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 2
+#define DPMAC_CMD_BASE_VERSION 1
++#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
++#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
+
+#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
++#define DPMAC_CMDID_GET_LINK_CFG_V2 DPMAC_CMD_V2(0x0c2)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
++#define DPMAC_CMDID_SET_LINK_STATE_V2 DPMAC_CMD_V2(0x0c3)
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
+ u32 rate;
+};
+
++struct dpmac_rsp_get_link_cfg_v2 {
++ u64 options;
++ u32 rate;
++ u32 pad;
++ u64 advertising;
++};
++
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
++#define DPMAC_STATE_VALID_SIZE 1
++#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ u64 options;
+ u8 up;
+};
+
++struct dpmac_cmd_set_link_state_v2 {
++ u64 options;
++ u32 rate;
++ u32 pad0;
++ /* from lsb: up:1, state_valid:1 */
++ u8 state;
++ u8 pad1[7];
++ u64 supported;
++ u64 advertising;
++};
++
+struct dpmac_cmd_get_counter {
+ u8 type;
+};
+#endif /* _FSL_DPMAC_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
-@@ -0,0 +1,619 @@
+@@ -0,0 +1,689 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+}
+
+/**
++ * dpmac_get_link_cfg_v2() - Get Ethernet link configuration
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @cfg: Returned structure with the link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_cfg *cfg)
++{
++ struct dpmac_rsp_get_link_cfg_v2 *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err = 0;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG_V2,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpmac_rsp_get_link_cfg_v2 *)cmd.params;
++ cfg->options = le64_to_cpu(rsp_params->options);
++ cfg->rate = le32_to_cpu(rsp_params->rate);
++ cfg->advertising = le64_to_cpu(rsp_params->advertising);
++
++ return 0;
++}
++
++/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
-+ cmd_params->up = dpmac_get_field(link_state->up, STATE);
++ dpmac_set_field(cmd_params->up, STATE, link_state->up);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpmac_set_link_state_v2() - Set the Ethernet link status
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMAC object
++ * @link_state: Link state configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_state *link_state)
++{
++ struct dpmac_cmd_set_link_state_v2 *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE_V2,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpmac_cmd_set_link_state_v2 *)cmd.params;
++ cmd_params->options = cpu_to_le64(link_state->options);
++ cmd_params->rate = cpu_to_le32(link_state->rate);
++ dpmac_set_field(cmd_params->state, STATE, link_state->up);
++ dpmac_set_field(cmd_params->state, STATE_VALID,
++ link_state->state_valid);
++ cmd_params->supported = cpu_to_le64(link_state->supported);
++ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
-@@ -0,0 +1,342 @@
+@@ -0,0 +1,374 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
++ * Advertised link speeds
++ */
++#define DPMAC_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
++#define DPMAC_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
++#define DPMAC_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
++#define DPMAC_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
++#define DPMAC_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
++
++/**
++ * Advertise auto-negotiation enable
++ */
++#define DPMAC_ADVERTISED_AUTONEG 0x0000000000000008ULL
++
++/**
+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
+ * @rate: Link's rate - in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
++ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_cfg {
+ u32 rate;
+ u64 options;
++ u64 advertising;
+};
+
+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
+ u16 token,
+ struct dpmac_link_cfg *cfg);
+
++int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_cfg *cfg);
++
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
++ * @state_valid: Ignore/Update the state of the link
++ * @supported: Speeds capability of the phy (bitmap)
++ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
++ int state_valid;
++ u64 supported;
++ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
++int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpmac_link_state *link_state);
++
+/**
+ * enum dpmac_counter - DPMAC counter types
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad.
+#endif /* __FSL_DPMAC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
-@@ -0,0 +1,672 @@
+@@ -0,0 +1,817 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
++ * Copyright 2018 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_attr attr;
+ struct dpmac_link_state old_state;
++ u16 dpmac_ver_major;
++ u16 dpmac_ver_minor;
+};
+
+/* TODO: fix the 10G modes, mapping can't be right:
+ PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XAUI */
+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_CAUI */
++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_1000BASEX */
++ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_USXGMII */
+};
+
++static int cmp_dpmac_ver(struct dpaa2_mac_priv *priv,
++ u16 ver_major, u16 ver_minor)
++{
++ if (priv->dpmac_ver_major == ver_major)
++ return priv->dpmac_ver_minor - ver_minor;
++ return priv->dpmac_ver_major - ver_major;
++}
++
++#define DPMAC_LINK_AUTONEG_VER_MAJOR 4
++#define DPMAC_LINK_AUTONEG_VER_MINOR 3
++
++struct dpaa2_mac_link_mode_map {
++ u64 dpmac_lm;
++ u64 ethtool_lm;
++};
++
++static const struct dpaa2_mac_link_mode_map dpaa2_mac_lm_map[] = {
++ {DPMAC_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
++ {DPMAC_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
++ {DPMAC_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
++ {DPMAC_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
++ {DPMAC_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
++ {DPMAC_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
++};
++
++static void link_mode_dpmac2phydev(u64 dpmac_lm, u32 *phydev_lm)
++{
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
++ if (dpmac_lm & dpaa2_mac_lm_map[i].dpmac_lm)
++ __set_bit(dpaa2_mac_lm_map[i].ethtool_lm, mask);
++ }
++
++ ethtool_convert_link_mode_to_legacy_u32(phydev_lm, mask);
++}
++
++static void link_mode_phydev2dpmac(u32 phydev_lm, u64 *dpni_lm)
++{
++ unsigned long lm;
++ int i;
++
++ ethtool_convert_legacy_u32_to_link_mode(&lm, phydev_lm);
++
++ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
++ if (test_bit(dpaa2_mac_lm_map[i].ethtool_lm, &lm))
++ *dpni_lm |= dpaa2_mac_lm_map[i].dpmac_lm;
++ }
++}
++
+static void dpaa2_mac_link_changed(struct net_device *netdev)
+{
+ struct phy_device *phydev;
+ if (phydev->autoneg)
+ state.options |= DPMAC_LINK_OPT_AUTONEG;
+
++ if (phydev->pause && (phydev->advertising & ADVERTISED_Pause))
++ state.options |= DPMAC_LINK_OPT_PAUSE;
++ if (phydev->pause &&
++ (phydev->advertising & ADVERTISED_Asym_Pause))
++ state.options |= DPMAC_LINK_OPT_ASYM_PAUSE;
++
+ netif_carrier_on(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ phy_print_status(phydev);
+ }
+
-+ /* We must interrogate MC at all times, because we don't know
-+ * when and whether a potential DPNI may have read the link state.
-+ */
-+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
-+ priv->mc_dev->mc_handle, &state);
++ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
++ DPMAC_LINK_AUTONEG_VER_MINOR) < 0) {
++ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
++ priv->mc_dev->mc_handle, &state);
++ } else {
++ link_mode_phydev2dpmac(phydev->supported, &state.supported);
++ link_mode_phydev2dpmac(phydev->advertising, &state.advertising);
++ state.state_valid = 1;
++
++ err = dpmac_set_link_state_v2(priv->mc_dev->mc_io, 0,
++ priv->mc_dev->mc_handle, &state);
++ }
+ if (unlikely(err))
+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
+}
+ return NETDEV_TX_OK;
+}
+
++static void dpaa2_mac_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct dpaa2_mac_priv *priv = netdev_priv(net_dev);
++
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", priv->dpmac_ver_major, priv->dpmac_ver_minor);
++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
++
+static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
++ .get_drvinfo = &dpaa2_mac_get_drvinfo,
+ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
+ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
+ .get_strings = &dpaa2_mac_get_strings,
+ phydev->speed = cfg->rate;
+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
+
++ if (cfg->advertising != 0) {
++ phydev->advertising = 0;
++ link_mode_dpmac2phydev(cfg->advertising, &phydev->advertising);
++ }
++
++ if (phydev->supported & SUPPORTED_Pause) {
++ if (cfg->options & DPMAC_LINK_OPT_PAUSE)
++ phydev->advertising |= ADVERTISED_Pause;
++ else
++ phydev->advertising &= ~ADVERTISED_Pause;
++ }
++
++ if (phydev->supported & SUPPORTED_Asym_Pause) {
++ if (cfg->options & DPMAC_LINK_OPT_ASYM_PAUSE)
++ phydev->advertising |= ADVERTISED_Asym_Pause;
++ else
++ phydev->advertising &= ~ADVERTISED_Asym_Pause;
++ }
++
+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
-+ phydev->autoneg = 1;
++ phydev->autoneg = AUTONEG_ENABLE;
+ phydev->advertising |= ADVERTISED_Autoneg;
+ } else {
-+ phydev->autoneg = 0;
++ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
-+ struct dpmac_link_cfg link_cfg;
++ struct dpmac_link_cfg link_cfg = { 0 };
+ u32 status;
+ int err;
+
+
+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
-+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle,
-+ &link_cfg);
++ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
++ DPMAC_LINK_AUTONEG_VER_MINOR) < 0)
++ err = dpmac_get_link_cfg(mc_dev->mc_io, 0,
++ mc_dev->mc_handle, &link_cfg);
++ else
++ err = dpmac_get_link_cfg_v2(mc_dev->mc_io, 0,
++ mc_dev->mc_handle,
++ &link_cfg);
+ if (unlikely(err))
+ goto out;
+
+ struct dpaa2_mac_priv *priv = NULL;
+ struct device_node *phy_node, *dpmac_node;
+ struct net_device *netdev;
-+ phy_interface_t if_mode;
++ int if_mode;
+ int err = 0;
+
+ dev = &mc_dev->dev;
+ goto err_free_mcp;
+ }
+
++ err = dpmac_get_api_version(mc_dev->mc_io, 0, &priv->dpmac_ver_major,
++ &priv->dpmac_ver_minor);
++ if (err) {
++ dev_err(dev, "dpmac_get_api_version failed\n");
++ goto err_version;
++ }
++
++ if (cmp_dpmac_ver(priv, DPMAC_VER_MAJOR, DPMAC_VER_MINOR) < 0) {
++ dev_err(dev, "DPMAC version %u.%u lower than supported %u.%u\n",
++ priv->dpmac_ver_major, priv->dpmac_ver_minor,
++ DPMAC_VER_MAJOR, DPMAC_VER_MINOR);
++ err = -ENOTSUPP;
++ goto err_version;
++ }
++
+ err = dpmac_get_attributes(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, &priv->attr);
+ if (err) {
+ }
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
-+ /* probe the PHY as a fixed-link if there's a phy-handle defined
-+ * in the device tree
-+ */
-+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
-+ if (!phy_node) {
-+ goto probe_fixed_link;
++ /* get the interface mode from the dpmac of node or from the MC attributes */
++ if_mode = of_get_phy_mode(dpmac_node);
++ if (if_mode >= 0) {
++ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
++ phy_modes(if_mode), priv->attr.eth_if);
++ goto link_type;
+ }
+
+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
+ phy_modes(if_mode), priv->attr.eth_if);
+ } else {
-+ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n",
-+ priv->attr.eth_if);
++ dev_err(dev, "Unexpected interface mode %d\n",
++ priv->attr.eth_if);
++ err = -EINVAL;
++ goto err_no_if_mode;
++ }
++
++link_type:
++ /* probe the PHY as fixed-link if the DPMAC attribute indicates so */
++ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED)
++ goto probe_fixed_link;
++
++ /* or if there's no phy-handle defined in the device tree */
++ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
++ if (!phy_node) {
+ goto probe_fixed_link;
+ }
+
+ err = -EFAULT;
+ goto err_no_phy;
+ }
++
++ err = phy_connect_direct(netdev, netdev->phydev,
++ &dpaa2_mac_link_changed, if_mode);
++ if (err) {
++ dev_err(dev, "error trying to connect to PHY\n");
++ goto err_no_phy;
++ }
++
+ dev_info(dev, "Registered fixed PHY.\n");
+ }
+
+
+ return 0;
+
++err_no_if_mode:
+err_defer:
+err_no_phy:
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+err_free_irq:
+#endif
+ teardown_irqs(mc_dev);
++err_version:
+err_close:
+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
-From 579f1f6767b1008c6e5ccc2b029acbb56002ed8b Mon Sep 17 00:00:00 2001
+From 802238feea29ddfb765fc0c162e0de34920cd58d Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:20 +0800
-Subject: [PATCH 11/40] dpaa2-rtc: support layerscape
+Date: Wed, 17 Apr 2019 18:58:31 +0800
+Subject: [PATCH] dpaa2-rtc: support layerscape
+
This is an integrated patch of dpaa2-rtc for layerscape
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +++++
drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++++++++++++++++++++
drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +++++
- drivers/staging/fsl-dpaa2/rtc/rtc.c | 242 +++++++
- 5 files changed, 1330 insertions(+)
+ drivers/staging/fsl-dpaa2/rtc/rtc.c | 240 +++++++
+ drivers/staging/fsl-dpaa2/rtc/rtc.h | 14 +
+ 6 files changed, 1342 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/rtc/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.c
create mode 100644 drivers/staging/fsl-dpaa2/rtc/dprtc.h
create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.c
+ create mode 100644 drivers/staging/fsl-dpaa2/rtc/rtc.h
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/Makefile
@@ -0,0 +1,10 @@
+
-+obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += dpaa2-rtc.o
++obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o
+
+dpaa2-rtc-objs := rtc.o dprtc.o
+
+#endif /* __FSL_DPRTC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
-@@ -0,0 +1,242 @@
+@@ -0,0 +1,240 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+
+#include <linux/fsl/mc.h>
+
-+#include "dprtc.h"
-+#include "dprtc-cmd.h"
++#include "rtc.h"
+
+#define N_EXT_TS 2
+
+{
+ struct device *dev;
+ int err = 0;
-+ int dpaa2_phc_index;
+ u32 tmr_add = 0;
+
+ if (!mc_dev)
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/rtc/rtc.h
+@@ -0,0 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2018 NXP
++ */
++
++#ifndef __RTC_H
++#define __RTC_H
++
++#include "dprtc.h"
++#include "dprtc-cmd.h"
++
++extern int dpaa2_phc_index;
++
++#endif
-From 40b001913c4131b7532beacc13c811a9e39f3758 Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:08 +0800
-Subject: [PATCH 13/40] dpaa-ethernet: support layerscape
-This is an integrated patch of dpaa-ethernet for
- layerscape
+From b443452fe13292b12295757f57e04c04834b3fc0 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Wed, 22 May 2019 17:49:18 +0800
+Subject: [PATCH] dpaa-ethernet: support layerscape
+
+This is an integrated patch of dpaa-ethernet for layerscape
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
-Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Iordache Florinel-R70177 <florinel.iordache@nxp.com>
Signed-off-by: Jake Moroni <mail@jakemoroni.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Radu Bulie <radu-andrei.bulie@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Vakul Garg <vakul.garg@nxp.com>
Signed-off-by: Vicentiu Galanopulo <vicentiu.galanopulo@nxp.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: yuan linyu <Linyu.Yuan@alcatel-sbell.com.cn>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- .../net/ethernet/freescale/dpaa/dpaa_eth.c | 399 +-
- .../ethernet/freescale/dpaa/dpaa_ethtool.c | 2 +-
- drivers/net/ethernet/freescale/fman/Kconfig | 1 -
- drivers/net/ethernet/freescale/fman/Makefile | 12 +-
- .../net/ethernet/freescale/fman/fman_dtsec.c | 19 +
- .../net/ethernet/freescale/fman/fman_dtsec.h | 1 +
- .../net/ethernet/freescale/fman/fman_memac.c | 32 +-
- .../net/ethernet/freescale/fman/fman_memac.h | 1 +
- .../net/ethernet/freescale/fman/fman_port.c | 2 +
- .../net/ethernet/freescale/fman/fman_tgec.c | 33 +-
- .../net/ethernet/freescale/fman/fman_tgec.h | 1 +
- drivers/net/ethernet/freescale/fman/mac.c | 149 +-
- drivers/net/ethernet/freescale/fman/mac.h | 8 +-
- .../net/ethernet/freescale/sdk_dpaa/Kconfig | 195 +
- .../net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
- .../ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
- .../freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
- .../freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1223 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth.h | 691 ++
- .../freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
- .../freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
- .../freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2099 +++++
- .../freescale/sdk_dpaa/dpaa_eth_ceetm.h | 241 +
- .../freescale/sdk_dpaa/dpaa_eth_common.c | 1776 ++++
- .../freescale/sdk_dpaa/dpaa_eth_common.h | 226 +
- .../freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1201 +++
- .../freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
- .../freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
- .../freescale/sdk_dpaa/dpaa_ethtool.c | 542 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 291 +
- .../net/ethernet/freescale/sdk_dpaa/mac-api.c | 931 ++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 ++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 +
- .../freescale/sdk_dpaa/offline_port.c | 848 ++
- .../freescale/sdk_dpaa/offline_port.h | 59 +
- .../net/ethernet/freescale/sdk_fman/Kconfig | 153 +
- .../net/ethernet/freescale/sdk_fman/Makefile | 11 +
- .../sdk_fman/Peripherals/FM/HC/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 +++
- .../sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
- .../sdk_fman/Peripherals/FM/MAC/dtsec.c | 1504 ++++
- .../sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
- .../Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
- .../Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
- .../sdk_fman/Peripherals/FM/MAC/fm_mac.c | 674 ++
- .../sdk_fman/Peripherals/FM/MAC/fm_mac.h | 226 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
- .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 847 ++
- .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 165 +
- .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 532 ++
- .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 215 +
- .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
- .../sdk_fman/Peripherals/FM/MAC/memac.c | 1153 +++
- .../sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
- .../Peripherals/FM/MAC/memac_mii_acc.c | 78 +
- .../Peripherals/FM/MAC/memac_mii_acc.h | 73 +
- .../sdk_fman/Peripherals/FM/MAC/tgec.c | 1017 +++
- .../sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
- .../Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
- .../Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
- .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
- .../Peripherals/FM/MACSEC/fm_macsec.c | 237 +
- .../Peripherals/FM/MACSEC/fm_macsec.h | 203 +
- .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
- .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
- .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 ++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
- .../sdk_fman/Peripherals/FM/Makefile | 23 +
- .../sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
- .../sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 +++++++++++++++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
- .../sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 +++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
- .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 ++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
- .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 890 ++
- .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
- .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6436 ++++++++++++++
- .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
- .../Peripherals/FM/Port/fm_port_dsar.h | 494 ++
- .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
- .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1570 ++++
- .../sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
- .../sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
- .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
- .../sdk_fman/Peripherals/FM/SP/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
- .../sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
- .../sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
- .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++
- .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
- .../sdk_fman/Peripherals/FM/fm_ipc.h | 465 +
- .../sdk_fman/Peripherals/FM/fm_muram.c | 174 +
- .../freescale/sdk_fman/Peripherals/FM/fman.c | 1400 +++
- .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 +++
- .../sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
- .../Peripherals/FM/inc/fm_sp_common.h | 117 +
- .../ethernet/freescale/sdk_fman/etc/Makefile | 12 +
- .../ethernet/freescale/sdk_fman/etc/error.c | 95 +
- .../ethernet/freescale/sdk_fman/etc/list.c | 71 +
- .../ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
- .../net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
- .../net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
- .../ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
- .../freescale/sdk_fman/fmanv3h_dflags.h | 57 +
- .../freescale/sdk_fman/fmanv3l_dflags.h | 56 +
- .../inc/Peripherals/crc_mac_addr_ext.h | 364 +
- .../sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
- .../sdk_fman/inc/Peripherals/fm_ext.h | 1731 ++++
- .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 887 ++
- .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 +++
- .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
- .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 +++++++++
- .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 ++++++
- .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
- .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 +
- .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
- .../freescale/sdk_fman/inc/core_ext.h | 90 +
- .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
- .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
- .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
- .../freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
- .../freescale/sdk_fman/inc/debug_ext.h | 233 +
- .../freescale/sdk_fman/inc/endian_ext.h | 447 +
- .../freescale/sdk_fman/inc/enet_ext.h | 205 +
- .../freescale/sdk_fman/inc/error_ext.h | 529 ++
- .../freescale/sdk_fman/inc/etc/list_ext.h | 358 +
- .../freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
- .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
- .../freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
- .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
- .../inc/flib/common/arch/ppc_access.h | 37 +
- .../sdk_fman/inc/flib/common/general.h | 52 +
- .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
- .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
- .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 ++
- .../sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
- .../inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
- .../sdk_fman/inc/flib/fsl_fman_memac.h | 434 +
- .../inc/flib/fsl_fman_memac_mii_acc.h | 78 +
- .../sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
- .../sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
- .../sdk_fman/inc/flib/fsl_fman_rtc.h | 449 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
- .../sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
- .../FMANV3H/dpaa_integration_ext.h | 291 +
- .../inc/integrations/FMANV3H/part_ext.h | 71 +
- .../FMANV3H/part_integration_ext.h | 304 +
- .../FMANV3L/dpaa_integration_ext.h | 293 +
- .../inc/integrations/FMANV3L/part_ext.h | 59 +
- .../FMANV3L/part_integration_ext.h | 304 +
- .../LS1043/dpaa_integration_ext.h | 291 +
- .../inc/integrations/LS1043/part_ext.h | 64 +
- .../LS1043/part_integration_ext.h | 185 +
- .../integrations/P1023/dpaa_integration_ext.h | 213 +
- .../inc/integrations/P1023/part_ext.h | 82 +
- .../integrations/P1023/part_integration_ext.h | 635 ++
- .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
- .../integrations/P3040_P4080_P5020/part_ext.h | 83 +
- .../P3040_P4080_P5020/part_integration_ext.h | 336 +
- .../freescale/sdk_fman/inc/math_ext.h | 100 +
- .../freescale/sdk_fman/inc/ncsw_ext.h | 435 +
- .../ethernet/freescale/sdk_fman/inc/net_ext.h | 430 +
- .../ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
- .../freescale/sdk_fman/inc/stdarg_ext.h | 49 +
- .../freescale/sdk_fman/inc/stdlib_ext.h | 162 +
- .../freescale/sdk_fman/inc/string_ext.h | 56 +
- .../freescale/sdk_fman/inc/types_ext.h | 62 +
- .../freescale/sdk_fman/inc/xx_common.h | 56 +
- .../ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
- .../freescale/sdk_fman/ls1043_dflags.h | 56 +
- .../freescale/sdk_fman/ncsw_config.mk | 53 +
- .../freescale/sdk_fman/p1023_dflags.h | 65 +
- .../sdk_fman/p3040_4080_5020_dflags.h | 62 +
- .../ethernet/freescale/sdk_fman/src/Makefile | 11 +
- .../sdk_fman/src/inc/system/sys_ext.h | 118 +
- .../sdk_fman/src/inc/system/sys_io_ext.h | 46 +
- .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
- .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 130 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
- .../src/inc/wrapper/lnxwrp_fsl_fman.h | 921 ++
- .../freescale/sdk_fman/src/inc/xx/xx.h | 50 +
- .../freescale/sdk_fman/src/system/Makefile | 10 +
- .../freescale/sdk_fman/src/system/sys_io.c | 171 +
- .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
- .../sdk_fman/src/wrapper/fman_test.c | 1665 ++++
- .../sdk_fman/src/wrapper/lnxwrp_fm.c | 2908 +++++++
- .../sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
- .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1512 ++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4854 +++++++++++
- .../src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 +++
- .../src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
- .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
- .../src/wrapper/lnxwrp_resources_ut.c | 191 +
- .../src/wrapper/lnxwrp_resources_ut.h | 144 +
- .../src/wrapper/lnxwrp_resources_ut.make | 28 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 ++++
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
- .../src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 +++
- .../src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
- .../freescale/sdk_fman/src/xx/Makefile | 18 +
- .../sdk_fman/src/xx/module_strings.c | 46 +
- .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 ++
- .../freescale/sdk_fman/src/xx/xx_linux.c | 918 ++
- drivers/staging/fsl_qbman/Kconfig | 228 +
- drivers/staging/fsl_qbman/Makefile | 28 +
- drivers/staging/fsl_qbman/bman_config.c | 720 ++
- drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
- drivers/staging/fsl_qbman/bman_driver.c | 559 ++
- drivers/staging/fsl_qbman/bman_high.c | 1145 +++
- drivers/staging/fsl_qbman/bman_low.h | 565 ++
- drivers/staging/fsl_qbman/bman_private.h | 166 +
- drivers/staging/fsl_qbman/bman_test.c | 56 +
- drivers/staging/fsl_qbman/bman_test.h | 44 +
- drivers/staging/fsl_qbman/bman_test_high.c | 183 +
- drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
- drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
- drivers/staging/fsl_qbman/dpa_sys.h | 259 +
- drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
- drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
- drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
- drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
- drivers/staging/fsl_qbman/fsl_usdpaa.c | 2008 +++++
- drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
- drivers/staging/fsl_qbman/qbman_driver.c | 88 +
- drivers/staging/fsl_qbman/qman_config.c | 1224 +++
- drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
- drivers/staging/fsl_qbman/qman_driver.c | 961 +++
- drivers/staging/fsl_qbman/qman_high.c | 5652 ++++++++++++
- drivers/staging/fsl_qbman/qman_low.h | 1445 ++++
- drivers/staging/fsl_qbman/qman_private.h | 398 +
- drivers/staging/fsl_qbman/qman_test.c | 57 +
- drivers/staging/fsl_qbman/qman_test.h | 45 +
- drivers/staging/fsl_qbman/qman_test_high.c | 216 +
- .../staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
- drivers/staging/fsl_qbman/qman_utility.c | 129 +
- include/linux/fsl/svr.h | 97 +
- include/linux/fsl_bman.h | 532 ++
- include/linux/fsl_qman.h | 3900 +++++++++
- include/linux/fsl_usdpaa.h | 372 +
- include/linux/netdev_features.h | 2 +
- include/uapi/linux/fmd/Kbuild | 5 +
- include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
- .../uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
- .../linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 +++++++
- .../linux/fmd/Peripherals/fm_port_ioctls.h | 973 +++
- .../linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
- include/uapi/linux/fmd/integrations/Kbuild | 1 +
- .../fmd/integrations/integration_ioctls.h | 56 +
- include/uapi/linux/fmd/ioctls.h | 96 +
- include/uapi/linux/fmd/net_ioctls.h | 430 +
- net/sched/sch_generic.c | 7 +
- 273 files changed, 153944 insertions(+), 229 deletions(-)
+ drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 616 +-
+ drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3 +
+ drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 41 +-
+ drivers/net/ethernet/freescale/fman/Kconfig | 1 -
+ drivers/net/ethernet/freescale/fman/Makefile | 12 +-
+ drivers/net/ethernet/freescale/fman/fman.c | 38 +-
+ drivers/net/ethernet/freescale/fman/fman.h | 5 +
+ drivers/net/ethernet/freescale/fman/fman_dtsec.c | 46 +
+ drivers/net/ethernet/freescale/fman/fman_dtsec.h | 2 +
+ drivers/net/ethernet/freescale/fman/fman_memac.c | 37 +-
+ drivers/net/ethernet/freescale/fman/fman_memac.h | 2 +
+ drivers/net/ethernet/freescale/fman/fman_port.c | 28 +
+ drivers/net/ethernet/freescale/fman/fman_port.h | 4 +
+ drivers/net/ethernet/freescale/fman/fman_tgec.c | 54 +-
+ drivers/net/ethernet/freescale/fman/fman_tgec.h | 2 +
+ drivers/net/ethernet/freescale/fman/mac.c | 152 +-
+ drivers/net/ethernet/freescale/fman/mac.h | 9 +-
+ drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 184 +
+ drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 45 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
+ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1223 ++++
+ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 674 ++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2076 ++++++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 241 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1745 +++++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 226 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1195 +++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
+ .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 587 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 931 +++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 490 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 134 +
+ .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++
+ .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
+ drivers/net/ethernet/freescale/sdk_fman/Kconfig | 153 +
+ drivers/net/ethernet/freescale/sdk_fman/Makefile | 11 +
+ .../freescale/sdk_fman/Peripherals/FM/HC/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 ++++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1504 ++++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
+ .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
+ .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 674 ++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 226 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 847 +++
+ .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 165 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 532 ++
+ .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 215 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1166 +++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
+ .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c | 78 +
+ .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h | 73 +
+ .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 1017 +++
+ .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
+ .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
+ .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
+ .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c | 237 +
+ .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h | 203 +
+ .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
+ .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
+ .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 +++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
+ .../freescale/sdk_fman/Peripherals/FM/Makefile | 23 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 ++++++++++++++++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 ++++++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 +++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 ++
+ .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
+ .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 890 +++
+ .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
+ .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6437 +++++++++++++++++
+ .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
+ .../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h | 494 ++
+ .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
+ .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1570 ++++
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
+ .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
+ .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
+ .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
+ .../freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
+ .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++++
+ .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
+ .../freescale/sdk_fman/Peripherals/FM/fm_ipc.h | 465 ++
+ .../freescale/sdk_fman/Peripherals/FM/fm_muram.c | 174 +
+ .../freescale/sdk_fman/Peripherals/FM/fman.c | 1400 ++++
+ .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 ++++
+ .../freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
+ .../sdk_fman/Peripherals/FM/inc/fm_sp_common.h | 117 +
+ .../net/ethernet/freescale/sdk_fman/etc/Makefile | 12 +
+ .../net/ethernet/freescale/sdk_fman/etc/error.c | 95 +
+ drivers/net/ethernet/freescale/sdk_fman/etc/list.c | 71 +
+ .../net/ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
+ drivers/net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
+ drivers/net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
+ .../net/ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
+ .../ethernet/freescale/sdk_fman/fmanv3h_dflags.h | 57 +
+ .../ethernet/freescale/sdk_fman/fmanv3l_dflags.h | 56 +
+ .../sdk_fman/inc/Peripherals/crc_mac_addr_ext.h | 364 +
+ .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
+ .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1731 +++++
+ .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 887 +++
+ .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 ++++
+ .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
+ .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 ++++++++++
+ .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 +++++++
+ .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
+ .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 ++
+ .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
+ .../net/ethernet/freescale/sdk_fman/inc/core_ext.h | 90 +
+ .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
+ .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
+ .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
+ .../ethernet/freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
+ .../ethernet/freescale/sdk_fman/inc/debug_ext.h | 233 +
+ .../ethernet/freescale/sdk_fman/inc/endian_ext.h | 447 ++
+ .../net/ethernet/freescale/sdk_fman/inc/enet_ext.h | 205 +
+ .../ethernet/freescale/sdk_fman/inc/error_ext.h | 529 ++
+ .../ethernet/freescale/sdk_fman/inc/etc/list_ext.h | 358 +
+ .../ethernet/freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
+ .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
+ .../ethernet/freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
+ .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
+ .../sdk_fman/inc/flib/common/arch/ppc_access.h | 37 +
+ .../freescale/sdk_fman/inc/flib/common/general.h | 52 +
+ .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
+ .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 +++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
+ .../sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_memac.h | 434 ++
+ .../sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h | 78 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_rtc.h | 449 ++
+ .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
+ .../integrations/FMANV3H/dpaa_integration_ext.h | 291 +
+ .../sdk_fman/inc/integrations/FMANV3H/part_ext.h | 71 +
+ .../integrations/FMANV3H/part_integration_ext.h | 304 +
+ .../integrations/FMANV3L/dpaa_integration_ext.h | 293 +
+ .../sdk_fman/inc/integrations/FMANV3L/part_ext.h | 59 +
+ .../integrations/FMANV3L/part_integration_ext.h | 304 +
+ .../inc/integrations/LS1043/dpaa_integration_ext.h | 291 +
+ .../sdk_fman/inc/integrations/LS1043/part_ext.h | 64 +
+ .../inc/integrations/LS1043/part_integration_ext.h | 185 +
+ .../inc/integrations/P1023/dpaa_integration_ext.h | 213 +
+ .../sdk_fman/inc/integrations/P1023/part_ext.h | 82 +
+ .../inc/integrations/P1023/part_integration_ext.h | 635 ++
+ .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
+ .../inc/integrations/P3040_P4080_P5020/part_ext.h | 83 +
+ .../P3040_P4080_P5020/part_integration_ext.h | 336 +
+ .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 100 +
+ .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 435 ++
+ .../net/ethernet/freescale/sdk_fman/inc/net_ext.h | 430 ++
+ .../net/ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
+ .../ethernet/freescale/sdk_fman/inc/stdarg_ext.h | 49 +
+ .../ethernet/freescale/sdk_fman/inc/stdlib_ext.h | 162 +
+ .../ethernet/freescale/sdk_fman/inc/string_ext.h | 56 +
+ .../ethernet/freescale/sdk_fman/inc/types_ext.h | 62 +
+ .../ethernet/freescale/sdk_fman/inc/xx_common.h | 56 +
+ .../net/ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
+ .../ethernet/freescale/sdk_fman/ls1043_dflags.h | 56 +
+ .../net/ethernet/freescale/sdk_fman/ncsw_config.mk | 53 +
+ .../net/ethernet/freescale/sdk_fman/p1023_dflags.h | 65 +
+ .../freescale/sdk_fman/p3040_4080_5020_dflags.h | 62 +
+ .../net/ethernet/freescale/sdk_fman/src/Makefile | 11 +
+ .../freescale/sdk_fman/src/inc/system/sys_ext.h | 118 +
+ .../freescale/sdk_fman/src/inc/system/sys_io_ext.h | 46 +
+ .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
+ .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 130 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 921 +++
+ .../ethernet/freescale/sdk_fman/src/inc/xx/xx.h | 50 +
+ .../freescale/sdk_fman/src/system/Makefile | 10 +
+ .../freescale/sdk_fman/src/system/sys_io.c | 171 +
+ .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
+ .../freescale/sdk_fman/src/wrapper/fman_test.c | 1665 +++++
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2910 ++++++++
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
+ .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1512 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4854 +++++++++++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
+ .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.c | 191 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.h | 144 +
+ .../sdk_fman/src/wrapper/lnxwrp_resources_ut.make | 28 +
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
+ .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 +++++
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
+ .../ethernet/freescale/sdk_fman/src/xx/Makefile | 18 +
+ .../freescale/sdk_fman/src/xx/module_strings.c | 46 +
+ .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 +++
+ .../ethernet/freescale/sdk_fman/src/xx/xx_linux.c | 918 +++
+ drivers/staging/fsl_qbman/Kconfig | 228 +
+ drivers/staging/fsl_qbman/Makefile | 28 +
+ drivers/staging/fsl_qbman/bman_config.c | 720 ++
+ drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
+ drivers/staging/fsl_qbman/bman_driver.c | 559 ++
+ drivers/staging/fsl_qbman/bman_high.c | 1145 +++
+ drivers/staging/fsl_qbman/bman_low.h | 565 ++
+ drivers/staging/fsl_qbman/bman_private.h | 166 +
+ drivers/staging/fsl_qbman/bman_test.c | 56 +
+ drivers/staging/fsl_qbman/bman_test.h | 44 +
+ drivers/staging/fsl_qbman/bman_test_high.c | 183 +
+ drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
+ drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
+ drivers/staging/fsl_qbman/dpa_sys.h | 259 +
+ drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
+ drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
+ drivers/staging/fsl_qbman/fsl_usdpaa.c | 2008 ++++++
+ drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
+ drivers/staging/fsl_qbman/qbman_driver.c | 88 +
+ drivers/staging/fsl_qbman/qman_config.c | 1224 ++++
+ drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
+ drivers/staging/fsl_qbman/qman_driver.c | 961 +++
+ drivers/staging/fsl_qbman/qman_high.c | 5655 +++++++++++++++
+ drivers/staging/fsl_qbman/qman_low.h | 1445 ++++
+ drivers/staging/fsl_qbman/qman_private.h | 398 +
+ drivers/staging/fsl_qbman/qman_test.c | 57 +
+ drivers/staging/fsl_qbman/qman_test.h | 45 +
+ drivers/staging/fsl_qbman/qman_test_high.c | 216 +
+ drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
+ drivers/staging/fsl_qbman/qman_utility.c | 129 +
+ include/linux/fsl/svr.h | 97 +
+ include/linux/fsl_bman.h | 532 ++
+ include/linux/fsl_qman.h | 3910 ++++++++++
+ include/linux/fsl_usdpaa.h | 372 +
+ include/linux/netdev_features.h | 2 +
+ include/uapi/linux/fmd/Kbuild | 5 +
+ include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
+ include/uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
+ include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 ++++++++
+ .../uapi/linux/fmd/Peripherals/fm_port_ioctls.h | 973 +++
+ .../uapi/linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
+ include/uapi/linux/fmd/integrations/Kbuild | 1 +
+ .../linux/fmd/integrations/integration_ioctls.h | 56 +
+ include/uapi/linux/fmd/ioctls.h | 96 +
+ include/uapi/linux/fmd/net_ioctls.h | 430 ++
+ net/sched/sch_generic.c | 7 +
+ 276 files changed, 153982 insertions(+), 277 deletions(-)
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
- create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.c
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/mac.h
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
-@@ -53,6 +53,9 @@
+@@ -50,9 +50,13 @@
+ #include <linux/highmem.h>
+ #include <linux/percpu.h>
+ #include <linux/dma-mapping.h>
++#include <linux/iommu.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
#include "fman.h"
#include "fman_port.h"
-@@ -73,6 +76,10 @@ static u16 tx_timeout = 1000;
+@@ -73,6 +77,10 @@ static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define FM_FD_STAT_RX_ERRORS \
(FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
-@@ -388,34 +395,19 @@ out:
+@@ -388,34 +396,19 @@ out:
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
return ERR_PTR(-EINVAL);
}
-@@ -472,6 +464,16 @@ static void dpaa_set_rx_mode(struct net_
+@@ -472,6 +465,16 @@ static void dpaa_set_rx_mode(struct net_
err);
}
err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
if (err < 0)
netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
-@@ -1500,7 +1502,19 @@ static int dpaa_bp_add_8_bufs(const stru
+@@ -1176,7 +1179,7 @@ static int dpaa_eth_init_tx_port(struct
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+- buf_prefix_content.pass_time_stamp = false;
++ buf_prefix_content.pass_time_stamp = true;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ params.specific_params.non_rx_params.err_fqid = errq->fqid;
+@@ -1218,7 +1221,7 @@ static int dpaa_eth_init_rx_port(struct
+ buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
+ buf_prefix_content.pass_prs_result = true;
+ buf_prefix_content.pass_hash_result = true;
+- buf_prefix_content.pass_time_stamp = false;
++ buf_prefix_content.pass_time_stamp = true;
+ buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+
+ rx_p = &params.specific_params.rx_params;
+@@ -1500,7 +1503,19 @@ static int dpaa_bp_add_8_bufs(const stru
u8 i;
for (i = 0; i < 8; i++) {
if (unlikely(!new_buf)) {
dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
dpaa_bp->raw_size);
-@@ -1645,9 +1659,13 @@ static struct sk_buff *dpaa_cleanup_tx_f
+@@ -1600,6 +1615,17 @@ static int dpaa_eth_refill_bpools(struct
+ return 0;
+ }
+
++static phys_addr_t dpaa_iova_to_phys(struct device *dev, dma_addr_t addr)
++{
++ struct iommu_domain *domain;
++
++ domain = iommu_get_domain_for_dev(dev);
++ if (domain)
++ return iommu_iova_to_phys(domain, addr);
++ else
++ return addr;
++}
++
+ /* Cleanup function for outgoing frame descriptors that were built on Tx path,
+ * either contiguous frames or scatter/gather ones.
+ * Skb freeing is not handled here.
+@@ -1615,24 +1641,41 @@ static struct sk_buff *dpaa_cleanup_tx_f
+ {
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
++ struct skb_shared_hwtstamps shhwtstamps;
+ dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct sk_buff **skbh, *skb;
+ int nr_frags, i;
++ u64 ns;
+
+- skbh = (struct sk_buff **)phys_to_virt(addr);
++ skbh = (struct sk_buff **)phys_to_virt(dpaa_iova_to_phys(dev, addr));
+ skb = *skbh;
+
++ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++
++ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
++ &ns)) {
++ shhwtstamps.hwtstamp = ns_to_ktime(ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ } else {
++ dev_warn(dev, "fman_port_get_tstamp failed!\n");
++ }
++ }
++
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+- dma_unmap_single(dev, addr,
+- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+- dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+ * it's from lowmem.
+ */
+- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
++ sgt = phys_to_virt(dpaa_iova_to_phys(dev,
++ addr +
++ qm_fd_get_offset(fd)));
++
++ dma_unmap_single(dev, addr,
++ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
++ dma_dir);
+
+ /* sgt[0] is from lowmem, was dma_map_single()-ed */
+ dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+@@ -1645,9 +1688,13 @@ static struct sk_buff *dpaa_cleanup_tx_f
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
+ else
+#endif
+ /* Free the page frag that we allocated on Tx */
-+ skb_free_frag(phys_to_virt(addr));
++ skb_free_frag(skbh);
} else {
dma_unmap_single(dev, addr,
skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
-@@ -1739,6 +1757,7 @@ static struct sk_buff *sg_fd_to_skb(cons
+@@ -1678,26 +1725,21 @@ static u8 rx_csum_offload(const struct d
+ * accommodate the shared info area of the skb.
+ */
+ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
+- const struct qm_fd *fd)
++ const struct qm_fd *fd,
++ struct dpaa_bp *dpaa_bp,
++ void *vaddr)
+ {
+ ssize_t fd_off = qm_fd_get_offset(fd);
+- dma_addr_t addr = qm_fd_addr(fd);
+- struct dpaa_bp *dpaa_bp;
+ struct sk_buff *skb;
+- void *vaddr;
+
+- vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
+
+- dpaa_bp = dpaa_bpid2pool(fd->bpid);
+- if (!dpaa_bp)
+- goto free_buffer;
+-
+ skb = build_skb(vaddr, dpaa_bp->size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ WARN_ONCE(1, "Build skb failure on Rx\n");
+- goto free_buffer;
++ skb_free_frag(vaddr);
++ return NULL;
+ }
+ WARN_ON(fd_off != priv->rx_headroom);
+ skb_reserve(skb, fd_off);
+@@ -1706,10 +1748,6 @@ static struct sk_buff *contig_fd_to_skb(
+ skb->ip_summed = rx_csum_offload(priv, fd);
+
+ return skb;
+-
+-free_buffer:
+- skb_free_frag(vaddr);
+- return NULL;
+ }
+
+ /* Build an skb with the data of the first S/G entry in the linear portion and
+@@ -1718,14 +1756,14 @@ free_buffer:
+ * The page fragment holding the S/G Table is recycled here.
+ */
+ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
+- const struct qm_fd *fd)
++ const struct qm_fd *fd,
++ struct dpaa_bp *dpaa_bp,
++ void *vaddr)
+ {
+ ssize_t fd_off = qm_fd_get_offset(fd);
+- dma_addr_t addr = qm_fd_addr(fd);
+ const struct qm_sg_entry *sgt;
+ struct page *page, *head_page;
+- struct dpaa_bp *dpaa_bp;
+- void *vaddr, *sg_vaddr;
++ void *sg_vaddr;
+ int frag_off, frag_len;
+ struct sk_buff *skb;
+ dma_addr_t sg_addr;
+@@ -1734,29 +1772,33 @@ static struct sk_buff *sg_fd_to_skb(cons
+ int *count_ptr;
+ int i;
+
+- vaddr = phys_to_virt(addr);
+ WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
/* Iterate through the SGT entries and add data buffers to the skb */
sgt = vaddr + fd_off;
for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
/* Extension bit is not supported */
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
-@@ -1756,7 +1775,7 @@ static struct sk_buff *sg_fd_to_skb(cons
+
+ sg_addr = qm_sg_addr(&sgt[i]);
+- sg_vaddr = phys_to_virt(sg_addr);
+- WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
+- SMP_CACHE_BYTES));
+
+ /* We may use multiple Rx pools */
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+- if (!dpaa_bp)
++ if (!dpaa_bp) {
++ pr_info("%s: fail to get dpaa_bp for sg bpid %d\n",
++ __func__, sgt[i].bpid);
+ goto free_buffers;
++ }
++ sg_vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev,
++ sg_addr));
++ WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
++ SMP_CACHE_BYTES));
+
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
DMA_FROM_DEVICE);
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
skb = build_skb(sg_vaddr, sz);
-@@ -1909,16 +1928,28 @@ static int skb_to_sg_fd(struct dpaa_priv
+@@ -1823,10 +1865,11 @@ free_buffers:
+ /* free all the SG entries */
+ for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
+ sg_addr = qm_sg_addr(&sgt[i]);
+- sg_vaddr = phys_to_virt(sg_addr);
+- skb_free_frag(sg_vaddr);
+ dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
+ if (dpaa_bp) {
++ sg_addr = dpaa_iova_to_phys(dpaa_bp->dev, sg_addr);
++ sg_vaddr = phys_to_virt(sg_addr);
++ skb_free_frag(sg_vaddr);
+ count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ (*count_ptr)--;
+ }
+@@ -1909,16 +1952,28 @@ static int skb_to_sg_fd(struct dpaa_priv
size_t frag_len;
void *sgt_buf;
*
* We must do this before dma_map_single(DMA_TO_DEVICE), because we may
* need to write into the skb.
-@@ -2036,6 +2067,122 @@ static inline int dpaa_xmit(struct dpaa_
+@@ -2036,12 +2091,129 @@ static inline int dpaa_xmit(struct dpaa_
return 0;
}
static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
-@@ -2069,19 +2216,32 @@ static int dpaa_start_xmit(struct sk_buf
+ bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
++ struct netdev_queue *txq;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
+ int offset = 0;
+@@ -2069,24 +2241,47 @@ static int dpaa_start_xmit(struct sk_buf
/* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES;
* make sure we don't feed FMan with more fragments than it supports.
*/
*/
- if (unlikely(nonlinear) && __skb_linearize(skb))
+ if (__skb_linearize(skb))
-+ goto enomem;
-+
+ goto enomem;
+
+- /* Finally, create a contig FD from this skb */
+ nonlinear = skb_is_nonlinear(skb);
+ }
+
+ if (unlikely(dpaa_errata_a010022)) {
+ skb = dpaa_errata_a010022_prevent(skb, priv);
+ if (!skb)
- goto enomem;
++ goto enomem;
+ nonlinear = skb_is_nonlinear(skb);
+ }
+#endif
-
-- /* Finally, create a contig FD from this skb */
++
+ if (nonlinear) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
err = skb_to_contig_fd(priv, skb, &fd, &offset);
}
if (unlikely(err < 0))
-@@ -2218,14 +2378,8 @@ static enum qman_cb_dqrr_result rx_error
+ goto skb_to_fd_failed;
+
++ txq = netdev_get_tx_queue(net_dev, queue_mapping);
++
++ /* LLTX requires to do our own update of trans_start */
++ txq->trans_start = jiffies;
++
++ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++ }
++
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+@@ -2218,14 +2413,8 @@ static enum qman_cb_dqrr_result rx_error
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
return qman_cb_dqrr_consume;
}
-@@ -2439,6 +2593,44 @@ static void dpaa_eth_napi_disable(struct
+@@ -2234,6 +2423,7 @@ static enum qman_cb_dqrr_result rx_defau
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+ {
++ struct skb_shared_hwtstamps *shhwtstamps;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ const struct qm_fd *fd = &dq->fd;
+@@ -2247,6 +2437,7 @@ static enum qman_cb_dqrr_result rx_defau
+ struct sk_buff *skb;
+ int *count_ptr;
+ void *vaddr;
++ u64 ns;
+
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+@@ -2289,12 +2480,12 @@ static enum qman_cb_dqrr_result rx_defau
+ if (!dpaa_bp)
+ return qman_cb_dqrr_consume;
+
+- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+-
+ /* prefetch the first 64 bytes of the frame or the SGT start */
+- vaddr = phys_to_virt(addr);
++ vaddr = phys_to_virt(dpaa_iova_to_phys(dpaa_bp->dev, addr));
+ prefetch(vaddr + qm_fd_get_offset(fd));
+
++ dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
++
+ /* The only FD types that we may receive are contig and S/G */
+ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
+
+@@ -2305,12 +2496,22 @@ static enum qman_cb_dqrr_result rx_defau
+ (*count_ptr)--;
+
+ if (likely(fd_format == qm_fd_contig))
+- skb = contig_fd_to_skb(priv, fd);
++ skb = contig_fd_to_skb(priv, fd, dpaa_bp, vaddr);
+ else
+- skb = sg_fd_to_skb(priv, fd);
++ skb = sg_fd_to_skb(priv, fd, dpaa_bp, vaddr);
+ if (!skb)
+ return qman_cb_dqrr_consume;
+
++ if (priv->rx_tstamp) {
++ shhwtstamps = skb_hwtstamps(skb);
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++
++ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
++ shhwtstamps->hwtstamp = ns_to_ktime(ns);
++ else
++ dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
++ }
++
+ skb->protocol = eth_type_trans(skb, net_dev);
+
+ if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
+@@ -2439,6 +2640,44 @@ static void dpaa_eth_napi_disable(struct
}
}
static int dpaa_open(struct net_device *net_dev)
{
struct mac_device *mac_dev;
-@@ -2449,12 +2641,9 @@ static int dpaa_open(struct net_device *
+@@ -2449,12 +2688,9 @@ static int dpaa_open(struct net_device *
mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
err = fman_port_enable(mac_dev->port[i]);
-@@ -2653,7 +2842,6 @@ static inline u16 dpaa_get_headroom(stru
+@@ -2495,11 +2731,58 @@ static int dpaa_eth_stop(struct net_devi
+ return err;
+ }
+
++static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct dpaa_priv *priv = netdev_priv(dev);
++ struct hwtstamp_config config;
++
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ /* Couldn't disable rx/tx timestamping separately.
++ * Do nothing here.
++ */
++ priv->tx_tstamp = false;
++ break;
++ case HWTSTAMP_TX_ON:
++ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
++ priv->tx_tstamp = true;
++ break;
++ default:
++ return -ERANGE;
++ }
++
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
++ /* Couldn't disable rx/tx timestamping separately.
++ * Do nothing here.
++ */
++ priv->rx_tstamp = false;
++ } else {
++ priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
++ priv->rx_tstamp = true;
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ }
++
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
++}
++
+ static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
+ {
+- if (!net_dev->phydev)
+- return -EINVAL;
+- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
++ int ret = -EINVAL;
++
++ if (cmd == SIOCGMIIREG) {
++ if (net_dev->phydev)
++ return phy_mii_ioctl(net_dev->phydev, rq, cmd);
++ }
++
++ if (cmd == SIOCSHWTSTAMP)
++ return dpaa_ts_ioctl(net_dev, rq, cmd);
++
++ return ret;
+ }
+
+ static const struct net_device_ops dpaa_ops = {
+@@ -2653,7 +2936,6 @@ static inline u16 dpaa_get_headroom(stru
static int dpaa_eth_probe(struct platform_device *pdev)
{
struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
struct net_device *net_dev = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
-@@ -2662,7 +2850,13 @@ static int dpaa_eth_probe(struct platfor
+@@ -2662,7 +2944,51 @@ static int dpaa_eth_probe(struct platfor
int err = 0, i, channel;
struct device *dev;
- dev = &pdev->dev;
++ err = bman_is_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
++ return -ENODEV;
++ }
++ err = qman_is_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
++ return -ENODEV;
++ }
++ err = bman_portals_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev,
++ "failing probe due to bman portals probe error\n");
++ return -ENODEV;
++ }
++ err = qman_portals_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev,
++ "failing probe due to qman portals probe error\n");
++ return -ENODEV;
++ }
++
++ mac_dev = dpaa_mac_dev_get(pdev);
++ if (IS_ERR(mac_dev)) {
++ dev_err(&pdev->dev, "dpaa_mac_dev_get() failed\n");
++ err = PTR_ERR(mac_dev);
++ goto probe_err;
++ }
++
+ /* device used for DMA mapping */
-+ dev = pdev->dev.parent;
++ dev = fman_port_get_device(mac_dev->port[RX]);
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
/* Allocate this early, so we can store relevant information in
* the private area
-@@ -2670,7 +2864,7 @@ static int dpaa_eth_probe(struct platfor
+@@ -2670,7 +2996,7 @@ static int dpaa_eth_probe(struct platfor
net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
}
/* Do this here, so we can be verbose early */
-@@ -2686,7 +2880,7 @@ static int dpaa_eth_probe(struct platfor
- if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
- err = PTR_ERR(mac_dev);
-- goto mac_probe_failed;
-+ goto free_netdev;
- }
+@@ -2682,13 +3008,6 @@ static int dpaa_eth_probe(struct platfor
+
+ priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+- mac_dev = dpaa_mac_dev_get(pdev);
+- if (IS_ERR(mac_dev)) {
+- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+- err = PTR_ERR(mac_dev);
+- goto mac_probe_failed;
+- }
+-
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
-@@ -2704,21 +2898,13 @@ static int dpaa_eth_probe(struct platfor
+ * we choose conservatively and let the user explicitly set a higher
+ * MTU via ifconfig. Otherwise, the user may end up with different MTUs
+@@ -2704,21 +3023,13 @@ static int dpaa_eth_probe(struct platfor
priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* the raw size of the buffers used for reception */
dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
/* avoid runtime computations by keeping the usable size here */
-@@ -2726,11 +2912,8 @@ static int dpaa_eth_probe(struct platfor
+@@ -2726,11 +3037,8 @@ static int dpaa_eth_probe(struct platfor
dpaa_bps[i]->dev = dev;
err = dpaa_bp_alloc_pool(dpaa_bps[i]);
priv->dpaa_bps[i] = dpaa_bps[i];
}
-@@ -2741,7 +2924,7 @@ static int dpaa_eth_probe(struct platfor
+@@ -2741,7 +3049,7 @@ static int dpaa_eth_probe(struct platfor
err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
if (err < 0) {
dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
}
priv->mac_dev = mac_dev;
-@@ -2750,12 +2933,12 @@ static int dpaa_eth_probe(struct platfor
+@@ -2750,12 +3058,12 @@ static int dpaa_eth_probe(struct platfor
if (channel < 0) {
dev_err(dev, "dpaa_get_channel() failed\n");
err = channel;
* and add this pool channel to each's dequeue mask.
*/
dpaa_eth_add_channel(priv->channel);
-@@ -2770,20 +2953,20 @@ static int dpaa_eth_probe(struct platfor
+@@ -2770,20 +3078,20 @@ static int dpaa_eth_probe(struct platfor
err = dpaa_eth_cgr_init(priv);
if (err < 0) {
dev_err(dev, "Error initializing CGR\n");
}
priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
-@@ -2793,7 +2976,7 @@ static int dpaa_eth_probe(struct platfor
+@@ -2793,7 +3101,7 @@ static int dpaa_eth_probe(struct platfor
err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
/* Rx traffic distribution based on keygen hashing defaults to on */
priv->keygen_in_use = true;
-@@ -2802,11 +2985,7 @@ static int dpaa_eth_probe(struct platfor
+@@ -2802,11 +3110,7 @@ static int dpaa_eth_probe(struct platfor
if (!priv->percpu_priv) {
dev_err(dev, "devm_alloc_percpu() failed\n");
err = -ENOMEM;
}
priv->num_tc = 1;
-@@ -2815,11 +2994,11 @@ static int dpaa_eth_probe(struct platfor
+@@ -2815,11 +3119,11 @@ static int dpaa_eth_probe(struct platfor
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
dpaa_eth_sysfs_init(&net_dev->dev);
-@@ -2828,32 +3007,21 @@ static int dpaa_eth_probe(struct platfor
+@@ -2828,32 +3132,21 @@ static int dpaa_eth_probe(struct platfor
return 0;
-fq_probe_failed:
-dev_mask_failed:
-mac_probe_failed:
-+free_netdev:
dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
-alloc_etherdev_mq_failed:
- if (atomic_read(&dpaa_bps[i]->refs) == 0)
- devm_kfree(dev, dpaa_bps[i]);
- }
++probe_err:
+
return err;
}
-@@ -2890,6 +3058,23 @@ static int dpaa_remove(struct platform_d
+@@ -2890,6 +3183,23 @@ static int dpaa_remove(struct platform_d
return err;
}
static const struct platform_device_id dpaa_devtype[] = {
{
.name = "dpaa-ethernet",
-@@ -2914,6 +3099,10 @@ static int __init dpaa_load(void)
+@@ -2914,6 +3224,10 @@ static int __init dpaa_load(void)
pr_debug("FSL DPAA Ethernet driver\n");
/* initialize dpaa_eth mirror values */
dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
dpaa_max_frm = fman_get_max_frm();
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+@@ -182,6 +182,9 @@ struct dpaa_priv {
+
+ struct dpaa_buffer_layout buf_layout[2];
+ u16 rx_headroom;
++
++ bool tx_tstamp; /* Tx timestamping enabled */
++ bool rx_tstamp; /* Rx timestamping enabled */
+ };
+
+ /* from dpaa_ethtool.c */
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
-@@ -344,7 +344,7 @@ static void dpaa_get_ethtool_stats(struc
+@@ -32,6 +32,9 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/string.h>
++#include <linux/of_platform.h>
++#include <linux/net_tstamp.h>
++#include <linux/fsl/ptp_qoriq.h>
+
+ #include "dpaa_eth.h"
+ #include "mac.h"
+@@ -344,7 +347,7 @@ static void dpaa_get_ethtool_stats(struc
/* gather congestion related counters */
cg_num = 0;
cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
cg_num = priv->cgr_data.cgr_congested_count;
+@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_dev
+ return ret;
+ }
+
++static int dpaa_get_ts_info(struct net_device *net_dev,
++ struct ethtool_ts_info *info)
++{
++ struct device *dev = net_dev->dev.parent;
++ struct device_node *mac_node = dev->of_node;
++ struct device_node *fman_node = NULL, *ptp_node = NULL;
++ struct platform_device *ptp_dev = NULL;
++ struct qoriq_ptp *ptp = NULL;
++
++ info->phc_index = -1;
++
++ fman_node = of_get_parent(mac_node);
++ if (fman_node)
++ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
++
++ if (ptp_node)
++ ptp_dev = of_find_device_by_node(ptp_node);
++
++ if (ptp_dev)
++ ptp = platform_get_drvdata(ptp_dev);
++
++ if (ptp)
++ info->phc_index = ptp->phc_index;
++
++ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
++ (1 << HWTSTAMP_TX_ON);
++ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_ALL);
++
++ return 0;
++}
++
+ const struct ethtool_ops dpaa_ethtool_ops = {
+ .get_drvinfo = dpaa_get_drvinfo,
+ .get_msglevel = dpaa_get_msglevel,
+@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_op
+ .set_link_ksettings = dpaa_set_link_ksettings,
+ .get_rxnfc = dpaa_get_rxnfc,
+ .set_rxnfc = dpaa_set_rxnfc,
++ .get_ts_info = dpaa_get_ts_info,
+ };
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -2,7 +2,6 @@ config FSL_FMAN
+fsl_dpaa_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
+fsl_dpaa_fman_port-objs := fman_port.o
+fsl_dpaa_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ b/drivers/net/ethernet/freescale/fman/fman.c
+@@ -629,6 +629,7 @@ static void set_port_order_restoration(s
+ iowrite32be(tmp, &fpm_rg->fmfp_prc);
+ }
+
++#ifdef CONFIG_PPC
+ static void set_port_liodn(struct fman *fman, u8 port_id,
+ u32 liodn_base, u32 liodn_ofst)
+ {
+@@ -646,6 +647,27 @@ static void set_port_liodn(struct fman *
+ iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+ }
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++static void save_restore_port_icids(struct fman *fman, bool save)
++{
++ int port_idxes[] = {
++ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc,
++ 0xd, 0xe, 0xf, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x10, 0x11, 0x30, 0x31
++ };
++ int idx, i;
++
++ for (i = 0; i < ARRAY_SIZE(port_idxes); i++) {
++ idx = port_idxes[i];
++ if (save)
++ fman->sp_icids[idx] =
++ ioread32be(&fman->bmi_regs->fmbm_spliodn[idx]);
++ else
++ iowrite32be(fman->sp_icids[idx],
++ &fman->bmi_regs->fmbm_spliodn[idx]);
++ }
++}
++#endif
+
+ static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+ {
+@@ -1914,7 +1936,10 @@ _return:
+ static int fman_init(struct fman *fman)
+ {
+ struct fman_cfg *cfg = NULL;
+- int err = 0, i, count;
++ int err = 0, count;
++#ifdef CONFIG_PPC
++ int i;
++#endif
+
+ if (is_init_done(fman->cfg))
+ return -EINVAL;
+@@ -1934,6 +1959,7 @@ static int fman_init(struct fman *fman)
+ memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+ fman->state->fm_port_num_of_cg);
+
++#ifdef CONFIG_PPC
+ /* Save LIODN info before FMan reset
+ * Skipping non-existent port 0 (i = 1)
+ */
+@@ -1953,6 +1979,9 @@ static int fman_init(struct fman *fman)
+ }
+ fman->liodn_base[i] = liodn_base;
+ }
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ save_restore_port_icids(fman, true);
++#endif
+
+ err = fman_reset(fman);
+ if (err)
+@@ -2181,8 +2210,12 @@ int fman_set_port_params(struct fman *fm
+ if (err)
+ goto return_err;
+
++#ifdef CONFIG_PPC
+ set_port_liodn(fman, port_id, fman->liodn_base[port_id],
+ fman->liodn_offset[port_id]);
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ save_restore_port_icids(fman, false);
++#endif
+
+ if (fman->state->rev_info.major < 6)
+ set_port_order_restoration(fman->fpm_regs, port_id);
+@@ -2800,7 +2833,8 @@ static struct fman *read_dts_node(struct
+
+ of_node_put(muram_node);
+
+- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
++ err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
++ "fman", fman);
+ if (err < 0) {
+ dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+ __func__, irq, err);
+--- a/drivers/net/ethernet/freescale/fman/fman.h
++++ b/drivers/net/ethernet/freescale/fman/fman.h
+@@ -41,6 +41,7 @@
+ /* Frame queue Context Override */
+ #define FM_FD_CMD_FCO 0x80000000
+ #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
++#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */
+ #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
+
+ /* TX-Port: Unsupported Format */
+@@ -345,8 +346,12 @@ struct fman {
+ unsigned long fifo_offset;
+ size_t fifo_size;
+
++#ifdef CONFIG_PPC
+ u32 liodn_base[64];
+ u32 liodn_offset[64];
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++ u32 sp_icids[64];
++#endif
+
+ struct fman_dts_params dts_params;
+ };
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
-@@ -1117,6 +1117,25 @@ int dtsec_add_hash_mac_address(struct fm
+@@ -123,11 +123,13 @@
+ #define DTSEC_ECNTRL_R100M 0x00000008
+ #define DTSEC_ECNTRL_QSGMIIM 0x00000001
+
++#define TCTRL_TTSE 0x00000040
+ #define TCTRL_GTS 0x00000020
+
+ #define RCTRL_PAL_MASK 0x001f0000
+ #define RCTRL_PAL_SHIFT 16
+ #define RCTRL_GHTX 0x00000400
++#define RCTRL_RTSE 0x00000040
+ #define RCTRL_GRS 0x00000020
+ #define RCTRL_MPROM 0x00000008
+ #define RCTRL_RSF 0x00000004
+@@ -1116,6 +1118,50 @@ int dtsec_add_hash_mac_address(struct fm
+
return 0;
}
-
++
+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+{
+ u32 tmp;
+ return 0;
+}
+
++int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
++{
++ struct dtsec_regs __iomem *regs = dtsec->regs;
++ u32 rctrl, tctrl;
++
++ if (!is_init_done(dtsec->dtsec_drv_param))
++ return -EINVAL;
++
++ rctrl = ioread32be(&regs->rctrl);
++ tctrl = ioread32be(&regs->tctrl);
++
++ if (enable) {
++ rctrl |= RCTRL_RTSE;
++ tctrl |= TCTRL_TTSE;
++ } else {
++ rctrl &= ~RCTRL_RTSE;
++ tctrl &= ~TCTRL_TTSE;
++ }
++
++ iowrite32be(rctrl, &regs->rctrl);
++ iowrite32be(tctrl, &regs->tctrl);
++
++ return 0;
++}
+
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
-@@ -55,5 +55,6 @@ int dtsec_set_exception(struct fman_mac
+@@ -55,5 +55,7 @@ int dtsec_set_exception(struct fman_mac
int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
++int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
};
static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
-@@ -940,6 +941,29 @@ int memac_add_hash_mac_address(struct fm
+@@ -940,6 +941,34 @@ int memac_add_hash_mac_address(struct fm
return 0;
}
+
+ return 0;
+}
++
++int memac_set_tstamp(struct fman_mac *memac, bool enable)
++{
++ return 0; /* Always enabled. */
++}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
-@@ -963,8 +987,12 @@ int memac_del_hash_mac_address(struct fm
+@@ -963,8 +992,12 @@ int memac_del_hash_mac_address(struct fm
break;
}
}
}
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
-@@ -57,5 +57,6 @@ int memac_set_exception(struct fman_mac
+@@ -57,5 +57,7 @@ int memac_set_exception(struct fman_mac
enum fman_mac_exceptions exception, bool enable);
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+int memac_set_allmulti(struct fman_mac *memac, bool enable);
++int memac_set_tstamp(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
default:
set_dflt_cfg(port, params);
}
+@@ -1728,6 +1730,20 @@ u32 fman_port_get_qman_channel_id(struct
+ }
+ EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+
++/**
++ * fman_port_get_device
++ * port: Pointer to the FMan port device
++ *
++ * Get the 'struct device' associated to the specified FMan port device
++ *
++ * Return: pointer to associated 'struct device'
++ */
++struct device *fman_port_get_device(struct fman_port *port)
++{
++ return port->dev;
++}
++EXPORT_SYMBOL(fman_port_get_device);
++
+ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
+ {
+ if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
+@@ -1739,6 +1755,18 @@ int fman_port_get_hash_result_offset(str
+ }
+ EXPORT_SYMBOL(fman_port_get_hash_result_offset);
+
++int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
++{
++ if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
++ return -EINVAL;
++
++ *tstamp = be64_to_cpu(*(__be64 *)(data +
++ port->buffer_offsets.time_stamp_offset));
++
++ return 0;
++}
++EXPORT_SYMBOL(fman_port_get_tstamp);
++
+ static int fman_port_probe(struct platform_device *of_dev)
+ {
+ struct fman_port *port;
+--- a/drivers/net/ethernet/freescale/fman/fman_port.h
++++ b/drivers/net/ethernet/freescale/fman/fman_port.h
+@@ -153,6 +153,10 @@ u32 fman_port_get_qman_channel_id(struct
+
+ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
+
++int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
++
+ struct fman_port *fman_port_bind(struct device *dev);
+
++struct device *fman_port_get_device(struct fman_port *port);
++
+ #endif /* __FMAN_PORT_H */
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
-@@ -217,6 +217,7 @@ struct fman_mac {
+@@ -44,6 +44,7 @@
+ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
+
+ /* Command and Configuration Register (COMMAND_CONFIG) */
++#define CMD_CFG_EN_TIMESTAMP 0x00100000
+ #define CMD_CFG_NO_LEN_CHK 0x00020000
+ #define CMD_CFG_PAUSE_IGNORE 0x00000100
+ #define CMF_CFG_CRC_FWD 0x00000040
+@@ -217,6 +218,7 @@ struct fman_mac {
struct tgec_cfg *cfg;
void *fm;
struct fman_rev_info fm_rev_info;
};
static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
-@@ -564,6 +565,29 @@ int tgec_add_hash_mac_address(struct fma
+@@ -564,6 +566,49 @@ int tgec_add_hash_mac_address(struct fma
return 0;
}
+
+ return 0;
+}
++
++int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
++{
++ struct tgec_regs __iomem *regs = tgec->regs;
++ u32 tmp;
++
++ if (!is_init_done(tgec->cfg))
++ return -EINVAL;
++
++	tmp = ioread32be(&regs->command_config);
++
++ if (enable)
++ tmp |= CMD_CFG_EN_TIMESTAMP;
++ else
++ tmp &= ~CMD_CFG_EN_TIMESTAMP;
++
++	iowrite32be(tmp, &regs->command_config);
++
++ return 0;
++}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
-@@ -591,9 +615,12 @@ int tgec_del_hash_mac_address(struct fma
+@@ -591,9 +636,12 @@ int tgec_del_hash_mac_address(struct fma
break;
}
}
}
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
-@@ -51,5 +51,6 @@ int tgec_set_exception(struct fman_mac *
+@@ -51,5 +51,7 @@ int tgec_set_exception(struct fman_mac *
int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
+int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
++int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
--- a/drivers/net/ethernet/freescale/fman/mac.c
mac_dev->init = dtsec_initialization;
mac_dev->set_promisc = dtsec_set_promiscuous;
mac_dev->change_addr = dtsec_modify_mac_address;
-@@ -525,17 +470,17 @@ static void setup_dtsec(struct mac_devic
+@@ -525,17 +470,18 @@ static void setup_dtsec(struct mac_devic
mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
++ mac_dev->set_tstamp = dtsec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
mac_dev->init = tgec_initialization;
mac_dev->set_promisc = tgec_set_promiscuous;
mac_dev->change_addr = tgec_modify_mac_address;
-@@ -544,17 +489,17 @@ static void setup_tgec(struct mac_device
+@@ -544,17 +490,18 @@ static void setup_tgec(struct mac_device
mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
++ mac_dev->set_tstamp = tgec_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
mac_dev->init = memac_initialization;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
-@@ -563,10 +508,11 @@ static void setup_memac(struct mac_devic
+@@ -563,10 +510,12 @@ static void setup_memac(struct mac_devic
mac_dev->set_tx_pause = memac_set_tx_pause_frames;
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
++ mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
mac_dev->priv->enable = memac_enable;
mac_dev->priv->disable = memac_disable;
}
-@@ -599,8 +545,7 @@ static const u16 phy2speed[] = {
+@@ -599,8 +548,7 @@ static const u16 phy2speed[] = {
};
static struct platform_device *dpaa_eth_add_device(int fman_id,
{
struct platform_device *pdev;
struct dpaa_eth_data data;
-@@ -613,19 +558,15 @@ static struct platform_device *dpaa_eth_
+@@ -613,19 +561,15 @@ static struct platform_device *dpaa_eth_
data.mac_dev = mac_dev;
data.mac_hw_id = priv->cell_index;
data.fman_hw_id = fman_id;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
-@@ -676,7 +617,6 @@ static int mac_probe(struct platform_dev
+@@ -676,7 +620,6 @@ static int mac_probe(struct platform_dev
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
if (!mac_dev) {
err = -ENOMEM;
goto _return;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-@@ -706,9 +646,6 @@ static int mac_probe(struct platform_dev
+@@ -706,9 +649,6 @@ static int mac_probe(struct platform_dev
goto _return;
}
INIT_LIST_HEAD(&priv->mc_addr_list);
/* Get the FM node */
-@@ -717,7 +654,7 @@ static int mac_probe(struct platform_dev
+@@ -717,7 +657,7 @@ static int mac_probe(struct platform_dev
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
err = -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
-@@ -751,7 +688,7 @@ static int mac_probe(struct platform_dev
+@@ -751,7 +691,7 @@ static int mac_probe(struct platform_dev
if (err < 0) {
dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
mac_node, err);
}
mac_dev->res = __devm_request_region(dev,
-@@ -761,7 +698,7 @@ static int mac_probe(struct platform_dev
+@@ -761,7 +701,7 @@ static int mac_probe(struct platform_dev
if (!mac_dev->res) {
dev_err(dev, "__devm_request_mem_region(mac) failed\n");
err = -EBUSY;
}
priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
-@@ -769,16 +706,12 @@ static int mac_probe(struct platform_dev
+@@ -769,16 +709,12 @@ static int mac_probe(struct platform_dev
if (!priv->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
}
/* Get the cell-index */
-@@ -786,7 +719,7 @@ static int mac_probe(struct platform_dev
+@@ -786,7 +722,7 @@ static int mac_probe(struct platform_dev
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
err = -EINVAL;
}
priv->cell_index = (u8)val;
-@@ -795,7 +728,7 @@ static int mac_probe(struct platform_dev
+@@ -795,7 +731,7 @@ static int mac_probe(struct platform_dev
if (!mac_addr) {
dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
err = -EINVAL;
}
memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
-@@ -805,14 +738,14 @@ static int mac_probe(struct platform_dev
+@@ -805,14 +741,14 @@ static int mac_probe(struct platform_dev
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
err = nph;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
-@@ -851,13 +784,13 @@ static int mac_probe(struct platform_dev
+@@ -851,13 +787,13 @@ static int mac_probe(struct platform_dev
mac_node);
phy_if = PHY_INTERFACE_MODE_SGMII;
}
mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Half);
-@@ -866,30 +799,31 @@ static int mac_probe(struct platform_dev
+@@ -866,30 +802,31 @@ static int mac_probe(struct platform_dev
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
}
priv->fixed_link->link = phy->link;
-@@ -904,8 +838,8 @@ static int mac_probe(struct platform_dev
+@@ -904,8 +841,8 @@ static int mac_probe(struct platform_dev
err = mac_dev->init(mac_dev);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
}
/* pause frame autonegotiation enabled */
-@@ -926,7 +860,7 @@ static int mac_probe(struct platform_dev
+@@ -926,7 +863,7 @@ static int mac_probe(struct platform_dev
mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
if (IS_ERR(priv->eth_dev)) {
dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
priv->cell_index);
-@@ -937,9 +871,8 @@ static int mac_probe(struct platform_dev
+@@ -937,9 +874,8 @@ static int mac_probe(struct platform_dev
_return_of_node_put:
of_node_put(dev_node);
bool autoneg_pause;
bool rx_pause_req;
-@@ -57,14 +59,15 @@ struct mac_device {
+@@ -57,14 +59,16 @@ struct mac_device {
bool rx_pause_active;
bool tx_pause_active;
bool promisc;
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
++ int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
-@@ -82,7 +85,6 @@ struct mac_device {
+@@ -82,7 +86,6 @@ struct mac_device {
};
struct dpaa_eth_data {
int fman_hw_id;
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
-@@ -0,0 +1,195 @@
+@@ -0,0 +1,184 @@
+menuconfig FSL_SDK_DPAA_ETH
+ tristate "DPAA Ethernet"
+ depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH
+ hex "CEETM egress congestion threshold on 1G ports"
+ depends on FSL_DPAA_CEETM
+ range 0x1000 0x10000000
-+ default "0x000a0000"
++ default "0x00005000"
+ help
+ The size in bytes of the CEETM egress Class Congestion State threshold on 1G ports.
+ The threshold needs to be configured keeping in mind the following factors:
+ hex "CEETM egress congestion threshold on 10G ports"
+ depends on FSL_DPAA_CEETM
+ range 0x1000 0x20000000
-+ default "0x00640000"
++ default "0x00032000"
+ help
+ The size in bytes of the CEETM egress Class Congestion State threshold on 10G ports.
+ See FSL_DPAA_CEETM_CCS_THRESHOLD_1G for details.
+ help
+ Enable IEEE1588 support code.
+
-+config FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+ bool "Use driver's Tx queue selection mechanism"
-+ default y
-+ depends on FSL_SDK_DPAA_ETH
-+ help
-+ The DPAA-Ethernet driver defines a ndo_select_queue() callback for optimal selection
-+ of the egress FQ. That will override the XPS support for this netdevice.
-+ If for whatever reason you want to be in control of the egress FQ-to-CPU selection and mapping,
-+ or simply don't want to use the driver's ndo_select_queue() callback, then unselect this
-+ and use the standard XPS support instead.
-+
+config FSL_DPAA_ETH_MAX_BUF_COUNT
+ int "Maximum nuber of buffers in private bpool"
+ depends on FSL_SDK_DPAA_ETH
+
+config FSL_DPAA_DBG_LOOP
+ bool "DPAA Ethernet Debug loopback"
-+ depends on FSL_DPAA_ETH_DEBUGFS && FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++ depends on FSL_DPAA_ETH_DEBUGFS
+ default n
+ help
+ This option allows to divert all received traffic on a certain interface A towards a
+endif # FSL_SDK_DPAA_ETH
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/Makefile
-@@ -0,0 +1,46 @@
+@@ -0,0 +1,45 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+ccflags-y += -I$(NET_DPA)
+
+obj-$(CONFIG_FSL_SDK_DPAA_ETH) += fsl_mac.o fsl_dpa.o
-+obj-$(CONFIG_PTP_1588_CLOCK_DPAA) += dpaa_ptp.o
+
+fsl_dpa-objs += dpaa_ethtool.o dpaa_eth_sysfs.o dpaa_eth.o dpaa_eth_sg.o dpaa_eth_common.o
+ifeq ($(CONFIG_FSL_DPAA_DBG_LOOP),y)
+ .ndo_get_stats64 = dpa_get_stats64,
+ .ndo_set_mac_address = dpa_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++#ifdef CONFIG_FMAN_PFC
+ .ndo_select_queue = dpa_select_queue,
+#endif
+ .ndo_set_rx_mode = dpa_set_rx_mode,
+module_exit(dpa_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h
-@@ -0,0 +1,691 @@
+@@ -0,0 +1,674 @@
+/* Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ }
+}
+
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
-+/* Use in lieu of skb_get_queue_mapping() */
+#ifdef CONFIG_FMAN_PFC
++/* Use in lieu of skb_get_queue_mapping() */
+#define dpa_get_queue_mapping(skb) \
+ (((skb)->priority < CONFIG_FMAN_PFC_COS_COUNT) ? \
+ ((skb)->priority * dpa_num_cpus + smp_processor_id()) : \
+ ((CONFIG_FMAN_PFC_COS_COUNT - 1) * \
+ dpa_num_cpus + smp_processor_id()));
-+
+#else
-+#define dpa_get_queue_mapping(skb) \
-+ raw_smp_processor_id()
-+#endif
-+#else
-+/* Use the queue selected by XPS */
-+#define dpa_get_queue_mapping(skb) \
-+ skb_get_queue_mapping(skb)
-+#endif
-+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+struct ptp_priv_s {
-+ struct device_node *node;
-+ struct platform_device *of_dev;
-+ struct ptp_clock *clock;
-+ struct mac_device *mac_dev;
-+};
-+extern struct ptp_priv_s ptp_priv;
++#define dpa_get_queue_mapping(skb) skb_get_queue_mapping(skb)
+#endif
+
+static inline void _dpa_bp_free_pf(void *addr)
+
+/* LS1043A SoC has a HW issue regarding FMan DMA transactions; The issue
+ * manifests itself at high traffic rates when frames cross 4K memory
-+ * boundaries or when they are not aligned to 16 bytes; For the moment, we
-+ * use a SW workaround that realigns frames to 256 bytes. Scatter/Gather
-+ * frames aren't supported on egress.
++ * boundaries, when they are not aligned to 16 bytes or when they have
++ * Scatter/Gather fragments; For the moment, we use a SW workaround that
++ * realigns frames to 256 bytes. Scatter/Gather frames aren't supported
++ * on egress.
+ */
+
+#ifndef CONFIG_PPC
+#endif /* __DPAA_ETH_BASE_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
-@@ -0,0 +1,2099 @@
+@@ -0,0 +1,2076 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+
+ /* Validate inputs */
+ if (sch->parent != TC_H_ROOT) {
-+ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
-+ tcf_block_put(priv->block);
-+ qdisc_class_hash_destroy(&priv->clhash);
++ pr_err("CEETM: a root ceetm qdisc must be root\n");
+ return -EINVAL;
+ }
+
+ if (!mac_dev) {
+ pr_err("CEETM: the interface is lacking a mac\n");
-+ err = -EINVAL;
-+ goto err_init_root;
++ return -EINVAL;
+ }
+
+ /* Pre-allocate underlying pfifo qdiscs.
+ sizeof(priv->root.qdiscs[0]),
+ GFP_KERNEL);
+ if (!priv->root.qdiscs) {
-+ err = -ENOMEM;
-+ goto err_init_root;
++ return -ENOMEM;
+ }
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
+ parent_id);
-+ if (!qdisc) {
-+ err = -ENOMEM;
-+ goto err_init_root;
-+ }
++ if (!qdisc)
++ return -ENOMEM;
+
+ priv->root.qdiscs[i] = qdisc;
+ qdisc->flags |= TCQ_F_ONETXQUEUE;
+ if (!priv->root.qstats) {
+ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_root;
++ return -ENOMEM;
+ }
+
+ priv->shaped = qopt->shaped;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to claim the SP\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+
+ priv->root.sp = sp;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to claim the LNI\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+
+ priv->root.lni = lni;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to link the SP and LNI\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+
+ lni->sp = sp;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+
+ bps = priv->root.rate << 3; /* Bps -> bps */
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+
+ bps = priv->root.ceil << 3; /* Bps -> bps */
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the LNI shaper\n",
+ __func__);
-+ goto err_init_root;
++ return err;
+ }
+ }
+
+
+ dpa_enable_ceetm(dev);
+ return 0;
-+
-+err_init_root:
-+ ceetm_destroy(sch);
-+ return err;
+}
+
+/* Configure a prio ceetm qdisc */
+
+ if (sch->parent == TC_H_ROOT) {
+ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ /* Obtain the parent root ceetm_class */
+
+ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
+ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
-+ err = -EINVAL;
-+ goto err_init_prio;
++ return -EINVAL;
+ }
+
+ priv->prio.parent = parent_cl;
+ if (!child_cl) {
+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_prio;
++ return -ENOMEM;
+ }
+
+ child_cl->prio.cstats = alloc_percpu(struct ceetm_class_stats);
+
+err_init_prio_cls:
+ ceetm_cls_destroy(sch, child_cl);
-+err_init_prio:
-+ ceetm_destroy(sch);
++ /* Note: ceetm_destroy() will be called by our caller */
+ return err;
+}
+
+ /* Validate inputs */
+ if (sch->parent == TC_H_ROOT) {
+ pr_err("CEETM: a wbfs ceetm qdiscs can not be root\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ /* Obtain the parent prio ceetm qdisc */
+ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+ if (strcmp(parent_qdisc->ops->id, ceetm_qdisc_ops.id)) {
+ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ /* Obtain the parent prio ceetm class */
+
+ if (!parent_cl || parent_cl->type != CEETM_PRIO) {
+ pr_err("CEETM: a wbfs ceetm qdiscs can be added only under a prio ceetm class\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ if (!qopt->qcount || !qopt->qweight[0]) {
+ pr_err("CEETM: qcount and qweight are mandatory for a wbfs ceetm qdisc\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ priv->shaped = parent_cl->shaped;
+
+ if (!priv->shaped && (qopt->cr || qopt->er)) {
+ pr_err("CEETM: CR/ER can be enabled only for shaped wbfs ceetm qdiscs\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ if (priv->shaped && !(qopt->cr || qopt->er)) {
+ pr_err("CEETM: either CR or ER must be enabled for shaped wbfs ceetm qdiscs\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ /* Obtain the parent root ceetm class */
+ if ((root_cl->root.wbfs_grp_a && root_cl->root.wbfs_grp_b) ||
+ root_cl->root.wbfs_grp_large) {
+ pr_err("CEETM: no more wbfs classes are available\n");
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ if ((root_cl->root.wbfs_grp_a || root_cl->root.wbfs_grp_b) &&
+ qopt->qcount == CEETM_MAX_WBFS_QCOUNT) {
+ pr_err("CEETM: only %d wbfs classes are available\n",
+ CEETM_MIN_WBFS_QCOUNT);
-+ err = -EINVAL;
-+ goto err_init_wbfs;
++ return -EINVAL;
+ }
+
+ priv->wbfs.parent = parent_cl;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
+ __func__);
-+ goto err_init_wbfs;
++ return err;
+ }
+
+ small_group = true;
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
+ __func__);
-+ goto err_init_wbfs;
++ return err;
+ }
+
+ small_group = true;
+ err = qman_ceetm_channel_set_group(priv->wbfs.ch, small_group, prio_a,
+ prio_b);
+ if (err)
-+ goto err_init_wbfs;
++ return err;
+
+ if (priv->shaped) {
+ err = qman_ceetm_channel_set_group_cr_eligibility(priv->wbfs.ch,
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to set group CR eligibility\n",
+ __func__);
-+ goto err_init_wbfs;
++ return err;
+ }
+
+ err = qman_ceetm_channel_set_group_er_eligibility(priv->wbfs.ch,
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to set group ER eligibility\n",
+ __func__);
-+ goto err_init_wbfs;
++ return err;
+ }
+ }
+
+ if (!child_cl) {
+ pr_err(KBUILD_BASENAME " : %s : kzalloc() failed\n",
+ __func__);
-+ err = -ENOMEM;
-+ goto err_init_wbfs;
++ return -ENOMEM;
+ }
+
+ child_cl->wbfs.cstats = alloc_percpu(struct ceetm_class_stats);
+
+err_init_wbfs_cls:
+ ceetm_cls_destroy(sch, child_cl);
-+err_init_wbfs:
-+ ceetm_destroy(sch);
++ /* Note: ceetm_destroy() will be called by our caller */
+ return err;
+}
+
+ break;
+ default:
+ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
-+ ceetm_destroy(sch);
++ /* Note: ceetm_destroy() will be called by our caller */
+ ret = -EINVAL;
+ }
+
+ }
+
+ if (!cl && priv->type != CEETM_ROOT) {
-+ pr_err("CEETM: only root ceetm classes can be attached to the root ceetm qdisc\n");
++ pr_err("CEETM: root ceetm classes can be attached to the root ceetm qdisc only\n");
+ return -EINVAL;
+ }
+
+
+int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
-+ const int queue_mapping = dpa_get_queue_mapping(skb);
++ int queue_mapping = dpa_get_queue_mapping(skb);
+ struct Qdisc *sch = net_dev->qdisc;
+ struct ceetm_class_stats *cstats;
+ struct ceetm_qdisc_stats *qstats;
+ goto drop;
+ }
+
++ if (unlikely(queue_mapping >= DPAA_ETH_TX_QUEUES))
++ queue_mapping = queue_mapping % DPAA_ETH_TX_QUEUES;
++
+ priv_dpa = netdev_priv(net_dev);
+ conf_fq = priv_dpa->conf_fqs[queue_mapping];
+
+#endif
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c
-@@ -0,0 +1,1776 @@
+@@ -0,0 +1,1745 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+/* Size in bytes of the FQ taildrop threshold */
+#define DPA_FQ_TD 0x200000
+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+struct ptp_priv_s ptp_priv;
-+#endif
-+
+static struct dpa_bp *dpa_bp_array[64];
+
+int dpa_max_frm;
+u64 dpa_get_timestamp_ns(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ const void *data)
+{
-+ u64 *ts, ns;
++ u64 *ts;
+
+ ts = fm_port_get_buffer_time_stamp(priv->mac_dev->port_dev[rx_tx],
+ data);
+
+ be64_to_cpus(ts);
+
-+ /* multiple DPA_PTP_NOMINAL_FREQ_PERIOD_NS for case of non power of 2 */
-+ ns = *ts << DPA_PTP_NOMINAL_FREQ_PERIOD_SHIFT;
-+
-+ return ns;
++ return *ts;
+}
+
+int dpa_get_ts(const struct dpa_priv_s *priv, enum port_type rx_tx,
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ struct dpa_priv_s *priv = netdev_priv(dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(get_fm_handle(dev));
+ if (mac_dev->ptp_enable)
+ mac_dev->ptp_enable(mac_dev->get_mac_handle(mac_dev));
+
+ }
+#endif
+
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
-+ if ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) ||
-+ ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) &&
-+ (mac_dev->speed == SPEED_1000))) {
-+ ptp_priv.node = of_parse_phandle(mac_node, "ptp-timer", 0);
-+ if (ptp_priv.node) {
-+ ptp_priv.of_dev = of_find_device_by_node(ptp_priv.node);
-+ if (unlikely(ptp_priv.of_dev == NULL)) {
-+ dev_err(dpa_dev,
-+ "Cannot find device represented by timer_node\n");
-+ of_node_put(ptp_priv.node);
-+ return ERR_PTR(-EINVAL);
-+ }
-+ ptp_priv.mac_dev = mac_dev;
-+ }
-+ }
-+#endif
+ return mac_dev;
+}
+EXPORT_SYMBOL(dpa_mac_probe);
+ return false;
+}
+
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++#ifdef CONFIG_FMAN_PFC
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ return dpa_get_queue_mapping(skb);
+}
-+EXPORT_SYMBOL(dpa_select_queue);
+#endif
+
+struct dpa_fq *dpa_fq_alloc(struct device *dev,
+ memset(&initfq, 0, sizeof(initfq));
+
+ initfq.we_mask = QM_INITFQ_WE_FQCTRL;
-+ /* FIXME: why would we want to keep an empty FQ in cache? */
-+ initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+
+ /* Try to reduce the number of portal interrupts for
+ * Tx Confirmation FQs.
+void dpa_bpid2pool_map(int bpid, struct dpa_bp *dpa_bp);
+bool dpa_bpid2pool_use(int bpid);
+void dpa_bp_drain(struct dpa_bp *bp);
-+#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
++#ifdef CONFIG_FMAN_PFC
+u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+#endif
+module_exit(dpa_proxy_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
-@@ -0,0 +1,1201 @@
+@@ -0,0 +1,1195 @@
+/* Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ return 0; /* loop disabled by default */
+
+ skb_push(skb, ETH_HLEN); /* compensate for eth_type_trans */
++ /* Save the current CPU ID in order to maintain core affinity */
++ skb_set_queue_mapping(skb, raw_smp_processor_id());
+ dpa_tx(skb, dpa_loop_netdevs[priv->loop_to]);
+
+ return 1; /* Frame Tx on the selected interface */
+ }
+#endif
+
++ skb_record_rx_queue(skb, raw_smp_processor_id());
++
+ if (use_gro) {
+ gro_result_t gro_result;
+ const struct qman_portal_config *pc =
+
+#ifndef CONFIG_PPC
+/* Verify the conditions that trigger the A010022 errata: data unaligned to
-+ * 16 bytes and 4K memory address crossings.
++ * 16 bytes, 4K memory address crossings and S/G fragments.
+ */
+static bool a010022_check_skb(struct sk_buff *skb, struct dpa_priv_s *priv)
+{
-+ int nr_frags, i = 0;
-+ skb_frag_t *frag;
-+
+ /* Check if the headroom is aligned */
+ if (((uintptr_t)skb->data - priv->tx_headroom) %
+ priv->buf_layout[TX].data_align != 0)
+ return true;
+
++ /* Check for paged data in the skb. We do not support S/G fragments */
++ if (skb_is_nonlinear(skb))
++ return true;
++
+ /* Check if the headroom crosses a boundary */
+ if (HAS_DMA_ISSUE(skb->head, skb_headroom(skb)))
+ return true;
+ if (HAS_DMA_ISSUE(skb->head, skb_end_offset(skb)))
+ return true;
+
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+
-+ while (i < nr_frags) {
-+ frag = &skb_shinfo(skb)->frags[i];
-+
-+ /* Check if a paged fragment crosses a boundary from its
-+ * offset to its end.
-+ */
-+ if (HAS_DMA_ISSUE(frag->page_offset, frag->size))
-+ return true;
-+
-+ i++;
-+ }
-+
+ return false;
+}
+
+int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpa_priv_s *priv;
-+ const int queue_mapping = dpa_get_queue_mapping(skb);
++ int queue_mapping = dpa_get_queue_mapping(skb);
+ struct qman_fq *egress_fq, *conf_fq;
+
+#ifdef CONFIG_FSL_DPAA_HOOKS
+ return ceetm_tx(skb, net_dev);
+#endif
+
++ if (unlikely(queue_mapping >= DPAA_ETH_TX_QUEUES))
++ queue_mapping = queue_mapping % DPAA_ETH_TX_QUEUES;
++
+ egress_fq = priv->egress_fqs[queue_mapping];
+ conf_fq = priv->conf_fqs[queue_mapping];
+
+#include <trace/define_trace.h>
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c
-@@ -0,0 +1,542 @@
+@@ -0,0 +1,587 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#endif
+
+#include <linux/string.h>
++#include <linux/of_platform.h>
++#include <linux/net_tstamp.h>
++#include <linux/fsl/ptp_qoriq.h>
+
+#include "dpaa_eth.h"
+#include "mac.h" /* struct mac_device */
+ memcpy(strings, dpa_stats_global, size);
+}
+
++static int dpaa_get_ts_info(struct net_device *net_dev,
++ struct ethtool_ts_info *info)
++{
++ struct dpa_priv_s *priv = netdev_priv(net_dev);
++ struct device *dev = priv->mac_dev->dev;
++ struct device_node *mac_node = dev->of_node;
++ struct device_node *fman_node = NULL, *ptp_node = NULL;
++ struct platform_device *ptp_dev = NULL;
++ struct qoriq_ptp *ptp = NULL;
++
++ info->phc_index = -1;
++
++ fman_node = of_get_parent(mac_node);
++ if (fman_node)
++ ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
++
++ if (ptp_node)
++ ptp_dev = of_find_device_by_node(ptp_node);
++
++ if (ptp_dev)
++ ptp = platform_get_drvdata(ptp_dev);
++
++ if (ptp)
++ info->phc_index = ptp->phc_index;
++
++#ifdef CONFIG_FSL_DPAA_TS
++ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
++ (1 << HWTSTAMP_TX_ON);
++ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_ALL);
++#else
++ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
++ SOF_TIMESTAMPING_SOFTWARE;
++#endif
++
++ return 0;
++}
++
+const struct ethtool_ops dpa_ethtool_ops = {
+ .get_link_ksettings = dpa_get_ksettings,
+ .set_link_ksettings = dpa_set_ksettings,
+ .get_wol = dpa_get_wol,
+ .set_wol = dpa_set_wol,
+#endif
++ .get_ts_info = dpaa_get_ts_info,
+};
--- /dev/null
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c
-@@ -0,0 +1,291 @@
-+/*
-+ * DPAA Ethernet Driver -- PTP 1588 clock using the dTSEC
-+ *
-+ * Author: Yangbo Lu <yangbo.lu@freescale.com>
-+ *
-+ * Copyright 2014 Freescale Semiconductor, Inc.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License as published by the
-+ * Free Software Foundation; either version 2 of the License, or (at your
-+ * option) any later version.
-+*/
-+
-+#include <linux/device.h>
-+#include <linux/hrtimer.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_platform.h>
-+#include <linux/timex.h>
-+#include <linux/io.h>
-+
-+#include <linux/ptp_clock_kernel.h>
-+
-+#include "dpaa_eth.h"
-+#include "mac.h"
-+
-+static struct mac_device *mac_dev;
-+static u32 freqCompensation;
-+
-+/* Bit definitions for the TMR_CTRL register */
-+#define ALM1P (1<<31) /* Alarm1 output polarity */
-+#define ALM2P (1<<30) /* Alarm2 output polarity */
-+#define FS (1<<28) /* FIPER start indication */
-+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
-+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
-+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
-+#define TCLK_PERIOD_MASK (0x3ff)
-+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
-+#define FRD (1<<14) /* FIPER Realignment Disable */
-+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
-+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
-+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
-+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
-+#define COPH (1<<7) /* Generated clock output phase. */
-+#define CIPH (1<<6) /* External oscillator input clock phase */
-+#define TMSR (1<<5) /* Timer soft reset. */
-+#define BYP (1<<3) /* Bypass drift compensated clock */
-+#define TE (1<<2) /* 1588 timer enable. */
-+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
-+#define CKSEL_MASK (0x3)
-+
-+/* Bit definitions for the TMR_TEVENT register */
-+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
-+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
-+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
-+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
-+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
-+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
-+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
-+
-+/* Bit definitions for the TMR_TEMASK register */
-+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
-+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
-+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
-+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
-+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
-+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
-+
-+/* Bit definitions for the TMR_PEVENT register */
-+#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
-+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
-+#define RXP (1<<0) /* PTP frame has been received */
-+
-+/* Bit definitions for the TMR_PEMASK register */
-+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
-+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
-+#define RXPEN (1<<0) /* Receive PTP packet event enable */
-+
-+/* Bit definitions for the TMR_STAT register */
-+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
-+#define STAT_VEC_MASK (0x3f)
-+
-+/* Bit definitions for the TMR_PRSC register */
-+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
-+#define PRSC_OCK_MASK (0xffff)
-+
-+
-+#define N_EXT_TS 2
-+
-+static void set_alarm(void)
-+{
-+ u64 ns;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
-+ ns += 1500000000ULL;
-+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
-+ ns -= DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_alarm)
-+ mac_dev->fm_rtc_set_alarm(mac_dev->fm_dev, 0, ns);
-+}
-+
-+static void set_fipers(void)
-+{
-+ u64 fiper;
-+
-+ if (mac_dev->fm_rtc_disable)
-+ mac_dev->fm_rtc_disable(mac_dev->fm_dev);
-+
-+ set_alarm();
-+ fiper = 1000000000ULL - DPA_PTP_NOMINAL_FREQ_PERIOD_NS;
-+ if (mac_dev->fm_rtc_set_fiper)
-+ mac_dev->fm_rtc_set_fiper(mac_dev->fm_dev, 0, fiper);
-+
-+ if (mac_dev->fm_rtc_enable)
-+ mac_dev->fm_rtc_enable(mac_dev->fm_dev);
-+}
-+
-+/* PTP clock operations */
-+
-+static int ptp_dpa_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-+{
-+ u64 adj;
-+ u32 diff, tmr_add;
-+ int neg_adj = 0;
-+
-+ if (ppb < 0) {
-+ neg_adj = 1;
-+ ppb = -ppb;
-+ }
-+
-+ tmr_add = freqCompensation;
-+ adj = tmr_add;
-+ adj *= ppb;
-+ diff = div_u64(adj, 1000000000ULL);
-+
-+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-+
-+ if (mac_dev->fm_rtc_set_drift)
-+ mac_dev->fm_rtc_set_drift(mac_dev->fm_dev, tmr_add);
-+
-+ return 0;
-+}
-+
-+static int ptp_dpa_adjtime(struct ptp_clock_info *ptp, s64 delta)
-+{
-+ s64 now;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &now);
-+
-+ now += delta;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, now);
-+ set_fipers();
-+
-+ return 0;
-+}
-+
-+static int ptp_dpa_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-+{
-+ u64 ns;
-+ u32 remainder;
-+
-+ if (mac_dev->fm_rtc_get_cnt)
-+ mac_dev->fm_rtc_get_cnt(mac_dev->fm_dev, &ns);
-+
-+ ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-+ ts->tv_nsec = remainder;
-+ return 0;
-+}
-+
-+static int ptp_dpa_settime(struct ptp_clock_info *ptp,
-+ const struct timespec64 *ts)
-+{
-+ u64 ns;
-+
-+ ns = ts->tv_sec * 1000000000ULL;
-+ ns += ts->tv_nsec;
-+
-+ if (mac_dev->fm_rtc_set_cnt)
-+ mac_dev->fm_rtc_set_cnt(mac_dev->fm_dev, ns);
-+ set_fipers();
-+ return 0;
-+}
-+
-+static int ptp_dpa_enable(struct ptp_clock_info *ptp,
-+ struct ptp_clock_request *rq, int on)
-+{
-+ u32 bit;
-+
-+ switch (rq->type) {
-+ case PTP_CLK_REQ_EXTTS:
-+ switch (rq->extts.index) {
-+ case 0:
-+ bit = ETS1EN;
-+ break;
-+ case 1:
-+ bit = ETS2EN;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ if (on) {
-+ if (mac_dev->fm_rtc_enable_interrupt)
-+ mac_dev->fm_rtc_enable_interrupt(
-+ mac_dev->fm_dev, bit);
-+ } else {
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(
-+ mac_dev->fm_dev, bit);
-+ }
-+ return 0;
-+
-+ case PTP_CLK_REQ_PPS:
-+ if (on) {
-+ if (mac_dev->fm_rtc_enable_interrupt)
-+ mac_dev->fm_rtc_enable_interrupt(
-+ mac_dev->fm_dev, PP1EN);
-+ } else {
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(
-+ mac_dev->fm_dev, PP1EN);
-+ }
-+ return 0;
-+
-+ default:
-+ break;
-+ }
-+
-+ return -EOPNOTSUPP;
-+}
-+
-+static struct ptp_clock_info ptp_dpa_caps = {
-+ .owner = THIS_MODULE,
-+ .name = "dpaa clock",
-+ .max_adj = 512000,
-+ .n_alarm = 0,
-+ .n_ext_ts = N_EXT_TS,
-+ .n_per_out = 0,
-+ .pps = 1,
-+ .adjfreq = ptp_dpa_adjfreq,
-+ .adjtime = ptp_dpa_adjtime,
-+ .gettime64 = ptp_dpa_gettime,
-+ .settime64 = ptp_dpa_settime,
-+ .enable = ptp_dpa_enable,
-+};
-+
-+static int __init __cold dpa_ptp_load(void)
-+{
-+ struct device *ptp_dev;
-+ struct timespec64 now;
-+ struct ptp_clock *clock = ptp_priv.clock;
-+ int dpa_phc_index;
-+ int err;
-+
-+ if (!(ptp_priv.of_dev && ptp_priv.mac_dev))
-+ return -ENODEV;
-+
-+ ptp_dev = &ptp_priv.of_dev->dev;
-+ mac_dev = ptp_priv.mac_dev;
-+
-+ if (mac_dev->fm_rtc_get_drift)
-+ mac_dev->fm_rtc_get_drift(mac_dev->fm_dev, &freqCompensation);
-+
-+ getnstimeofday64(&now);
-+ ptp_dpa_settime(&ptp_dpa_caps, &now);
-+
-+ clock = ptp_clock_register(&ptp_dpa_caps, ptp_dev);
-+ if (IS_ERR(clock)) {
-+ err = PTR_ERR(clock);
-+ return err;
-+ }
-+ dpa_phc_index = ptp_clock_index(clock);
-+ return 0;
-+}
-+module_init(dpa_ptp_load);
-+
-+static void __exit __cold dpa_ptp_unload(void)
-+{
-+ struct ptp_clock *clock = ptp_priv.clock;
-+
-+ if (mac_dev->fm_rtc_disable_interrupt)
-+ mac_dev->fm_rtc_disable_interrupt(mac_dev->fm_dev, 0xffffffff);
-+ ptp_clock_unregister(clock);
-+}
-+module_exit(dpa_ptp_unload);
---- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
@@ -0,0 +1,931 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+};
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-@@ -0,0 +1,489 @@
+@@ -0,0 +1,490 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ mac_dev->fixed_link->duplex = phy->duplex;
+ mac_dev->fixed_link->pause = phy->pause;
+ mac_dev->fixed_link->asym_pause = phy->asym_pause;
++ printk(KERN_INFO "Setting up fixed link, speed %d duplex %d\n", mac_dev->fixed_link->speed, mac_dev->fixed_link->duplex);
+ }
+
+ _errno = mac_dev->init(mac_dev);
+module_exit(mac_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.h
-@@ -0,0 +1,135 @@
+@@ -0,0 +1,134 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ int (*fm_rtc_set_alarm)(struct fm *fm_dev, uint32_t id, uint64_t time);
+ int (*fm_rtc_set_fiper)(struct fm *fm_dev, uint32_t id,
+ uint64_t fiper);
-+#ifdef CONFIG_PTP_1588_CLOCK_DPAA
+ int (*fm_rtc_enable_interrupt)(struct fm *fm_dev, uint32_t events);
+ int (*fm_rtc_disable_interrupt)(struct fm *fm_dev, uint32_t events);
-+#endif
++
+ int (*set_wol)(struct fm_port *port, struct fm_mac_dev *fm_mac_dev,
+ bool en);
+ int (*dump_mac_regs)(struct mac_device *h_mac, char *buf, int nn);
+}
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-@@ -0,0 +1,1153 @@
+@@ -0,0 +1,1166 @@
+/*
+ * Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+
+ @Description FM mEMAC driver
+*//***************************************************************************/
++#include <../../../../sdk_dpaa/mac.h>
++#include <linux/phy_fixed.h>
+
+#include "std_ext.h"
+#include "string_ext.h"
+#include "memac.h"
+
+
++static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex);
++
+/*****************************************************************************/
+/* Internal routines */
+/*****************************************************************************/
+{
+ t_Memac *p_Memac = (t_Memac *)h_Memac;
+
++ struct mac_device *mac_dev = (struct mac_device *)p_Memac->h_App;
++
+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
+
+ fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
+
++ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_RGMII) {
++ if (mac_dev->fixed_link) {
++ printk(KERN_INFO "This is a fixed-link, forcing speed %d duplex %d\n",mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
++ MemacAdjustLink(h_Memac,mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
++ }
++ }
++
+ return E_OK;
+}
+
+fsl-ncsw-Pcd-objs := fm_port.o fm_port_im.o fman_port.o
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/Port/fm_port.c
-@@ -0,0 +1,6436 @@
+@@ -0,0 +1,6437 @@
+/*
+ * Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+ p_FmPort->rxPoolsParams.numOfPools = p_ExtBufPools->numOfPoolsUsed;
+ p_FmPort->rxPoolsParams.largestBufSize =
+ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 1]];
-+ p_FmPort->rxPoolsParams.secondLargestBufSize =
++ if (p_ExtBufPools->numOfPoolsUsed > 1)
++ p_FmPort->rxPoolsParams.secondLargestBufSize =
+ sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed - 2]];
+
+ /* FMBM_RMPD reg. - pool depletion */
+module_exit(fmt_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm.c
-@@ -0,0 +1,2908 @@
+@@ -0,0 +1,2910 @@
+/*
+ * Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+#ifdef CONFIG_PM_SLEEP
+ if (fman_get_normal_pending(p_Fm->p_FmFpmRegs) & INTR_EN_WAKEUP)
+ {
-+ pm_wakeup_event(p_LnxWrpFmDev->dev, 200);
++ pm_wakeup_event(p_LnxWrpFmDev->dev, 200);
+ }
+#endif
+ FM_EventIsr(p_LnxWrpFmDev->h_Dev);
+ }
+ }
+
++#if 0
+ /* Get the RTC base address and size */
+ memset(ids, 0, sizeof(ids));
+ if (WARN_ON(strlen("ptp-timer") >= sizeof(ids[0].name)))
+ p_LnxWrpFmDev->fmRtcMemSize = res.end + 1 - res.start;
+ }
+ }
++#endif
+
+#if (DPAA_VERSION >= 11)
+ /* Get the VSP base address */
+ if (unlikely(_errno < 0))
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno));
+#endif
-+ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, 0, "fman", p_LnxWrpFmDev);
++ _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, IRQF_SHARED, "fman", p_LnxWrpFmDev);
+ if (unlikely(_errno < 0))
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno));
+
+ p_LnxWrpFmDev->fmDevSettings.param.fmMacClkRatio =
+ !!(get_rcwsr(4) & 0x1); /* RCW[FM_MAC_RAT1] */
+
-+ {
++ {
+ /* T4 Devices ClkRatio is always 1 regardless of RCW[FM_MAC_RAT1] */
+ uint32_t svr;
+ svr = mfspr(SPRN_SVR);
+MODULE_DEVICE_TABLE(of, fm_match);
+#endif /* !MODULE */
+
-+#ifdef CONFIG_PM
++#if defined CONFIG_PM && (defined CONFIG_PPC || defined CONFIG_PPC64)
+
+#define SCFG_FMCLKDPSLPCR_ADDR 0xFFE0FC00C
+#define SCFG_FMCLKDPSLPCR_DS_VAL 0x48402000
+
+#define FM_PM_OPS (&fm_pm_ops)
+
-+#else /* CONFIG_PM */
++#else /* CONFIG_PM && (CONFIG_PPC || CONFIG_PPC64) */
+
+#define FM_PM_OPS NULL
+
-+#endif /* CONFIG_PM */
++#endif /* CONFIG_PM && (CONFIG_PPC || CONFIG_PPC64) */
+
+static struct platform_driver fm_driver = {
+ .driver = {
+#endif
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_high.c
-@@ -0,0 +1,5652 @@
+@@ -0,0 +1,5655 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ char buf[16];
+ int ret;
+ u32 isdr;
++ struct platform_device_info pdev_info;
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", config->public_cfg.channel);
-+ portal->pdev = platform_device_alloc(buf, -1);
++
++ memset(&pdev_info, 0, sizeof(pdev_info));
++ pdev_info.name = buf;
++ pdev_info.id = PLATFORM_DEVID_NONE;
++ pdev_info.dma_mask = DMA_BIT_MASK(40);
++
++ portal->pdev = platform_device_register_full(&pdev_info);
+ if (!portal->pdev) {
+ pr_err("qman_portal - platform_device_alloc() failed\n");
-+ goto fail_devalloc;
-+ }
-+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+ portal->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
-+ portal->pdev->dev.dma_mask = &portal->pdev->dev.coherent_dma_mask;
-+#else
-+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) {
-+ pr_err("qman_portal - dma_set_mask() failed\n");
-+ goto fail_devadd;
++ goto fail_devregister;
+ }
-+#endif
++
++ arch_setup_dma_ops(&portal->pdev->dev, 0, 0, NULL, true);
++
+ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
+ portal->pdev->dev.platform_data = portal;
-+ ret = platform_device_add(portal->pdev);
-+ if (ret) {
-+ pr_err("qman_portal - platform_device_add() failed\n");
-+ goto fail_devadd;
-+ }
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(__p, isdr);
+fail_affinity:
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
-+ platform_device_del(portal->pdev);
-+fail_devadd:
-+ platform_device_put(portal->pdev);
-+fail_devalloc:
++ platform_device_unregister(portal->pdev);
++fail_devregister:
+ if (num_ceetms)
+ for (ret = 0; ret < num_ceetms; ret++)
+ kfree(portal->ccgrs[ret]);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
-+ platform_device_del(qm->pdev);
-+ platform_device_put(qm->pdev);
++ platform_device_unregister(qm->pdev);
+
+ qm->config = NULL;
+ if (qm->alloced)
+ } else {
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
++ if (dma_mapping_error(&p->pdev->dev, phys_fq)) {
++ dev_err(&p->pdev->dev,
++ "dma_map_single failed for fqid: %u\n",
++ fq->fqid);
++ FQUNLOCK(fq);
++ PORTAL_IRQ_UNLOCK(p, irqflags);
++ put_affine_portal();
++ return -EIO;
++ }
++
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+#endif /* FSL_BMAN_H */
--- /dev/null
+++ b/include/linux/fsl_qman.h
-@@ -0,0 +1,3900 @@
+@@ -0,0 +1,3910 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ qman_cb_precommit cb, void *cb_arg);
++
++static inline int qman_is_probed(void) {
++ return 1;
++}
++
++
++static inline int qman_portals_probed(void) {
++ return 1;
++}
++
+#ifdef __cplusplus
+}
+#endif
-From 2aaf8e8caef3ec4c2c155421f62f983892c49387 Mon Sep 17 00:00:00 2001
+From ab58c737bc723f52e787e1767bbbf0fcbe39a27b Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 16 Nov 2018 12:20:04 +0800
-Subject: [PATCH 13/39] mc-bus: support layerscape
+Date: Wed, 17 Apr 2019 18:58:43 +0800
+Subject: [PATCH] mc-bus: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of mc-bus for layerscape
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Cristian Sovaiala <cristian.sovaiala@freescale.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Razvan Stefanescu <razvan.stefanescu@nxp.com>
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
-Signed-off-by: Stuart Yoder <stuyoder@gmail.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
drivers/bus/Kconfig | 2 +
drivers/bus/Makefile | 4 +
.../fsl-mc/bus => bus/fsl-mc}/dpcon.c | 103 +-
drivers/bus/fsl-mc/dpmcp.c | 99 ++
.../fsl-mc/bus => bus/fsl-mc}/dprc-driver.c | 96 +-
- .../{staging/fsl-mc/bus => bus/fsl-mc}/dprc.c | 288 +----
- .../bus => bus/fsl-mc}/fsl-mc-allocator.c | 112 +-
- .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c | 305 ++++-
+ .../{staging/fsl-mc/bus => bus/fsl-mc}/dprc.c | 289 +----
+ .../bus => bus/fsl-mc}/fsl-mc-allocator.c | 123 +-
+ .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c | 322 +++++-
.../fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c | 16 +-
drivers/bus/fsl-mc/fsl-mc-private.h | 223 ++++
drivers/bus/fsl-mc/fsl-mc-restool.c | 219 ++++
- .../fsl-mc/bus => bus/fsl-mc}/mc-io.c | 36 +-
+ .../fsl-mc/bus => bus/fsl-mc}/mc-io.c | 51 +-
.../fsl-mc/bus => bus/fsl-mc}/mc-sys.c | 33 +-
drivers/irqchip/Kconfig | 6 +
drivers/irqchip/Makefile | 1 +
drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c | 98 ++
- drivers/staging/fsl-dpaa2/ethernet/README | 2 +-
.../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2 +-
.../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 3 +-
drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2 +-
- drivers/staging/fsl-mc/TODO | 18 -
- drivers/staging/fsl-mc/bus/Kconfig | 16 +-
+ drivers/staging/fsl-mc/bus/Kconfig | 15 +-
drivers/staging/fsl-mc/bus/Makefile | 13 -
drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 2 +-
.../staging/fsl-mc/bus/dpio/dpio-service.c | 2 +-
drivers/staging/fsl-mc/bus/dprc-cmd.h | 451 --------
drivers/staging/fsl-mc/bus/dprc.h | 268 -----
.../fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 1 +
- include/linux/fsl/mc.h | 1020 +++++++++++++++++
+ include/linux/fsl/mc.h | 1029 +++++++++++++++++
include/uapi/linux/fsl_mc.h | 31 +
- 37 files changed, 2255 insertions(+), 1546 deletions(-)
+ 35 files changed, 2302 insertions(+), 1531 deletions(-)
create mode 100644 drivers/bus/fsl-mc/Kconfig
create mode 100644 drivers/bus/fsl-mc/Makefile
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dpbp.c (67%)
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dprc-driver.c (93%)
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dprc.c (68%)
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-allocator.c (84%)
- rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c (76%)
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c (75%)
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c (96%)
create mode 100644 drivers/bus/fsl-mc/fsl-mc-private.h
create mode 100644 drivers/bus/fsl-mc/fsl-mc-restool.c
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-io.c (89%)
rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-sys.c (90%)
create mode 100644 drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
- delete mode 100644 drivers/staging/fsl-mc/TODO
delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h
delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h
delete mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h
-}
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc.c
-@@ -0,0 +1,575 @@
+@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
-+ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr);
++ region_desc->base_offset = le32_to_cpu(rsp_params->base_offset);
+ region_desc->size = le32_to_cpu(rsp_params->size);
+ region_desc->type = rsp_params->type;
+ region_desc->flags = le32_to_cpu(rsp_params->flags);
++ region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+
+ return 0;
+}
-}
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
-@@ -0,0 +1,655 @@
+@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fsl-mc object allocator driver
+ if (!mc_adev)
+ goto error;
+
++ mc_adev->consumer_link = device_link_add(&mc_dev->dev,
++ &mc_adev->dev,
++ DL_FLAG_AUTOREMOVE_CONSUMER);
++ if (!mc_adev->consumer_link) {
++ error = -EINVAL;
++ goto error;
++ }
++
+ *new_mc_adev = mc_adev;
+ return 0;
+error:
+ return;
+
+ fsl_mc_resource_free(resource);
++
++ device_link_del(mc_adev->consumer_link);
++ mc_adev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
-postcore_initcall(fsl_mc_bus_driver_init);
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
-@@ -0,0 +1,1139 @@
+@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver
+ "dprc_get_obj_region() failed: %d\n", error);
+ goto error_cleanup_regions;
+ }
-+
-+ error = translate_mc_addr(mc_dev, mc_region_type,
-+ region_desc.base_offset,
-+ &regions[i].start);
++ /* Older MC only returned region offset and no base address
++ * If base address is in the region_desc use it otherwise
++ * revert to old mechanism
++ */
++ if (region_desc.base_address)
++ regions[i].start = region_desc.base_address +
++ region_desc.base_offset;
++ else
++ error = translate_mc_addr(mc_dev, mc_region_type,
++ region_desc.base_offset,
++ &regions[i].start);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+ regions[i].flags = IORESOURCE_IO;
+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
+ regions[i].flags |= IORESOURCE_CACHEABLE;
++ if (region_desc.flags & DPRC_REGION_SHAREABLE)
++ regions[i].flags |= IORESOURCE_MEM;
+ }
+
+ mc_dev->regions = regions;
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-io.c
-@@ -0,0 +1,268 @@
+@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
-+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
++ if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ if (error < 0)
+ goto error_cleanup_resource;
+
++ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
++ &dpmcp_dev->dev,
++ DL_FLAG_AUTOREMOVE_CONSUMER);
++ if (!dpmcp_dev->consumer_link) {
++ error = -EINVAL;
++ goto error_cleanup_mc_io;
++ }
++
+ *new_mc_io = mc_io;
+ return 0;
+
++error_cleanup_mc_io:
++ fsl_destroy_mc_io(mc_io);
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
++
++ device_link_del(dpmcp_dev->consumer_link);
++ dpmcp_dev->consumer_link = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+}
+
+early_initcall(its_fsl_mc_msi_init);
---- a/drivers/staging/fsl-dpaa2/ethernet/README
-+++ b/drivers/staging/fsl-dpaa2/ethernet/README
-@@ -36,7 +36,7 @@ are treated as internal resources of oth
-
- For a more detailed description of the DPAA2 architecture and its object
- abstractions see:
-- drivers/staging/fsl-mc/README.txt
-+ Documentation/networking/dpaa2/overview.rst
-
- Each Linux net device is built on top of a Datapath Network Interface (DPNI)
- object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -43,7 +43,7 @@
+@@ -16,7 +16,7 @@
#include <linux/filter.h>
#include <linux/atomic.h>
#include <net/sock.h>
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -36,11 +36,10 @@
+@@ -9,12 +9,11 @@
#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/fsl/mc.h>
+ #include <linux/filter.h>
#include "../../fsl-mc/include/dpaa2-io.h"
#include "../../fsl-mc/include/dpaa2-fd.h"
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -32,7 +32,7 @@
+@@ -4,7 +4,7 @@
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include "dpni.h"
#include "dpni-cmd.h"
---- a/drivers/staging/fsl-mc/TODO
-+++ /dev/null
-@@ -1,18 +0,0 @@
--* Add at least one device driver for a DPAA2 object (child device of the
-- fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
-- driver support, which depends on drivers for several objects: DPNI,
-- DPIO, DPMAC. Other pre-requisites include:
--
-- * MC firmware uprev. The MC firmware upon which the fsl-mc
-- bus driver and DPAA2 object drivers are based is continuing
-- to evolve, so minor updates are needed to keep in sync with binary
-- interface changes to the MC.
--
--* Cleanup
--
--Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
--german.rivera@freescale.com, devel@driverdev.osuosl.org,
--linux-kernel@vger.kernel.org
--
--[1] https://lkml.org/lkml/2015/7/9/93
--[2] https://lkml.org/lkml/2015/7/7/712
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
-@@ -5,16 +5,6 @@
+@@ -5,15 +5,6 @@
# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
#
- architecture. The fsl-mc bus driver handles discovery of
- DPAA2 objects (which are represented as Linux devices) and
- binding objects to drivers.
--
+
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
- depends on FSL_MC_BUS
-@@ -24,3 +14,9 @@ config FSL_MC_DPIO
+@@ -24,3 +15,9 @@ config FSL_MC_DPIO
other DPAA2 objects. This driver does not expose the DPIO
objects individually, but groups them under a service layer
API.
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-@@ -14,7 +14,7 @@
- #include <linux/dma-mapping.h>
+@@ -15,7 +15,7 @@
#include <linux/delay.h>
+ #include <linux/io.h>
-#include "../../include/mc.h"
+#include <linux/fsl/mc.h>
static struct irq_chip its_msi_irq_chip = {
--- /dev/null
+++ b/include/linux/fsl/mc.h
-@@ -0,0 +1,1020 @@
+@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus public interface
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
+ const char *driver_override;
++ struct device_link *consumer_link;
+};
+
+#define to_fsl_mc_device(_dev) \
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
++#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
++#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
-+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
++#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+struct dprc_cmd_open {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
-+ __le32 base_addr;
++ __le32 base_offset;
+ __le32 pad1;
+ /* response word 2 */
+ __le32 size;
+ u8 pad2[3];
+ /* response word 3 */
+ __le32 flags;
++ __le32 pad3;
++ /* response word 4 */
++ __le64 base_addr;
+};
+
+struct dprc_cmd_set_obj_irq {
+/* Region flags */
+/* Cacheable - Indicates that region should be mapped as cacheable */
+#define DPRC_REGION_CACHEABLE 0x00000001
++#define DPRC_REGION_SHAREABLE 0x00000002
+
+/**
+ * enum dprc_region_type - Region type
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
-+ DPRC_REGION_TYPE_QBMAN_PORTAL
++ DPRC_REGION_TYPE_QBMAN_PORTAL,
++ DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
+};
+
+/**
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
++ u64 base_address;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-From 8eb578a8c1eb55715a40f02790e43aba4a528c38 Mon Sep 17 00:00:00 2001
+From 83fe1ecb8ac6e0544ae74bf5a63806dcac768201 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:51 +0800
-Subject: [PATCH 15/40] mdio-phy: support layerscae
+Date: Wed, 17 Apr 2019 18:58:45 +0800
+Subject: [PATCH] mdio-phy: support layerscape
+
This is an integrated patch of mdio-phy for layerscape
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Constantin Tudor <constantin.tudor@nxp.com>
Signed-off-by: costi <constantin.tudor@freescale.com>
+Signed-off-by: Florin Chiculita <florinlaurentiu.chiculita@nxp.com>
+Signed-off-by: Florinel Iordache <florinel.iordache@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
Signed-off-by: Shaohui Xie <Shaohui.Xie@freescale.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
---
- drivers/net/phy/Kconfig | 6 +
- drivers/net/phy/Makefile | 1 +
- drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++
- drivers/net/phy/swphy.c | 1 +
- include/linux/phy.h | 3 +
- 5 files changed, 1369 insertions(+)
+ drivers/net/phy/Kconfig | 33 +
+ drivers/net/phy/Makefile | 5 +
+ drivers/net/phy/aquantia.c | 328 +++-
+ drivers/net/phy/at803x.c | 21 +
+ drivers/net/phy/fsl_backplane.c | 1780 ++++++++++++++++++++
+ drivers/net/phy/fsl_backplane.h | 41 +
+ drivers/net/phy/fsl_backplane_serdes_10g.c | 281 +++
+ drivers/net/phy/fsl_backplane_serdes_28g.c | 336 ++++
+ drivers/net/phy/inphi.c | 594 +++++++
+ drivers/net/phy/mdio-mux-multiplexer.c | 122 ++
+ drivers/net/phy/swphy.c | 1 +
+ include/linux/phy.h | 3 +
+ 12 files changed, 3526 insertions(+), 19 deletions(-)
create mode 100644 drivers/net/phy/fsl_backplane.c
+ create mode 100644 drivers/net/phy/fsl_backplane.h
+ create mode 100644 drivers/net/phy/fsl_backplane_serdes_10g.c
+ create mode 100644 drivers/net/phy/fsl_backplane_serdes_28g.c
+ create mode 100644 drivers/net/phy/inphi.c
+ create mode 100644 drivers/net/phy/mdio-mux-multiplexer.c
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
-@@ -90,6 +90,12 @@ config MDIO_BUS_MUX_MMIOREG
+@@ -87,9 +87,27 @@ config MDIO_BUS_MUX_MMIOREG
+
+ Currently, only 8/16/32 bits registers are supported.
+
++config MDIO_BUS_MUX_MULTIPLEXER
++ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
++ depends on OF
++ select MULTIPLEXER
++ select MDIO_BUS_MUX
++ help
++ This module provides a driver for MDIO bus multiplexer
++ that is controlled via the kernel multiplexer subsystem. The
++ bus multiplexer connects one of several child MDIO busses to
++ a parent bus. Child bus selection is under the control of
++ the kernel multiplexer subsystem.
++
config MDIO_CAVIUM
tristate
config MDIO_GPIO
tristate "GPIO lib-based bitbanged MDIO buses"
depends on MDIO_BITBANG && GPIOLIB
+@@ -303,6 +321,16 @@ config AT803X_PHY
+ ---help---
+ Currently supports the AT8030 and AT8035 model
+
++config AT803X_PHY_SMART_EEE
++ depends on AT803X_PHY
++ default n
++ tristate "SmartEEE feature for AT803X PHYs"
++ ---help---
++ Enables the Atheros SmartEEE feature (not IEEE 802.3az). When 2 PHYs
++ which support this feature are connected back-to-back, they may
++ negotiate a low-power sleep mode autonomously, without the Ethernet
++ controller's knowledge. May cause packet loss.
++
+ config BCM63XX_PHY
+ tristate "Broadcom 63xx SOCs internal PHY"
+ depends on BCM63XX
+@@ -385,6 +413,11 @@ config ICPLUS_PHY
+ ---help---
+ Currently supports the IP175C and IP1001 PHYs.
+
++config INPHI_PHY
++ tristate "Inphi CDR 10G/25G Ethernet PHY"
++ ---help---
++ Currently supports the IN112525_S03 part @ 25G
++
+ config INTEL_XWAY_PHY
+ tristate "Intel XWAY PHYs"
+ ---help---
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
-@@ -45,6 +45,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) +=
+@@ -44,7 +44,11 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
++obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
++obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_10g.o
++obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane_serdes_28g.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+@@ -75,6 +79,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o
+ obj-$(CONFIG_DP83867_PHY) += dp83867.o
+ obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
++obj-$(CONFIG_INPHI_PHY) += inphi.o
+ obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
+ obj-$(CONFIG_LXT_PHY) += lxt.o
+--- a/drivers/net/phy/aquantia.c
++++ b/drivers/net/phy/aquantia.c
+@@ -4,6 +4,7 @@
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+@@ -27,15 +28,200 @@
+
+ #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
+ SUPPORTED_1000baseT_Full | \
++ SUPPORTED_2500baseX_Full | \
+ SUPPORTED_100baseT_Full | \
++ SUPPORTED_Pause | \
++ SUPPORTED_Asym_Pause | \
+ PHY_DEFAULT_FEATURES)
+
++#define MDIO_PMA_CTRL1_AQ_SPEED10 0
++#define MDIO_PMA_CTRL1_AQ_SPEED2500 0x2058
++#define MDIO_PMA_CTRL1_AQ_SPEED5000 0x205c
++#define MDIO_PMA_CTRL2_AQ_2500BT 0x30
++#define MDIO_PMA_CTRL2_AQ_5000BT 0x31
++#define MDIO_PMA_CTRL2_AQ_TYPE_MASK 0x3F
++
++#define MDIO_AN_VENDOR_PROV_CTRL 0xc400
++#define MDIO_AN_RECV_LP_STATUS 0xe820
++
++#define MDIO_AN_LPA_PAUSE 0x20
++#define MDIO_AN_LPA_ASYM_PAUSE 0x10
++#define MDIO_AN_ADV_PAUSE 0x20
++#define MDIO_AN_ADV_ASYM_PAUSE 0x10
++
++static int aquantia_write_reg(struct phy_device *phydev, int devad,
++ u32 regnum, u16 val)
++{
++ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
++
++ return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, addr, val);
++}
++
++static int aquantia_read_reg(struct phy_device *phydev, int devad, u32 regnum)
++{
++ u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
++
++ return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
++}
++
++static int aquantia_pma_setup_forced(struct phy_device *phydev)
++{
++ int ctrl1, ctrl2, ret;
++
++ /* Half duplex is not supported */
++ if (phydev->duplex != DUPLEX_FULL)
++ return -EINVAL;
++
++ ctrl1 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
++ if (ctrl1 < 0)
++ return ctrl1;
++
++ ctrl2 = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
++ if (ctrl2 < 0)
++ return ctrl2;
++
++ ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
++ ctrl2 &= ~(MDIO_PMA_CTRL2_AQ_TYPE_MASK);
++
++ switch (phydev->speed) {
++ case SPEED_10:
++ ctrl2 |= MDIO_PMA_CTRL2_10BT;
++ break;
++ case SPEED_100:
++ ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
++ ctrl2 |= MDIO_PMA_CTRL2_100BTX;
++ break;
++ case SPEED_1000:
++ ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
++ /* Assume 1000base-T */
++ ctrl2 |= MDIO_PMA_CTRL2_1000BT;
++ break;
++ case SPEED_10000:
++ ctrl1 |= MDIO_CTRL1_SPEED10G;
++ /* Assume 10Gbase-T */
++ ctrl2 |= MDIO_PMA_CTRL2_10GBT;
++ break;
++ case SPEED_2500:
++ ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED2500;
++ ctrl2 |= MDIO_PMA_CTRL2_AQ_2500BT;
++ break;
++ case SPEED_5000:
++ ctrl1 |= MDIO_PMA_CTRL1_AQ_SPEED5000;
++ ctrl2 |= MDIO_PMA_CTRL2_AQ_5000BT;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret = aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
++ if (ret < 0)
++ return ret;
++
++ return aquantia_write_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
++}
++
++static int aquantia_aneg(struct phy_device *phydev, bool control)
++{
++ int reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1);
++
++ if (reg < 0)
++ return reg;
++
++ if (control)
++ reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
++ else
++ reg &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
++
++ return aquantia_write_reg(phydev, MDIO_MMD_AN, MDIO_CTRL1, reg);
++}
++
++static int aquantia_config_advert(struct phy_device *phydev)
++{
++ u32 advertise;
++ int oldadv, adv, oldadv1, adv1;
++ int err, changed = 0;
++
++ /* Only allow advertising what this PHY supports */
++ phydev->advertising &= phydev->supported;
++ advertise = phydev->advertising;
++
++ /* Setup standard advertisement */
++ oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_10GBT_CTRL);
++ if (oldadv < 0)
++ return oldadv;
++
++ /* Aquantia vendor specific advertisments */
++ oldadv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_VENDOR_PROV_CTRL);
++ if (oldadv1 < 0)
++ return oldadv1;
++
++ adv = 0;
++ adv1 = 0;
++
++ /*100BaseT_full is supported by default*/
++
++ if (advertise & ADVERTISED_1000baseT_Full)
++ adv1 |= 0x8000;
++ if (advertise & ADVERTISED_10000baseT_Full)
++ adv |= 0x1000;
++ if (advertise & ADVERTISED_2500baseX_Full)
++ adv1 |= 0x400;
++
++ if (adv != oldadv) {
++ err = aquantia_write_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_10GBT_CTRL, adv);
++ if (err < 0)
++ return err;
++ changed = 1;
++ }
++ if (adv1 != oldadv1) {
++ err = aquantia_write_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_VENDOR_PROV_CTRL, adv1);
++ if (err < 0)
++ return err;
++ changed = 1;
++ }
++
++ /* advertise flow control */
++ oldadv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ if (oldadv < 0)
++ return oldadv;
++
++ adv = oldadv & ~(MDIO_AN_ADV_PAUSE | MDIO_AN_ADV_ASYM_PAUSE);
++ if (advertise & ADVERTISED_Pause)
++ adv |= MDIO_AN_ADV_PAUSE;
++ if (advertise & ADVERTISED_Asym_Pause)
++ adv |= MDIO_AN_ADV_ASYM_PAUSE;
++
++ if (adv != oldadv) {
++ err = aquantia_write_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_ADVERTISE, adv);
++ if (err < 0)
++ return err;
++ changed = 1;
++ }
++
++ return changed;
++}
++
+ static int aquantia_config_aneg(struct phy_device *phydev)
+ {
++ int ret = 0;
++
+ phydev->supported = PHY_AQUANTIA_FEATURES;
+- phydev->advertising = phydev->supported;
++ if (phydev->autoneg == AUTONEG_DISABLE) {
++ aquantia_pma_setup_forced(phydev);
++ return aquantia_aneg(phydev, false);
++ }
+
+- return 0;
++ ret = aquantia_config_advert(phydev);
++ if (ret > 0)
++ /* restart autoneg */
++ return aquantia_aneg(phydev, true);
++
++ return ret;
+ }
+
+ static int aquantia_aneg_done(struct phy_device *phydev)
+@@ -51,25 +237,26 @@ static int aquantia_config_intr(struct p
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
++ err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 1);
+ if (err < 0)
+ return err;
+
+- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
++ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 1);
+ if (err < 0)
+ return err;
+
+- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
++ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1,
++ 0xff01, 0x1001);
+ } else {
+- err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
++ err = aquantia_write_reg(phydev, MDIO_MMD_AN, 0xd401, 0);
+ if (err < 0)
+ return err;
+
+- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
++ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff00, 0);
+ if (err < 0)
+ return err;
+
+- err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
++ err = aquantia_write_reg(phydev, MDIO_MMD_VEND1, 0xff01, 0);
+ }
+
+ return err;
+@@ -79,42 +266,145 @@ static int aquantia_ack_interrupt(struct
+ {
+ int reg;
+
+- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
++ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, 0xcc01);
+ return (reg < 0) ? reg : 0;
+ }
+
++static int aquantia_read_advert(struct phy_device *phydev)
++{
++ int adv, adv1;
++
++ /* Setup standard advertisement */
++ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_10GBT_CTRL);
++
++	/* Aquantia vendor specific advertisements */
++ adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_VENDOR_PROV_CTRL);
++
++	/* 100BaseT_Full is supported by default */
++ phydev->advertising |= ADVERTISED_100baseT_Full;
++
++ if (adv & 0x1000)
++ phydev->advertising |= ADVERTISED_10000baseT_Full;
++ else
++ phydev->advertising &= ~ADVERTISED_10000baseT_Full;
++ if (adv1 & 0x8000)
++ phydev->advertising |= ADVERTISED_1000baseT_Full;
++ else
++ phydev->advertising &= ~ADVERTISED_1000baseT_Full;
++ if (adv1 & 0x400)
++ phydev->advertising |= ADVERTISED_2500baseX_Full;
++ else
++ phydev->advertising &= ~ADVERTISED_2500baseX_Full;
++
++ /* flow control advertisement */
++ adv = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ if (adv & MDIO_AN_ADV_PAUSE)
++ phydev->advertising |= ADVERTISED_Pause;
++ else
++ phydev->advertising &= ~ADVERTISED_Pause;
++ if (adv & MDIO_AN_ADV_ASYM_PAUSE)
++ phydev->advertising |= ADVERTISED_Asym_Pause;
++ else
++ phydev->advertising &= ~ADVERTISED_Asym_Pause;
++
++ return 0;
++}
++
++static int aquantia_read_lp_advert(struct phy_device *phydev)
++{
++ int adv, adv1;
++
++ /* Read standard link partner advertisement */
++ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_STAT1);
++
++ if (adv & 0x1)
++ phydev->lp_advertising |= ADVERTISED_Autoneg |
++ ADVERTISED_100baseT_Full;
++ else
++ phydev->lp_advertising &= ~(ADVERTISED_Autoneg |
++ ADVERTISED_100baseT_Full);
++
++	/* Read 10GBASE-T link partner advertisement */
++ adv = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_10GBT_STAT);
++
++	/* Aquantia link partner advertisements */
++ adv1 = aquantia_read_reg(phydev, MDIO_MMD_AN,
++ MDIO_AN_RECV_LP_STATUS);
++
++ if (adv & 0x800)
++ phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
++ else
++ phydev->lp_advertising &= ~ADVERTISED_10000baseT_Full;
++ if (adv1 & 0x8000)
++ phydev->lp_advertising |= ADVERTISED_1000baseT_Full;
++ else
++ phydev->lp_advertising &= ~ADVERTISED_1000baseT_Full;
++ if (adv1 & 0x400)
++ phydev->lp_advertising |= ADVERTISED_2500baseX_Full;
++ else
++ phydev->lp_advertising &= ~ADVERTISED_2500baseX_Full;
++
++ return 0;
++}
++
+ static int aquantia_read_status(struct phy_device *phydev)
+ {
+ int reg;
+
+- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ /* Read the link status twice; the bit is latching low */
++ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
++ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_STAT1);
++
+ if (reg & MDIO_STAT1_LSTATUS)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
+- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+ mdelay(10);
+- reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
++ reg = aquantia_read_reg(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
++
++ if ((reg & MDIO_CTRL1_SPEEDSELEXT) == MDIO_CTRL1_SPEEDSELEXT)
++ reg &= MDIO_CTRL1_SPEEDSEL;
++ else
++ reg &= MDIO_CTRL1_SPEEDSELEXT;
+
+ switch (reg) {
+- case 0x9:
++ case MDIO_PMA_CTRL1_AQ_SPEED5000:
++ phydev->speed = SPEED_5000;
++ break;
++ case MDIO_PMA_CTRL1_AQ_SPEED2500:
+ phydev->speed = SPEED_2500;
+ break;
+- case 0x5:
+- phydev->speed = SPEED_1000;
++ case MDIO_PMA_CTRL1_AQ_SPEED10:
++ phydev->speed = SPEED_10;
+ break;
+- case 0x3:
++ case MDIO_PMA_CTRL1_SPEED100:
+ phydev->speed = SPEED_100;
+ break;
+- case 0x7:
+- default:
++ case MDIO_PMA_CTRL1_SPEED1000:
++ phydev->speed = SPEED_1000;
++ break;
++ case MDIO_CTRL1_SPEED10G:
+ phydev->speed = SPEED_10000;
+ break;
++ default:
++ phydev->speed = SPEED_UNKNOWN;
++ break;
+ }
++
+ phydev->duplex = DUPLEX_FULL;
+
++ reg = aquantia_read_reg(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
++ phydev->pause = reg & MDIO_AN_LPA_PAUSE ? 1 : 0;
++ phydev->asym_pause = reg & MDIO_AN_LPA_ASYM_PAUSE ? 1 : 0;
++
++ aquantia_read_advert(phydev);
++ aquantia_read_lp_advert(phydev);
++
+ return 0;
+ }
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -68,6 +68,8 @@
+ #define AT803X_DEBUG_REG_5 0x05
+ #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
++#define AT803X_LPI_EN BIT(8)
++
+ #define ATH8030_PHY_ID 0x004dd076
+ #define ATH8031_PHY_ID 0x004dd074
+ #define ATH8032_PHY_ID 0x004dd023
+@@ -290,6 +292,19 @@ static void at803x_disable_smarteee(stru
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
+ }
+
++static void at803x_enable_smart_eee(struct phy_device *phydev, int on)
++{
++ int value;
++
++ /* 5.1.11 Smart_eee control3 */
++ value = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x805D);
++ if (on)
++ value |= AT803X_LPI_EN;
++ else
++ value &= ~AT803X_LPI_EN;
++ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x805D, value);
++}
++
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+ struct at803x_platform_data *pdata;
+@@ -320,6 +335,12 @@ static int at803x_config_init(struct phy
+ if (ret < 0)
+ return ret;
+
++#ifdef CONFIG_AT803X_PHY_SMART_EEE
++ at803x_enable_smart_eee(phydev, 1);
++#else
++ at803x_enable_smart_eee(phydev, 0);
++#endif
++
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ ret = at803x_enable_rx_delay(phydev);
--- /dev/null
+++ b/drivers/net/phy/fsl_backplane.c
-@@ -0,0 +1,1358 @@
-+/* Freescale backplane driver.
+@@ -0,0 +1,1780 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * DPAA backplane driver.
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
++ * Florinel Iordache <florinel.iordache@nxp.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
++#include <linux/netdevice.h>
+
-+/* XFI PCS Device Identifier */
-+#define FSL_PCS_PHY_ID 0x0083e400
-+
-+/* Freescale KR PMD registers */
-+#define FSL_KR_PMD_CTRL 0x96
-+#define FSL_KR_PMD_STATUS 0x97
-+#define FSL_KR_LP_CU 0x98
-+#define FSL_KR_LP_STATUS 0x99
-+#define FSL_KR_LD_CU 0x9a
-+#define FSL_KR_LD_STATUS 0x9b
++#include "fsl_backplane.h"
+
-+/* Freescale KR PMD defines */
-+#define PMD_RESET 0x1
-+#define PMD_STATUS_SUP_STAT 0x4
-+#define PMD_STATUS_FRAME_LOCK 0x2
-+#define TRAIN_EN 0x3
-+#define TRAIN_DISABLE 0x1
-+#define RX_STAT 0x1
+
-+#define FSL_KR_RX_LINK_STAT_MASK 0x1000
-+#define FSL_XFI_PCS_10GR_SR1 0x20
++/* PCS Device Identifier */
++#define PCS_PHY_DEVICE_ID 0x0083e400
++#define PCS_PHY_DEVICE_ID_MASK 0xffffffff
+
-+/* Freescale KX PCS mode register */
-+#define FSL_PCS_IF_MODE 0x8014
++/* 10G Long cables setup: 1 m to 2 m cables */
++#define RATIO_PREQ_10G 0x3
++#define RATIO_PST1Q_10G 0xd
++#define RATIO_EQ_10G 0x20
+
-+/* Freescale KX PCS mode register init value */
-+#define IF_MODE_INIT 0x8
++/* 10G Short cables setup: up to 30 cm cable */
++//#define RATIO_PREQ_10G 0x3
++//#define RATIO_PST1Q_10G 0xa
++//#define RATIO_EQ_10G 0x29
+
-+/* Freescale KX/KR AN registers */
-+#define FSL_AN_AD1 0x11
-+#define FSL_AN_BP_STAT 0x30
-+
-+/* Freescale KX/KR AN registers defines */
-+#define AN_CTRL_INIT 0x1200
-+#define KX_AN_AD1_INIT 0x25
-+#define KR_AN_AD1_INIT 0x85
-+#define AN_LNK_UP_MASK 0x4
-+#define KR_AN_MASK 0x8
-+#define TRAIN_FAIL 0x8
++/* 40G Long cables setup: 1 m to 2 m cables */
++#define RATIO_PREQ_40G 0x2
++#define RATIO_PST1Q_40G 0xd
++#define RATIO_EQ_40G 0x20
+
-+/* C(-1) */
-+#define BIN_M1 0
-+/* C(1) */
-+#define BIN_LONG 1
-+#define BIN_M1_SEL 6
-+#define BIN_Long_SEL 7
-+#define CDR_SEL_MASK 0x00070000
-+#define BIN_SNAPSHOT_NUM 5
-+#define BIN_M1_THRESHOLD 3
-+#define BIN_LONG_THRESHOLD 2
++/* 40G Short cables setup: up to 30 cm cable */
++//#define RATIO_PREQ_40G 0x1
++//#define RATIO_PST1Q_40G 0x3
++//#define RATIO_EQ_40G 0x29
+
-+#define PRE_COE_SHIFT 22
-+#define POST_COE_SHIFT 16
-+#define ZERO_COE_SHIFT 8
++/* LX2 2x40G default RCW setup */
++//#define RATIO_PREQ_40G 0x0
++//#define RATIO_PST1Q_40G 0x3
++//#define RATIO_EQ_40G 0x30
+
-+#define PRE_COE_MAX 0x0
-+#define PRE_COE_MIN 0x8
++/* Max/Min coefficient values */
++#define PRE_COE_MAX 0x0
++#define PRE_COE_MIN 0x8
+#define POST_COE_MAX 0x0
+#define POST_COE_MIN 0x10
+#define ZERO_COE_MAX 0x30
+#define ZERO_COE_MIN 0x0
+
-+#define TECR0_INIT 0x24200000
-+#define RATIO_PREQ 0x3
-+#define RATIO_PST1Q 0xd
-+#define RATIO_EQ 0x20
++/* KR PMD defines */
++#define PMD_RESET 0x1
++#define PMD_STATUS_SUP_STAT 0x4
++#define PMD_STATUS_FRAME_LOCK 0x2
++#define TRAIN_EN 0x3
++#define TRAIN_DISABLE 0x1
++#define RX_STAT 0x1
+
-+#define GCR0_RESET_MASK 0x600000
-+#define GCR1_SNP_START_MASK 0x00000040
-+#define GCR1_CTL_SNP_START_MASK 0x00002000
-+#define GCR1_REIDL_TH_MASK 0x00700000
-+#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
-+#define GCR1_REIDL_ET_MAS_MASK 0x00004000
-+#define TECR0_AMP_RED_MASK 0x0000003f
++/* PCS Link up */
++#define XFI_PCS_SR1 0x20
++#define KR_RX_LINK_STAT_MASK 0x1000
+
-+#define RECR1_CTL_SNP_DONE_MASK 0x00000002
-+#define RECR1_SNP_DONE_MASK 0x00000004
-+#define TCSR1_SNP_DATA_MASK 0x0000ffc0
-+#define TCSR1_SNP_DATA_SHIFT 6
-+#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
++/* KX PCS mode register */
++#define KX_PCS_IF_MODE 0x8014
+
-+#define RECR1_GAINK2_MASK 0x0f000000
-+#define RECR1_GAINK2_SHIFT 24
-+#define RECR1_GAINK3_MASK 0x000f0000
-+#define RECR1_GAINK3_SHIFT 16
-+#define RECR1_OFFSET_MASK 0x00003f80
-+#define RECR1_OFFSET_SHIFT 7
-+#define RECR1_BLW_MASK 0x00000f80
-+#define RECR1_BLW_SHIFT 7
-+#define EYE_CTRL_SHIFT 12
-+#define BASE_WAND_SHIFT 10
++/* KX PCS mode register init value */
++#define KX_IF_MODE_INIT 0x8
++
++/* KX/KR AN registers */
++#define AN_CTRL_INIT 0x1200
++#define KX_AN_AD1_INIT 0x25
++#define KR_AN_AD1_INIT_10G 0x85
++#define KR_AN_AD1_INIT_40G 0x105
++#define AN_LNK_UP_MASK 0x4
++#define KR_AN_MASK_10G 0x8
++#define KR_AN_MASK_40G 0x20
++#define TRAIN_FAIL 0x8
++#define KR_AN_40G_MDIO_OFFSET 4
+
++/* XGKR Timeouts */
+#define XGKR_TIMEOUT 1050
++#define XGKR_DENY_RT_INTERVAL 3000
++#define XGKR_AN_WAIT_ITERATIONS 5
+
-+#define INCREMENT 1
-+#define DECREMENT 2
++/* XGKR Increment/Decrement Requests */
++#define INCREMENT 1
++#define DECREMENT 2
+#define TIMEOUT_LONG 3
-+#define TIMEOUT_M1 3
++#define TIMEOUT_M1 3
+
++/* XGKR Masks */
+#define RX_READY_MASK 0x8000
-+#define PRESET_MASK 0x2000
-+#define INIT_MASK 0x1000
-+#define COP1_MASK 0x30
-+#define COP1_SHIFT 4
-+#define COZ_MASK 0xc
-+#define COZ_SHIFT 2
-+#define COM1_MASK 0x3
-+#define COM1_SHIFT 0
++#define PRESET_MASK 0x2000
++#define INIT_MASK 0x1000
++#define COP1_MASK 0x30
++#define COP1_SHIFT 4
++#define COZ_MASK 0xc
++#define COZ_SHIFT 2
++#define COM1_MASK 0x3
++#define COM1_SHIFT 0
+#define REQUEST_MASK 0x3f
+#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
+ COP1_MASK | COZ_MASK | COM1_MASK)
+
++/* Lanes definitions */
++#define MASTER_LANE 0
++#define SINGLE_LANE 0
++#define MAX_PHY_LANES_NO 4
++
++/* Invalid value */
++#define VAL_INVALID 0xff
++
++/* New XGKR Training Algorithm */
+#define NEW_ALGORITHM_TRAIN_TX
++
+#ifdef NEW_ALGORITHM_TRAIN_TX
-+#define FORCE_INC_COP1_NUMBER 0
-+#define FORCE_INC_COM1_NUMBER 1
++#define FORCE_INC_COP1_NUMBER 0
++#define FORCE_INC_COM1_NUMBER 1
+#endif
+
-+#define VAL_INVALID 0xff
++/* Link_Training_Registers offsets */
++static int lt_MDIO_MMD = 0;
++static u32 lt_KR_PMD_CTRL = 0;
++static u32 lt_KR_PMD_STATUS = 0;
++static u32 lt_KR_LP_CU = 0;
++static u32 lt_KR_LP_STATUS = 0;
++static u32 lt_KR_LD_CU = 0;
++static u32 lt_KR_LD_STATUS = 0;
++
++/* KX/KR AN registers offsets */
++static u32 g_an_AD1 = 0;
++static u32 g_an_BP_STAT = 0;
+
+static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
+ 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
+enum backplane_mode {
+ PHY_BACKPLANE_1000BASE_KX,
+ PHY_BACKPLANE_10GBASE_KR,
++ PHY_BACKPLANE_40GBASE_KR,
+ PHY_BACKPLANE_INVAL
+};
+
++enum serdes_type {
++ SERDES_10G,
++ SERDES_28G,
++ SERDES_INVAL
++};
++
+enum coe_filed {
+ COE_COP1,
+ COE_COZ,
+ TRAINED,
+};
+
-+struct per_lane_ctrl_status {
-+ __be32 gcr0; /* 0x.000 - General Control Register 0 */
-+ __be32 gcr1; /* 0x.004 - General Control Register 1 */
-+ __be32 gcr2; /* 0x.008 - General Control Register 2 */
-+ __be32 resv1; /* 0x.00C - Reserved */
-+ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
-+ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
-+ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
-+ __be32 resv2; /* 0x.01C - Reserved */
-+ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
-+ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
-+ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
-+ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
-+ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
-+ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
-+ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
-+ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
-+};
-+
+struct tx_condition {
+ bool bin_m1_late_early;
+ bool bin_long_late_early;
+#endif
+};
+
-+struct fsl_xgkr_inst {
-+ void *reg_base;
++struct xgkr_params {
++ void *reg_base; /* lane memory map: registers base address */
++ int idx; /* lane relative index inside a multi-lane PHY */
+ struct phy_device *phydev;
++ struct serdes_access *srds;
+ struct tx_condition tx_c;
+ struct delayed_work xgkr_wk;
+ enum train_state state;
++ int an_wait_count;
++ unsigned long rt_time;
+ u32 ld_update;
+ u32 ld_status;
+ u32 ratio_preq;
+ u32 ratio_pst1q;
+ u32 adpt_eq;
++ u32 tuned_ratio_preq;
++ u32 tuned_ratio_pst1q;
++ u32 tuned_adpt_eq;
++};
++
++struct xgkr_phy_data {
++ int bp_mode;
++ u32 phy_lanes;
++ struct mutex phy_lock;
++ bool aneg_done;
++ struct xgkr_params xgkr[MAX_PHY_LANES_NO];
+};
+
++static void setup_an_lt_ls(void)
++{
++ /* KR PMD registers */
++ lt_MDIO_MMD = MDIO_MMD_PMAPMD;
++ lt_KR_PMD_CTRL = 0x96;
++ lt_KR_PMD_STATUS = 0x97;
++ lt_KR_LP_CU = 0x98;
++ lt_KR_LP_STATUS = 0x99;
++ lt_KR_LD_CU = 0x9a;
++ lt_KR_LD_STATUS = 0x9b;
++
++ /* KX/KR AN registers */
++ g_an_AD1 = 0x11;
++ g_an_BP_STAT = 0x30;
++}
++
++static void setup_an_lt_lx(void)
++{
++ /* Auto-Negotiation and Link Training Core Registers page 1: 256 = 0x100 */
++ lt_MDIO_MMD = MDIO_MMD_AN;
++ lt_KR_PMD_CTRL = 0x100;
++ lt_KR_PMD_STATUS = 0x101;
++ lt_KR_LP_CU = 0x102;
++ lt_KR_LP_STATUS = 0x103;
++ lt_KR_LD_CU = 0x104;
++ lt_KR_LD_STATUS = 0x105;
++
++ /* KX/KR AN registers */
++ g_an_AD1 = 0x03;
++ g_an_BP_STAT = 0x0F;
++}
++
++static u32 le_ioread32(u32 *reg)
++{
++ return ioread32(reg);
++}
++
++static void le_iowrite32(u32 value, u32 *reg)
++{
++ iowrite32(value, reg);
++}
++
++static u32 be_ioread32(u32 *reg)
++{
++ return ioread32be(reg);
++}
++
++static void be_iowrite32(u32 value, u32 *reg)
++{
++ iowrite32be(value, reg);
++}
++
++/**
++ * xgkr_phy_write_mmd - Wrapper function for phy_write_mmd
++ * for writing a register on an MMD on a given PHY.
++ *
++ * Same rules as for phy_write_mmd();
++ */
++static int xgkr_phy_write_mmd(struct xgkr_params *xgkr, int devad, u32 regnum, u16 val)
++{
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int mdio_addr = phydev->mdio.addr;
++ int err;
++
++ mutex_lock(&xgkr_inst->phy_lock);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
++ //40G AN: prepare mdio address for writing phydev AN registers for 40G on respective lane
++ phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
++ }
++
++ err = phy_write_mmd(phydev, devad, regnum, val);
++ if (err)
++ dev_err(&phydev->mdio.dev, "Writing PHY (%p) MMD = 0x%02x register = 0x%02x failed with error code: 0x%08x \n", phydev, devad, regnum, err);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
++ //40G AN: restore mdio address
++ phydev->mdio.addr = mdio_addr;
++ }
++
++ mutex_unlock(&xgkr_inst->phy_lock);
++
++ return err;
++}
++
++/**
++ * xgkr_phy_read_mmd - Wrapper function for phy_read_mmd
++ * for reading a register from an MMD on a given PHY.
++ *
++ * Same rules as for phy_read_mmd();
++ */
++static int xgkr_phy_read_mmd(struct xgkr_params *xgkr, int devad, u32 regnum)
++{
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int mdio_addr = phydev->mdio.addr;
++ int ret;
++
++ mutex_lock(&xgkr_inst->phy_lock);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
++ //40G AN: prepare mdio address for reading phydev AN registers for 40G on respective lane
++ phydev->mdio.addr = KR_AN_40G_MDIO_OFFSET + xgkr->idx;
++ }
++
++ ret = phy_read_mmd(phydev, devad, regnum);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR && devad == MDIO_MMD_AN) {
++ //40G AN: restore mdio address
++ phydev->mdio.addr = mdio_addr;
++ }
++
++ mutex_unlock(&xgkr_inst->phy_lock);
++
++ return ret;
++}
++
+static void tx_condition_init(struct tx_condition *tx_c)
+{
+ tx_c->bin_m1_late_early = true;
+#endif
+}
+
-+void tune_tecr0(struct fsl_xgkr_inst *inst)
++void tune_tecr(struct xgkr_params *xgkr)
+{
-+ struct per_lane_ctrl_status *reg_base = inst->reg_base;
-+ u32 val;
-+
-+ val = TECR0_INIT |
-+ inst->adpt_eq << ZERO_COE_SHIFT |
-+ inst->ratio_preq << PRE_COE_SHIFT |
-+ inst->ratio_pst1q << POST_COE_SHIFT;
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ bool reset = false;
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
++ /* Reset only the Master Lane */
++ reset = (xgkr->idx == MASTER_LANE);
++ } else {
++ reset = true;
++ }
++
++ xgkr->srds->tune_tecr(xgkr->reg_base, xgkr->ratio_preq, xgkr->ratio_pst1q, xgkr->adpt_eq, reset);
+
-+ /* reset the lane */
-+ iowrite32(ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
-+ iowrite32(val, ®_base->tecr0);
-+ udelay(1);
-+ /* unreset the lane */
-+ iowrite32(ioread32(®_base->gcr0) | GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
++ xgkr->tuned_ratio_preq = xgkr->ratio_preq;
++ xgkr->tuned_ratio_pst1q = xgkr->ratio_pst1q;
++ xgkr->tuned_adpt_eq = xgkr->adpt_eq;
+}
+
-+static void start_lt(struct phy_device *phydev)
++static void start_lt(struct xgkr_params *xgkr)
+{
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_EN);
+}
+
-+static void stop_lt(struct phy_device *phydev)
++static void stop_lt(struct xgkr_params *xgkr)
+{
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
+}
+
-+static void reset_gcr0(struct fsl_xgkr_inst *inst)
++static void reset_lt(struct xgkr_params *xgkr)
+{
-+ struct per_lane_ctrl_status *reg_base = inst->reg_base;
-+
-+ iowrite32(ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
-+ iowrite32(ioread32(®_base->gcr0) | GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, MDIO_CTRL1, PMD_RESET);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_CTRL, TRAIN_DISABLE);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_CU, 0);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LD_STATUS, 0);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS, 0);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU, 0);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS, 0);
++
+}
+
-+void lane_set_1gkx(void *reg)
++static void ld_coe_status(struct xgkr_params *xgkr)
+{
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ u32 val;
-+
-+ /* reset the lane */
-+ iowrite32(ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
-+
-+ /* set gcr1 for 1GKX */
-+ val = ioread32(®_base->gcr1);
-+ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
-+ GCR1_REIDL_ET_MAS_MASK);
-+ iowrite32(val, ®_base->gcr1);
-+ udelay(1);
-+
-+ /* set tecr0 for 1GKX */
-+ val = ioread32(®_base->tecr0);
-+ val &= ~TECR0_AMP_RED_MASK;
-+ iowrite32(val, ®_base->tecr0);
-+ udelay(1);
-+
-+ /* unreset the lane */
-+ iowrite32(ioread32(®_base->gcr0) | GCR0_RESET_MASK,
-+ ®_base->gcr0);
-+ udelay(1);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
++ lt_KR_LD_STATUS, xgkr->ld_status);
+}
+
-+static void reset_lt(struct phy_device *phydev)
++static void ld_coe_update(struct xgkr_params *xgkr)
+{
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
++ dev_dbg(&xgkr->phydev->mdio.dev, "sending request: %x\n", xgkr->ld_update);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
++ lt_KR_LD_CU, xgkr->ld_update);
+}
+
+static void start_xgkr_state_machine(struct delayed_work *work)
+ msecs_to_jiffies(XGKR_TIMEOUT));
+}
+
-+static void start_xgkr_an(struct phy_device *phydev)
++static void start_xgkr_an(struct xgkr_params *xgkr)
+{
-+ struct fsl_xgkr_inst *inst;
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int i;
++ int err;
+
-+ reset_lt(phydev);
-+ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
-+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
++ break;
+
-+ inst = phydev->priv;
++ case PHY_BACKPLANE_10GBASE_KR:
++ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_10G);
++ if (err)
++ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", g_an_AD1, err);
++ udelay(1);
++ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
++ if (err)
++ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x failed with error code: 0x%08x \n", MDIO_CTRL1, err);
++ break;
+
-+ /* start state machine*/
-+ start_xgkr_state_machine(&inst->xgkr_wk);
-+}
++ case PHY_BACKPLANE_40GBASE_KR:
++ if (xgkr->idx == MASTER_LANE) {
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ err = xgkr_phy_write_mmd(&xgkr_inst->xgkr[i], MDIO_MMD_AN, g_an_AD1, KR_AN_AD1_INIT_40G);
++ if (err)
++ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on lane %d failed with error code: 0x%08x \n", g_an_AD1, xgkr_inst->xgkr[i].idx, err);
++ }
++ udelay(1);
++ err = xgkr_phy_write_mmd(xgkr, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
++ if (err)
++ dev_err(&phydev->mdio.dev, "Setting AN register 0x%02x on Master Lane failed with error code: 0x%08x \n", MDIO_CTRL1, err);
++ }
++ break;
++ }
++}
+
+static void start_1gkx_an(struct phy_device *phydev)
+{
-+ phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
-+ phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
++ phy_write_mmd(phydev, MDIO_MMD_PCS, KX_PCS_IF_MODE, KX_IF_MODE_INIT);
++ phy_write_mmd(phydev, MDIO_MMD_AN, g_an_AD1, KX_AN_AD1_INIT);
+ phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
+}
+
-+static void ld_coe_status(struct fsl_xgkr_inst *inst)
++static void reset_tecr(struct xgkr_params *xgkr)
+{
-+ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
-+ FSL_KR_LD_STATUS, inst->ld_status);
-+}
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
+
-+static void ld_coe_update(struct fsl_xgkr_inst *inst)
-+{
-+ dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
-+ phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
-+ FSL_KR_LD_CU, inst->ld_update);
-+}
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
++ break;
+
-+static void init_inst(struct fsl_xgkr_inst *inst, int reset)
-+{
-+ if (reset) {
-+ inst->ratio_preq = RATIO_PREQ;
-+ inst->ratio_pst1q = RATIO_PST1Q;
-+ inst->adpt_eq = RATIO_EQ;
-+ tune_tecr0(inst);
++ case PHY_BACKPLANE_10GBASE_KR:
++ xgkr->ratio_preq = RATIO_PREQ_10G;
++ xgkr->ratio_pst1q = RATIO_PST1Q_10G;
++ xgkr->adpt_eq = RATIO_EQ_10G;
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ xgkr->ratio_preq = RATIO_PREQ_40G;
++ xgkr->ratio_pst1q = RATIO_PST1Q_40G;
++ xgkr->adpt_eq = RATIO_EQ_40G;
++ break;
+ }
+
-+ tx_condition_init(&inst->tx_c);
-+ inst->state = DETECTING_LP;
-+ inst->ld_status &= RX_READY_MASK;
-+ ld_coe_status(inst);
-+ inst->ld_update = 0;
-+ inst->ld_status &= ~RX_READY_MASK;
-+ ld_coe_status(inst);
++ tune_tecr(xgkr);
+}
+
-+#ifdef NEW_ALGORITHM_TRAIN_TX
-+static int get_median_gaink2(u32 *reg)
++static void init_xgkr(struct xgkr_params *xgkr, int reset)
+{
-+ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
-+ u32 rx_eq_snp;
-+ struct per_lane_ctrl_status *reg_base;
-+ int timeout;
-+ int i, j, tmp, pos;
-+
-+ reg_base = (struct per_lane_ctrl_status *)reg;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while (ioread32(®_base->recr1) &
-+ RECR1_CTL_SNP_DONE_MASK) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* start snap shot */
-+ iowrite32((ioread32(®_base->gcr1) |
-+ GCR1_CTL_SNP_START_MASK),
-+ ®_base->gcr1);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(ioread32(®_base->recr1) &
-+ RECR1_CTL_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
++ if (reset)
++ reset_tecr(xgkr);
+
-+ /* read and save the snap shot */
-+ rx_eq_snp = ioread32(®_base->recr1);
-+ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
-+ RECR1_GAINK2_SHIFT;
-+
-+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
-+ iowrite32((ioread32(®_base->gcr1) &
-+ ~GCR1_CTL_SNP_START_MASK),
-+ ®_base->gcr1);
-+ }
-+
-+ /* get median of the 5 snap shot */
-+ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
-+ tmp = gaink2_snap_shot[i];
-+ pos = i;
-+ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
-+ if (gaink2_snap_shot[j] < tmp) {
-+ tmp = gaink2_snap_shot[j];
-+ pos = j;
-+ }
-+ }
++ tx_condition_init(&xgkr->tx_c);
++ xgkr->state = DETECTING_LP;
+
-+ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
-+ gaink2_snap_shot[i] = tmp;
-+ }
++ xgkr->ld_status &= RX_READY_MASK;
++ ld_coe_status(xgkr);
++ xgkr->ld_update = 0;
++ xgkr->ld_status &= ~RX_READY_MASK;
++ ld_coe_status(xgkr);
+
-+ return gaink2_snap_shot[2];
+}
-+#endif
+
-+static bool is_bin_early(int bin_sel, void *reg)
++static void initialize(struct xgkr_params *xgkr)
+{
-+ bool early = false;
-+ int bin_snap_shot[BIN_SNAPSHOT_NUM];
-+ int i, negative_count = 0;
-+ struct per_lane_ctrl_status *reg_base = reg;
-+ int timeout;
-+
-+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
-+ /* wait RECR1_SNP_DONE_MASK has cleared */
-+ timeout = 100;
-+ while ((ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* set TCSR1[CDR_SEL] to BinM1/BinLong */
-+ if (bin_sel == BIN_M1) {
-+ iowrite32((ioread32(®_base->tcsr1) &
-+ ~CDR_SEL_MASK) | BIN_M1_SEL,
-+ ®_base->tcsr1);
-+ } else {
-+ iowrite32((ioread32(®_base->tcsr1) &
-+ ~CDR_SEL_MASK) | BIN_Long_SEL,
-+ ®_base->tcsr1);
-+ }
++ reset_tecr(xgkr);
+
-+ /* start snap shot */
-+ iowrite32(ioread32(®_base->gcr1) | GCR1_SNP_START_MASK,
-+ ®_base->gcr1);
-+
-+ /* wait for SNP done */
-+ timeout = 100;
-+ while (!(ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
-+ udelay(1);
-+ timeout--;
-+ if (timeout == 0)
-+ break;
-+ }
-+
-+ /* read and save the snap shot */
-+ bin_snap_shot[i] = (ioread32(®_base->tcsr1) &
-+ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
-+ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
-+ negative_count++;
-+
-+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
-+ iowrite32(ioread32(®_base->gcr1) & ~GCR1_SNP_START_MASK,
-+ ®_base->gcr1);
-+ }
-+
-+ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
-+ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
-+ early = true;
-+ }
-+
-+ return early;
++ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ xgkr->ld_status |= COE_UPDATED << COP1_SHIFT |
++ COE_UPDATED << COZ_SHIFT |
++ COE_UPDATED << COM1_SHIFT;
++ ld_coe_status(xgkr);
+}
+
-+static void train_tx(struct fsl_xgkr_inst *inst)
++static void train_remote_tx(struct xgkr_params *xgkr)
+{
-+ struct phy_device *phydev = inst->phydev;
-+ struct tx_condition *tx_c = &inst->tx_c;
++ struct tx_condition *tx_c = &xgkr->tx_c;
+ bool bin_m1_early, bin_long_early;
+ u32 lp_status, old_ld_update;
+ u32 status_cop1, status_coz, status_com1;
+recheck:
+ if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
+ tx_c->tx_complete = true;
-+ inst->ld_status |= RX_READY_MASK;
-+ ld_coe_status(inst);
++ xgkr->ld_status |= RX_READY_MASK;
++ ld_coe_status(xgkr);
++
+ /* tell LP we are ready */
-+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
-+ FSL_KR_PMD_STATUS, RX_STAT);
++ xgkr_phy_write_mmd(xgkr, lt_MDIO_MMD,
++ lt_KR_PMD_STATUS, RX_STAT);
++
+ return;
+ }
+
+ * we can clear up the appropriate update request so that the
+ * subsequent code may easily issue new update requests if needed.
+ */
-+ lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
++ lp_status = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
+ REQUEST_MASK;
++
+ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
+ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
+ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
+
-+ old_ld_update = inst->ld_update;
++ old_ld_update = xgkr->ld_update;
+ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
+ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
+ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
+ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
+ (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
+ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
-+ inst->ld_update &= ~PRESET_MASK;
++ xgkr->ld_update &= ~PRESET_MASK;
+ }
+ }
+
+ if (status_cop1 != COE_NOTUPDATED &&
+ status_coz != COE_NOTUPDATED &&
+ status_com1 != COE_NOTUPDATED) {
-+ inst->ld_update &= ~INIT_MASK;
++ xgkr->ld_update &= ~INIT_MASK;
+ }
+ }
+
+ */
+ if (!tx_c->sent_init) {
+ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
-+ inst->ld_update = INIT_MASK;
++ xgkr->ld_update = INIT_MASK;
+ tx_c->sent_init = true;
+ }
+ }
+ */
+ if (status_cop1 != COE_NOTUPDATED) {
+ if (req_cop1) {
-+ inst->ld_update &= ~COP1_MASK;
++ xgkr->ld_update &= ~COP1_MASK;
+#ifdef NEW_ALGORITHM_TRAIN_TX
+ if (tx_c->post_inc) {
+ if (req_cop1 == INCREMENT &&
+ tx_c->post_inc -= 1;
+ }
+
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ goto recheck;
+ }
+#endif
+ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
+ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
-+ dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
++ dev_dbg(&xgkr->phydev->mdio.dev, "COP1 hit limit %s",
+ (status_cop1 == COE_MIN) ?
+ "DEC MIN" : "INC MAX");
+ tx_c->long_min_max_cnt++;
+ if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
+ tx_c->bin_long_stop = true;
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ goto recheck;
+ }
+ }
+
+ if (status_coz != COE_NOTUPDATED) {
+ if (req_coz)
-+ inst->ld_update &= ~COZ_MASK;
++ xgkr->ld_update &= ~COZ_MASK;
+ }
+
+ if (status_com1 != COE_NOTUPDATED) {
+ if (req_com1) {
-+ inst->ld_update &= ~COM1_MASK;
++ xgkr->ld_update &= ~COM1_MASK;
+#ifdef NEW_ALGORITHM_TRAIN_TX
+ if (tx_c->pre_inc) {
+ if (req_com1 == INCREMENT &&
+ else
+ tx_c->pre_inc -= 1;
+
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ goto recheck;
+ }
+#endif
+ /* Stop If we have reached the limit for a parameter. */
+ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
+ (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
-+ dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
++ dev_dbg(&xgkr->phydev->mdio.dev, "COM1 hit limit %s",
+ (status_com1 == COE_MIN) ?
+ "DEC MIN" : "INC MAX");
+ tx_c->m1_min_max_cnt++;
+ if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
+ tx_c->bin_m1_stop = true;
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ goto recheck;
+ }
+ }
+ }
+ }
+
-+ if (old_ld_update != inst->ld_update) {
-+ ld_coe_update(inst);
++ if (old_ld_update != xgkr->ld_update) {
++ ld_coe_update(xgkr);
+ /* Redo these status checks and updates until we have no more
+ * changes, to speed up the overall process.
+ */
+ return;
+
+#ifdef NEW_ALGORITHM_TRAIN_TX
-+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
++ if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
+ if (tx_c->pre_inc) {
-+ inst->ld_update = INCREMENT << COM1_SHIFT;
-+ ld_coe_update(inst);
++ xgkr->ld_update = INCREMENT << COM1_SHIFT;
++ ld_coe_update(xgkr);
+ return;
+ }
+
+ if (status_cop1 != COE_MAX) {
-+ median_gaink2 = get_median_gaink2(inst->reg_base);
++ median_gaink2 = xgkr->srds->get_median_gaink2(xgkr->reg_base);
+ if (median_gaink2 == 0xf) {
+ tx_c->post_inc = 1;
+ } else {
+ }
+
+ if (tx_c->post_inc) {
-+ inst->ld_update = INCREMENT << COP1_SHIFT;
-+ ld_coe_update(inst);
++ xgkr->ld_update = INCREMENT << COP1_SHIFT;
++ ld_coe_update(xgkr);
+ return;
+ }
+ }
+#endif
+
+ /* snapshot and select bin */
-+ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
-+ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
++ bin_m1_early = xgkr->srds->is_bin_early(BIN_M1, xgkr->reg_base);
++ bin_long_early = xgkr->srds->is_bin_early(BIN_LONG, xgkr->reg_base);
+
+ if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
+ tx_c->bin_m1_stop = true;
+ * pending. We also only request coefficient updates when the
+ * corresponding status is NOT UPDATED and nothing is pending.
+ */
-+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
++ if (!(xgkr->ld_update & (PRESET_MASK | INIT_MASK))) {
+ if (!tx_c->bin_long_stop) {
+ /* BinM1 correction means changing COM1 */
-+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
++ if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
+ /* Avoid BinM1Late by requesting an
+ * immediate decrement.
+ */
+ if (!bin_m1_early) {
+ /* request decrement c(-1) */
+ temp = DECREMENT << COM1_SHIFT;
-+ inst->ld_update = temp;
-+ ld_coe_update(inst);
++ xgkr->ld_update = temp;
++ ld_coe_update(xgkr);
+ tx_c->bin_m1_late_early = bin_m1_early;
+ return;
+ }
+ }
+
+ /* BinLong correction means changing COP1 */
-+ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
++ if (!status_cop1 && !(xgkr->ld_update & COP1_MASK)) {
+ /* Locate BinLong transition point (if any)
+ * while avoiding BinM1Late.
+ */
+ if (bin_long_early) {
+ /* request increment c(1) */
+ temp = INCREMENT << COP1_SHIFT;
-+ inst->ld_update = temp;
++ xgkr->ld_update = temp;
+ } else {
+ /* request decrement c(1) */
+ temp = DECREMENT << COP1_SHIFT;
-+ inst->ld_update = temp;
++ xgkr->ld_update = temp;
+ }
+
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ tx_c->bin_long_late_early = bin_long_early;
+ }
+ /* We try to finish BinLong before we do BinM1 */
+
+ if (!tx_c->bin_m1_stop) {
+ /* BinM1 correction means changing COM1 */
-+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
++ if (!status_com1 && !(xgkr->ld_update & COM1_MASK)) {
+ /* Locate BinM1 transition point (if any) */
+ if (bin_m1_early) {
+ /* request increment c(-1) */
+ temp = INCREMENT << COM1_SHIFT;
-+ inst->ld_update = temp;
++ xgkr->ld_update = temp;
+ } else {
+ /* request decrement c(-1) */
+ temp = DECREMENT << COM1_SHIFT;
-+ inst->ld_update = temp;
++ xgkr->ld_update = temp;
+ }
+
-+ ld_coe_update(inst);
++ ld_coe_update(xgkr);
+ tx_c->bin_m1_late_early = bin_m1_early;
+ }
+ }
+
+static int is_link_up(struct phy_device *phydev)
+{
-+ int val;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int val = 0;
++
++ mutex_lock(&xgkr_inst->phy_lock);
+
-+ phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
-+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
++ val = phy_read_mmd(phydev, MDIO_MMD_PCS, XFI_PCS_SR1);
+
-+ return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
++ mutex_unlock(&xgkr_inst->phy_lock);
++
++ return (val & KR_RX_LINK_STAT_MASK) ? 1 : 0;
+}
+
-+static int is_link_training_fail(struct phy_device *phydev)
++static int is_link_training_fail(struct xgkr_params *xgkr)
+{
++ struct phy_device *phydev = xgkr->phydev;
+ int val;
+ int timeout = 100;
+
-+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
++ val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_PMD_STATUS);
++
+ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
+ /* check LNK_STAT for sure */
+ while (timeout--) {
+ return 1;
+}
+
-+static int check_rx(struct phy_device *phydev)
++static int check_rx(struct xgkr_params *xgkr)
+{
-+ return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
++ return xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_STATUS) &
+ RX_READY_MASK;
+}
+
+/* Coefficient values have hardware restrictions */
-+static int is_ld_valid(struct fsl_xgkr_inst *inst)
++static int is_ld_valid(struct xgkr_params *xgkr)
+{
-+ u32 ratio_pst1q = inst->ratio_pst1q;
-+ u32 adpt_eq = inst->adpt_eq;
-+ u32 ratio_preq = inst->ratio_preq;
++ u32 ratio_pst1q = xgkr->ratio_pst1q;
++ u32 adpt_eq = xgkr->adpt_eq;
++ u32 ratio_preq = xgkr->ratio_preq;
+
+ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
+ return 0;
+ }
+}
+
-+static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
++static enum coe_update inc_dec(struct xgkr_params *xgkr, int field, int request)
+{
+ u32 ld_limit[3], ld_coe[3], step[3];
+
-+ ld_coe[0] = inst->ratio_pst1q;
-+ ld_coe[1] = inst->adpt_eq;
-+ ld_coe[2] = inst->ratio_preq;
++ ld_coe[0] = xgkr->ratio_pst1q;
++ ld_coe[1] = xgkr->adpt_eq;
++ ld_coe[2] = xgkr->ratio_preq;
+
-+ /* Information specific to the Freescale SerDes for 10GBase-KR:
++ /* Information specific to the SerDes for 10GBase-KR:
+ * Incrementing C(+1) means *decrementing* RATIO_PST1Q
+ * Incrementing C(0) means incrementing ADPT_EQ
+ * Incrementing C(-1) means *decrementing* RATIO_PREQ
+ ld_coe[field] += step[field];
+ else
+ /* MAX */
-+ return 2;
++ return COE_MAX;
+ break;
+ case DECREMENT:
+ ld_limit[0] = POST_COE_MIN;
+ ld_coe[field] -= step[field];
+ else
+ /* MIN */
-+ return 1;
++ return COE_MIN;
+ break;
+ default:
+ break;
+ }
+
-+ if (is_ld_valid(inst)) {
++ if (is_ld_valid(xgkr)) {
+ /* accept new ld */
-+ inst->ratio_pst1q = ld_coe[0];
-+ inst->adpt_eq = ld_coe[1];
-+ inst->ratio_preq = ld_coe[2];
++ xgkr->ratio_pst1q = ld_coe[0];
++ xgkr->adpt_eq = ld_coe[1];
++ xgkr->ratio_preq = ld_coe[2];
+ /* only some values for preq and pst1q can be used.
+ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
+ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
+ */
+ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
-+ dev_dbg(&inst->phydev->mdio.dev,
++ dev_dbg(&xgkr->phydev->mdio.dev,
+ "preq skipped value: %d\n", ld_coe[2]);
-+ return 0;
++ /* NOT UPDATED */
++ return COE_NOTUPDATED;
+ }
+
+ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
-+ dev_dbg(&inst->phydev->mdio.dev,
++ dev_dbg(&xgkr->phydev->mdio.dev,
+ "pst1q skipped value: %d\n", ld_coe[0]);
-+ return 0;
++ /* NOT UPDATED */
++ return COE_NOTUPDATED;
+ }
+
-+ tune_tecr0(inst);
++ tune_tecr(xgkr);
+ } else {
+ if (request == DECREMENT)
+ /* MIN */
-+ return 1;
++ return COE_MIN;
+ if (request == INCREMENT)
+ /* MAX */
-+ return 2;
++ return COE_MAX;
+ }
+
-+ return 0;
++ /* UPDATED */
++ return COE_UPDATED;
+}
+
-+static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
++static void min_max_updated(struct xgkr_params *xgkr, int field, enum coe_update cs)
+{
-+ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
+ u32 mask, val;
++ u32 ld_cs = cs;
++
++ if (cs == COE_INV)
++ return;
+
+ switch (field) {
+ case COE_COP1:
+ mask = COP1_MASK;
-+ val = ld_coe[new_ld] << COP1_SHIFT;
++ val = ld_cs << COP1_SHIFT;
+ break;
+ case COE_COZ:
+ mask = COZ_MASK;
-+ val = ld_coe[new_ld] << COZ_SHIFT;
++ val = ld_cs << COZ_SHIFT;
+ break;
+ case COE_COM:
+ mask = COM1_MASK;
-+ val = ld_coe[new_ld] << COM1_SHIFT;
++ val = ld_cs << COM1_SHIFT;
+ break;
+ default:
+ return;
+ }
+
-+ inst->ld_status &= ~mask;
-+ inst->ld_status |= val;
++ xgkr->ld_status &= ~mask;
++ xgkr->ld_status |= val;
+}
+
-+static void check_request(struct fsl_xgkr_inst *inst, int request)
++static void check_request(struct xgkr_params *xgkr, int request)
+{
+ int cop1_req, coz_req, com_req;
-+ int old_status, new_ld_sta;
++ int old_status;
++ enum coe_update cu;
+
+ cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
+ coz_req = (request & COZ_MASK) >> COZ_SHIFT;
+ /* IEEE802.3-2008, 72.6.10.2.5
+ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
+ */
-+ old_status = inst->ld_status;
++ old_status = xgkr->ld_status;
+
-+ if (cop1_req && !(inst->ld_status & COP1_MASK)) {
-+ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
-+ min_max_updated(inst, COE_COP1, new_ld_sta);
++ if (cop1_req && !(xgkr->ld_status & COP1_MASK)) {
++ cu = inc_dec(xgkr, COE_COP1, cop1_req);
++ min_max_updated(xgkr, COE_COP1, cu);
+ }
+
-+ if (coz_req && !(inst->ld_status & COZ_MASK)) {
-+ new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
-+ min_max_updated(inst, COE_COZ, new_ld_sta);
++ if (coz_req && !(xgkr->ld_status & COZ_MASK)) {
++ cu = inc_dec(xgkr, COE_COZ, coz_req);
++ min_max_updated(xgkr, COE_COZ, cu);
+ }
+
-+ if (com_req && !(inst->ld_status & COM1_MASK)) {
-+ new_ld_sta = inc_dec(inst, COE_COM, com_req);
-+ min_max_updated(inst, COE_COM, new_ld_sta);
++ if (com_req && !(xgkr->ld_status & COM1_MASK)) {
++ cu = inc_dec(xgkr, COE_COM, com_req);
++ min_max_updated(xgkr, COE_COM, cu);
+ }
+
-+ if (old_status != inst->ld_status)
-+ ld_coe_status(inst);
++ if (old_status != xgkr->ld_status)
++ ld_coe_status(xgkr);
+}
+
-+static void preset(struct fsl_xgkr_inst *inst)
++static void preset(struct xgkr_params *xgkr)
+{
+ /* These are all MAX values from the IEEE802.3 perspective. */
-+ inst->ratio_pst1q = POST_COE_MAX;
-+ inst->adpt_eq = ZERO_COE_MAX;
-+ inst->ratio_preq = PRE_COE_MAX;
++ xgkr->ratio_pst1q = POST_COE_MAX;
++ xgkr->adpt_eq = ZERO_COE_MAX;
++ xgkr->ratio_preq = PRE_COE_MAX;
+
-+ tune_tecr0(inst);
-+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ inst->ld_status |= COE_MAX << COP1_SHIFT |
++ tune_tecr(xgkr);
++ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ xgkr->ld_status |= COE_MAX << COP1_SHIFT |
+ COE_MAX << COZ_SHIFT |
+ COE_MAX << COM1_SHIFT;
-+ ld_coe_status(inst);
-+}
-+
-+static void initialize(struct fsl_xgkr_inst *inst)
-+{
-+ inst->ratio_preq = RATIO_PREQ;
-+ inst->ratio_pst1q = RATIO_PST1Q;
-+ inst->adpt_eq = RATIO_EQ;
-+
-+ tune_tecr0(inst);
-+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ inst->ld_status |= COE_UPDATED << COP1_SHIFT |
-+ COE_UPDATED << COZ_SHIFT |
-+ COE_UPDATED << COM1_SHIFT;
-+ ld_coe_status(inst);
++ ld_coe_status(xgkr);
+}
+
-+static void train_rx(struct fsl_xgkr_inst *inst)
++static void train_local_tx(struct xgkr_params *xgkr)
+{
-+ struct phy_device *phydev = inst->phydev;
+ int request, old_ld_status;
+
+ /* get request from LP */
-+ request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
++ request = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD, lt_KR_LP_CU) &
+ (LD_ALL_MASK);
-+ old_ld_status = inst->ld_status;
++
++ old_ld_status = xgkr->ld_status;
+
+ /* IEEE802.3-2008, 72.6.10.2.5
+ * Ensure we always go to NOT UDPATED for status reporting in
+ */
+ if (!(request & (PRESET_MASK | INIT_MASK))) {
+ if (!(request & COP1_MASK))
-+ inst->ld_status &= ~COP1_MASK;
++ xgkr->ld_status &= ~COP1_MASK;
+
+ if (!(request & COZ_MASK))
-+ inst->ld_status &= ~COZ_MASK;
++ xgkr->ld_status &= ~COZ_MASK;
+
+ if (!(request & COM1_MASK))
-+ inst->ld_status &= ~COM1_MASK;
++ xgkr->ld_status &= ~COM1_MASK;
+
-+ if (old_ld_status != inst->ld_status)
-+ ld_coe_status(inst);
++ if (old_ld_status != xgkr->ld_status)
++ ld_coe_status(xgkr);
+ }
+
+ /* As soon as the LP shows ready, no need to do any more updates. */
-+ if (check_rx(phydev)) {
++ if (check_rx(xgkr)) {
+ /* LP receiver is ready */
-+ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
-+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
-+ ld_coe_status(inst);
++ if (xgkr->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
++ xgkr->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
++ ld_coe_status(xgkr);
+ }
+ } else {
+ /* IEEE802.3-2008, 72.6.10.2.3.1/2
+ * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
+ */
+ if (request & (PRESET_MASK | INIT_MASK)) {
-+ if (!(inst->ld_status &
++ if (!(xgkr->ld_status &
+ (COP1_MASK | COZ_MASK | COM1_MASK))) {
+ if (request & PRESET_MASK)
-+ preset(inst);
++ preset(xgkr);
+
+ if (request & INIT_MASK)
-+ initialize(inst);
++ initialize(xgkr);
+ }
+ }
+
+ /* LP Coefficient are not in HOLD */
+ if (request & REQUEST_MASK)
-+ check_request(inst, request & REQUEST_MASK);
++ check_request(xgkr, request & REQUEST_MASK);
+ }
+}
+
-+static void xgkr_start_train(struct phy_device *phydev)
++static void xgkr_start_train(struct xgkr_params *xgkr)
+{
-+ struct fsl_xgkr_inst *inst = phydev->priv;
-+ struct tx_condition *tx_c = &inst->tx_c;
-+ int val = 0, i;
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ struct tx_condition *tx_c = &xgkr->tx_c;
++ int val = 0, i, j;
+ int lt_state;
+ unsigned long dead_line;
-+ int rx_ok, tx_ok;
-+
-+ init_inst(inst, 0);
-+ start_lt(phydev);
++ int lp_rx_ready, tx_training_complete;
++ u32 lt_timeout = 500;
++
++ init_xgkr(xgkr, 0);
++
++ start_lt(xgkr);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
++ lt_timeout = 2000;
++ }
+
+ for (i = 0; i < 2;) {
-+ dead_line = jiffies + msecs_to_jiffies(500);
++
++ dead_line = jiffies + msecs_to_jiffies(lt_timeout);
++
+ while (time_before(jiffies, dead_line)) {
-+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
-+ FSL_KR_PMD_STATUS);
++
++ val = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
++ lt_KR_PMD_STATUS);
++
+ if (val & TRAIN_FAIL) {
+ /* LT failed already, reset lane to avoid
+ * it run into hanging, then start LT again.
+ */
-+ reset_gcr0(inst);
-+ start_lt(phydev);
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
++ /* Reset only the Master Lane */
++ if (xgkr->idx == MASTER_LANE)
++ xgkr->srds->reset_lane(xgkr->reg_base);
++ } else {
++ xgkr->srds->reset_lane(xgkr->reg_base);
++ }
++
++ start_lt(xgkr);
+ } else if ((val & PMD_STATUS_SUP_STAT) &&
+ (val & PMD_STATUS_FRAME_LOCK))
+ break;
+ }
+
+ /* init process */
-+ rx_ok = false;
-+ tx_ok = false;
++ lp_rx_ready = false;
++ tx_training_complete = false;
+ /* the LT should be finished in 500ms, failed or OK. */
-+ dead_line = jiffies + msecs_to_jiffies(500);
++ dead_line = jiffies + msecs_to_jiffies(lt_timeout);
+
+ while (time_before(jiffies, dead_line)) {
+ /* check if the LT is already failed */
-+ lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
-+ FSL_KR_PMD_STATUS);
++
++ lt_state = xgkr_phy_read_mmd(xgkr, lt_MDIO_MMD,
++ lt_KR_PMD_STATUS);
++
+ if (lt_state & TRAIN_FAIL) {
-+ reset_gcr0(inst);
++
++ if (xgkr_inst->bp_mode == PHY_BACKPLANE_40GBASE_KR) {
++ /* Reset only the Master Lane */
++ if (xgkr->idx == MASTER_LANE)
++ xgkr->srds->reset_lane(xgkr->reg_base);
++ } else {
++ xgkr->srds->reset_lane(xgkr->reg_base);
++ }
++
+ break;
+ }
+
-+ rx_ok = check_rx(phydev);
-+ tx_ok = tx_c->tx_complete;
++ lp_rx_ready = check_rx(xgkr);
++ tx_training_complete = tx_c->tx_complete;
+
-+ if (rx_ok && tx_ok)
++ if (lp_rx_ready && tx_training_complete)
+ break;
+
-+ if (!rx_ok)
-+ train_rx(inst);
++ if (!lp_rx_ready)
++ train_local_tx(xgkr);
+
-+ if (!tx_ok)
-+ train_tx(inst);
++ if (!tx_training_complete)
++ train_remote_tx(xgkr);
+
+ usleep_range(100, 500);
+ }
+
+ i++;
+ /* check LT result */
-+ if (is_link_training_fail(phydev)) {
-+ init_inst(inst, 0);
++ if (is_link_training_fail(xgkr)) {
++ init_xgkr(xgkr, 0);
+ continue;
+ } else {
-+ stop_lt(phydev);
-+ inst->state = TRAINED;
++ stop_lt(xgkr);
++ xgkr->state = TRAINED;
++
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_10GBASE_KR:
++ if (phydev->attached_dev == NULL)
++ dev_info(&phydev->mdio.dev, "10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
++ xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
++ else
++ dev_info(&phydev->mdio.dev, "%s %s: 10GBase-KR link trained (Tx equalization: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x)\n",
++ dev_driver_string(phydev->attached_dev->dev.parent),
++ dev_name(phydev->attached_dev->dev.parent),
++ xgkr->tuned_ratio_preq, xgkr->tuned_ratio_pst1q, xgkr->tuned_adpt_eq);
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ if (xgkr->idx == xgkr_inst->phy_lanes - 1) {
++ if (phydev->attached_dev == NULL)
++ dev_info(&phydev->mdio.dev, "40GBase-KR link trained at lanes Tx equalization:\n");
++ else
++ dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR link trained at lanes Tx equalization:\n",
++ dev_driver_string(phydev->attached_dev->dev.parent),
++ dev_name(phydev->attached_dev->dev.parent));
++
++ for (j = 0; j < xgkr_inst->phy_lanes; j++) {
++ if (phydev->attached_dev == NULL)
++ dev_info(&phydev->mdio.dev, "40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
++ j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
++ else
++ dev_info(&phydev->mdio.dev, "%s %s: 40GBase-KR Lane %d: RATIO_PREQ = 0x%x, RATIO_PST1Q = 0x%x, ADPT_EQ = 0x%x\n",
++ dev_driver_string(phydev->attached_dev->dev.parent),
++ dev_name(phydev->attached_dev->dev.parent),
++ j, xgkr_inst->xgkr[j].tuned_ratio_preq, xgkr_inst->xgkr[j].tuned_ratio_pst1q, xgkr_inst->xgkr[j].tuned_adpt_eq);
++ }
++ }
++ break;
++ }
++
+ break;
+ }
+ }
+}
+
++static void xgkr_request_restart_an(struct xgkr_params *xgkr)
++{
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int i;
++
++ if (time_before(jiffies, xgkr->rt_time))
++ return;
++
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
++ break;
++
++ case PHY_BACKPLANE_10GBASE_KR:
++ init_xgkr(xgkr, 0);
++ reset_lt(xgkr);
++ xgkr->state = DETECTING_LP;
++ start_xgkr_an(xgkr);
++ start_xgkr_state_machine(&xgkr->xgkr_wk);
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ init_xgkr(&xgkr_inst->xgkr[i], 0);
++ reset_lt(&xgkr_inst->xgkr[i]);
++ xgkr_inst->xgkr[i].state = DETECTING_LP;
++ }
++ //Start AN only for Master Lane
++ start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
++ //start state machine
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
++ }
++ break;
++ }
++
++ xgkr->rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
++}
++
+static void xgkr_state_machine(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
-+ struct fsl_xgkr_inst *inst = container_of(dwork,
-+ struct fsl_xgkr_inst,
-+ xgkr_wk);
-+ struct phy_device *phydev = inst->phydev;
++ struct xgkr_params *xgkr = container_of(dwork,
++ struct xgkr_params, xgkr_wk);
++ struct phy_device *phydev = xgkr->phydev;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
+ int an_state;
-+ bool needs_train = false;
++ bool start_train = false;
++ bool all_lanes_trained = false;
++ int i;
+
-+ mutex_lock(&phydev->lock);
++ if (!xgkr_inst->aneg_done) {
++ start_xgkr_state_machine(&xgkr->xgkr_wk);
++ return;
++ }
+
-+ switch (inst->state) {
++ mutex_lock(&phydev->lock);
++
++ switch (xgkr->state) {
+ case DETECTING_LP:
-+ phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
-+ an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
-+ if ((an_state & KR_AN_MASK))
-+ needs_train = true;
++
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
++ break;
++
++ case PHY_BACKPLANE_10GBASE_KR:
++ an_state = xgkr_phy_read_mmd(xgkr, MDIO_MMD_AN, g_an_BP_STAT);
++ if (an_state & KR_AN_MASK_10G) {
++ //AN acquired: Train the lane
++ xgkr->an_wait_count = 0;
++ start_train = true;
++ } else {
++ //AN lost or not yet acquired
++ if (!is_link_up(phydev)) {
++ //Link is down: restart training
++ xgkr->an_wait_count = 0;
++ xgkr_request_restart_an(xgkr);
++ } else {
++ //Link is up: wait few iterations for AN to be acquired
++ if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
++ xgkr->an_wait_count = 0;
++ xgkr_request_restart_an(xgkr);
++ } else {
++ xgkr->an_wait_count++;
++ }
++ }
++ }
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ //Check AN state only on Master Lane
++ an_state = xgkr_phy_read_mmd(&xgkr_inst->xgkr[MASTER_LANE], MDIO_MMD_AN, g_an_BP_STAT);
++ if (an_state & KR_AN_MASK_40G) {
++ //AN acquired: Train all lanes in order starting with Master Lane
++ xgkr->an_wait_count = 0;
++ if (xgkr->idx == MASTER_LANE) {
++ start_train = true;
++ }
++ else if (xgkr_inst->xgkr[xgkr->idx - 1].state == TRAINED) {
++ start_train = true;
++ }
++ } else {
++ //AN lost or not yet acquired
++ if (!is_link_up(phydev)) {
++ //Link is down: restart training
++ xgkr->an_wait_count = 0;
++ xgkr_request_restart_an(xgkr);
++ } else {
++ //Link is up: wait few iterations for AN to be acquired
++ if (xgkr->an_wait_count >= XGKR_AN_WAIT_ITERATIONS) {
++ xgkr->an_wait_count = 0;
++ xgkr_request_restart_an(xgkr);
++ } else {
++ xgkr->an_wait_count++;
++ }
++ }
++ }
++ break;
++ }
+ break;
++
+ case TRAINED:
+ if (!is_link_up(phydev)) {
-+ dev_info(&phydev->mdio.dev,
-+ "Detect hotplug, restart training\n");
-+ init_inst(inst, 1);
-+ start_xgkr_an(phydev);
-+ inst->state = DETECTING_LP;
++ switch (xgkr_inst->bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ dev_err(&phydev->mdio.dev, "Wrong call path for 1000Base-KX \n");
++ break;
++
++ case PHY_BACKPLANE_10GBASE_KR:
++ dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
++ xgkr_request_restart_an(xgkr);
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ if (xgkr->idx == MASTER_LANE) {
++ //check if all lanes are trained only on Master Lane
++ all_lanes_trained = true;
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ if (xgkr_inst->xgkr[i].state != TRAINED) {
++ all_lanes_trained = false;
++ break;
++ }
++ }
++ if (all_lanes_trained) {
++ dev_info(&phydev->mdio.dev, "Detect hotplug, restart training\n");
++ xgkr_request_restart_an(xgkr);
++ }
++ }
++ break;
++ }
+ }
+ break;
+ }
+
-+ if (needs_train)
-+ xgkr_start_train(phydev);
++ if (start_train) {
++ xgkr_start_train(xgkr);
++ }
+
+ mutex_unlock(&phydev->lock);
-+ queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
-+ msecs_to_jiffies(XGKR_TIMEOUT));
++ start_xgkr_state_machine(&xgkr->xgkr_wk);
+}
+
+static int fsl_backplane_probe(struct phy_device *phydev)
+{
-+ struct fsl_xgkr_inst *xgkr_inst;
++ struct xgkr_phy_data *xgkr_inst;
+ struct device_node *phy_node, *lane_node;
+ struct resource res_lane;
++ struct serdes_access *srds = NULL;
++ int serdes_type;
++ const char *st;
+ const char *bm;
-+ int ret;
++ int ret, i, phy_lanes;
+ int bp_mode;
-+ u32 lane[2];
++ u32 lane_base_addr[MAX_PHY_LANES_NO], lane_memmap_size;
+
+ phy_node = phydev->mdio.dev.of_node;
++ if (!phy_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
++ }
++
+ bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
+ if (bp_mode < 0)
-+ return 0;
++ return -EINVAL;
+
++ phy_lanes = 1;
+ if (!strcasecmp(bm, "1000base-kx")) {
+ bp_mode = PHY_BACKPLANE_1000BASE_KX;
+ } else if (!strcasecmp(bm, "10gbase-kr")) {
+ bp_mode = PHY_BACKPLANE_10GBASE_KR;
++ } else if (!strcasecmp(bm, "40gbase-kr")) {
++ bp_mode = PHY_BACKPLANE_40GBASE_KR;
++ phy_lanes = 4;
+ } else {
+ dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
+ return -EINVAL;
+ return -EINVAL;
+ }
+
++ ret = of_property_read_string(lane_node, "compatible", &st);
++ if (ret < 0) {
++ //assume SERDES-10G if compatible property is not specified
++ serdes_type = SERDES_10G;
++ }
++ else if (!strcasecmp(st, "fsl,serdes-10g")) {
++ serdes_type = SERDES_10G;
++ } else if (!strcasecmp(st, "fsl,serdes-28g")) {
++ serdes_type = SERDES_28G;
++ } else {
++ dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
++ return -EINVAL;
++ }
++
+ ret = of_address_to_resource(lane_node, 0, &res_lane);
+ if (ret) {
+ dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
+
+ of_node_put(lane_node);
+ ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
-+ (u32 *)&lane, 2);
++ (u32 *)lane_base_addr, phy_lanes);
+ if (ret) {
+ dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
+ return -EINVAL;
+ }
+
-+ phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
-+ res_lane.start + lane[0],
-+ lane[1]);
-+ if (!phydev->priv) {
-+ dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
-+ return -ENOMEM;
++ switch (serdes_type)
++ {
++ case SERDES_10G:
++ setup_an_lt_ls();
++ srds = setup_serdes_access_10g();
++ break;
++
++ case SERDES_28G:
++ setup_an_lt_lx();
++ srds = setup_serdes_access_28g();
++ break;
++
++ default:
++ dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
++ return -EINVAL;
+ }
+
-+ if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
-+ phydev->speed = SPEED_1000;
-+ /* configure the lane for 1000BASE-KX */
-+ lane_set_1gkx(phydev->priv);
-+ return 0;
++ if (!srds) {
++ dev_err(&phydev->mdio.dev, "Unsupported serdes-type\n");
++ return -EINVAL;
++ }
++
++ srds->serdes_type = serdes_type;
++ srds->is_little_endian = of_property_read_bool(lane_node, "little-endian");
++
++ if (srds->is_little_endian) {
++ srds->ioread32 = le_ioread32;
++ srds->iowrite32 = le_iowrite32;
++ } else {
++ srds->ioread32 = be_ioread32;
++ srds->iowrite32 = be_iowrite32;
+ }
+
+ xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
+ if (!xgkr_inst)
+ return -ENOMEM;
+
-+ xgkr_inst->reg_base = phydev->priv;
-+ xgkr_inst->phydev = phydev;
++ xgkr_inst->phy_lanes = phy_lanes;
++ xgkr_inst->bp_mode = bp_mode;
++ mutex_init(&xgkr_inst->phy_lock);
++
++ lane_memmap_size = srds->get_lane_memmap_size();
++
++ for (i = 0; i < phy_lanes; i++) {
++ xgkr_inst->xgkr[i].idx = i;
++ xgkr_inst->xgkr[i].phydev = phydev;
++ xgkr_inst->xgkr[i].srds = srds;
++ xgkr_inst->xgkr[i].reg_base = devm_ioremap_nocache(&phydev->mdio.dev,
++ res_lane.start + lane_base_addr[i],
++ lane_memmap_size);
++ if (!xgkr_inst->xgkr[i].reg_base) {
++ dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
++ return -ENOMEM;
++ }
++ xgkr_inst->xgkr[i].rt_time = jiffies + msecs_to_jiffies(XGKR_DENY_RT_INTERVAL);
++ }
++
+ phydev->priv = xgkr_inst;
+
-+ if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
++ switch (bp_mode)
++ {
++ case PHY_BACKPLANE_1000BASE_KX:
++ phydev->speed = SPEED_1000;
++ /* configure the lane for 1000BASE-KX */
++ srds->lane_set_1gkx(xgkr_inst->xgkr[SINGLE_LANE].reg_base);
++ break;
++
++ case PHY_BACKPLANE_10GBASE_KR:
+ phydev->speed = SPEED_10000;
-+ INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
++ INIT_DELAYED_WORK(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk, xgkr_state_machine);
++ break;
++
++ case PHY_BACKPLANE_40GBASE_KR:
++ phydev->speed = SPEED_40000;
++ for (i = 0; i < phy_lanes; i++)
++ INIT_DELAYED_WORK(&xgkr_inst->xgkr[i].xgkr_wk, xgkr_state_machine);
++ break;
+ }
+
+ return 0;
+
+static int fsl_backplane_aneg_done(struct phy_device *phydev)
+{
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++
++ if (!phydev->mdio.dev.of_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
++ }
++
++ xgkr_inst->aneg_done = true;
++
+ return 1;
+}
+
+static int fsl_backplane_config_aneg(struct phy_device *phydev)
+{
-+ if (phydev->speed == SPEED_10000) {
-+ phydev->supported |= SUPPORTED_10000baseKR_Full;
-+ start_xgkr_an(phydev);
-+ } else if (phydev->speed == SPEED_1000) {
-+ phydev->supported |= SUPPORTED_1000baseKX_Full;
-+ start_1gkx_an(phydev);
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int i;
++
++ if (!phydev->mdio.dev.of_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
+ }
+
-+ phydev->advertising = phydev->supported;
++ switch (phydev->speed)
++ {
++ case SPEED_1000:
++ phydev->supported |= SUPPORTED_1000baseKX_Full;
++ start_1gkx_an(phydev);
++ break;
++
++ case SPEED_10000:
++ phydev->supported |= SUPPORTED_10000baseKR_Full;
++ reset_lt(&xgkr_inst->xgkr[SINGLE_LANE]);
++ start_xgkr_an(&xgkr_inst->xgkr[SINGLE_LANE]);
++ /* start state machine*/
++ start_xgkr_state_machine(&xgkr_inst->xgkr[SINGLE_LANE].xgkr_wk);
++ break;
++
++ case SPEED_40000:
++ phydev->supported |= SUPPORTED_40000baseKR4_Full;
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ reset_lt(&xgkr_inst->xgkr[i]);
++ }
++ //Start AN only for Master Lane
++ start_xgkr_an(&xgkr_inst->xgkr[MASTER_LANE]);
++ /* start state machine*/
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
++ }
++
++ break;
++ }
++
++ phydev->advertising = phydev->supported;
+ phydev->duplex = 1;
+
+ return 0;
+
+static int fsl_backplane_suspend(struct phy_device *phydev)
+{
-+ if (phydev->speed == SPEED_10000) {
-+ struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
++ int i;
++
++ if (!phydev->mdio.dev.of_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
++ }
+
-+ cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
++ if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++
++ for (i = 0; i < xgkr_inst->phy_lanes; i++)
++ cancel_delayed_work_sync(&xgkr_inst->xgkr[i].xgkr_wk);
+ }
+ return 0;
+}
+
+static int fsl_backplane_resume(struct phy_device *phydev)
+{
-+ if (phydev->speed == SPEED_10000) {
-+ struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
++ struct xgkr_phy_data *xgkr_inst = phydev->priv;
++ int i;
+
-+ init_inst(xgkr_inst, 1);
-+ queue_delayed_work(system_power_efficient_wq,
-+ &xgkr_inst->xgkr_wk,
-+ msecs_to_jiffies(XGKR_TIMEOUT));
++ if (!phydev->mdio.dev.of_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
++ }
++
++ if (phydev->speed == SPEED_10000 || phydev->speed == SPEED_40000) {
++ for (i = 0; i < xgkr_inst->phy_lanes; i++) {
++ init_xgkr(&xgkr_inst->xgkr[i], 1);
++ start_xgkr_state_machine(&xgkr_inst->xgkr[i].xgkr_wk);
++ }
+ }
+ return 0;
+}
+
+static int fsl_backplane_read_status(struct phy_device *phydev)
+{
++ if (!phydev->mdio.dev.of_node) {
++ dev_err(&phydev->mdio.dev, "No associated device tree node\n");
++ return -EINVAL;
++ }
++
+ if (is_link_up(phydev))
+ phydev->link = 1;
+ else
+ return 0;
+}
+
++static int fsl_backplane_match_phy_device(struct phy_device *phydev)
++{
++ struct device_node *phy_node, *lane_node;
++ const char *st;
++ int serdes_type, i, ret;
++ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
++
++ if (!phydev->mdio.dev.of_node) {
++ return 0;
++ }
++
++ // WORKAROUND:
++ // Required for LX2 devices
++ // where PHY ID cannot be verified in PCS
++ // because PCS Device Identifier Upper and Lower registers are hidden
++ // and always return 0 when they are read:
++ // 2 02 Device_ID0 RO Bits 15:0 0
++ // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x2);
++ // 3 03 Device_ID1 RO Bits 31:16 0
++ // val = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x3);
++ //
++ // To be removed: After the issue will be fixed on LX2 devices
++
++ if (!phydev->is_c45)
++ return 0;
++
++ phy_node = phydev->mdio.dev.of_node;
++
++ lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
++ if (!lane_node) {
++ dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
++ return 0;
++ }
++
++ ret = of_property_read_string(lane_node, "compatible", &st);
++ if (ret < 0) {
++ //assume SERDES-10G if compatible property is not specified
++ serdes_type = SERDES_10G;
++ }
++ else if (!strcasecmp(st, "fsl,serdes-10g")) {
++ serdes_type = SERDES_10G;
++ } else if (!strcasecmp(st, "fsl,serdes-28g")) {
++ serdes_type = SERDES_28G;
++ } else {
++ dev_err(&phydev->mdio.dev, "Unknown serdes-type\n");
++ return 0;
++ }
++
++ if (serdes_type == SERDES_10G) {
++ //On LS devices we must find the c45 device with correct PHY ID
++ //Implementation similar with the one existent in phy_device: @function: phy_bus_match
++ for (i = 1; i < num_ids; i++) {
++ if (!(phydev->c45_ids.devices_in_package & (1 << i)))
++ continue;
++
++ if ((PCS_PHY_DEVICE_ID & PCS_PHY_DEVICE_ID_MASK) ==
++ (phydev->c45_ids.device_ids[i] & PCS_PHY_DEVICE_ID_MASK))
++ {
++ return 1;
++ }
++ }
++ return 0;
++ }
++
++ //On LX devices we cannot verify PHY ID
++ //so we are happy only with preliminary verifications already made: mdio.dev.of_node and is_c45
++ //because we already filtered other undesired devices: non clause 45
++
++ return 1;
++}
++
+static struct phy_driver fsl_backplane_driver[] = {
+ {
-+ .phy_id = FSL_PCS_PHY_ID,
++ .phy_id = PCS_PHY_DEVICE_ID,
+ .name = "Freescale Backplane",
-+ .phy_id_mask = 0xffffffff,
++ .phy_id_mask = PCS_PHY_DEVICE_ID_MASK,
+ .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
+ SUPPORTED_MII,
+ .probe = fsl_backplane_probe,
+ .read_status = fsl_backplane_read_status,
+ .suspend = fsl_backplane_suspend,
+ .resume = fsl_backplane_resume,
++ .match_phy_device = fsl_backplane_match_phy_device,
+ },
+};
+
+module_phy_driver(fsl_backplane_driver);
+
+static struct mdio_device_id __maybe_unused freescale_tbl[] = {
-+ { FSL_PCS_PHY_ID, 0xffffffff },
++ { PCS_PHY_DEVICE_ID, PCS_PHY_DEVICE_ID_MASK },
+ { }
+};
+
+MODULE_DESCRIPTION("Freescale Backplane driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ b/drivers/net/phy/fsl_backplane.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * DPAA backplane driver.
++ * Author: Florinel Iordache <florinel.iordache@nxp.com>
++ *
++ * Copyright 2018 NXP
++ *
++ * Licensed under the GPL-2 or later.
++ */
++
++#ifndef FSL_BACKPLANE_H
++#define FSL_BACKPLANE_H
++
++/* C(-1) */
++#define BIN_M1 0
++/* C(1) */
++#define BIN_LONG 1
++
++#define BIN_SNAPSHOT_NUM 5
++#define BIN_M1_THRESHOLD 3
++#define BIN_LONG_THRESHOLD 2
++
++struct serdes_access {
++
++ int serdes_type;
++ bool is_little_endian;
++ u32 (*ioread32)(u32 *reg);
++ void (*iowrite32)(u32 value, u32 *reg);
++ u32 (*get_lane_memmap_size)(void);
++ void (*tune_tecr)(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset);
++ void (*reset_lane)(void *reg);
++ void (*lane_set_1gkx)(void *reg);
++ int (*get_median_gaink2)(u32 *reg);
++ bool (*is_bin_early)(int bin_sel, void *reg);
++};
++
++struct serdes_access* setup_serdes_access_10g(void);
++struct serdes_access* setup_serdes_access_28g(void);
++
++
++#endif //FSL_BACKPLANE_H
+--- /dev/null
++++ b/drivers/net/phy/fsl_backplane_serdes_10g.c
+@@ -0,0 +1,281 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * DPAA backplane driver for SerDes 10G.
++ * Author: Florinel Iordache <florinel.iordache@nxp.com>
++ *
++ * Copyright 2018 NXP
++ *
++ * Licensed under the GPL-2 or later.
++ */
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#include "fsl_backplane.h"
++
++#define BIN_M1_SEL 6
++#define BIN_Long_SEL 7
++#define CDR_SEL_MASK 0x00070000
++
++#define PRE_COE_SHIFT 22
++#define POST_COE_SHIFT 16
++#define ZERO_COE_SHIFT 8
++
++#define TECR0_INIT 0x24200000
++
++#define GCR0_RESET_MASK 0x00600000
++
++#define GCR1_SNP_START_MASK 0x00000040
++#define GCR1_CTL_SNP_START_MASK 0x00002000
++
++#define RECR1_CTL_SNP_DONE_MASK 0x00000002
++#define RECR1_SNP_DONE_MASK 0x00000004
++#define TCSR1_SNP_DATA_MASK 0x0000ffc0
++#define TCSR1_SNP_DATA_SHIFT 6
++#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
++
++#define RECR1_GAINK2_MASK 0x0f000000
++#define RECR1_GAINK2_SHIFT 24
++
++/* Required only for 1000BASE KX */
++#define GCR1_REIDL_TH_MASK 0x00700000
++#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
++#define GCR1_REIDL_ET_MAS_MASK 0x00004000
++#define TECR0_AMP_RED_MASK 0x0000003f
++
++struct per_lane_ctrl_status {
++ u32 gcr0; /* 0x.000 - General Control Register 0 */
++ u32 gcr1; /* 0x.004 - General Control Register 1 */
++ u32 gcr2; /* 0x.008 - General Control Register 2 */
++ u32 resv1; /* 0x.00C - Reserved */
++ u32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
++ u32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
++ u32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
++ u32 resv2; /* 0x.01C - Reserved */
++ u32 tlcr0; /* 0x.020 - TTL Control Register 0 */
++ u32 tlcr1; /* 0x.024 - TTL Control Register 1 */
++ u32 tlcr2; /* 0x.028 - TTL Control Register 2 */
++ u32 tlcr3; /* 0x.02C - TTL Control Register 3 */
++ u32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
++ u32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
++ u32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
++ u32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
++};
++
++static struct serdes_access srds;
++
++static u32 get_lane_memmap_size(void)
++{
++ return 0x40;
++}
++
++static void reset_lane(void *reg)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++
++ /* reset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++
++ /* unreset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++}
++
++static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++
++ val = TECR0_INIT |
++ adpt_eq << ZERO_COE_SHIFT |
++ ratio_preq << PRE_COE_SHIFT |
++ ratio_pst1q << POST_COE_SHIFT;
++
++ if (reset) {
++ /* reset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++ }
++
++ srds.iowrite32(val, ®_base->tecr0);
++ udelay(1);
++
++ if (reset) {
++ /* unreset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++ }
++}
++
++static void lane_set_1gkx(void *reg)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++
++ /* reset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) & ~GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++
++ /* set gcr1 for 1GKX */
++ val = srds.ioread32(®_base->gcr1);
++ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
++ GCR1_REIDL_ET_MAS_MASK);
++ srds.iowrite32(val, ®_base->gcr1);
++ udelay(1);
++
++ /* set tecr0 for 1GKX */
++ val = srds.ioread32(®_base->tecr0);
++ val &= ~TECR0_AMP_RED_MASK;
++ srds.iowrite32(val, ®_base->tecr0);
++ udelay(1);
++
++ /* unreset the lane */
++ srds.iowrite32(srds.ioread32(®_base->gcr0) | GCR0_RESET_MASK,
++ ®_base->gcr0);
++ udelay(1);
++}
++
++static int get_median_gaink2(u32 *reg)
++{
++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
++ u32 rx_eq_snp;
++ struct per_lane_ctrl_status *reg_base;
++ int timeout;
++ int i, j, tmp, pos;
++
++ reg_base = (struct per_lane_ctrl_status *)reg;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while (srds.ioread32(®_base->recr1) &
++ RECR1_CTL_SNP_DONE_MASK) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* start snap shot */
++ srds.iowrite32((srds.ioread32(®_base->gcr1) |
++ GCR1_CTL_SNP_START_MASK),
++ ®_base->gcr1);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(srds.ioread32(®_base->recr1) &
++ RECR1_CTL_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ rx_eq_snp = srds.ioread32(®_base->recr1);
++ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
++ RECR1_GAINK2_SHIFT;
++
++		/* terminate the snap shot by clearing GCR1[REQ_CTL_SNP] */
++ srds.iowrite32((srds.ioread32(®_base->gcr1) &
++ ~GCR1_CTL_SNP_START_MASK),
++ ®_base->gcr1);
++ }
++
++ /* get median of the 5 snap shot */
++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
++ tmp = gaink2_snap_shot[i];
++ pos = i;
++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
++ if (gaink2_snap_shot[j] < tmp) {
++ tmp = gaink2_snap_shot[j];
++ pos = j;
++ }
++ }
++
++ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
++ gaink2_snap_shot[i] = tmp;
++ }
++
++ return gaink2_snap_shot[2];
++}
++
++static bool is_bin_early(int bin_sel, void *reg)
++{
++ bool early = false;
++ int bin_snap_shot[BIN_SNAPSHOT_NUM];
++ int i, negative_count = 0;
++ struct per_lane_ctrl_status *reg_base = reg;
++ int timeout;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait RECR1_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while ((srds.ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* set TCSR1[CDR_SEL] to BinM1/BinLong */
++ if (bin_sel == BIN_M1) {
++ srds.iowrite32((srds.ioread32(®_base->tcsr1) &
++ ~CDR_SEL_MASK) | BIN_M1_SEL,
++ ®_base->tcsr1);
++ } else {
++ srds.iowrite32((srds.ioread32(®_base->tcsr1) &
++ ~CDR_SEL_MASK) | BIN_Long_SEL,
++ ®_base->tcsr1);
++ }
++
++ /* start snap shot */
++ srds.iowrite32(srds.ioread32(®_base->gcr1) | GCR1_SNP_START_MASK,
++ ®_base->gcr1);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(srds.ioread32(®_base->recr1) & RECR1_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ bin_snap_shot[i] = (srds.ioread32(®_base->tcsr1) &
++ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
++ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
++ negative_count++;
++
++		/* terminate the snap shot by clearing GCR1[SNP_START] */
++ srds.iowrite32(srds.ioread32(®_base->gcr1) & ~GCR1_SNP_START_MASK,
++ ®_base->gcr1);
++ }
++
++ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
++ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
++ early = true;
++ }
++
++ return early;
++}
++
++struct serdes_access* setup_serdes_access_10g(void)
++{
++ srds.get_lane_memmap_size = get_lane_memmap_size;
++ srds.tune_tecr = tune_tecr;
++ srds.reset_lane = reset_lane;
++ srds.lane_set_1gkx = lane_set_1gkx;
++ srds.get_median_gaink2 = get_median_gaink2;
++ srds.is_bin_early = is_bin_early;
++
++ return &srds;
++}
++
+--- /dev/null
++++ b/drivers/net/phy/fsl_backplane_serdes_28g.c
+@@ -0,0 +1,336 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * DPAA backplane driver for SerDes 28G.
++ * Author: Florinel Iordache <florinel.iordache@nxp.com>
++ *
++ * Copyright 2018 NXP
++ *
++ * Licensed under the GPL-2 or later.
++ */
++
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++
++#include "fsl_backplane.h"
++
++#define BIN_M1_SEL 0x0000c000
++#define BIN_Long_SEL 0x0000d000
++#define CDR_SEL_MASK 0x0000f000
++
++#define PRE_COE_SHIFT 16
++#define POST_COE_SHIFT 8
++#define ZERO_COE_SHIFT 24
++
++#define TECR0_INIT 0x20808000
++
++#define RESET_REQ_MASK 0x80000000
++
++#define RECR3_SNP_START_MASK 0x80000000
++#define RECR3_SNP_DONE_MASK 0x40000000
++
++#define RECR4_SNP_DATA_MASK 0x000003ff
++#define RECR4_SNP_DATA_SHIFT 0
++#define RECR4_EQ_SNPBIN_SIGN_MASK 0x200
++
++#define RECR3_GAINK2_MASK 0x1f000000
++#define RECR3_GAINK2_SHIFT 24
++
++/* Required only for 1000BASE KX */
++#define GCR1_REIDL_TH_MASK 0x00700000
++#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
++#define GCR1_REIDL_ET_MAS_MASK 0x04000000
++#define TECR0_AMP_RED_MASK 0x0000003f
++
++struct per_lane_ctrl_status {
++ u32 gcr0; /* 0x.000 - General Control Register 0 */
++ u32 resv1; /* 0x.004 - Reserved */
++ u32 resv2; /* 0x.008 - Reserved */
++ u32 resv3; /* 0x.00C - Reserved */
++ u32 resv4; /* 0x.010 - Reserved */
++ u32 resv5; /* 0x.014 - Reserved */
++ u32 resv6; /* 0x.018 - Reserved */
++ u32 resv7; /* 0x.01C - Reserved */
++ u32 trstctl; /* 0x.020 - TX Reset Control Register */
++ u32 tgcr0; /* 0x.024 - TX General Control Register 0 */
++ u32 tgcr1; /* 0x.028 - TX General Control Register 1 */
++ u32 tgcr2; /* 0x.02C - TX General Control Register 2 */
++ u32 tecr0; /* 0x.030 - Transmit Equalization Control Register 0 */
++ u32 tecr1; /* 0x.034 - Transmit Equalization Control Register 1 */
++ u32 resv8; /* 0x.038 - Reserved */
++ u32 resv9; /* 0x.03C - Reserved */
++ u32 rrstctl; /* 0x.040 - RX Reset Control Register */
++ u32 rgcr0; /* 0x.044 - RX General Control Register 0 */
++ u32 rxgcr1; /* 0x.048 - RX General Control Register 1 */
++ u32 resv10; /* 0x.04C - Reserved */
++ u32 recr0; /* 0x.050 - RX Equalization Register 0 */
++ u32 recr1; /* 0x.054 - RX Equalization Register 1 */
++ u32 recr2; /* 0x.058 - RX Equalization Register 2 */
++ u32 recr3; /* 0x.05C - RX Equalization Register 3 */
++ u32 recr4; /* 0x.060 - RX Equalization Register 4 */
++ u32 resv11; /* 0x.064 - Reserved */
++ u32 rccr0; /* 0x.068 - RX Calibration Register 0 */
++ u32 rccr1; /* 0x.06C - RX Calibration Register 1 */
++ u32 rcpcr0; /* 0x.070 - RX Clock Path Register 0 */
++ u32 rsccr0; /* 0x.074 - RX Sampler Calibration Control Register 0 */
++ u32 rsccr1; /* 0x.078 - RX Sampler Calibration Control Register 1 */
++ u32 resv12; /* 0x.07C - Reserved */
++ u32 ttlcr0; /* 0x.080 - Transition Tracking Loop Register 0 */
++ u32 ttlcr1; /* 0x.084 - Transition Tracking Loop Register 1 */
++ u32 ttlcr2; /* 0x.088 - Transition Tracking Loop Register 2 */
++ u32 ttlcr3; /* 0x.08C - Transition Tracking Loop Register 3 */
++ u32 resv13; /* 0x.090 - Reserved */
++ u32 resv14; /* 0x.094 - Reserved */
++ u32 resv15; /* 0x.098 - Reserved */
++ u32 resv16; /* 0x.09C - Reserved */
++ u32 tcsr0; /* 0x.0A0 - Test Control/Status Register 0 */
++ u32 tcsr1; /* 0x.0A4 - Test Control/Status Register 1 */
++ u32 tcsr2; /* 0x.0A8 - Test Control/Status Register 2 */
++ u32 tcsr3; /* 0x.0AC - Test Control/Status Register 3 */
++ u32 tcsr4; /* 0x.0B0 - Test Control/Status Register 4 */
++ u32 resv17; /* 0x.0B4 - Reserved */
++ u32 resv18; /* 0x.0B8 - Reserved */
++ u32 resv19; /* 0x.0BC - Reserved */
++ u32 rxcb0; /* 0x.0C0 - RX Control Block Register 0 */
++ u32 rxcb1; /* 0x.0C4 - RX Control Block Register 1 */
++ u32 resv20; /* 0x.0C8 - Reserved */
++ u32 resv21; /* 0x.0CC - Reserved */
++ u32 rxss0; /* 0x.0D0 - RX Speed Switch Register 0 */
++ u32 rxss1; /* 0x.0D4 - RX Speed Switch Register 1 */
++ u32 rxss2; /* 0x.0D8 - RX Speed Switch Register 2 */
++ u32 resv22; /* 0x.0DC - Reserved */
++ u32 txcb0; /* 0x.0E0 - TX Control Block Register 0 */
++ u32 txcb1; /* 0x.0E4 - TX Control Block Register 1 */
++ u32 resv23; /* 0x.0E8 - Reserved */
++ u32 resv24; /* 0x.0EC - Reserved */
++ u32 txss0; /* 0x.0F0 - TX Speed Switch Register 0 */
++ u32 txss1; /* 0x.0F4 - TX Speed Switch Register 1 */
++ u32 txss2; /* 0x.0F8 - TX Speed Switch Register 2 */
++ u32 resv25; /* 0x.0FC - Reserved */
++};
++
++static struct serdes_access srds;
++
++static u32 get_lane_memmap_size(void)
++{
++ return 0x100;
++}
++
++static void reset_lane(void *reg)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++ unsigned long timeout;
++
++ /* reset Tx lane: send reset request */
++ srds.iowrite32(srds.ioread32(®_base->trstctl) | RESET_REQ_MASK,
++ ®_base->trstctl);
++ udelay(1);
++ timeout = 10;
++ while (timeout--) {
++ val = srds.ioread32(®_base->trstctl);
++ if (!(val & RESET_REQ_MASK))
++ break;
++ usleep_range(5, 20);
++ }
++
++ /* reset Rx lane: send reset request */
++ srds.iowrite32(srds.ioread32(®_base->rrstctl) | RESET_REQ_MASK,
++ ®_base->rrstctl);
++ udelay(1);
++ timeout = 10;
++ while (timeout--) {
++ val = srds.ioread32(®_base->rrstctl);
++ if (!(val & RESET_REQ_MASK))
++ break;
++ usleep_range(5, 20);
++ }
++
++ /* wait for a while after reset */
++ timeout = jiffies + 10;
++ while (time_before(jiffies, timeout)) {
++ schedule();
++ usleep_range(5, 20);
++ }
++}
++
++static void tune_tecr(void *reg, u32 ratio_preq, u32 ratio_pst1q, u32 adpt_eq, bool reset)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++
++ if (reset) {
++ /* reset lanes */
++ reset_lane(reg);
++ }
++
++ val = TECR0_INIT |
++ ratio_preq << PRE_COE_SHIFT |
++ ratio_pst1q << POST_COE_SHIFT;
++ srds.iowrite32(val, ®_base->tecr0);
++
++ val = adpt_eq << ZERO_COE_SHIFT;
++ srds.iowrite32(val, ®_base->tecr1);
++
++ udelay(1);
++}
++
++static void lane_set_1gkx(void *reg)
++{
++ struct per_lane_ctrl_status *reg_base = reg;
++ u32 val;
++
++ /* reset lanes */
++ reset_lane(reg);
++
++ /* set gcr1 for 1GKX */
++ val = srds.ioread32(®_base->rxgcr1);
++ val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
++ GCR1_REIDL_ET_MAS_MASK);
++ srds.iowrite32(val, ®_base->rxgcr1);
++ udelay(1);
++
++ /* set tecr0 for 1GKX */
++ val = srds.ioread32(®_base->tecr0);
++ val &= ~TECR0_AMP_RED_MASK;
++ srds.iowrite32(val, ®_base->tecr0);
++ udelay(1);
++}
++
++static int get_median_gaink2(u32 *reg)
++{
++ int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
++ u32 rx_eq_snp;
++ struct per_lane_ctrl_status *reg_base;
++ int timeout;
++ int i, j, tmp, pos;
++
++ reg_base = (struct per_lane_ctrl_status *)reg;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait RECR3_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while (srds.ioread32(®_base->recr3) &
++ RECR3_SNP_DONE_MASK) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* start snap shot */
++ srds.iowrite32((srds.ioread32(®_base->recr3) |
++ RECR3_SNP_START_MASK),
++ ®_base->recr3);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(srds.ioread32(®_base->recr3) &
++ RECR3_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ rx_eq_snp = srds.ioread32(®_base->recr3);
++ gaink2_snap_shot[i] = (rx_eq_snp & RECR3_GAINK2_MASK) >>
++ RECR3_GAINK2_SHIFT;
++
++		/* terminate the snap shot by clearing RECR3[SNP_START] */
++ srds.iowrite32((srds.ioread32(®_base->recr3) &
++ ~RECR3_SNP_START_MASK),
++ ®_base->recr3);
++ }
++
++ /* get median of the 5 snap shot */
++ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
++ tmp = gaink2_snap_shot[i];
++ pos = i;
++ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
++ if (gaink2_snap_shot[j] < tmp) {
++ tmp = gaink2_snap_shot[j];
++ pos = j;
++ }
++ }
++
++ gaink2_snap_shot[pos] = gaink2_snap_shot[i];
++ gaink2_snap_shot[i] = tmp;
++ }
++
++ return gaink2_snap_shot[2];
++}
++
++static bool is_bin_early(int bin_sel, void *reg)
++{
++ bool early = false;
++ int bin_snap_shot[BIN_SNAPSHOT_NUM];
++ int i, negative_count = 0;
++ struct per_lane_ctrl_status *reg_base = reg;
++ int timeout;
++
++ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
++ /* wait RECR3_SNP_DONE_MASK has cleared */
++ timeout = 100;
++ while ((srds.ioread32(®_base->recr3) & RECR3_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++		/* set RECR4[CDR_SEL] to BinM1/BinLong */
++ if (bin_sel == BIN_M1) {
++ srds.iowrite32((srds.ioread32(®_base->recr4) &
++ ~CDR_SEL_MASK) | BIN_M1_SEL,
++ ®_base->recr4);
++ } else {
++ srds.iowrite32((srds.ioread32(®_base->recr4) &
++ ~CDR_SEL_MASK) | BIN_Long_SEL,
++ ®_base->recr4);
++ }
++
++ /* start snap shot */
++ srds.iowrite32(srds.ioread32(®_base->recr3) | RECR3_SNP_START_MASK,
++ ®_base->recr3);
++
++ /* wait for SNP done */
++ timeout = 100;
++ while (!(srds.ioread32(®_base->recr3) & RECR3_SNP_DONE_MASK)) {
++ udelay(1);
++ timeout--;
++ if (timeout == 0)
++ break;
++ }
++
++ /* read and save the snap shot */
++ bin_snap_shot[i] = (srds.ioread32(®_base->recr4) &
++ RECR4_SNP_DATA_MASK) >> RECR4_SNP_DATA_SHIFT;
++ if (bin_snap_shot[i] & RECR4_EQ_SNPBIN_SIGN_MASK)
++ negative_count++;
++
++		/* terminate the snap shot by clearing RECR3[SNP_START] */
++ srds.iowrite32(srds.ioread32(®_base->recr3) & ~RECR3_SNP_START_MASK,
++ ®_base->recr3);
++ }
++
++ if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
++ ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
++ early = true;
++ }
++
++ return early;
++}
++
++struct serdes_access* setup_serdes_access_28g(void)
++{
++ srds.get_lane_memmap_size = get_lane_memmap_size;
++ srds.tune_tecr = tune_tecr;
++ srds.reset_lane = reset_lane;
++ srds.lane_set_1gkx = lane_set_1gkx;
++ srds.get_median_gaink2 = get_median_gaink2;
++ srds.is_bin_early = is_bin_early;
++
++ return &srds;
++}
+--- /dev/null
++++ b/drivers/net/phy/inphi.c
+@@ -0,0 +1,594 @@
++/*
++ * Copyright 2018 NXP
++ * Copyright 2018 INPHI
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright notice,
++ * this list of conditions and the following disclaimer in the documentation
++ * and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ *
++ * Inphi is a registered trademark of Inphi Corporation
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/phy.h>
++#include <linux/mdio.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/of_irq.h>
++#include <linux/workqueue.h>
++#include <linux/i2c.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#define PHY_ID_IN112525 0x02107440
++
++#define INPHI_S03_DEVICE_ID_MSB 0x2
++#define INPHI_S03_DEVICE_ID_LSB 0x3
++
++#define ALL_LANES 4
++#define INPHI_POLL_DELAY 2500
++
++#define PHYCTRL_REG1 0x0012
++#define PHYCTRL_REG2 0x0014
++#define PHYCTRL_REG3 0x0120
++#define PHYCTRL_REG4 0x0121
++#define PHYCTRL_REG5 0x0180
++#define PHYCTRL_REG6 0x0580
++#define PHYCTRL_REG7 0x05C4
++#define PHYCTRL_REG8 0x01C8
++#define PHYCTRL_REG9 0x0521
++
++#define PHYSTAT_REG1 0x0021
++#define PHYSTAT_REG2 0x0022
++#define PHYSTAT_REG3 0x0123
++
++#define PHYMISC_REG1 0x0025
++#define PHYMISC_REG2 0x002c
++#define PHYMISC_REG3 0x00b3
++#define PHYMISC_REG4 0x0181
++#define PHYMISC_REG5 0x019D
++#define PHYMISC_REG6 0x0198
++#define PHYMISC_REG7 0x0199
++#define PHYMISC_REG8 0x0581
++#define PHYMISC_REG9 0x0598
++#define PHYMISC_REG10 0x059c
++#define PHYMISC_REG20 0x01B0
++#define PHYMISC_REG21 0x01BC
++#define PHYMISC_REG22 0x01C0
++
++#define RX_VCO_CODE_OFFSET 5
++#define VCO_CODE 390
++
++int vco_codes[ALL_LANES] = {
++ VCO_CODE,
++ VCO_CODE,
++ VCO_CODE,
++ VCO_CODE
++};
++
++static void mykmod_work_handler(struct work_struct *w);
++
++static struct workqueue_struct *wq;
++static DECLARE_DELAYED_WORK(mykmod_work, mykmod_work_handler);
++static unsigned long onesec;
++struct phy_device *inphi_phydev;
++
++static int mdio_wr(u32 regnum, u16 val)
++{
++ regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
++
++ return mdiobus_write(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
++ regnum, val);
++}
++
++static int mdio_rd(u32 regnum)
++{
++ regnum = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | (regnum & 0xffff);
++
++ return mdiobus_read(inphi_phydev->mdio.bus, inphi_phydev->mdio.addr,
++ regnum);
++}
++
++
++int bit_test(int value, int bit_field)
++{
++ int result;
++ int bit_mask = (1 << bit_field);
++
++ result = ((value & bit_mask) == bit_mask);
++ return result;
++}
++
++int tx_pll_lock_test(int lane)
++{
++ int i, val, locked = 1;
++
++ if (lane == ALL_LANES) {
++ for (i = 0; i < ALL_LANES; i++) {
++ val = mdio_rd(i * 0x100 + PHYSTAT_REG3);
++ locked = locked & bit_test(val, 15);
++ }
++ } else {
++ val = mdio_rd(lane * 0x100 + PHYSTAT_REG3);
++ locked = locked & bit_test(val, 15);
++ }
++
++ return locked;
++}
++
++void rx_reset_assert(int lane)
++{
++ int mask, val;
++
++ if (lane == ALL_LANES) {
++ val = mdio_rd(PHYMISC_REG2);
++ mask = (1 << 15);
++ mdio_wr(PHYMISC_REG2, val + mask);
++ } else {
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
++ mask = (1 << 6);
++ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
++ }
++}
++
++void rx_reset_de_assert(int lane)
++{
++ int mask, val;
++
++ if (lane == ALL_LANES) {
++ val = mdio_rd(PHYMISC_REG2);
++ mask = 0xffff - (1 << 15);
++ mdio_wr(PHYMISC_REG2, val & mask);
++ } else {
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
++ mask = 0xffff - (1 << 6);
++ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
++ }
++}
++
++void rx_powerdown_assert(int lane)
++{
++ int mask, val;
++
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
++ mask = (1 << 5);
++ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val + mask);
++}
++
++void rx_powerdown_de_assert(int lane)
++{
++ int mask, val;
++
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG8);
++ mask = 0xffff - (1 << 5);
++ mdio_wr(lane * 0x100 + PHYCTRL_REG8, val & mask);
++}
++
++void tx_pll_assert(int lane)
++{
++ int val, recal;
++
++ if (lane == ALL_LANES) {
++ val = mdio_rd(PHYMISC_REG2);
++ recal = (1 << 12);
++ mdio_wr(PHYMISC_REG2, val | recal);
++ } else {
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
++ recal = (1 << 15);
++ mdio_wr(lane * 0x100 + PHYCTRL_REG4, val | recal);
++ }
++}
++
++void tx_pll_de_assert(int lane)
++{
++ int recal, val;
++
++ if (lane == ALL_LANES) {
++ val = mdio_rd(PHYMISC_REG2);
++ recal = 0xefff;
++ mdio_wr(PHYMISC_REG2, val & recal);
++ } else {
++ val = mdio_rd(lane * 0x100 + PHYCTRL_REG4);
++ recal = 0x7fff;
++ mdio_wr(lane * 0x100 + PHYCTRL_REG4, val & recal);
++ }
++}
++
++void tx_core_assert(int lane)
++{
++ int recal, val, val2, core_reset;
++
++ if (lane == 4) {
++ val = mdio_rd(PHYMISC_REG2);
++ recal = 1 << 10;
++ mdio_wr(PHYMISC_REG2, val | recal);
++ } else {
++ val2 = mdio_rd(PHYMISC_REG3);
++ core_reset = (1 << (lane + 8));
++ mdio_wr(PHYMISC_REG3, val2 | core_reset);
++ }
++}
++
++void lol_disable(int lane)
++{
++ int val, mask;
++
++ val = mdio_rd(PHYMISC_REG3);
++ mask = 1 << (lane + 4);
++ mdio_wr(PHYMISC_REG3, val | mask);
++}
++
++void tx_core_de_assert(int lane)
++{
++ int val, recal, val2, core_reset;
++
++ if (lane == ALL_LANES) {
++ val = mdio_rd(PHYMISC_REG2);
++ recal = 0xffff - (1 << 10);
++ mdio_wr(PHYMISC_REG2, val & recal);
++ } else {
++ val2 = mdio_rd(PHYMISC_REG3);
++ core_reset = 0xffff - (1 << (lane + 8));
++ mdio_wr(PHYMISC_REG3, val2 & core_reset);
++ }
++}
++
++void tx_restart(int lane)
++{
++ tx_core_assert(lane);
++ tx_pll_assert(lane);
++ tx_pll_de_assert(lane);
++ usleep_range(1500, 1600);
++ tx_core_de_assert(lane);
++}
++
++void disable_lane(int lane)
++{
++ rx_reset_assert(lane);
++ rx_powerdown_assert(lane);
++ tx_core_assert(lane);
++ lol_disable(lane);
++}
++
++void toggle_reset(int lane)
++{
++ int reg, val, orig;
++
++ if (lane == ALL_LANES) {
++ mdio_wr(PHYMISC_REG2, 0x8000);
++ udelay(100);
++ mdio_wr(PHYMISC_REG2, 0x0000);
++ } else {
++ reg = lane * 0x100 + PHYCTRL_REG8;
++ val = (1 << 6);
++ orig = mdio_rd(reg);
++ mdio_wr(reg, orig + val);
++ udelay(100);
++ mdio_wr(reg, orig);
++ }
++}
++
++int az_complete_test(int lane)
++{
++ int success = 1, value;
++
++ if (lane == 0 || lane == ALL_LANES) {
++ value = mdio_rd(PHYCTRL_REG5);
++ success = success & bit_test(value, 2);
++ }
++ if (lane == 1 || lane == ALL_LANES) {
++ value = mdio_rd(PHYCTRL_REG5 + 0x100);
++ success = success & bit_test(value, 2);
++ }
++ if (lane == 2 || lane == ALL_LANES) {
++ value = mdio_rd(PHYCTRL_REG5 + 0x200);
++ success = success & bit_test(value, 2);
++ }
++ if (lane == 3 || lane == ALL_LANES) {
++ value = mdio_rd(PHYCTRL_REG5 + 0x300);
++ success = success & bit_test(value, 2);
++ }
++
++ return success;
++}
++
++void save_az_offsets(int lane)
++{
++ int i;
++
++#define AZ_OFFSET_LANE_UPDATE(reg, lane) \
++ mdio_wr((reg) + (lane) * 0x100, \
++ (mdio_rd((reg) + (lane) * 0x100) >> 8))
++
++ if (lane == ALL_LANES) {
++ for (i = 0; i < ALL_LANES; i++) {
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, i);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, i);
++ }
++ } else {
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 1, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 2, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG20 + 3, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 1, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 2, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG21 + 3, lane);
++ AZ_OFFSET_LANE_UPDATE(PHYMISC_REG22, lane);
++ }
++
++ mdio_wr(PHYCTRL_REG7, 0x0001);
++}
++
++void save_vco_codes(int lane)
++{
++ int i;
++
++ if (lane == ALL_LANES) {
++ for (i = 0; i < ALL_LANES; i++) {
++ vco_codes[i] = mdio_rd(PHYMISC_REG5 + i * 0x100);
++ mdio_wr(PHYMISC_REG5 + i * 0x100,
++ vco_codes[i] + RX_VCO_CODE_OFFSET);
++ }
++ } else {
++ vco_codes[lane] = mdio_rd(PHYMISC_REG5 + lane * 0x100);
++ mdio_wr(PHYMISC_REG5 + lane * 0x100,
++ vco_codes[lane] + RX_VCO_CODE_OFFSET);
++ }
++}
++
++int inphi_lane_recovery(int lane)
++{
++ int i, value, az_pass;
++
++ switch (lane) {
++ case 0:
++ case 1:
++ case 2:
++ case 3:
++ rx_reset_assert(lane);
++ mdelay(20);
++ break;
++ case ALL_LANES:
++ mdio_wr(PHYMISC_REG2, 0x9C00);
++ mdelay(20);
++ do {
++ value = mdio_rd(PHYMISC_REG2);
++ udelay(10);
++ } while (!bit_test(value, 4));
++ break;
++ default:
++ dev_err(&inphi_phydev->mdio.dev,
++ "Incorrect usage of APIs in %s driver\n",
++ inphi_phydev->drv->name);
++ break;
++ }
++
++ if (lane == ALL_LANES) {
++ for (i = 0; i < ALL_LANES; i++)
++ mdio_wr(PHYMISC_REG7 + i * 0x100, VCO_CODE);
++ } else {
++ mdio_wr(PHYMISC_REG7 + lane * 0x100, VCO_CODE);
++ }
++
++ if (lane == ALL_LANES)
++ for (i = 0; i < ALL_LANES; i++)
++ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0418);
++ else
++ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0418);
++
++ mdio_wr(PHYCTRL_REG7, 0x0000);
++
++ rx_reset_de_assert(lane);
++
++ if (lane == ALL_LANES) {
++ for (i = 0; i < ALL_LANES; i++) {
++ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0410);
++ mdio_wr(PHYCTRL_REG5 + i * 0x100, 0x0412);
++ }
++ } else {
++ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0410);
++ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0412);
++ }
++
++ for (i = 0; i < 64; i++) {
++ mdelay(100);
++ az_pass = az_complete_test(lane);
++ if (az_pass) {
++ save_az_offsets(lane);
++ break;
++ }
++ }
++
++ if (!az_pass) {
++ pr_info("in112525: AZ calibration fail @ lane=%d\n", lane);
++ return -1;
++ }
++
++ if (lane == ALL_LANES) {
++ mdio_wr(PHYMISC_REG8, 0x0002);
++ mdio_wr(PHYMISC_REG9, 0x2028);
++ mdio_wr(PHYCTRL_REG6, 0x0010);
++ usleep_range(1000, 1200);
++ mdio_wr(PHYCTRL_REG6, 0x0110);
++ mdelay(30);
++ mdio_wr(PHYMISC_REG9, 0x3020);
++ } else {
++ mdio_wr(PHYMISC_REG4 + lane * 0x100, 0x0002);
++ mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x2028);
++ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0010);
++ usleep_range(1000, 1200);
++ mdio_wr(PHYCTRL_REG5 + lane * 0x100, 0x0110);
++ mdelay(30);
++ mdio_wr(PHYMISC_REG6 + lane * 0x100, 0x3020);
++ }
++
++ if (lane == ALL_LANES) {
++ mdio_wr(PHYMISC_REG2, 0x1C00);
++ mdio_wr(PHYMISC_REG2, 0x0C00);
++ } else {
++ tx_restart(lane);
++ mdelay(11);
++ }
++
++ if (lane == ALL_LANES) {
++ if (bit_test(mdio_rd(PHYMISC_REG2), 6) == 0)
++ return -1;
++ } else {
++ if (tx_pll_lock_test(lane) == 0)
++ return -1;
++ }
++
++ save_vco_codes(lane);
++
++ if (lane == ALL_LANES) {
++ mdio_wr(PHYMISC_REG2, 0x0400);
++ mdio_wr(PHYMISC_REG2, 0x0000);
++ value = mdio_rd(PHYCTRL_REG1);
++ value = value & 0xffbf;
++ mdio_wr(PHYCTRL_REG2, value);
++ } else {
++ tx_core_de_assert(lane);
++ }
++
++ if (lane == ALL_LANES) {
++ mdio_wr(PHYMISC_REG1, 0x8000);
++ mdio_wr(PHYMISC_REG1, 0x0000);
++ }
++ mdio_rd(PHYMISC_REG1);
++ mdio_rd(PHYMISC_REG1);
++ usleep_range(1000, 1200);
++ mdio_rd(PHYSTAT_REG1);
++ mdio_rd(PHYSTAT_REG2);
++
++ return 0;
++}
++
++static void mykmod_work_handler(struct work_struct *w)
++{
++ int all_lanes_lock, lane0_lock, lane1_lock, lane2_lock, lane3_lock;
++
++ lane0_lock = bit_test(mdio_rd(0x123), 15);
++ lane1_lock = bit_test(mdio_rd(0x223), 15);
++ lane2_lock = bit_test(mdio_rd(0x323), 15);
++ lane3_lock = bit_test(mdio_rd(0x423), 15);
++
++ /* check if the chip had any successful lane lock from the previous
++ * stage (e.g. u-boot)
++ */
++ all_lanes_lock = lane0_lock | lane1_lock | lane2_lock | lane3_lock;
++
++ if (!all_lanes_lock) {
++ /* start fresh */
++ inphi_lane_recovery(ALL_LANES);
++ } else {
++ if (!lane0_lock)
++ inphi_lane_recovery(0);
++ if (!lane1_lock)
++ inphi_lane_recovery(1);
++ if (!lane2_lock)
++ inphi_lane_recovery(2);
++ if (!lane3_lock)
++ inphi_lane_recovery(3);
++ }
++
++ queue_delayed_work(wq, &mykmod_work, onesec);
++}
++
++int inphi_probe(struct phy_device *phydev)
++{
++ int phy_id = 0, id_lsb = 0, id_msb = 0;
++
++ /* setup the inphi_phydev ptr for mdio_rd/mdio_wr APIs */
++ inphi_phydev = phydev;
++
++ /* Read device id from phy registers */
++ id_lsb = mdio_rd(INPHI_S03_DEVICE_ID_MSB);
++ if (id_lsb < 0)
++ return -ENXIO;
++
++ phy_id = id_lsb << 16;
++
++ id_msb = mdio_rd(INPHI_S03_DEVICE_ID_LSB);
++ if (id_msb < 0)
++ return -ENXIO;
++
++ phy_id |= id_msb;
++
++ /* Make sure the device tree binding matched the driver with the
++ * right device.
++ */
++ if (phy_id != phydev->drv->phy_id) {
++ dev_err(&phydev->mdio.dev,
++ "Error matching phy with %s driver\n",
++ phydev->drv->name);
++ return -ENODEV;
++ }
++
++ /* update the local phydev pointer, used inside all APIs */
++ inphi_phydev = phydev;
++ onesec = msecs_to_jiffies(INPHI_POLL_DELAY);
++
++ wq = create_singlethread_workqueue("inphi_kmod");
++ if (wq) {
++ queue_delayed_work(wq, &mykmod_work, onesec);
++ } else {
++ dev_err(&phydev->mdio.dev,
++ "Error creating kernel workqueue for %s driver\n",
++ phydev->drv->name);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static struct phy_driver inphi_driver[] = {
++{
++ .phy_id = PHY_ID_IN112525,
++ .phy_id_mask = 0x0ff0fff0,
++ .name = "Inphi 112525_S03",
++ .features = PHY_GBIT_FEATURES,
++ .probe = &inphi_probe,
++},
++};
++
++module_phy_driver(inphi_driver);
++
++static struct mdio_device_id __maybe_unused inphi_tbl[] = {
++ { PHY_ID_IN112525, 0x0ff0fff0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(mdio, inphi_tbl);
+--- /dev/null
++++ b/drivers/net/phy/mdio-mux-multiplexer.c
+@@ -0,0 +1,122 @@
++// SPDX-License-Identifier: GPL-2.0+
++/* MDIO bus multiplexer using kernel multiplexer subsystem
++ *
++ * Copyright 2019 NXP
++ */
++
++#include <linux/platform_device.h>
++#include <linux/mdio-mux.h>
++#include <linux/module.h>
++#include <linux/mux/consumer.h>
++
++struct mdio_mux_multiplexer_state {
++ struct mux_control *muxc;
++ bool do_deselect;
++ void *mux_handle;
++};
++
++/**
++ * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
++ * layer when it thinks the mdio bus
++ * multiplexer needs to switch.
++ * @current_child: current value of the mux register.
++ * @desired_child: value of the 'reg' property of the target child MDIO node.
++ * @data: Private data used by this switch_fn passed to mdio_mux_init function
++ * via mdio_mux_init(.., .., .., .., data, ..).
++ *
++ * The first time this function is called, current_child == -1.
++ * If current_child == desired_child, then the mux is already set to the
++ * correct bus.
++ */
++static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
++ void *data)
++{
++ struct platform_device *pdev;
++ struct mdio_mux_multiplexer_state *s;
++ int ret = 0;
++
++ pdev = (struct platform_device *)data;
++ s = platform_get_drvdata(pdev);
++
++ if (!(current_child ^ desired_child))
++ return 0;
++
++ if (s->do_deselect)
++ ret = mux_control_deselect(s->muxc);
++ if (ret) {
++ dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
++ __func__, ret);
++ return ret;
++ }
++
++ ret = mux_control_select(s->muxc, desired_child);
++ if (!ret) {
++ dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
++ desired_child);
++ s->do_deselect = true;
++ } else {
++ s->do_deselect = false;
++ }
++
++ return ret;
++}
++
++static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mdio_mux_multiplexer_state *s;
++ int ret = 0;
++
++ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
++ if (!s)
++ return -ENOMEM;
++
++ s->muxc = devm_mux_control_get(dev, NULL);
++ if (IS_ERR(s->muxc)) {
++ ret = PTR_ERR(s->muxc);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
++ return ret;
++ }
++
++ platform_set_drvdata(pdev, s);
++
++ ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
++ mdio_mux_multiplexer_switch_fn, &s->mux_handle,
++ pdev, NULL);
++
++ return ret;
++}
++
++static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
++{
++ struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);
++
++ mdio_mux_uninit(s->mux_handle);
++
++ if (s->do_deselect)
++ mux_control_deselect(s->muxc);
++
++ return 0;
++}
++
++static const struct of_device_id mdio_mux_multiplexer_match[] = {
++ { .compatible = "mdio-mux-multiplexer", },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);
++
++static struct platform_driver mdio_mux_multiplexer_driver = {
++ .driver = {
++ .name = "mdio-mux-multiplexer",
++ .of_match_table = mdio_mux_multiplexer_match,
++ },
++ .probe = mdio_mux_multiplexer_probe,
++ .remove = mdio_mux_multiplexer_remove,
++};
++
++module_platform_driver(mdio_mux_multiplexer_driver);
++
++MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
++MODULE_AUTHOR("Pankaj Bansal <pankaj.bansal@nxp.com>");
++MODULE_LICENSE("GPL");
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
-From 93febc09be23aa75cbc5bf5e76250c923f4004e5 Mon Sep 17 00:00:00 2001
+From 35745905430a4c9827c235d42f3a61bef34043e8 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:59 +0800
-Subject: [PATCH 16/40] pfe-eth: support layerscape
+Date: Fri, 19 Apr 2019 13:21:09 +0800
+Subject: [PATCH] pfe-eth: support layerscape
+
This is an integrated patch of pfe-eth for layerscape
Signed-off-by: Akhila Kavi <akhila.kavi@nxp.com>
Signed-off-by: Anjaneyulu Jagarlmudi <anji.jagarlmudi@nxp.com>
Signed-off-by: Archana Madhavan <archana.madhavan@nxp.com>
Signed-off-by: Bhaskar Upadhaya <Bhaskar.Upadhaya@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
+Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
+Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- .../devicetree/bindings/net/fsl_ppfe/pfe.txt | 173 ++
- drivers/staging/fsl_ppfe/Kconfig | 20 +
- drivers/staging/fsl_ppfe/Makefile | 19 +
+ .../devicetree/bindings/net/fsl_ppfe/pfe.txt | 199 ++
+ drivers/staging/fsl_ppfe/Kconfig | 21 +
+ drivers/staging/fsl_ppfe/Makefile | 20 +
drivers/staging/fsl_ppfe/TODO | 2 +
drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
.../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
.../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
.../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
.../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
- drivers/staging/fsl_ppfe/include/pfe/pfe.h | 373 +++
- drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
- drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
- drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
- drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
- drivers/staging/fsl_ppfe/pfe_eth.c | 2521 +++++++++++++++++
- drivers/staging/fsl_ppfe/pfe_eth.h | 185 ++
- drivers/staging/fsl_ppfe/pfe_firmware.c | 314 ++
- drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
- drivers/staging/fsl_ppfe/pfe_hal.c | 1528 ++++++++++
- drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++
- drivers/staging/fsl_ppfe/pfe_hif.h | 212 ++
- drivers/staging/fsl_ppfe/pfe_hif_lib.c | 640 +++++
- drivers/staging/fsl_ppfe/pfe_hif_lib.h | 241 ++
- drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
- drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
- .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
- drivers/staging/fsl_ppfe/pfe_mod.c | 156 +
- drivers/staging/fsl_ppfe/pfe_mod.h | 114 +
- drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
- drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++
- drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
- 35 files changed, 10690 insertions(+)
+ drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
+ drivers/staging/fsl_ppfe/pfe_cdev.c | 258 ++
+ drivers/staging/fsl_ppfe/pfe_cdev.h | 41 +
+ drivers/staging/fsl_ppfe/pfe_ctrl.c | 226 ++
+ drivers/staging/fsl_ppfe/pfe_ctrl.h | 100 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.c | 99 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.h | 13 +
+ drivers/staging/fsl_ppfe/pfe_eth.c | 2554 +++++++++++++++++
+ drivers/staging/fsl_ppfe/pfe_eth.h | 175 ++
+ drivers/staging/fsl_ppfe/pfe_firmware.c | 302 ++
+ drivers/staging/fsl_ppfe/pfe_firmware.h | 20 +
+ drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++
+ drivers/staging/fsl_ppfe/pfe_hif.c | 1060 +++++++
+ drivers/staging/fsl_ppfe/pfe_hif.h | 200 ++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.c | 628 ++++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.h | 229 ++
+ drivers/staging/fsl_ppfe/pfe_hw.c | 164 ++
+ drivers/staging/fsl_ppfe/pfe_hw.h | 15 +
+ .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 368 +++
+ drivers/staging/fsl_ppfe/pfe_mod.c | 158 +
+ drivers/staging/fsl_ppfe/pfe_mod.h | 103 +
+ drivers/staging/fsl_ppfe/pfe_perfmon.h | 26 +
+ drivers/staging/fsl_ppfe/pfe_sysfs.c | 806 ++++++
+ drivers/staging/fsl_ppfe/pfe_sysfs.h | 17 +
+ 37 files changed, 10821 insertions(+)
create mode 100644 Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
create mode 100644 drivers/staging/fsl_ppfe/Kconfig
create mode 100644 drivers/staging/fsl_ppfe/Makefile
create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_cdev.h
create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl_ppfe/pfe.txt
-@@ -0,0 +1,173 @@
+@@ -0,0 +1,199 @@
+=============================================================================
+NXP Programmable Packet Forwarding Engine Device Bindings
+
+ Definition: Must be present. Value should be the id of the bus
+ connected to gemac.
+
-+- fsl,gemac-phy-id
-+ Usage: required
-+ Value type: <u32>
-+ Definition: Must be present. Value should be the id of the phy
-+ connected to gemac.
++- fsl,gemac-phy-id (deprecated binding)
++ Usage: required
++ Value type: <u32>
++ Definition: This binding shouldn't be used with new platforms.
++ Must be present. Value should be the id of the phy
++ connected to gemac.
+
+- fsl,mdio-mux-val
+ Usage: required
+ Value type: <string>
+ Definition: Must include "sgmii"
+
-+- fsl,pfe-phy-if-flags
-+ Usage: required
-+ Value type: <u32>
-+ Definition: Must be present. Value should be 0 by default.
-+ If there is not phy connected, this need to be 1.
++- fsl,pfe-phy-if-flags (deprecated binding)
++ Usage: required
++ Value type: <u32>
++ Definition: This binding shouldn't be used with new platforms.
++ Must be present. Value should be 0 by default.
++ If there is no phy connected, this needs to be 1.
+
-+- mdio
-+ optional subnode that specifies the mdio bus. This has reg
-+ property which is used to enable/disable the mdio bus.
++- phy-handle
++ Usage: optional
++ Value type: <phandle>
++ Definition: phandle to the PHY device connected to this device.
++
++- mdio : A required subnode which specifies the mdio bus in the PFE and used as
++a container for phy nodes according to ../phy.txt.
+
+EXAMPLE
+
+ #size-cells = <0>;
+ reg = <0x0>; /* GEM_ID */
+ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
-+ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
+ fsl,mdio-mux-val = <0x0>;
+ phy-mode = "sgmii";
-+ fsl,pfe-phy-if-flags = <0x0>;
++ phy-handle = <&sgmii_phy1>;
++};
++
++
++ethernet@1 {
++ compatible = "fsl,pfe-gemac-port";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x1>; /* GEM_ID */
++ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
++ fsl,mdio-mux-val = <0x0>;
++ phy-mode = "sgmii";
++ phy-handle = <&sgmii_phy2>;
++};
++
++mdio@0 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ sgmii_phy1: ethernet-phy@2 {
++ reg = <0x2>;
++ };
+
-+ mdio@0 {
-+ reg = <0x1>; /* enabled/disabled */
++ sgmii_phy2: ethernet-phy@1 {
++ reg = <0x1>;
+ };
+};
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Kconfig
-@@ -0,0 +1,20 @@
+@@ -0,0 +1,21 @@
+#
+# Freescale Programmable Packet Forwarding Engine driver
+#
+config FSL_PPFE
+ bool "Freescale PPFE Driver"
++ select FSL_GUTS
+ default n
+ ---help---
+ Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
+endif # FSL_PPFE
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Makefile
-@@ -0,0 +1,19 @@
+@@ -0,0 +1,20 @@
+#
+# Makefile for Freesecale PPFE driver
+#
+ pfe_sysfs.o \
+ pfe_debugfs.o \
+ pfe_ls1012a_platform.o \
-+ pfe_hal.o
++ pfe_hal.o \
++ pfe_cdev.o
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/TODO
@@ -0,0 +1,2 @@
+#endif /* _UTIL_CSR_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
-@@ -0,0 +1,373 @@
+@@ -0,0 +1,372 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+void gemac_init(void *base, void *config);
+void gemac_disable_rx_checksum_offload(void *base);
+void gemac_enable_rx_checksum_offload(void *base);
-+void gemac_set_mdc_div(void *base, int mdc_div);
+void gemac_set_speed(void *base, enum mac_speed gem_speed);
+void gemac_set_duplex(void *base, int duplex);
+void gemac_set_mode(void *base, int mode);
+
+#endif /* _PFE_H_ */
--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_cdev.c
+@@ -0,0 +1,258 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2018 NXP
++ */
++
++/* @pfe_cdev.c.
++ * Dummy device representing the PFE US in userspace.
++ * - used for interacting with the kernel layer for link status
++ */
++
++#include <linux/eventfd.h>
++#include <linux/irqreturn.h>
++#include <linux/io.h>
++#include <asm/irq.h>
++
++#include "pfe_cdev.h"
++#include "pfe_mod.h"
++
++static int pfe_majno;
++static struct class *pfe_char_class;
++static struct device *pfe_char_dev;
++struct eventfd_ctx *g_trigger;
++
++struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
++
++static int pfe_cdev_open(struct inode *inp, struct file *fp)
++{
++ pr_debug("PFE CDEV device opened.\n");
++ return 0;
++}
++
++static ssize_t pfe_cdev_read(struct file *fp, char *buf,
++ size_t len, loff_t *off)
++{
++ int ret = 0;
++
++ pr_info("PFE CDEV attempt copying (%lu) size of user.\n",
++ sizeof(link_states));
++
++ pr_debug("Dump link_state on screen before copy_to_user\n");
++ for (; ret < PFE_CDEV_ETH_COUNT; ret++) {
++ pr_debug("%u %u", link_states[ret].phy_id,
++ link_states[ret].state);
++ pr_debug("\n");
++ }
++
++ /* Copy to user the value in buffer sized len */
++ ret = copy_to_user(buf, &link_states, sizeof(link_states));
++ if (ret != 0) {
++ pr_err("Failed to send (%d)bytes of (%lu) requested.\n",
++ ret, len);
++ return -EFAULT;
++ }
++
++ /* offset set back to 0 as there is contextual reading offset */
++ *off = 0;
++ pr_debug("Read of (%lu) bytes performed.\n", sizeof(link_states));
++
++ return sizeof(link_states);
++}
++
++/**
++ * This function is for getting some commands from user through non-IOCTL
++ * channel. It can be used to configure the device.
++ * TODO: To be filled in the future, if duplex communication with user
++ * space is required.
++ */
++static ssize_t pfe_cdev_write(struct file *fp, const char *buf,
++ size_t len, loff_t *off)
++{
++ pr_info("PFE CDEV Write operation not supported!\n");
++
++ return -EFAULT;
++}
++
++static int pfe_cdev_release(struct inode *inp, struct file *fp)
++{
++ if (g_trigger) {
++ free_irq(pfe->hif_irq, g_trigger);
++ eventfd_ctx_put(g_trigger);
++ g_trigger = NULL;
++ }
++
++ pr_info("PFE_CDEV: Device successfully closed\n");
++ return 0;
++}
++
++/*
++ * hif_us_isr-
++ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
++ */
++static irqreturn_t hif_us_isr(int irq, void *arg)
++{
++ struct eventfd_ctx *trigger = (struct eventfd_ctx *)arg;
++ int int_status;
++ int int_enable_mask;
++
++ /*Read hif interrupt source register */
++ int_status = readl_relaxed(HIF_INT_SRC);
++ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
++
++ if ((int_status & HIF_INT) == 0)
++ return IRQ_NONE;
++
++ if (int_status & HIF_RXPKT_INT) {
++ int_enable_mask &= ~(HIF_RXPKT_INT);
++ /* Disable interrupts, they will be enabled after
++ * they are serviced
++ */
++ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
++
++ eventfd_signal(trigger, 1);
++ }
++
++ return IRQ_HANDLED;
++}
++
++#define PFE_INTR_COAL_USECS 100
++static long pfe_cdev_ioctl(struct file *fp, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret = -EFAULT;
++ int __user *argp = (int __user *)arg;
++
++ pr_debug("PFE CDEV IOCTL Called with cmd=(%u)\n", cmd);
++
++ switch (cmd) {
++ case PFE_CDEV_ETH0_STATE_GET:
++ /* Return an unsigned int (link state) for ETH0 */
++ *argp = link_states[0].state;
++ pr_debug("Returning state=%d for ETH0\n", *argp);
++ ret = 0;
++ break;
++ case PFE_CDEV_ETH1_STATE_GET:
++ /* Return an unsigned int (link state) for ETH1 */
++ *argp = link_states[1].state;
++ pr_debug("Returning state=%d for ETH1\n", *argp);
++ ret = 0;
++ break;
++ case PFE_CDEV_HIF_INTR_EN:
++ /* Return success/failure */
++ g_trigger = eventfd_ctx_fdget(*argp);
++ if (IS_ERR(g_trigger))
++ return PTR_ERR(g_trigger);
++ ret = request_irq(pfe->hif_irq, hif_us_isr, 0, "pfe_hif",
++ g_trigger);
++ if (ret) {
++ pr_err("%s: failed to get the hif IRQ = %d\n",
++ __func__, pfe->hif_irq);
++ eventfd_ctx_put(g_trigger);
++ g_trigger = NULL;
++ }
++ writel((PFE_INTR_COAL_USECS * (pfe->ctrl.sys_clk / 1000)) |
++ HIF_INT_COAL_ENABLE, HIF_INT_COAL);
++
++ pr_debug("request_irq for hif interrupt: %d\n", pfe->hif_irq);
++ ret = 0;
++ break;
++ default:
++ pr_info("Unsupport cmd (%d) for PFE CDEV.\n", cmd);
++ break;
++ };
++
++ return ret;
++}
++
++static unsigned int pfe_cdev_poll(struct file *fp,
++ struct poll_table_struct *wait)
++{
++ pr_info("PFE CDEV poll method not supported\n");
++ return 0;
++}
++
++static const struct file_operations pfe_cdev_fops = {
++ .open = pfe_cdev_open,
++ .read = pfe_cdev_read,
++ .write = pfe_cdev_write,
++ .release = pfe_cdev_release,
++ .unlocked_ioctl = pfe_cdev_ioctl,
++ .poll = pfe_cdev_poll,
++};
++
++int pfe_cdev_init(void)
++{
++ int ret;
++
++ pr_debug("PFE CDEV initialization begin\n");
++
++ /* Register the major number for the device */
++ pfe_majno = register_chrdev(0, PFE_CDEV_NAME, &pfe_cdev_fops);
++ if (pfe_majno < 0) {
++ pr_err("Unable to register PFE CDEV. PFE CDEV not available\n");
++ ret = pfe_majno;
++ goto cleanup;
++ }
++
++ pr_debug("PFE CDEV assigned major number: %d\n", pfe_majno);
++
++ /* Register the class for the device */
++ pfe_char_class = class_create(THIS_MODULE, PFE_CLASS_NAME);
++ if (IS_ERR(pfe_char_class)) {
++ pr_err(
++ "Failed to init class for PFE CDEV. PFE CDEV not available.\n");
++ goto cleanup;
++ }
++
++ pr_debug("PFE CDEV Class created successfully.\n");
++
++ /* Create the device without any parent and without any callback data */
++ pfe_char_dev = device_create(pfe_char_class, NULL,
++ MKDEV(pfe_majno, 0), NULL,
++ PFE_CDEV_NAME);
++ if (IS_ERR(pfe_char_dev)) {
++ pr_err("Unable to PFE CDEV device. PFE CDEV not available.\n");
++ ret = PTR_ERR(pfe_char_dev);
++ goto cleanup;
++ }
++
++ /* Information structure being shared with the userspace */
++ memset(link_states, 0, sizeof(struct pfe_shared_info) *
++ PFE_CDEV_ETH_COUNT);
++
++ pr_info("PFE CDEV created: %s\n", PFE_CDEV_NAME);
++
++ ret = 0;
++ return ret;
++
++cleanup:
++ if (!IS_ERR(pfe_char_class))
++ class_destroy(pfe_char_class);
++
++ if (pfe_majno > 0)
++ unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
++
++ ret = -EFAULT;
++ return ret;
++}
++
++void pfe_cdev_exit(void)
++{
++ if (!IS_ERR(pfe_char_dev))
++ device_destroy(pfe_char_class, MKDEV(pfe_majno, 0));
++
++ if (!IS_ERR(pfe_char_class)) {
++ class_unregister(pfe_char_class);
++ class_destroy(pfe_char_class);
++ }
++
++ if (pfe_majno > 0)
++ unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
++
++ /* reset the variables */
++ pfe_majno = 0;
++ pfe_char_class = NULL;
++ pfe_char_dev = NULL;
++
++ pr_info("PFE CDEV Removed.\n");
++}
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_cdev.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2018 NXP
++ */
++
++#ifndef _PFE_CDEV_H_
++#define _PFE_CDEV_H_
++
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/poll.h>
++
++#define PFE_CDEV_NAME "pfe_us_cdev"
++#define PFE_CLASS_NAME "ppfe_us"
++
++/* Extracted from ls1012a_pfe_platform_data, there are 3 interfaces which are
++ * supported by PFE driver. Should be updated if number of eth devices are
++ * changed.
++ */
++#define PFE_CDEV_ETH_COUNT 3
++
++struct pfe_shared_info {
++ uint32_t phy_id; /* Link phy ID */
++ uint8_t state; /* Has either 0 or 1 */
++};
++
++extern struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
++
++/* IOCTL Commands */
++#define PFE_CDEV_ETH0_STATE_GET _IOR('R', 0, int)
++#define PFE_CDEV_ETH1_STATE_GET _IOR('R', 1, int)
++#define PFE_CDEV_HIF_INTR_EN _IOWR('R', 2, int)
++
++int pfe_cdev_init(void);
++void pfe_cdev_exit(void);
++
++#endif /* _PFE_CDEV_H_ */
+--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
-@@ -0,0 +1,238 @@
+@@ -0,0 +1,226 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
-@@ -0,0 +1,112 @@
+@@ -0,0 +1,100 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_CTRL_H_
+#endif /* _PFE_CTRL_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
-@@ -0,0 +1,111 @@
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
-@@ -0,0 +1,25 @@
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_DEBUGFS_H_
+#endif /* _PFE_DEBUGFS_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
-@@ -0,0 +1,2521 @@
+@@ -0,0 +1,2554 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* @pfe_eth.c.
+#include <net/ip.h>
+#include <net/sock.h>
+
++#include <linux/of.h>
++#include <linux/of_mdio.h>
++
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+
+#include "pfe_mod.h"
+#include "pfe_eth.h"
++#include "pfe_cdev.h"
+
+#define LS1012A_REV_1_0 0x87040010
+
++bool pfe_use_old_dts_phy;
+bool pfe_errata_a010897;
+
+static void *cbus_emac_base[3];
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+ from_tx, int n_desc);
+
++/* MDIO registers */
++#define MDIO_SGMII_CR 0x00
++#define MDIO_SGMII_SR 0x01
++#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
++#define MDIO_SGMII_LINK_TMR_L 0x12
++#define MDIO_SGMII_LINK_TMR_H 0x13
++#define MDIO_SGMII_IF_MODE 0x14
++
++/* SGMII Control defines */
++#define SGMII_CR_RST 0x8000
++#define SGMII_CR_AN_EN 0x1000
++#define SGMII_CR_RESTART_AN 0x0200
++#define SGMII_CR_FD 0x0100
++#define SGMII_CR_SPEED_SEL1_1G 0x0040
++#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
++ SGMII_CR_SPEED_SEL1_1G)
++
++/* SGMII IF Mode */
++#define SGMII_DUPLEX_HALF 0x10
++#define SGMII_SPEED_10MBPS 0x00
++#define SGMII_SPEED_100MBPS 0x04
++#define SGMII_SPEED_1GBPS 0x08
++#define SGMII_USE_SGMII_AN 0x02
++#define SGMII_EN 0x01
++
++/* SGMII Device Ability for SGMII */
++#define SGMII_DEV_ABIL_ACK 0x4000
++#define SGMII_DEV_ABIL_EEE_CLK_STP_EN 0x0100
++#define SGMII_DEV_ABIL_SGMII 0x0001
++
+unsigned int gemac_regs[] = {
+ 0x0004, /* Interrupt event */
+ 0x0008, /* Interrupt mask */
+ */
+int pfe_eth_mdio_reset(struct mii_bus *bus)
+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+ u32 phy_speed;
+
-+ netif_info(priv, hw, priv->ndev, "%s\n", __func__);
+
+ mutex_lock(&bus->mdio_lock);
+
+ phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
+ << EMAC_MII_SPEED_SHIFT);
+ phy_speed |= EMAC_HOLDTIME(0x5);
-+ __raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
++ __raw_writel(phy_speed, priv->mdio_base + EMAC_MII_CTRL_REG);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
+
-+/* pfe_eth_gemac_phy_timeout
++/* pfe_eth_mdio_timeout
+ *
+ */
-+static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
++static int pfe_eth_mdio_timeout(struct pfe_mdio_priv_s *priv, int timeout)
+{
-+ while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
++ while (!(__raw_readl(priv->mdio_base + EMAC_IEVENT_REG) &
+ EMAC_IEVENT_MII)) {
+ if (timeout-- <= 0)
+ return -1;
+ usleep_range(10, 20);
+ }
-+ __raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
++ __raw_writel(EMAC_IEVENT_MII, priv->mdio_base + EMAC_IEVENT_REG);
+ return 0;
+}
+
+static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+ __raw_writel(EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(dev_addr) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++ priv->mdio_base + EMAC_MII_DATA_REG);
+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
-+ __func__);
++ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++ dev_err(&bus->dev, "phy MDIO address write timeout\n");
+ return -1;
+ }
+
+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+ /*To access external PHYs on QDS board mux needs to be configured*/
+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++ priv->mdio_base + EMAC_MII_DATA_REG);
+ } else {
+ /* start a write op */
+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(regnum) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++ priv->mdio_base + EMAC_MII_DATA_REG);
+ }
+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
-+ __func__);
++ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++ dev_err(&bus->dev, "%s: phy MDIO write timeout\n", __func__);
+ return -1;
+ }
-+ netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
-+ mii_id, regnum, value);
-+
+ return 0;
+}
+
+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
-+ struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+ u16 value = 0;
+
+ /*To access external PHYs on QDS board mux needs to be configured*/
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+ EMAC_MII_DATA_TA,
-+ priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++ priv->mdio_base + EMAC_MII_DATA_REG);
+ } else {
+ /* start a read op */
+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(regnum) |
-+ EMAC_MII_DATA_TA, priv->PHY_baseaddr +
++ EMAC_MII_DATA_TA, priv->mdio_base +
+ EMAC_MII_DATA_REG);
+ }
+
-+ if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
-+ netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
++ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++ dev_err(&bus->dev, "%s: phy MDIO read timeout\n", __func__);
+ return -1;
+ }
+
-+ value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
++ value = EMAC_MII_DATA(__raw_readl(priv->mdio_base +
+ EMAC_MII_DATA_REG));
-+ netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
-+ mii_id, regnum, value);
+ return value;
+}
+
-+static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
-+ struct ls1012a_mdio_platform_data *minfo)
++static int pfe_eth_mdio_init(struct pfe *pfe,
++ struct ls1012a_pfe_platform_data *pfe_info,
++ int ii)
+{
++ struct pfe_mdio_priv_s *priv = NULL;
++ struct ls1012a_mdio_platform_data *mdio_info;
+ struct mii_bus *bus;
-+ int rc, ii;
-+ struct phy_device *phydev;
++ struct device_node *mdio_node;
++ int rc = 0;
+
-+ netif_info(priv, drv, priv->ndev, "%s\n", __func__);
-+ pr_info("%s\n", __func__);
++ mdio_info = (struct ls1012a_mdio_platform_data *)
++ pfe_info->ls1012a_mdio_pdata;
++ mdio_info->id = ii;
+
-+ bus = mdiobus_alloc();
++ bus = mdiobus_alloc_size(sizeof(struct pfe_mdio_priv_s));
+ if (!bus) {
-+ netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
++ pr_err("mdiobus_alloc() failed\n");
+ rc = -ENOMEM;
-+ goto err0;
++ goto err_mdioalloc;
+ }
+
+ bus->name = "ls1012a MDIO Bus";
++ snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", mdio_info->id);
++
+ bus->read = &pfe_eth_mdio_read;
+ bus->write = &pfe_eth_mdio_write;
+ bus->reset = &pfe_eth_mdio_reset;
-+ snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
-+ bus->priv = priv;
-+
-+ bus->phy_mask = minfo->phy_mask;
-+ priv->mdc_div = minfo->mdc_div;
++ bus->parent = pfe->dev;
++ bus->phy_mask = mdio_info->phy_mask;
++ bus->irq[0] = mdio_info->irq[0];
++ priv = bus->priv;
++ priv->mdio_base = cbus_emac_base[ii];
+
++ priv->mdc_div = mdio_info->mdc_div;
+ if (!priv->mdc_div)
+ priv->mdc_div = 64;
++ dev_info(bus->parent, "%s: mdc_div: %d, phy_mask: %x\n",
++ __func__, priv->mdc_div, bus->phy_mask);
+
-+ bus->irq[0] = minfo->irq[0];
-+
-+ bus->parent = priv->pfe->dev;
++ mdio_node = of_get_child_by_name(pfe->dev->of_node, "mdio");
++ if ((mdio_info->id == 0) && mdio_node) {
++ rc = of_mdiobus_register(bus, mdio_node);
++ of_node_put(mdio_node);
++ } else {
++ rc = mdiobus_register(bus);
++ }
+
-+ netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
-+ __func__, priv->mdc_div, bus->phy_mask);
-+ rc = mdiobus_register(bus);
+ if (rc) {
-+ netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
-+ bus->name);
-+ goto err1;
++ dev_err(bus->parent, "mdiobus_register(%s) failed\n",
++ bus->name);
++ goto err_mdioregister;
+ }
+
+ priv->mii_bus = bus;
-+
-+ /* For clause 45 we need to call get_phy_device() with it's
-+ * 3rd argument as true and then register the phy device
-+ * via phy_device_register()
-+ */
-+
-+ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) {
-+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
-+ phydev = get_phy_device(priv->mii_bus,
-+ priv->einfo->phy_id + ii, true);
-+ if (!phydev || IS_ERR(phydev)) {
-+ rc = -EIO;
-+ netdev_err(priv->ndev, "fail to get device\n");
-+ goto err1;
-+ }
-+ rc = phy_device_register(phydev);
-+ if (rc) {
-+ phy_device_free(phydev);
-+ netdev_err(priv->ndev,
-+ "phy_device_register() failed\n");
-+ goto err1;
-+ }
-+ }
-+ }
++ pfe->mdio.mdio_priv[ii] = priv;
+
+ pfe_eth_mdio_reset(bus);
+
+ return 0;
+
-+err1:
++err_mdioregister:
+ mdiobus_free(bus);
-+err0:
++err_mdioalloc:
+ return rc;
+}
+
+/* pfe_eth_mdio_exit
+ */
-+static void pfe_eth_mdio_exit(struct mii_bus *bus)
++static void pfe_eth_mdio_exit(struct pfe *pfe,
++ int ii)
+{
++ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[ii];
++ struct mii_bus *bus = mdio_priv->mii_bus;
++
+ if (!bus)
+ return;
-+
-+ netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct
-+ pfe_eth_priv_s *)(bus->priv))->ndev, "%s\n", __func__);
-+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+}
+#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
+#define SCFG_RGMIIPCR_SETFD (0x00000001)
+
++#define MDIOSELCR 0x484
++#define MDIOSEL_SERDES 0x0
++#define MDIOSEL_EXTPHY 0x80000000
++
+static void pfe_set_rgmii_speed(struct phy_device *phydev)
+{
+ u32 rgmii_pcr;
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
++
++	/* Now, dump the details to the cdev.
++	 * XXX: Is locking required here on a uniprocessor arch?
++	 * Or maybe move this into the spinlock-protected section above.
++	 */
++ if (us && priv->einfo->gem_id < PFE_CDEV_ETH_COUNT) {
++ pr_debug("Changing link state from (%u) to (%u) for ID=(%u)\n",
++ link_states[priv->einfo->gem_id].state,
++ phydev->link,
++ priv->einfo->gem_id);
++ link_states[priv->einfo->gem_id].phy_id = priv->einfo->gem_id;
++ link_states[priv->einfo->gem_id].state = phydev->link;
++ }
+}
+
+/* pfe_phy_exit
+ */
+static void ls1012a_configure_serdes(struct net_device *ndev)
+{
-+ struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
++ struct pfe_eth_priv_s *eth_priv = netdev_priv(ndev);
++ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[eth_priv->id];
+ int sgmii_2500 = 0;
-+ struct mii_bus *bus = priv->mii_bus;
++ struct mii_bus *bus = mdio_priv->mii_bus;
+ u16 value = 0;
+
-+ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
++ if (eth_priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
+ sgmii_2500 = 1;
+
-+ netif_info(priv, drv, ndev, "%s\n", __func__);
++ netif_info(eth_priv, drv, ndev, "%s\n", __func__);
+ /* PCS configuration done with corresponding GEMAC */
+
-+ pfe_eth_mdio_read(bus, 0, 0);
-+ pfe_eth_mdio_read(bus, 0, 1);
++ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_CR);
++ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_SR);
++
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, SGMII_CR_RST);
+
-+ /*These settings taken from validtion team */
-+ pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
+ if (sgmii_2500) {
-+ pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
-+ pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
-+ pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
-+ pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE, SGMII_SPEED_1GBPS
++ | SGMII_EN);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
++ SGMII_DEV_ABIL_ACK | SGMII_DEV_ABIL_SGMII);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0xa120);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x7);
+ /* Autonegotiation need to be disabled for 2.5G SGMII mode*/
-+ value = 0x0140;
-+ pfe_eth_mdio_write(bus, 0, 0x0, value);
++ value = SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+ } else {
-+ pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
-+ pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
-+ pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
-+ pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
-+ pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE,
++ SGMII_SPEED_1GBPS
++ | SGMII_USE_SGMII_AN
++ | SGMII_EN);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
++ SGMII_DEV_ABIL_EEE_CLK_STP_EN
++ | 0xa0
++ | SGMII_DEV_ABIL_SGMII);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0x400);
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x0);
++ value = SGMII_CR_AN_EN | SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
++ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+ }
+}
+
+ snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->einfo->phy_id);
-+
+ netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
+ interface = priv->einfo->mii_config;
+ if ((interface == PHY_INTERFACE_MODE_SGMII) ||
+ (interface == PHY_INTERFACE_MODE_2500SGMII)) {
+ /*Configure SGMII PCS */
+ if (pfe->scfg) {
-+ /*Config MDIO from serdes */
-+ regmap_write(pfe->scfg, 0x484, 0x00000000);
++ /* Config MDIO from serdes */
++ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_SERDES);
+ }
+ ls1012a_configure_serdes(ndev);
+ }
+
+ if (pfe->scfg) {
+ /*Config MDIO from PAD */
-+ regmap_write(pfe->scfg, 0x484, 0x80000000);
++ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_EXTPHY);
+ }
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ pr_info("%s interface %x\n", __func__, interface);
-+ phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
+
-+ if (IS_ERR(phydev)) {
-+ netdev_err(ndev, "phy_connect() failed\n");
-+ return PTR_ERR(phydev);
++ if (priv->phy_node) {
++ phydev = of_phy_connect(ndev, priv->phy_node,
++ pfe_eth_adjust_link, 0,
++ priv->einfo->mii_config);
++ if (!(phydev)) {
++ netdev_err(ndev, "Unable to connect to phy\n");
++ return -ENODEV;
++ }
++
++ } else {
++ phydev = phy_connect(ndev, phy_id,
++ &pfe_eth_adjust_link, interface);
++ if (IS_ERR(phydev)) {
++ netdev_err(ndev, "Unable to connect to phy\n");
++ return PTR_ERR(phydev);
++ }
+ }
+
+ priv->phydev = phydev;
+
+/* pfe_eth_init_one
+ */
-+static int pfe_eth_init_one(struct pfe *pfe, int id)
++static int pfe_eth_init_one(struct pfe *pfe,
++ struct ls1012a_pfe_platform_data *pfe_info,
++ int id)
+{
+ struct net_device *ndev = NULL;
+ struct pfe_eth_priv_s *priv = NULL;
+ struct ls1012a_eth_platform_data *einfo;
-+ struct ls1012a_mdio_platform_data *minfo;
-+ struct ls1012a_pfe_platform_data *pfe_info;
+ int err;
+
-+ /* Extract pltform data */
-+ pfe_info = (struct ls1012a_pfe_platform_data *)
-+ pfe->dev->platform_data;
-+ if (!pfe_info) {
-+ pr_err(
-+ "%s: pfe missing additional platform data\n"
-+ , __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
+ einfo = (struct ls1012a_eth_platform_data *)
+ pfe_info->ls1012a_eth_pdata;
+
+ goto err0;
+ }
+
-+ minfo = (struct ls1012a_mdio_platform_data *)
-+ pfe_info->ls1012a_mdio_pdata;
-+
-+ /* einfo never be NULL, but no harm in having this check */
-+ if (!minfo) {
-+ pr_err(
-+ "%s: pfe missing additional mdios platform data\n",
-+ __func__);
-+ err = -ENODEV;
-+ goto err0;
-+ }
-+
+ if (us)
+ emac_txq_cnt = EMAC_TXQ_CNT;
+ /* Create an ethernet device instance */
+ priv->ndev = ndev;
+ priv->id = einfo[id].gem_id;
+ priv->pfe = pfe;
++ priv->phy_node = einfo[id].phy_node;
+
+ SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = &einfo[id];
+ priv->EMAC_baseaddr = cbus_emac_base[id];
-+ priv->PHY_baseaddr = cbus_emac_base[0];
+ priv->GPI_baseaddr = cbus_gpi_base[id];
+
-+#define HIF_GEMAC_TMUQ_BASE 6
-+ priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
-+ priv->high_tmu_q = priv->low_tmu_q + 1;
-+
+ spin_lock_init(&priv->lock);
+
+ pfe_eth_fast_tx_timeout_init(priv);
+ /* Copy the station address into the dev structure, */
+ memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
+
-+ /* Initialize mdio */
-+ if (minfo[id].enabled) {
-+ err = pfe_eth_mdio_init(priv, &minfo[id]);
-+ if (err) {
-+ netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
-+ __func__);
-+ goto err2;
-+ }
-+ }
-+
+ if (us)
+ goto phy_init;
+
+ else
+ ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
+
-+ /* supported features */
-+ ndev->hw_features = NETIF_F_SG;
-+
+ /*Enable after checksum offload is validated */
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_SG;
+ HIF_RX_POLL_WEIGHT - 16);
+
+ err = register_netdev(ndev);
-+
+ if (err) {
+ netdev_err(ndev, "register_netdev() failed\n");
-+ goto err3;
++ goto err1;
++ }
++
++ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
++ ((pfe_use_old_dts_phy) &&
++ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
++ pr_info("%s: No PHY or fixed-link\n", __func__);
++ goto skip_phy_init;
+ }
+
+phy_init:
+ device_init_wakeup(&ndev->dev, WAKE_MAGIC);
+
-+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
-+ err = pfe_phy_init(ndev);
-+ if (err) {
-+ netdev_err(ndev, "%s: pfe_phy_init() failed\n",
-+ __func__);
-+ goto err4;
-+ }
++ err = pfe_phy_init(ndev);
++ if (err) {
++ netdev_err(ndev, "%s: pfe_phy_init() failed\n",
++ __func__);
++ goto err2;
+ }
+
+ if (us) {
+
+ netif_carrier_on(ndev);
+
++skip_phy_init:
+ /* Create all the sysfs files */
+ if (pfe_eth_sysfs_init(ndev))
-+ goto err4;
++ goto err3;
+
+ netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
+ __func__, priv->EMAC_baseaddr);
+
+ return 0;
-+err4:
-+ if (us)
-+ goto err3;
-+ unregister_netdev(ndev);
++
+err3:
-+ pfe_eth_mdio_exit(priv->mii_bus);
++ pfe_phy_exit(priv->ndev);
+err2:
++ if (us)
++ goto err1;
++ unregister_netdev(ndev);
++err1:
+ free_netdev(priv->ndev);
+err0:
+ return err;
+{
+ int ii = 0;
+ int err;
++ struct ls1012a_pfe_platform_data *pfe_info;
+
+ pr_info("%s\n", __func__);
+
+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
+
++ pfe_info = (struct ls1012a_pfe_platform_data *)
++ pfe->dev->platform_data;
++ if (!pfe_info) {
++ pr_err("%s: pfe missing additional platform data\n", __func__);
++ err = -ENODEV;
++ goto err_pdata;
++ }
++
++ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
++ err = pfe_eth_mdio_init(pfe, pfe_info, ii);
++ if (err) {
++ pr_err("%s: pfe_eth_mdio_init() failed\n", __func__);
++ goto err_mdio_init;
++ }
++ }
++
+ if (fsl_guts_get_svr() == LS1012A_REV_1_0)
+ pfe_errata_a010897 = true;
+ else
+ pfe_errata_a010897 = false;
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
-+ err = pfe_eth_init_one(pfe, ii);
++ err = pfe_eth_init_one(pfe, pfe_info, ii);
+ if (err)
-+ goto err0;
++ goto err_eth_init;
+ }
+
+ return 0;
+
-+err0:
-+ while (ii--)
++err_eth_init:
++ while (ii--) {
+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
++ pfe_eth_mdio_exit(pfe, ii);
++ }
+
-+ /* Register three network devices in the kernel */
++err_mdio_init:
++err_pdata:
+ return err;
+}
+
+ if (!us)
+ pfe_eth_sysfs_exit(priv->ndev);
+
-+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
-+ pfe_phy_exit(priv->ndev);
++ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
++ ((pfe_use_old_dts_phy) &&
++ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
++ pr_info("%s: No PHY or fixed-link\n", __func__);
++ goto skip_phy_exit;
++ }
++
++ pfe_phy_exit(priv->ndev);
+
++skip_phy_exit:
+ if (!us)
+ unregister_netdev(priv->ndev);
+
-+ if (priv->mii_bus)
-+ pfe_eth_mdio_exit(priv->mii_bus);
-+
+ free_netdev(priv->ndev);
+}
+
+
+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
++
++ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
++ pfe_eth_mdio_exit(pfe, ii);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.h
-@@ -0,0 +1,185 @@
+@@ -0,0 +1,175 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
-+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_ETH_H_
+#define GEMAC_NO_PHY BIT(0)
+
+struct ls1012a_eth_platform_data {
-+ /* device specific information */
-+ u32 device_flags;
-+ char name[16];
-+
+ /* board specific information */
+ u32 mii_config;
+ u32 phy_flags;
+ u32 gem_id;
-+ u32 bus_id;
+ u32 phy_id;
+ u32 mdio_muxval;
+ u8 mac_addr[ETH_ALEN];
++ struct device_node *phy_node;
+};
+
+struct ls1012a_mdio_platform_data {
-+ int enabled;
++ int id;
+ int irq[32];
+ u32 phy_mask;
+ int mdc_div;
+ unsigned int event_status;
+ int irq;
+ void *EMAC_baseaddr;
-+ /* This points to the EMAC base from where we access PHY */
-+ void *PHY_baseaddr;
+ void *GPI_baseaddr;
+ /* PHY stuff */
+ struct phy_device *phydev;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
-+ /* mdio info */
-+ int mdc_div;
-+ struct mii_bus *mii_bus;
++ struct device_node *phy_node;
+ struct clk *gemtx_clk;
+ int wol;
+ int pause_flag;
+ struct pfe_eth_priv_s *eth_priv[3];
+};
+
++struct pfe_mdio_priv_s {
++ void __iomem *mdio_base;
++ int mdc_div;
++ struct mii_bus *mii_bus;
++};
++
++struct pfe_mdio {
++ struct pfe_mdio_priv_s *mdio_priv[3];
++};
++
+int pfe_eth_init(struct pfe *pfe);
+void pfe_eth_exit(struct pfe *pfe);
+int pfe_eth_suspend(struct net_device *dev);
+#endif /* _PFE_ETH_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
-@@ -0,0 +1,314 @@
+@@ -0,0 +1,302 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
-@@ -0,0 +1,32 @@
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_FIRMWARE_H_
+#endif /* _PFE_FIRMWARE_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hal.c
-@@ -0,0 +1,1528 @@
+@@ -0,0 +1,1516 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pfe_mod.h"
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.c
-@@ -0,0 +1,1072 @@
+@@ -0,0 +1,1060 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.h
-@@ -0,0 +1,212 @@
+@@ -0,0 +1,200 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_HIF_H_
+#endif /* _PFE_HIF_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
-@@ -0,0 +1,640 @@
+@@ -0,0 +1,628 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/version.h>
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
-@@ -0,0 +1,241 @@
+@@ -0,0 +1,229 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_HIF_LIB_H_
+#endif /* _PFE_HIF_LIB_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.c
-@@ -0,0 +1,176 @@
+@@ -0,0 +1,164 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pfe_mod.h"
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.h
-@@ -0,0 +1,27 @@
+@@ -0,0 +1,15 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_HW_H_
+#endif /* _PFE_HW_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
-@@ -0,0 +1,385 @@
+@@ -0,0 +1,368 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
++#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
++#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include "pfe_mod.h"
+
++extern bool pfe_use_old_dts_phy;
+struct ls1012a_pfe_platform_data pfe_platform_data;
+
-+static int pfe_get_gemac_if_proprties(struct device_node *parent, int port, int
-+ if_cnt,
-+ struct ls1012a_pfe_platform_data
-+ *pdata)
++static int pfe_get_gemac_if_properties(struct device_node *gem,
++ int port,
++ struct ls1012a_pfe_platform_data *pdata)
+{
-+ struct device_node *gem = NULL, *phy = NULL;
++ struct device_node *phy_node = NULL;
+ int size;
-+ int ii = 0, phy_id = 0;
++ int phy_id = 0;
+ const u32 *addr;
+ const void *mac_addr;
+
-+ for (ii = 0; ii < if_cnt; ii++) {
-+ gem = of_get_next_child(parent, gem);
-+ if (!gem)
-+ goto err;
-+ addr = of_get_property(gem, "reg", &size);
-+ if (addr && (be32_to_cpup(addr) == port))
-+ break;
-+ }
-+
-+ if (ii >= if_cnt) {
-+ pr_err("%s:%d Failed to find interface = %d\n",
-+ __func__, __LINE__, if_cnt);
-+ goto err;
-+ }
++ addr = of_get_property(gem, "reg", &size);
++ port = be32_to_cpup(addr);
+
+ pdata->ls1012a_eth_pdata[port].gem_id = port;
+
+ mac_addr = of_get_mac_address(gem);
-+
+ if (mac_addr) {
+ memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
+ ETH_ALEN);
+ }
+
-+ pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
++ phy_node = of_parse_phandle(gem, "phy-handle", 0);
++ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
++ if (phy_node) {
++ pfe_use_old_dts_phy = false;
++ goto process_phynode;
++ } else if (of_phy_is_fixed_link(gem)) {
++ pfe_use_old_dts_phy = false;
++ if (of_phy_register_fixed_link(gem) < 0) {
++ pr_err("broken fixed-link specification\n");
++ goto err;
++ }
++ phy_node = of_node_get(gem);
++ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
++ } else if (of_get_property(gem, "fsl,pfe-phy-if-flags", &size)) {
++ pfe_use_old_dts_phy = true;
++ /* Use old dts properties for phy handling */
++ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
++ pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
+
-+ if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
-+ pr_err("%s:%d Incorrect Phy mode....\n", __func__,
-+ __LINE__);
++ addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
++ if (!addr) {
++ pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
++ __LINE__);
++ } else {
++ phy_id = be32_to_cpup(addr);
++ pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
++ pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
++ }
+
-+ addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
-+ if (!addr)
-+ pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
-+ __LINE__);
-+ else
-+ pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
++ /* If PHY is enabled, read mdio properties */
++ if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
++ goto done;
+
-+ addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
-+ if (!addr) {
-+ pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
-+ __LINE__);
+ } else {
-+ phy_id = be32_to_cpup(addr);
-+ pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
-+ pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
++ pr_info("%s: No PHY or fixed-link\n", __func__);
++ return 0;
+ }
+
++process_phynode:
++ pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
++ if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
++ pr_err("%s:%d Incorrect Phy mode....\n", __func__,
++ __LINE__);
++
+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
-+ if (!addr)
++ if (!addr) {
+ pr_err("%s: Invalid mdio-mux-val....\n", __func__);
-+ else
++ } else {
+ phy_id = be32_to_cpup(addr);
+ pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
++ }
+
+ if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
+ pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
+ pdata->ls1012a_eth_pdata[port].mdio_muxval;
+
-+ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
-+ if (!addr)
-+ pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
-+ __func__, __LINE__);
-+ else
-+ pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
-+
-+ /* If PHY is enabled, read mdio properties */
-+ if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
-+ goto done;
-+
-+ phy = of_get_next_child(gem, NULL);
-+
-+ addr = of_get_property(phy, "reg", &size);
-+
-+ if (!addr)
-+ pr_err("%s:%d Invalid phy enable flag....\n",
-+ __func__, __LINE__);
-+ else
-+ pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
+
+ pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
+
+done:
-+
+ return 0;
+
+err:
+ struct resource res;
+ int ii, rc, interface_count = 0, size = 0;
+ const u32 *prop;
-+ struct device_node *np;
++ struct device_node *np, *gem = NULL;
+ struct clk *pfe_clk;
+
+ np = pdev->dev.of_node;
+ pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
+
+ for (ii = 0; ii < interface_count; ii++) {
-+ pfe_get_gemac_if_proprties(np, ii, interface_count,
-+ &pfe_platform_data);
++ gem = of_get_next_child(np, gem);
++ if (gem)
++ pfe_get_gemac_if_properties(gem, ii,
++ &pfe_platform_data);
++ else
++ pr_err("Unable to find interface %d\n", ii);
++
+ }
+
+ pfe->dev = &pdev->dev;
+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
+ netdev = pfe->eth.eth_priv[i]->ndev;
+
-+ if (pfe->eth.eth_priv[i]->mii_bus)
-+ pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
++ if (pfe->mdio.mdio_priv[i]->mii_bus)
++ pfe_eth_mdio_reset(pfe->mdio.mdio_priv[i]->mii_bus);
+
+ if (netif_running(netdev))
+ pfe_eth_resume(netdev);
+MODULE_AUTHOR("NXP DNCPE");
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
-@@ -0,0 +1,156 @@
+@@ -0,0 +1,158 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include "pfe_mod.h"
++#include "pfe_cdev.h"
+
+unsigned int us;
+module_param(us, uint, 0444);
+ if (rc < 0)
+ goto err_debugfs;
+
++ if (us) {
++ /* Creating a character device */
++ rc = pfe_cdev_init();
++ if (rc < 0)
++ goto err_cdev;
++ }
++
+ return 0;
+
++err_cdev:
++ pfe_debugfs_exit(pfe);
++
+err_debugfs:
+ pfe_sysfs_exit(pfe);
+
+{
+ pr_info("%s\n", __func__);
+
++ if (us)
++ pfe_cdev_exit();
++
+ pfe_debugfs_exit(pfe);
+
+ pfe_sysfs_exit(pfe);
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.h
-@@ -0,0 +1,114 @@
+@@ -0,0 +1,103 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_MOD_H_
+ struct pfe_ctrl ctrl;
+ struct pfe_hif hif;
+ struct pfe_eth eth;
++ struct pfe_mdio mdio;
+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
+#if defined(CFG_DIAGS)
+ struct pfe_diags diags;
+#endif /* _PFE_MOD_H */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
-@@ -0,0 +1,38 @@
+@@ -0,0 +1,26 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_PERFMON_H_
+#endif /* _PFE_PERFMON_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
-@@ -0,0 +1,818 @@
+@@ -0,0 +1,806 @@
++// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
-@@ -0,0 +1,29 @@
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_SYSFS_H_
-From 48dbe4b3a31795b8efdfff82f69eccd086052eed Mon Sep 17 00:00:00 2001
+From 371e99a257cb714f9a6027d6571cb1a43855d926 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 16 Nov 2018 10:27:30 +0800
-Subject: [PATCH 16/39] dpaa-bqman: support layerscape
+Date: Wed, 17 Apr 2019 18:58:24 +0800
+Subject: [PATCH] dpaa-bqman: support layerscape
+
This is an integrated patch of dpaa-bqman for layerscape
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
drivers/soc/fsl/qbman/Kconfig | 2 +-
- drivers/soc/fsl/qbman/bman.c | 24 ++++-
- drivers/soc/fsl/qbman/bman_ccsr.c | 35 ++++++-
- drivers/soc/fsl/qbman/bman_portal.c | 12 ++-
+ drivers/soc/fsl/qbman/bman.c | 24 +++-
+ drivers/soc/fsl/qbman/bman_ccsr.c | 57 +++++++++-
+ drivers/soc/fsl/qbman/bman_portal.c | 44 ++++++--
drivers/soc/fsl/qbman/bman_priv.h | 3 +
drivers/soc/fsl/qbman/dpaa_sys.h | 8 +-
- drivers/soc/fsl/qbman/qman.c | 46 ++++++++-
- drivers/soc/fsl/qbman/qman_ccsr.c | 140 ++++++++++++++++++++++------
- drivers/soc/fsl/qbman/qman_portal.c | 12 ++-
+ drivers/soc/fsl/qbman/qman.c | 46 +++++++-
+ drivers/soc/fsl/qbman/qman_ccsr.c | 168 +++++++++++++++++++++++-----
+ drivers/soc/fsl/qbman/qman_portal.c | 60 ++++++++--
drivers/soc/fsl/qbman/qman_priv.h | 5 +-
drivers/soc/fsl/qbman/qman_test.h | 2 -
- 11 files changed, 236 insertions(+), 53 deletions(-)
+ include/soc/fsl/bman.h | 16 +++
+ include/soc/fsl/qman.h | 17 +++
+ 13 files changed, 390 insertions(+), 62 deletions(-)
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
return 0;
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
-@@ -170,10 +170,11 @@ static int fsl_bman_probe(struct platfor
+@@ -29,6 +29,7 @@
+ */
+
+ #include "bman_priv.h"
++#include <linux/iommu.h>
+
+ u16 bman_ip_rev;
+ EXPORT_SYMBOL(bman_ip_rev);
+@@ -120,6 +121,7 @@ static void bm_set_memory(u64 ba, u32 si
+ */
+ static dma_addr_t fbpr_a;
+ static size_t fbpr_sz;
++static int __bman_probed;
+
+ static int bman_fbpr(struct reserved_mem *rmem)
+ {
+@@ -166,14 +168,24 @@ static irqreturn_t bman_isr(int irq, voi
+ return IRQ_HANDLED;
+ }
+
++int bman_is_probed(void)
++{
++ return __bman_probed;
++}
++EXPORT_SYMBOL_GPL(bman_is_probed);
++
+ static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
+ struct device_node *mem_node, *node = dev->of_node;
++ struct iommu_domain *domain;
struct resource *res;
u16 id, bm_pool_cnt;
u8 major, minor;
+ u64 size;
++
++ __bman_probed = -1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
-@@ -201,6 +202,38 @@ static int fsl_bman_probe(struct platfor
+@@ -201,6 +213,47 @@ static int fsl_bman_probe(struct platfor
return -ENODEV;
}
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
++
++ /* Create an 1-to-1 iommu mapping for FBPR area */
++ domain = iommu_get_domain_for_dev(dev);
++ if (domain) {
++ ret = iommu_map(domain, fbpr_a, fbpr_a, PAGE_ALIGN(fbpr_sz),
++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++ if (ret)
++ dev_warn(dev, "failed to iommu_map() %d\n", ret);
++ }
+
bm_set_memory(fbpr_a, fbpr_sz);
err_irq = platform_get_irq(pdev, 0);
+@@ -240,6 +293,8 @@ static int fsl_bman_probe(struct platfor
+ return ret;
+ }
+
++ __bman_probed = 1;
++
+ return 0;
+ };
+
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
-@@ -123,7 +123,14 @@ static int bman_portal_probe(struct plat
+@@ -32,6 +32,7 @@
+
+ static struct bman_portal *affine_bportals[NR_CPUS];
+ static struct cpumask portal_cpus;
++static int __bman_portals_probed;
+ /* protect bman global registers and global data shared among portals */
+ static DEFINE_SPINLOCK(bman_lock);
+
+@@ -85,6 +86,12 @@ static int bman_online_cpu(unsigned int
+ return 0;
+ }
+
++int bman_portals_probed(void)
++{
++ return __bman_portals_probed;
++}
++EXPORT_SYMBOL_GPL(bman_portals_probed);
++
+ static int bman_portal_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -92,11 +99,21 @@ static int bman_portal_probe(struct plat
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+- int irq, cpu;
++ int irq, cpu, err;
++
++ err = bman_is_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
++ return -ENODEV;
++ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+- if (!pcfg)
++ if (!pcfg) {
++ __bman_portals_probed = -1;
+ return -ENOMEM;
++ }
+
+ pcfg->dev = dev;
+
+@@ -104,14 +121,14 @@ static int bman_portal_probe(struct plat
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
+ }
+
+ pcfg->cpu = -1;
+@@ -119,11 +136,18 @@ static int bman_portal_probe(struct plat
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %pOF IRQ'\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
}
pcfg->irq = irq;
if (!va) {
dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
-@@ -131,8 +138,7 @@ static int bman_portal_probe(struct plat
+@@ -131,8 +155,7 @@ static int bman_portal_probe(struct plat
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
if (!va) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+@@ -149,6 +172,9 @@ static int bman_portal_probe(struct plat
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
++ if (!__bman_portals_probed &&
++ cpumask_weight(&portal_cpus) == num_online_cpus())
++ __bman_portals_probed = 1;
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+@@ -168,6 +194,8 @@ err_portal_init:
+ err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ err_ioremap1:
++ __bman_portals_probed = -1;
++
+ return -ENXIO;
+ }
+
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -33,6 +33,9 @@
/*
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
-@@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *d
+@@ -29,6 +29,7 @@
+ */
+
+ #include "qman_priv.h"
++#include <linux/iommu.h>
+
+ u16 qman_ip_rev;
+ EXPORT_SYMBOL(qman_ip_rev);
+@@ -273,6 +274,7 @@ static const struct qman_error_info_mdat
+ static u32 __iomem *qm_ccsr_start;
+ /* A SDQCR mask comprising all the available/visible pool channels */
+ static u32 qm_pools_sdqcr;
++static int __qman_probed;
+
+ static inline u32 qm_ccsr_in(u32 offset)
+ {
+@@ -401,21 +403,42 @@ static int qm_init_pfdr(struct device *d
}
/*
return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
-@@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem
+@@ -431,32 +454,13 @@ static int qman_pfdr(struct reserved_mem
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
static void log_edata_bits(struct device *dev, u32 bit_count)
{
u32 i, j, mask = 0xffffffff;
-@@ -687,11 +689,12 @@ static int qman_resource_init(struct dev
+@@ -595,6 +599,7 @@ static int qman_init_ccsr(struct device
+ #define LIO_CFG_LIODN_MASK 0x0fff0000
+ void qman_liodn_fixup(u16 channel)
+ {
++#ifdef CONFIG_PPC
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+@@ -614,6 +619,7 @@ void qman_liodn_fixup(u16 channel)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
++#endif
+ }
+
+ #define IO_CFG_SDEST_MASK 0x00ff0000
+@@ -684,14 +690,24 @@ static int qman_resource_init(struct dev
+ return 0;
+ }
+
++int qman_is_probed(void)
++{
++ return __qman_probed;
++}
++EXPORT_SYMBOL_GPL(qman_is_probed);
++
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
+ struct device_node *mem_node, *node = dev->of_node;
++ struct iommu_domain *domain;
struct resource *res;
int ret, err_irq;
u16 id;
u8 major, minor;
+ u64 size;
++
++ __qman_probed = -1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
-@@ -717,6 +720,8 @@ static int fsl_qman_probe(struct platfor
+@@ -717,6 +733,8 @@ static int fsl_qman_probe(struct platfor
qman_ip_rev = QMAN_REV30;
else if (major == 3 && minor == 1)
qman_ip_rev = QMAN_REV31;
else {
dev_err(dev, "Unknown QMan version\n");
return -ENODEV;
-@@ -727,10 +732,83 @@ static int fsl_qman_probe(struct platfor
+@@ -727,10 +745,96 @@ static int fsl_qman_probe(struct platfor
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
+ }
+ }
+ dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
++
++ /* Create an 1-to-1 iommu mapping for fqd and pfdr areas */
++ domain = iommu_get_domain_for_dev(dev);
++ if (domain) {
++ ret = iommu_map(domain, fqd_a, fqd_a, PAGE_ALIGN(fqd_sz),
++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++ if (ret)
++ dev_warn(dev, "iommu_map(fqd) failed %d\n", ret);
++ ret = iommu_map(domain, pfdr_a, pfdr_a, PAGE_ALIGN(pfdr_sz),
++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++ if (ret)
++ dev_warn(dev, "iommu_map(pfdr) failed %d\n", ret);
++ }
ret = qman_init_ccsr(dev);
if (ret) {
+@@ -793,6 +897,8 @@ static int fsl_qman_probe(struct platfor
+ if (ret)
+ return ret;
+
++ __qman_probed = 1;
++
+ return 0;
+ }
+
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
-@@ -262,7 +262,14 @@ static int qman_portal_probe(struct plat
+@@ -29,6 +29,7 @@
+ */
+
+ #include "qman_priv.h"
++#include <linux/iommu.h>
+
+ struct qman_portal *qman_dma_portal;
+ EXPORT_SYMBOL(qman_dma_portal);
+@@ -38,6 +39,7 @@ EXPORT_SYMBOL(qman_dma_portal);
+ #define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+ static struct cpumask portal_cpus;
++static int __qman_portals_probed;
+ /* protect qman global registers and global data shared among portals */
+ static DEFINE_SPINLOCK(qman_lock);
+
+@@ -218,19 +220,36 @@ static int qman_online_cpu(unsigned int
+ return 0;
+ }
+
++int qman_portals_probed(void)
++{
++ return __qman_portals_probed;
++}
++EXPORT_SYMBOL_GPL(qman_portals_probed);
++
+ static int qman_portal_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
++ struct iommu_domain *domain;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu, err;
+ u32 val;
+
++ err = qman_is_probed();
++ if (!err)
++ return -EPROBE_DEFER;
++ if (err < 0) {
++ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
++ return -ENODEV;
++ }
++
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+- if (!pcfg)
++ if (!pcfg) {
++ __qman_portals_probed = -1;
+ return -ENOMEM;
++ }
+
+ pcfg->dev = dev;
+
+@@ -238,19 +257,20 @@ static int qman_portal_probe(struct plat
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
++ __qman_portals_probed = -1;
+ return err;
+ }
+ pcfg->channel = val;
+@@ -258,11 +278,18 @@ static int qman_portal_probe(struct plat
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %pOF IRQ\n", node);
+- return -ENXIO;
++ goto err_ioremap1;
}
pcfg->irq = irq;
if (!va) {
dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
-@@ -270,8 +277,7 @@ static int qman_portal_probe(struct plat
+@@ -270,8 +297,7 @@ static int qman_portal_probe(struct plat
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
if (!va) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+@@ -279,6 +305,21 @@ static int qman_portal_probe(struct plat
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
++ /* Create an 1-to-1 iommu mapping for cena portal area */
++ domain = iommu_get_domain_for_dev(dev);
++ if (domain) {
++ /*
++ * Note: not mapping this as cacheable triggers the infamous
++ * QMan CIDE error.
++ */
++ err = iommu_map(domain,
++ addr_phys[0]->start, addr_phys[0]->start,
++ PAGE_ALIGN(resource_size(addr_phys[0])),
++ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++ if (err)
++ dev_warn(dev, "failed to iommu_map() %d\n", err);
++ }
++
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+@@ -290,6 +331,9 @@ static int qman_portal_probe(struct plat
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
++ if (!__qman_portals_probed &&
++ cpumask_weight(&portal_cpus) == num_online_cpus())
++ __qman_portals_probed = 1;
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+@@ -314,6 +358,8 @@ err_portal_init:
+ err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ err_ioremap1:
++ __qman_portals_probed = -1;
++
+ return -ENXIO;
+ }
+
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -28,13 +28,13 @@
-
int qman_test_stash(void);
int qman_test_api(void);
+--- a/include/soc/fsl/bman.h
++++ b/include/soc/fsl/bman.h
+@@ -126,4 +126,20 @@ int bman_release(struct bman_pool *pool,
+ */
+ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+
++/**
++ * bman_is_probed - Check if bman is probed
++ *
++ * Returns 1 if the bman driver successfully probed, -1 if the bman driver
++ * failed to probe or 0 if the bman driver has not been probed yet.
++ */
++int bman_is_probed(void);
++/**
++ * bman_portals_probed - Check if all cpu bound bman portals are probed
++ *
++ * Returns 1 if all the required cpu bound bman portals successfully probed,
++ * -1 if probe errors appeared or 0 if the bman portals have not yet
++ * finished probing.
++ */
++int bman_portals_probed(void);
++
+ #endif /* __FSL_BMAN_H */
+--- a/include/soc/fsl/qman.h
++++ b/include/soc/fsl/qman.h
+@@ -1186,4 +1186,21 @@ int qman_alloc_cgrid_range(u32 *result,
+ */
+ int qman_release_cgrid(u32 id);
+
++/**
++ * qman_is_probed - Check if qman is probed
++ *
++ * Returns 1 if the qman driver successfully probed, -1 if the qman driver
++ * failed to probe or 0 if the qman driver has not been probed yet.
++ */
++int qman_is_probed(void);
++
++/**
++ * qman_portals_probed - Check if all cpu bound qman portals are probed
++ *
++ * Returns 1 if all the required cpu bound qman portals successfully probed,
++ * -1 if probe errors appeared or 0 if the qman portals have not yet
++ * finished probing.
++ */
++int qman_portals_probed(void);
++
+ #endif /* __FSL_QMAN_H */
--- /dev/null
+From f0f6e88696957d376d8875f675c1caf75a33fd67 Mon Sep 17 00:00:00 2001
+From: Biwen Li <biwen.li@nxp.com>
+Date: Wed, 17 Apr 2019 18:58:34 +0800
+Subject: [PATCH] etsec: support layerscape
+
+This is an integrated patch of etsec for layerscape
+
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/net/ethernet/freescale/gianfar.h | 3 ---
+ .../net/ethernet/freescale/gianfar_ethtool.c | 23 +++++++++++++++----
+ 2 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/freescale/gianfar.h
++++ b/drivers/net/ethernet/freescale/gianfar.h
+@@ -1372,7 +1372,4 @@ struct filer_table {
+ struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+ };
+
+-/* The gianfar_ptp module will set this variable */
+-extern int gfar_phc_index;
+-
+ #endif /* __GIANFAR_H */
+--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+@@ -41,6 +41,8 @@
+ #include <linux/phy.h>
+ #include <linux/sort.h>
+ #include <linux/if_vlan.h>
++#include <linux/of_platform.h>
++#include <linux/fsl/ptp_qoriq.h>
+
+ #include "gianfar.h"
+
+@@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_devic
+ return ret;
+ }
+
+-int gfar_phc_index = -1;
+-EXPORT_SYMBOL(gfar_phc_index);
+-
+ static int gfar_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+ {
+ struct gfar_private *priv = netdev_priv(dev);
++ struct platform_device *ptp_dev;
++ struct device_node *ptp_node;
++ struct qoriq_ptp *ptp = NULL;
++
++ info->phc_index = -1;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+- info->phc_index = -1;
+ return 0;
+ }
++
++ ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
++ if (ptp_node) {
++ ptp_dev = of_find_device_by_node(ptp_node);
++ if (ptp_dev)
++ ptp = platform_get_drvdata(ptp_dev);
++ }
++
++ if (ptp)
++ info->phc_index = ptp->phc_index;
++
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+- info->phc_index = gfar_phc_index;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+++ /dev/null
-From b3544990f8496edda965e1ff9a14727360660676 Mon Sep 17 00:00:00 2001
-From: Mathew McBride <matt@traverse.com.au>
-Date: Mon, 7 Aug 2017 10:19:48 +1000
-Subject: [PATCH] Recognize when an RGMII Link is set as fixed (in the device
- tree) and set up the MAC accordingly
-
----
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 1 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 13 +++++++++++++
- 2 files changed, 14 insertions(+)
-
---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-@@ -386,6 +386,7 @@ static int __cold mac_probe(struct platf
- mac_dev->fixed_link->duplex = phy->duplex;
- mac_dev->fixed_link->pause = phy->pause;
- mac_dev->fixed_link->asym_pause = phy->asym_pause;
-+ printk(KERN_INFO "Setting up fixed link, speed %d duplex %d\n", mac_dev->fixed_link->speed, mac_dev->fixed_link->duplex);
- }
-
- _errno = mac_dev->init(mac_dev);
---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-@@ -36,6 +36,8 @@
-
- @Description FM mEMAC driver
- *//***************************************************************************/
-+#include <../../../../sdk_dpaa/mac.h>
-+#include <linux/phy_fixed.h>
-
- #include "std_ext.h"
- #include "string_ext.h"
-@@ -48,6 +50,8 @@
- #include "memac.h"
-
-
-+static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex);
-+
- /*****************************************************************************/
- /* Internal routines */
- /*****************************************************************************/
-@@ -276,11 +280,20 @@ static t_Error MemacEnable(t_Handle h_Me
- {
- t_Memac *p_Memac = (t_Memac *)h_Memac;
-
-+ struct mac_device *mac_dev = (struct mac_device *)p_Memac->h_App;
-+
- SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
- SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-
- fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-
-+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_RGMII) {
-+ if (mac_dev->fixed_link) {
-+ printk(KERN_INFO "This is a fixed-link, forcing speed %d duplex %d\n",mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ MemacAdjustLink(h_Memac,mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ }
-+ }
-+
- return E_OK;
- }
-
-From 918f966af1f0e42ff8ac298e1d7d02e67afcfab4 Mon Sep 17 00:00:00 2001
+From 71fb63c92eae3f9197e2343ed5ed3676440789e1 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:42 +0800
-Subject: [PATCH 18/40] sata: support layerscape
+Date: Wed, 17 Apr 2019 18:59:01 +0800
+Subject: [PATCH] sata: support layerscape
+
This is an integrated patch of sata for layerscape
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Peng Ma <peng.ma@nxp.com>
+Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
---
- drivers/ata/ahci_qoriq.c | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
+ drivers/ata/ahci.h | 7 ++
+ drivers/ata/ahci_qoriq.c | 168 ++++++++++++++++++++++++++++++++++++++
+ drivers/ata/libata-core.c | 3 +
+ 3 files changed, 178 insertions(+)
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -445,4 +445,11 @@ static inline int ahci_nr_ports(u32 cap)
+ return (cap & 0x1f) + 1;
+ }
+
++#ifdef CONFIG_AHCI_QORIQ
++extern void fsl_sata_errata_379364(struct ata_link *link);
++#else
++static void fsl_sata_errata_379364(struct ata_link *link)
++{}
++#endif
++
+ #endif /* _AHCI_H */
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
#define AHCI_PORT_TRANS_CFG 0x08000029
#define AHCI_PORT_AXICC_CFG 0x3fffffff
-@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ah
+@@ -49,6 +51,27 @@
+ #define ECC_DIS_ARMV8_CH2 0x80000000
+ #define ECC_DIS_LS1088A 0x40000000
+
++/* errata for lx2160 */
++#define RCWSR29_BASE 0x1E00170
++#define SERDES2_BASE 0x1EB0000
++#define DEVICE_CONFIG_REG_BASE 0x1E00000
++#define SERDES2_LNAX_RX_CR(x) (0x840 + (0x100 * (x)))
++#define SERDES2_LNAX_RX_CBR(x) (0x8C0 + (0x100 * (x)))
++#define SYS_VER_REG 0xA4
++#define LN_RX_RST 0x80000010
++#define LN_RX_RST_DONE 0x3
++#define LN_RX_MASK 0xf
++#define LX2160A_VER1 0x1
++
++#define SERDES2_LNAA 0
++#define SERDES2_LNAB 1
++#define SERDES2_LNAC 2
++#define SERDES2_LNAD 3
++#define SERDES2_LNAE 4
++#define SERDES2_LNAF 5
++#define SERDES2_LNAG 6
++#define SERDES2_LNAH 7
++
+ enum ahci_qoriq_type {
+ AHCI_LS1021A,
+ AHCI_LS1043A,
+@@ -56,6 +79,7 @@ enum ahci_qoriq_type {
+ AHCI_LS1046A,
+ AHCI_LS1088A,
+ AHCI_LS2088A,
++ AHCI_LX2160A,
+ };
+
+ struct ahci_qoriq_priv {
+@@ -72,6 +96,7 @@ static const struct of_device_id ahci_qo
+ { .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
+ { .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
+ { .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
++ { .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
+@@ -156,6 +181,138 @@ static struct scsi_host_template ahci_qo
+ AHCI_SHT(DRV_NAME),
+ };
+
++void fsl_sata_errata_379364(struct ata_link *link)
++{
++ struct ata_port *ap = link->ap;
++ struct ahci_host_priv *hpriv = ap->host->private_data;
++ struct ahci_qoriq_priv *qoriq_priv = hpriv->plat_data;
++ bool lx2160a_workaround = (qoriq_priv->type == AHCI_LX2160A);
++
++ int val = 0;
++ void __iomem *rcw_base = NULL;
++ void __iomem *serdes_base = NULL;
++ void __iomem *dev_con_base = NULL;
++
++ if (!lx2160a_workaround)
++ return;
++ else {
++ dev_con_base = ioremap(DEVICE_CONFIG_REG_BASE, PAGE_SIZE);
++ if (!dev_con_base) {
++ ata_link_err(link, "device config ioremap failed\n");
++ return;
++ }
++
++ val = (readl(dev_con_base + SYS_VER_REG) & GENMASK(7, 4)) >> 4;
++ if (val != LX2160A_VER1)
++ goto dev_unmap;
++
++ /*
++ * Add few msec delay.
++ * Check for corresponding serdes lane RST_DONE .
++ * apply lane reset.
++ */
++
++ serdes_base = ioremap(SERDES2_BASE, PAGE_SIZE);
++ if (!serdes_base) {
++ ata_link_err(link, "serdes ioremap failed\n");
++ goto dev_unmap;
++ }
++
++ rcw_base = ioremap(RCWSR29_BASE, PAGE_SIZE);
++ if (!rcw_base) {
++ ata_link_err(link, "rcw ioremap failed\n");
++ goto serdes_unmap;
++ }
++
++ ata_msleep(link->ap, 1);
++
++ val = (readl(rcw_base) & GENMASK(25, 21)) >> 21;
++
++ switch (val) {
++ case 1:
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAC)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAC));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAD)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAD));
++ break;
++
++ case 4:
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
++ break;
++
++ case 5:
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAE)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAE));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAF)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAF));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
++ break;
++
++ case 8:
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAC)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAC));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAD)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAD));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAE)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAE));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAF)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAF));
++ break;
++
++ case 12:
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAG)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAG));
++ if ((readl(serdes_base + SERDES2_LNAX_RX_CBR(SERDES2_LNAH)) &
++ LN_RX_MASK) != LN_RX_RST_DONE)
++ writel(LN_RX_RST, serdes_base +
++ SERDES2_LNAX_RX_CR(SERDES2_LNAH));
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ iounmap(rcw_base);
++serdes_unmap:
++ iounmap(serdes_base);
++dev_unmap:
++ iounmap(dev_con_base);
++}
++
++
+ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
+ {
+ struct ahci_qoriq_priv *qpriv = hpriv->plat_data;
+@@ -183,13 +340,18 @@ static int ahci_qoriq_phy_init(struct ah
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ah
+ break;
case AHCI_LS2080A:
++ case AHCI_LX2160A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ah
+@@ -201,6 +363,8 @@ static int ahci_qoriq_phy_init(struct ah
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ah
+@@ -212,6 +376,8 @@ static int ahci_qoriq_phy_init(struct ah
writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
-@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ah
+@@ -219,6 +385,8 @@ static int ahci_qoriq_phy_init(struct ah
case AHCI_LS2088A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -76,6 +76,7 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/libata.h>
+
++#include "ahci.h"
+ #include "libata.h"
+ #include "libata-transport.h"
+
+@@ -4119,6 +4120,8 @@ int sata_link_hardreset(struct ata_link
+ */
+ ata_msleep(link->ap, 1);
+
++ fsl_sata_errata_379364(link);
++
+ /* bring link back */
+ rc = sata_link_resume(link, timing, deadline);
+ if (rc)
-From 731adfb43892a1d7fe00e2036200f33a9b61a589 Mon Sep 17 00:00:00 2001
+From 5cb4bc977d933323429050033da9c701b24df43e Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:02 +0800
-Subject: [PATCH 19/40] dma: support layerscape
+Date: Wed, 17 Apr 2019 18:58:23 +0800
+Subject: [PATCH] dma: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of dma for layerscape
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
Signed-off-by: Changming Huang <jerry.huang@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Peng Ma <peng.ma@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Wen He <wen.he_1@nxp.com>
Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- .../devicetree/bindings/dma/fsl-qdma.txt | 51 +
- drivers/dma/Kconfig | 33 +-
- drivers/dma/Makefile | 3 +
- drivers/dma/caam_dma.c | 462 ++++++
- drivers/dma/dpaa2-qdma/Kconfig | 8 +
- drivers/dma/dpaa2-qdma/Makefile | 8 +
- drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 ++++++++++++
- drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++
- drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 +++
- drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++
- 12 files changed, 4267 insertions(+), 1 deletion(-)
- create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
+ drivers/dma/Kconfig | 33 +-
+ drivers/dma/Makefile | 3 +
+ drivers/dma/caam_dma.c | 462 ++++++++
+ drivers/dma/dpaa2-qdma/Kconfig | 8 +
+ drivers/dma/dpaa2-qdma/Makefile | 8 +
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 781 ++++++++++++++
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 181 ++++
+ drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++
+ drivers/dma/fsl-edma.c | 66 +-
+ drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++++++++
+ 12 files changed, 4073 insertions(+), 5 deletions(-)
create mode 100644 drivers/dma/caam_dma.c
create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
create mode 100644 drivers/dma/dpaa2-qdma/Makefile
create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
create mode 100644 drivers/dma/fsl-qdma.c
---- /dev/null
-+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
-@@ -0,0 +1,51 @@
-+* Freescale queue Direct Memory Access(qDMA) Controller
-+
-+The qDMA supports channel virtualization by allowing DMA jobs to be enqueued into
-+different command queues. Core can initiate a DMA transaction by preparing a command
-+descriptor for each DMA job and enqueuing this job to a command queue.
-+
-+* qDMA Controller
-+Required properties:
-+- compatible :
-+ should be "fsl,ls1021a-qdma".
-+- reg : Specifies base physical address(s) and size of the qDMA registers.
-+ The 1st region is qDMA control register's address and size.
-+ The 2nd region is status queue control register's address and size.
-+ The 3rd region is virtual block control register's address and size.
-+- interrupts : A list of interrupt-specifiers, one for each entry in
-+ interrupt-names.
-+- interrupt-names : Should contain:
-+ "qdma-queue0" - the block0 interrupt
-+ "qdma-queue1" - the block1 interrupt
-+ "qdma-queue2" - the block2 interrupt
-+ "qdma-queue3" - the block3 interrupt
-+ "qdma-error" - the error interrupt
-+- channels : Number of DMA channels supported
-+- block-number : the virtual block number
-+- block-offset : the offset of different virtual block
-+- queues : the number of command queue per virtual block
-+- status-sizes : status queue size of per virtual block
-+- queue-sizes : command queue size of per virtual block, the size number based on queues
-+- big-endian: If present registers and hardware scatter/gather descriptors
-+ of the qDMA are implemented in big endian mode, otherwise in little
-+ mode.
-+
-+Examples:
-+ qdma: qdma@8390000 {
-+ compatible = "fsl,ls1021a-qdma";
-+ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
-+ <0x0 0x8389000 0x0 0x1000>, /* Status regs */
-+ <0x0 0x838a000 0x0 0x2000>; /* Block regs */
-+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
-+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
-+ interrupt-names = "qdma-error",
-+ "qdma-queue0", "qdma-queue1";
-+ channels = <8>;
-+ block-number = <2>;
-+ block-offset = <0x1000>;
-+ queues = <2>;
-+ status-sizes = <64>;
-+ queue-sizes = <64 64>;
-+ big-endian;
-+ };
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -129,6 +129,24 @@ config COH901318
+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
-@@ -0,0 +1,940 @@
+@@ -0,0 +1,781 @@
+/*
+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ *
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
++#include <linux/sys_soc.h>
+
+#include "../virt-dma.h"
+
+ sizeof(struct dpaa2_fl_entry) * 3;
+
+ comp_temp->qchan = dpaa2_chan;
-+ comp_temp->sg_blk_num = 0;
-+ INIT_LIST_HEAD(&comp_temp->sg_src_head);
-+ INIT_LIST_HEAD(&comp_temp->sg_dst_head);
+ return comp_temp;
+ }
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+/* first frame list for descriptor buffer */
+static void dpaa2_qdma_populate_first_framel(
+ struct dpaa2_fl_entry *f_list,
-+ struct dpaa2_qdma_comp *dpaa2_comp)
++ struct dpaa2_qdma_comp *dpaa2_comp,
++ bool wrt_changed)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ /* source and destination descriptor */
+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
+ sdd++;
-+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */
++
++ /* dest descriptor CMD */
++ if (wrt_changed)
++ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
++ else
++ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+ /* first frame list to source descriptor */
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
++ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_fl_entry *f_list;
++ bool wrt_changed;
+ uint32_t format;
+
++ dpaa2_qdma = dpaa2_chan->qdma;
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
++ wrt_changed = dpaa2_qdma->qdma_wrtype_fixup;
+
+#ifdef LONG_FORMAT
+ format = QDMA_FD_LONG_FORMAT;
+
+#ifdef LONG_FORMAT
+ /* first frame list for descriptor buffer (logn format) */
-+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
++ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+ f_list++;
+#endif
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
-+static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
-+ struct dpaa2_qdma_comp *dpaa2_comp,
-+ struct dpaa2_qdma_chan *dpaa2_chan)
-+{
-+ struct qdma_sg_blk *sg_blk = NULL;
-+ dma_addr_t phy_sgb;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
-+ if (list_empty(&dpaa2_chan->sgb_free)) {
-+ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
-+ dpaa2_chan->sg_blk_pool,
-+ GFP_NOWAIT, &phy_sgb);
-+ if (!sg_blk) {
-+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
-+ return sg_blk;
-+ }
-+ sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
-+ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
-+ } else {
-+ sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
-+ struct qdma_sg_blk, list);
-+ list_del(&sg_blk->list);
-+ }
-+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
-+
-+ return sg_blk;
-+}
-+
-+static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
-+ struct dpaa2_qdma_chan *dpaa2_chan,
-+ struct dpaa2_qdma_comp *dpaa2_comp,
-+ struct scatterlist *dst_sg, u32 dst_nents,
-+ struct scatterlist *src_sg, u32 src_nents)
-+{
-+ struct dpaa2_qdma_sg *src_sge;
-+ struct dpaa2_qdma_sg *dst_sge;
-+ struct qdma_sg_blk *sg_blk;
-+ struct qdma_sg_blk *sg_blk_dst;
-+ dma_addr_t src;
-+ dma_addr_t dst;
-+ uint32_t num;
-+ uint32_t blocks;
-+ uint32_t len = 0;
-+ uint32_t total_len = 0;
-+ int i, j = 0;
-+
-+ num = min(dst_nents, src_nents);
-+ blocks = num / (NUM_SG_PER_BLK - 1);
-+ if (num % (NUM_SG_PER_BLK - 1))
-+ blocks += 1;
-+ if (dpaa2_comp->sg_blk_num < blocks) {
-+ len = blocks - dpaa2_comp->sg_blk_num;
-+ for (i = 0; i < len; i++) {
-+ /* source sg blocks */
-+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
-+ if (!sg_blk)
-+ return 0;
-+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
-+ /* destination sg blocks */
-+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
-+ if (!sg_blk)
-+ return 0;
-+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
-+ }
-+ } else {
-+ len = dpaa2_comp->sg_blk_num - blocks;
-+ for (i = 0; i < len; i++) {
-+ spin_lock(&dpaa2_chan->queue_lock);
-+ /* handle source sg blocks */
-+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
-+ struct qdma_sg_blk, list);
-+ list_del(&sg_blk->list);
-+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
-+ /* handle destination sg blocks */
-+ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
-+ struct qdma_sg_blk, list);
-+ list_del(&sg_blk->list);
-+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
-+ spin_unlock(&dpaa2_chan->queue_lock);
-+ }
-+ }
-+ dpaa2_comp->sg_blk_num = blocks;
-+
-+ /* get the first source sg phy address */
-+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
-+ struct qdma_sg_blk, list);
-+ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
-+ /* get the first destinaiton sg phy address */
-+ sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
-+ struct qdma_sg_blk, list);
-+ dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
-+
-+ for (i = 0; i < blocks; i++) {
-+ src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
-+ dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
-+
-+ for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
-+ len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
-+ if (0 == len)
-+ goto fetch;
-+ total_len += len;
-+ src = sg_dma_address(src_sg);
-+ dst = sg_dma_address(dst_sg);
-+
-+ /* source SG */
-+ src_sge->addr_lo = src;
-+ src_sge->addr_hi = (src >> 32);
-+ src_sge->data_len.data_len_sl0 = len;
-+ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
-+ src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-+ /* destination SG */
-+ dst_sge->addr_lo = dst;
-+ dst_sge->addr_hi = (dst >> 32);
-+ dst_sge->data_len.data_len_sl0 = len;
-+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
-+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-+fetch:
-+ num--;
-+ if (0 == num) {
-+ src_sge->ctrl.f = QDMA_SG_F;
-+ dst_sge->ctrl.f = QDMA_SG_F;
-+ goto end;
-+ }
-+ dst_sg = sg_next(dst_sg);
-+ src_sg = sg_next(src_sg);
-+ src_sge++;
-+ dst_sge++;
-+ if (j == (NUM_SG_PER_BLK - 2)) {
-+ /* for next blocks, extension */
-+ sg_blk = list_next_entry(sg_blk, list);
-+ sg_blk_dst = list_next_entry(sg_blk_dst, list);
-+ src_sge->addr_lo = sg_blk->blk_bus_addr;
-+ src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
-+ src_sge->ctrl.sl = QDMA_SG_SL_LONG;
-+ src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
-+ dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
-+ dst_sge->addr_hi =
-+ sg_blk_dst->blk_bus_addr >> 32;
-+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
-+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
-+ }
-+ }
-+ }
-+
-+end:
-+ return total_len;
-+}
-+
+static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ ppriv->nctx.desired_cpu = 1;
+ ppriv->nctx.id = ppriv->rsp_fqid;
+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
-+ err = dpaa2_io_service_register(NULL, &ppriv->nctx);
++ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+ if (err) {
+ dev_err(dev, "Notification register failed\n");
+ goto err_service;
+ return 0;
+
+err_store:
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+ ppriv--;
+ while (ppriv >= priv->ppriv) {
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv--;
+ }
+static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
++ struct device *dev = priv->dev;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ ppriv++;
+ }
+}
+ return err;
+}
+
-+static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
-+ struct list_head *head)
-+{
-+ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
-+ /* free the QDMA SG pool block */
-+ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
-+ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
-+ sgb_tmp->blk_virt_addr - 1);
-+ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
-+ - sizeof(*sgb_tmp);
-+ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
-+ sgb_tmp->blk_bus_addr);
-+ }
-+
-+}
-+
+static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ dma_pool_free(qchan->fd_pool,
+ comp_tmp->fd_virt_addr,
+ comp_tmp->fd_bus_addr);
-+ /* free the SG source block on comp */
-+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
-+ /* free the SG destination block on comp */
-+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
+ list_del(&comp_tmp->list);
+ kfree(comp_tmp);
+ }
+ qchan = &dpaa2_qdma->chans[i];
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
-+ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
+ dma_pool_destroy(qchan->fd_pool);
-+ dma_pool_destroy(qchan->sg_blk_pool);
+ }
+}
+
+ dev, FD_POOL_SIZE, 32, 0);
+ if (!dpaa2_chan->fd_pool)
+ return -1;
-+ dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
-+ dev, SG_POOL_SIZE, 32, 0);
-+ if (!dpaa2_chan->sg_blk_pool)
-+ return -1;
+
+ spin_lock_init(&dpaa2_chan->queue_lock);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
-+ INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
+ }
+ return 0;
+}
+ /* obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+ if (err) {
-+ dev_err(dev, "MC portal allocation failed\n");
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ goto err_reg;
+ }
+
++ if (soc_device_match(soc_fixup_tuning))
++ dpaa2_qdma->qdma_wrtype_fixup = true;
++ else
++ dpaa2_qdma->qdma_wrtype_fixup = false;
++
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+MODULE_LICENSE("Dual BSD/GPL");
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
-@@ -0,0 +1,227 @@
+@@ -0,0 +1,181 @@
+/* Copyright 2015 NXP Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
-+#define NUM_SG_PER_BLK 16
+
+#define QDMA_DMR_OFFSET 0x0
+#define QDMA_DQ_EN (0 << 30)
+/* Destination descriptor command write transaction type for RBP=0:
+ coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
-+
-+#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
-+#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
-+#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
-+#define QDMA_SG_SL_SHORT 0x1 /* short length */
-+#define QDMA_SG_SL_LONG 0x0 /* short length */
-+#define QDMA_SG_F 0x1 /* last sg entry */
-+struct dpaa2_qdma_sg {
-+ uint32_t addr_lo; /* address 0:31 */
-+ uint32_t addr_hi:17; /* address 32:48 */
-+ uint32_t rsv:15;
-+ union {
-+ uint32_t data_len_sl0; /* SL=0, the long format */
-+ struct {
-+ uint32_t len:17; /* SL=1, the short format */
-+ uint32_t reserve:3;
-+ uint32_t sf:1;
-+ uint32_t sr:1;
-+ uint32_t size:10; /* buff size */
-+ } data_len_sl1;
-+ } data_len; /* AVAIL_LENGTH */
-+ struct {
-+ uint32_t bpid:14;
-+ uint32_t ivp:1;
-+ uint32_t mbt:1;
-+ uint32_t offset:12;
-+ uint32_t fmt:2;
-+ uint32_t sl:1;
-+ uint32_t f:1;
-+ } ctrl;
-+} __attribute__((__packed__));
++#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE (0 << 14) /* short lengthe disabled */
+#define QMAN_FD_SL_ENABLE (1 << 14) /* short lengthe enabled */
+
-+#define QDMA_SB_FRAME (0 << 28) /* single frame */
-+#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
+#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
+
+ struct mutex dpaa2_queue_mutex;
+ spinlock_t queue_lock;
+ struct dma_pool *fd_pool;
-+ struct dma_pool *sg_blk_pool;
+
+ struct list_head comp_used;
+ struct list_head comp_free;
+
-+ struct list_head sgb_free;
-+};
-+
-+struct qdma_sg_blk {
-+ dma_addr_t blk_bus_addr;
-+ void *blk_virt_addr;
-+ struct list_head list;
+};
+
+struct dpaa2_qdma_comp {
+ dma_addr_t fd_bus_addr;
+ dma_addr_t fl_bus_addr;
+ dma_addr_t desc_bus_addr;
-+ dma_addr_t sge_src_bus_addr;
-+ dma_addr_t sge_dst_bus_addr;
+ void *fd_virt_addr;
+ void *fl_virt_addr;
+ void *desc_virt_addr;
-+ void *sg_src_virt_addr;
-+ void *sg_dst_virt_addr;
-+ struct qdma_sg_blk *sg_blk;
-+ uint32_t sg_blk_num;
-+ struct list_head sg_src_head;
-+ struct list_head sg_dst_head;
+ struct dpaa2_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+ struct dma_device dma_dev;
+ u32 n_chans;
+ struct dpaa2_qdma_chan chans[NUM_CH];
++ bool qdma_wrtype_fixup;
+
+ struct dpaa2_qdma_priv *priv;
+};
+ struct dpaa2_qdma_priv *priv;
+};
+
++static struct soc_device_attribute soc_fixup_tuning[] = {
++ { .family = "QorIQ LX2160A"},
++ { },
++};
++
+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+ sizeof(struct dpaa2_fl_entry) * 3 + \
+ sizeof(struct dpaa2_qdma_sd_d) * 2)
+
-+/* qdma_sg_blk + 16 SGs */
-+#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
-+ sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
+#endif /* __DPAA2_QDMA_H */
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpdmai.c
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
+
+#endif /* _FSL_DPDMAI_CMD_H */
+--- a/drivers/dma/fsl-edma.c
++++ b/drivers/dma/fsl-edma.c
+@@ -146,6 +146,8 @@ struct fsl_edma_slave_config {
+ u32 dev_addr;
+ u32 burst;
+ u32 attr;
++ dma_addr_t dma_dev_addr;
++ enum dma_data_direction dma_dir;
+ };
+
+ struct fsl_edma_chan {
+@@ -342,6 +344,53 @@ static int fsl_edma_resume(struct dma_ch
+ return 0;
+ }
+
++static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
++{
++ if (fsl_chan->fsc.dma_dir != DMA_NONE)
++ dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
++ fsl_chan->fsc.dma_dev_addr,
++ fsl_chan->fsc.burst, fsl_chan->fsc.dma_dir, 0);
++ fsl_chan->fsc.dma_dir = DMA_NONE;
++}
++
++static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
++ enum dma_transfer_direction dir)
++{
++ struct device *dev = fsl_chan->vchan.chan.device->dev;
++ enum dma_data_direction dma_dir;
++
++ switch (dir) {
++ case DMA_MEM_TO_DEV:
++ dma_dir = DMA_FROM_DEVICE;
++ break;
++ case DMA_DEV_TO_MEM:
++ dma_dir = DMA_TO_DEVICE;
++ break;
++ case DMA_DEV_TO_DEV:
++ dma_dir = DMA_BIDIRECTIONAL;
++ break;
++ default:
++ dma_dir = DMA_NONE;
++ break;
++ }
++
++ /* Already mapped for this config? */
++ if (fsl_chan->fsc.dma_dir == dma_dir)
++ return true;
++
++ fsl_edma_unprep_slave_dma(fsl_chan);
++ fsl_chan->fsc.dma_dev_addr = dma_map_resource(dev,
++ fsl_chan->fsc.dev_addr,
++ fsl_chan->fsc.burst,
++ dma_dir, 0);
++ if (dma_mapping_error(dev, fsl_chan->fsc.dma_dev_addr))
++ return false;
++
++ fsl_chan->fsc.dma_dir = dma_dir;
++
++ return true;
++}
++
+ static int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+ {
+@@ -361,6 +410,7 @@ static int fsl_edma_slave_config(struct
+ } else {
+ return -EINVAL;
+ }
++ fsl_edma_unprep_slave_dma(fsl_chan);
+ return 0;
+ }
+
+@@ -553,6 +603,9 @@ static struct dma_async_tx_descriptor *f
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
++ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
++ return NULL;
++
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+@@ -572,11 +625,11 @@ static struct dma_async_tx_descriptor *f
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+- dst_addr = fsl_chan->fsc.dev_addr;
++ dst_addr = fsl_chan->fsc.dma_dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+- src_addr = fsl_chan->fsc.dev_addr;
++ src_addr = fsl_chan->fsc.dma_dev_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+@@ -606,6 +659,9 @@ static struct dma_async_tx_descriptor *f
+ if (!is_slave_direction(fsl_chan->fsc.dir))
+ return NULL;
+
++ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
++ return NULL;
++
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+@@ -618,11 +674,11 @@ static struct dma_async_tx_descriptor *f
+
+ if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+- dst_addr = fsl_chan->fsc.dev_addr;
++ dst_addr = fsl_chan->fsc.dma_dev_addr;
+ soff = fsl_chan->fsc.addr_width;
+ doff = 0;
+ } else {
+- src_addr = fsl_chan->fsc.dev_addr;
++ src_addr = fsl_chan->fsc.dma_dev_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->fsc.addr_width;
+@@ -802,6 +858,7 @@ static void fsl_edma_free_chan_resources
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
++ fsl_edma_unprep_slave_dma(fsl_chan);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+@@ -937,6 +994,7 @@ static int fsl_edma_probe(struct platfor
+ fsl_chan->slave_id = 0;
+ fsl_chan->idle = true;
+ fsl_chan->vchan.desc_free = fsl_edma_free_desc;
++ fsl_chan->fsc.dma_dir = DMA_NONE;
+ vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+ edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
--- /dev/null
+++ b/drivers/dma/fsl-qdma.c
@@ -0,0 +1,1278 @@
-From d586effc9b71ddf240fb294b1ab1205bbe6fec4b Mon Sep 17 00:00:00 2001
+From 0f31298eb0a9b2cd7990b709ff18229fadfa474b Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:32 +0800
-Subject: [PATCH 20/40] flextimer: support layerscape
+Date: Wed, 17 Apr 2019 18:58:38 +0800
+Subject: [PATCH] flextimer: support layerscape
+
This is an integrated patch of flextimer for layerscape
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Meng Yi <meng.yi@nxp.com>
Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
.../bindings/soc/fsl/layerscape/ftm-alarm.txt | 32 ++
drivers/clocksource/fsl_ftm_timer.c | 8 +-
- drivers/soc/fsl/layerscape/ftm_alarm.c | 375 ++++++++++++++++++
- 3 files changed, 411 insertions(+), 4 deletions(-)
+ drivers/soc/fsl/layerscape/ftm_alarm.c | 382 ++++++++++++++++++
+ 3 files changed, 418 insertions(+), 4 deletions(-)
create mode 100644 Documentation/devicetree/bindings/soc/fsl/layerscape/ftm-alarm.txt
create mode 100644 drivers/soc/fsl/layerscape/ftm_alarm.c
static inline void ftm_irq_enable(void __iomem *base)
--- /dev/null
+++ b/drivers/soc/fsl/layerscape/ftm_alarm.c
-@@ -0,0 +1,375 @@
+@@ -0,0 +1,382 @@
+/*
+ * Freescale FlexTimer Module (FTM) Alarm driver.
+ *
+ .flextimer_set_bit = 0x4000,
+};
+
++static struct rcpm_cfg lx2160a_rcpm_cfg = {
++ .big_endian = LITTLE_ENDIAN,
++ .flextimer_set_bit = 0x4000,
++};
++
+static const struct of_device_id ippdexpcr_of_match[] = {
+ { .compatible = "fsl,ls1012a-ftm-alarm", .data = &ls1012a_rcpm_cfg},
+ { .compatible = "fsl,ls1021a-ftm-alarm", .data = &ls1021a_rcpm_cfg},
+ { .compatible = "fsl,ls1046a-ftm-alarm", .data = &ls1046a_rcpm_cfg},
+ { .compatible = "fsl,ls1088a-ftm-alarm", .data = &ls1088a_rcpm_cfg},
+ { .compatible = "fsl,ls208xa-ftm-alarm", .data = &ls208xa_rcpm_cfg},
++ { .compatible = "fsl,lx2160a-ftm-alarm", .data = &lx2160a_rcpm_cfg},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ippdexpcr_of_match);
+ { .compatible = "fsl,ls1046a-ftm-alarm", },
+ { .compatible = "fsl,ls1088a-ftm-alarm", },
+ { .compatible = "fsl,ls208xa-ftm-alarm", },
++ { .compatible = "fsl,lx2160a-ftm-alarm", },
+ { .compatible = "fsl,ftm-timer", },
+ { },
+};
-From 4f22b58a2f809aff55aa9321c9100b0caf3b6694 Mon Sep 17 00:00:00 2001
+From 3f7d59061c38287bdc2fec2e94b4df9e6e62dbc6 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:36 +0800
-Subject: [PATCH 21/40] i2c: support layerscape
+Date: Wed, 17 Apr 2019 18:58:39 +0800
+Subject: [PATCH] i2c: support layerscape
+
This is an integrated patch of i2c for layerscape
-Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
---
- drivers/i2c/busses/i2c-imx.c | 193 ++++++++++++++++++++++++++++
- drivers/i2c/muxes/i2c-mux-pca954x.c | 44 ++++++-
- 2 files changed, 236 insertions(+), 1 deletion(-)
+ drivers/i2c/busses/i2c-imx.c | 245 +++++++++++++++++++++++++---
+ drivers/i2c/muxes/i2c-mux-pca954x.c | 44 ++++-
+ 2 files changed, 268 insertions(+), 21 deletions(-)
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
-@@ -878,6 +937,78 @@ static int i2c_imx_read(struct imx_i2c_s
+@@ -281,8 +340,8 @@ static inline unsigned char imx_i2c_read
+ }
+
+ /* Functions for DMA support */
+-static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+- dma_addr_t phy_addr)
++static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
++ dma_addr_t phy_addr)
+ {
+ struct imx_i2c_dma *dma;
+ struct dma_slave_config dma_sconfig;
+@@ -291,11 +350,13 @@ static void i2c_imx_dma_request(struct i
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+- return;
++ return -ENOMEM;
+
+- dma->chan_tx = dma_request_slave_channel(dev, "tx");
+- if (!dma->chan_tx) {
+- dev_dbg(dev, "can't request DMA tx channel\n");
++ dma->chan_tx = dma_request_chan(dev, "tx");
++ if (IS_ERR(dma->chan_tx)) {
++ ret = PTR_ERR(dma->chan_tx);
++ if (ret != -ENODEV && ret != -EPROBE_DEFER)
++ dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
+ goto fail_al;
+ }
+
+@@ -306,13 +367,15 @@ static void i2c_imx_dma_request(struct i
+ dma_sconfig.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig);
+ if (ret < 0) {
+- dev_dbg(dev, "can't configure tx channel\n");
++ dev_err(dev, "can't configure tx channel (%d)\n", ret);
+ goto fail_tx;
+ }
+
+- dma->chan_rx = dma_request_slave_channel(dev, "rx");
+- if (!dma->chan_rx) {
+- dev_dbg(dev, "can't request DMA rx channel\n");
++ dma->chan_rx = dma_request_chan(dev, "rx");
++ if (IS_ERR(dma->chan_rx)) {
++ ret = PTR_ERR(dma->chan_rx);
++ if (ret != -ENODEV && ret != -EPROBE_DEFER)
++ dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
+ goto fail_tx;
+ }
+
+@@ -323,7 +386,7 @@ static void i2c_imx_dma_request(struct i
+ dma_sconfig.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig);
+ if (ret < 0) {
+- dev_dbg(dev, "can't configure rx channel\n");
++ dev_err(dev, "can't configure rx channel (%d)\n", ret);
+ goto fail_rx;
+ }
+
+@@ -332,7 +395,7 @@ static void i2c_imx_dma_request(struct i
+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+- return;
++ return 0;
+
+ fail_rx:
+ dma_release_channel(dma->chan_rx);
+@@ -340,7 +403,8 @@ fail_tx:
+ dma_release_channel(dma->chan_tx);
+ fail_al:
+ devm_kfree(dev, dma);
+- dev_info(dev, "can't use DMA, using PIO instead.\n");
++ /* return successfully if there is no dma support */
++ return ret == -ENODEV ? 0 : ret;
+ }
+
+ static void i2c_imx_dma_callback(void *arg)
+@@ -878,6 +942,78 @@ static int i2c_imx_read(struct imx_i2c_s
return 0;
}
static int i2c_imx_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
-@@ -888,6 +1019,19 @@ static int i2c_imx_xfer(struct i2c_adapt
+@@ -888,6 +1024,19 @@ static int i2c_imx_xfer(struct i2c_adapt
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
if (result < 0)
goto out;
-@@ -1030,6 +1174,50 @@ static int i2c_imx_init_recovery_info(st
+@@ -1030,6 +1179,50 @@ static int i2c_imx_init_recovery_info(st
return 0;
}
static u32 i2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
-@@ -1085,6 +1273,11 @@ static int i2c_imx_probe(struct platform
+@@ -1085,6 +1278,11 @@ static int i2c_imx_probe(struct platform
i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
i2c_imx->base = base;
/* Get I2C clock */
i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_imx->clk)) {
+@@ -1103,7 +1301,8 @@ static int i2c_imx_probe(struct platform
+ pdev->name, i2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+- goto clk_disable;
++ clk_disable_unprepare(i2c_imx->clk);
++ return ret;
+ }
+
+ /* Init queue */
+@@ -1150,25 +1349,31 @@ static int i2c_imx_probe(struct platform
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
++ /* Init DMA config if supported */
++ ret = i2c_imx_dma_request(i2c_imx, phy_addr);
++ if (ret) {
++ if (ret != -EPROBE_DEFER)
++ dev_info(&pdev->dev, "can't use DMA, using PIO instead.\n");
++ else
++ goto del_adapter;
++ }
++
+ dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
+ dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
+ dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
+ i2c_imx->adapter.name);
+- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+-
+- /* Init DMA config if supported */
+- i2c_imx_dma_request(i2c_imx, phy_addr);
+
++ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+ return 0; /* Return OK */
+
++del_adapter:
++ i2c_del_adapter(&i2c_imx->adapter);
+ rpm_disable:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+-clk_disable:
+- clk_disable_unprepare(i2c_imx->clk);
+ return ret;
+ }
+
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -85,6 +85,7 @@ struct pca954x {
-From cfa7e6ed5a6ba529097ae8a50ed2c8fa12b4cad0 Mon Sep 17 00:00:00 2001
+From f4e3e2cf6484056225385d717da4e9c4f8613935 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:13 +0800
-Subject: [PATCH 22/40] qe: support layerscape
+Date: Wed, 17 Apr 2019 18:58:58 +0800
+Subject: [PATCH] qe: support layerscape
+
This is an integrated patch of qe for layerscape
-Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
---
.../fsl/qe/qe_ic.c => irqchip/irq-qeic.c} | 389 +++++++++++-------
drivers/soc/fsl/qe/Kconfig | 2 +-
drivers/soc/fsl/qe/Makefile | 2 +-
- drivers/soc/fsl/qe/qe.c | 78 ++--
+ drivers/soc/fsl/qe/qe.c | 80 ++--
drivers/soc/fsl/qe/qe_ic.h | 103 -----
drivers/soc/fsl/qe/qe_io.c | 42 +-
drivers/soc/fsl/qe/qe_tdm.c | 8 +-
drivers/tty/serial/ucc_uart.c | 1 +
include/soc/fsl/qe/qe.h | 1 -
include/soc/fsl/qe/qe_ic.h | 139 -------
- 12 files changed, 357 insertions(+), 492 deletions(-)
+ 12 files changed, 359 insertions(+), 492 deletions(-)
rename drivers/{soc/fsl/qe/qe_ic.c => irqchip/irq-qeic.c} (54%)
delete mode 100644 drivers/soc/fsl/qe/qe_ic.h
delete mode 100644 include/soc/fsl/qe/qe_ic.h
of_node_put(qe);
-@@ -236,7 +254,7 @@ int qe_setbrg(enum qe_clock brg, unsigne
+@@ -229,14 +247,16 @@ int qe_setbrg(enum qe_clock brg, unsigne
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
++#ifdef CONFIG_PPC
+ if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
++#endif
+
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
return 0;
}
-@@ -370,9 +388,9 @@ static int qe_sdma_init(void)
+@@ -370,9 +390,9 @@ static int qe_sdma_init(void)
return -ENOMEM;
}
return 0;
}
-@@ -410,14 +428,14 @@ static void qe_upload_microcode(const vo
+@@ -410,14 +430,14 @@ static void qe_upload_microcode(const vo
"uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
}
/*
-@@ -502,7 +520,7 @@ int qe_upload_firmware(const struct qe_f
+@@ -502,7 +522,7 @@ int qe_upload_firmware(const struct qe_f
* If the microcode calls for it, split the I-RAM.
*/
if (!firmware->split)
if (firmware->soc.model)
printk(KERN_INFO
-@@ -536,11 +554,11 @@ int qe_upload_firmware(const struct qe_f
+@@ -536,11 +556,11 @@ int qe_upload_firmware(const struct qe_f
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
}
qe_firmware_uploaded = 1;
-@@ -659,9 +677,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
+@@ -659,9 +679,9 @@ EXPORT_SYMBOL(qe_get_num_of_risc);
unsigned int qe_get_num_of_snums(void)
{
struct device_node *qe;
num_of_snums = 28; /* The default number of snum for threads is 28 */
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
-@@ -675,9 +693,9 @@ unsigned int qe_get_num_of_snums(void)
+@@ -675,9 +695,9 @@ unsigned int qe_get_num_of_snums(void)
return num_of_snums;
}
-From 01b1b2989e907305d8b885468c2743f5e35e1b9a Mon Sep 17 00:00:00 2001
+From ca86ebf3fddbdfa8aecc4b887ef059948ee79621 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Thu, 13 Dec 2018 11:15:15 +0800
+Date: Wed, 17 Apr 2019 18:59:08 +0800
Subject: [PATCH] usb: support layerscape
This is an integrated patch of usb for layerscape
+Signed-off-by: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Changming Huang <jerry.huang@nxp.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Li Yang <leoli@freescale.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Nikhil Badola <nikhil.badola@freescale.com>
Signed-off-by: Rajesh Bhagat <rajesh.bhagat@nxp.com>
Signed-off-by: Ramneek Mehresh <ramneek.mehresh@freescale.com>
Signed-off-by: Roger Quadros <rogerq@ti.com>
Signed-off-by: Shengzhou Liu <Shengzhou.Liu@freescale.com>
Signed-off-by: Suresh Gupta <suresh.gupta@freescale.com>
-Signed-off-by: yinbo.zhu <yinbo.zhu@nxp.com>
-Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
+Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
---
- .../devicetree/bindings/usb/dwc3.txt | 2 +
- arch/arm64/include/asm/io.h | 28 ++
- drivers/usb/common/common.c | 50 ++++
- drivers/usb/core/usb.c | 1 +
- drivers/usb/dwc3/core.c | 104 +++++++
- drivers/usb/dwc3/core.h | 44 +++
- drivers/usb/dwc3/ep0.c | 4 +-
- drivers/usb/dwc3/gadget.c | 7 +
- drivers/usb/dwc3/host.c | 9 +
- drivers/usb/gadget/udc/fsl_udc_core.c | 46 +--
- drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
- drivers/usb/host/Kconfig | 2 +-
- drivers/usb/host/ehci-fsl.c | 276 ++++++++++++++++--
- drivers/usb/host/ehci-fsl.h | 3 +
- drivers/usb/host/ehci-hub.c | 2 +
- drivers/usb/host/ehci.h | 3 +
- drivers/usb/host/fsl-mph-dr-of.c | 11 +
- drivers/usb/host/xhci-hub.c | 22 ++
- drivers/usb/host/xhci-plat.c | 16 +-
- drivers/usb/host/xhci-ring.c | 28 +-
- drivers/usb/host/xhci.c | 39 ++-
- drivers/usb/host/xhci.h | 6 +-
- drivers/usb/phy/phy-fsl-usb.c | 59 +++-
- drivers/usb/phy/phy-fsl-usb.h | 8 +
- include/linux/usb.h | 1 +
- include/linux/usb/of.h | 2 +
- 26 files changed, 704 insertions(+), 85 deletions(-)
+ arch/arm64/include/asm/io.h | 28 +++
+ drivers/usb/common/common.c | 50 +++++
+ drivers/usb/core/usb.c | 1 +
+ drivers/usb/dwc3/core.c | 167 ++++++++++++++++
+ drivers/usb/dwc3/core.h | 58 ++++++
+ drivers/usb/dwc3/ep0.c | 4 +-
+ drivers/usb/dwc3/gadget.c | 7 +
+ drivers/usb/dwc3/host.c | 9 +
+ drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++--
+ drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
+ drivers/usb/host/Kconfig | 2 +-
+ drivers/usb/host/ehci-fsl.c | 276 +++++++++++++++++++++++---
+ drivers/usb/host/ehci-fsl.h | 3 +
+ drivers/usb/host/ehci-hub.c | 2 +
+ drivers/usb/host/ehci.h | 3 +
+ drivers/usb/host/fsl-mph-dr-of.c | 11 +
+ drivers/usb/host/xhci-hub.c | 22 ++
+ drivers/usb/host/xhci-plat.c | 16 +-
+ drivers/usb/host/xhci-ring.c | 28 ++-
+ drivers/usb/host/xhci.c | 37 +++-
+ drivers/usb/host/xhci.h | 10 +-
+ drivers/usb/phy/phy-fsl-usb.c | 59 ++++--
+ drivers/usb/phy/phy-fsl-usb.h | 8 +
+ include/linux/usb.h | 1 +
+ include/linux/usb/of.h | 2 +
+ 25 files changed, 780 insertions(+), 86 deletions(-)
---- a/Documentation/devicetree/bindings/usb/dwc3.txt
-+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
-@@ -47,6 +47,8 @@ Optional properties:
- from P0 to P1/P2/P3 without delay.
- - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
- during HS transmit.
-+ - snps,disable_devinit_u1u2: when set, disable device-initiated U1/U2
-+ LPM request in USB device mode.
- - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
- utmi_l1_suspend_n, false when asserts utmi_sleep_n
- - snps,hird-threshold: HIRD threshold
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -210,6 +210,34 @@ extern void __iomem *ioremap_cache(phys_
atomic_set(&dev->urbnum, 0);
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
-@@ -766,6 +766,96 @@ static void dwc3_core_setup_global_contr
+@@ -103,6 +103,41 @@ static int dwc3_get_dr_mode(struct dwc3
+ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+ static int dwc3_event_buffers_setup(struct dwc3 *dwc);
+
++/*
++ * dwc3_power_off_all_roothub_ports - Power off all Root hub ports
++ * @dwc: Pointer to our controller context structure
++ */
++static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
++{
++ int i, port_num;
++ u32 reg, op_regs_base, offset;
++ void __iomem *xhci_regs;
++
++ /* xhci regs is not mapped yet, do it temporarily here */
++ if (dwc->xhci_resources[0].start) {
++ xhci_regs = ioremap(dwc->xhci_resources[0].start,
++ DWC3_XHCI_REGS_END);
++ if (IS_ERR(xhci_regs)) {
++ dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
++ return;
++ }
++
++ op_regs_base = HC_LENGTH(readl(xhci_regs));
++ reg = readl(xhci_regs + XHCI_HCSPARAMS1);
++ port_num = HCS_MAX_PORTS(reg);
++
++ for (i = 1; i <= port_num; i++) {
++ offset = op_regs_base + XHCI_PORTSC_BASE + 0x10*(i-1);
++ reg = readl(xhci_regs + offset);
++ reg &= ~PORT_POWER;
++ writel(reg, xhci_regs + offset);
++ }
++
++ iounmap(xhci_regs);
++ } else
++ dev_err(dwc->dev, "xhci base reg invalid\n");
++}
++
+ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ {
+ u32 reg;
+@@ -111,6 +146,15 @@ static void dwc3_set_prtcap(struct dwc3
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++
++ /*
++ * We have to power off all Root hub ports immediately after DWC3 is
++ * set to host mode to avoid a VBUS glitch when xhci gets reset later.
++ */
++ if (dwc->host_vbus_glitches) {
++ if (mode == DWC3_GCTL_PRTCAP_HOST)
++ dwc3_power_off_all_roothub_ports(dwc);
++ }
+ }
+
+ static void __dwc3_set_mode(struct work_struct *work)
+@@ -766,6 +810,96 @@ static void dwc3_core_setup_global_contr
static int dwc3_core_get_phy(struct dwc3 *dwc);
static int dwc3_core_ulpi_init(struct dwc3 *dwc);
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
-@@ -828,6 +918,8 @@ static int dwc3_core_init(struct dwc3 *d
+@@ -828,6 +962,8 @@ static int dwc3_core_init(struct dwc3 *d
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
ret = phy_power_on(dwc->usb2_generic_phy);
-@@ -1074,6 +1166,8 @@ static void dwc3_get_properties(struct d
+@@ -871,6 +1007,22 @@ static int dwc3_core_init(struct dwc3 *d
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
++ if (dwc->dr_mode == USB_DR_MODE_HOST ||
++ dwc->dr_mode == USB_DR_MODE_OTG) {
++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
++
++ /*
++ * Enable Auto retry Feature to make the controller operating in
++ * Host mode on seeing transaction errors(CRC errors or internal
++ * overrun scenarios) on IN transfers to reply to the device
++ * with a non-terminating retry ACK (i.e., an ACK transaction
++ * packet with Retry=1 & Nump != 0)
++ */
++ reg |= DWC3_GUCTL_HSTINAUTORETRY;
++
++ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
++ }
++
+ return 0;
+
+ err4:
+@@ -1074,6 +1226,8 @@ static void dwc3_get_properties(struct d
&hird_threshold);
dwc->usb3_lpm_capable = device_property_read_bool(dev,
"snps,usb3_lpm_capable");
dwc->disable_scramble_quirk = device_property_read_bool(dev,
"snps,disable_scramble_quirk");
-@@ -1106,8 +1200,16 @@ static void dwc3_get_properties(struct d
+@@ -1106,8 +1260,16 @@ static void dwc3_get_properties(struct d
dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
"snps,dis-tx-ipgap-linecheck-quirk");
device_property_read_u8(dev, "snps,tx_de_emphasis",
&tx_de_emphasis);
device_property_read_string(dev, "snps,hsphy_interface",
-@@ -1365,12 +1467,14 @@ static int dwc3_resume_common(struct dwc
+@@ -1115,6 +1277,9 @@ static void dwc3_get_properties(struct d
+ device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
+ &dwc->fladj);
+
++ dwc->host_vbus_glitches = device_property_read_bool(dev,
++ "snps,host-vbus-glitches");
++
+ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ dwc->tx_de_emphasis = tx_de_emphasis;
+
+@@ -1365,12 +1530,14 @@ static int dwc3_resume_common(struct dwc
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
/* Global Debug Queue/FIFO Space Available Register */
#define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
-@@ -788,6 +814,7 @@ struct dwc3_scratchpad_array {
+@@ -205,6 +231,9 @@
+ #define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
+ #define DWC3_GCTL_DSBLCLKGTNG BIT(0)
+
++/* Global User Control Register */
++#define DWC3_GUCTL_HSTINAUTORETRY BIT(14)
++
+ /* Global User Control 1 Register */
+ #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
+ #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
+@@ -477,6 +506,14 @@
+ #define DWC3_DEV_IMOD_INTERVAL_SHIFT 0
+ #define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0)
+
++/* Partial XHCI Register and Bit fields for quirk */
++#define XHCI_HCSPARAMS1 0x4
++#define XHCI_PORTSC_BASE 0x400
++#define PORT_POWER (1 << 9)
++#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
++#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
++#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
++
+ /* Structures */
+
+ struct dwc3_trb;
+@@ -788,6 +825,7 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
* @irq_gadget: peripheral controller's IRQ number
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
-@@ -843,6 +870,7 @@ struct dwc3_scratchpad_array {
+@@ -843,6 +881,7 @@ struct dwc3_scratchpad_array {
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @three_stage_setup: set if we perform a three phase setup
* @usb3_lpm_capable: set if hadrware supports Link Power Management
* @disable_scramble_quirk: set if we enable the disable scramble quirk
* @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
* @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
-@@ -869,6 +897,11 @@ struct dwc3_scratchpad_array {
+@@ -869,6 +908,13 @@ struct dwc3_scratchpad_array {
* 1 - -3.5dB de-emphasis
* 2 - No de-emphasis
* 3 - Reserved
+ * @quirk_stop_transfer_in_block: prevent block transmission from being
+ * interrupted
+ * @quirk_stop_ep_in_u1: replace stop command with disable slot command
++ * @host_vbus_glitches: set to avoid vbus glitch during
++ * xhci reset.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
*/
-@@ -921,6 +954,12 @@ struct dwc3 {
+@@ -921,6 +967,12 @@ struct dwc3 {
enum usb_phy_interface hsphy_mode;
u32 fladj;
u32 irq_gadget;
u32 nr_scratch;
u32 u1u2;
-@@ -1005,6 +1044,7 @@ struct dwc3 {
+@@ -1005,6 +1057,7 @@ struct dwc3 {
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
unsigned disable_scramble_quirk:1;
unsigned u2exit_lfps_quirk:1;
-@@ -1024,6 +1064,10 @@ struct dwc3 {
+@@ -1024,6 +1077,11 @@ struct dwc3 {
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+ unsigned quirk_reverse_in_out:1;
+ unsigned quirk_stop_transfer_in_block:1;
+ unsigned quirk_stop_ep_in_u1:1;
++ unsigned host_vbus_glitches:1;
u16 imod_interval;
};
}
done:
spin_unlock_irqrestore(&xhci->lock, flags);
-@@ -4988,7 +5013,7 @@ int xhci_gen_setup(struct usb_hcd *hcd,
- return retval;
- xhci_dbg(xhci, "Called HCD init\n");
-
-- xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
-+ xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%llx\n",
- xhci->hcc_params, xhci->hci_version, xhci->quirks);
-
- return 0;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1794,7 +1794,7 @@ struct xhci_hcd {
#define XHCI_STATE_HALTED (1 << 1)
#define XHCI_STATE_REMOVING (1 << 2)
- unsigned long long quirks;
-+ u64 quirks;
++ unsigned long long quirks;
#define XHCI_LINK_TRB_QUIRK BIT_ULL(0)
#define XHCI_RESET_EP_QUIRK BIT_ULL(1)
#define XHCI_NEC_HOST BIT_ULL(2)
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED BIT_ULL(25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 BIT_ULL(26)
-@@ -1840,6 +1843,7 @@ struct xhci_hcd {
+@@ -1838,8 +1841,9 @@ struct xhci_hcd {
+ #define XHCI_HW_LPM_DISABLE BIT_ULL(29)
+ #define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
- #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
- #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
-+#define XHCI_DIS_U1U2_WHEN_U3 BIT(36)
+-#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
+-#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
++#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(35)
++#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(36)
++#define XHCI_DIS_U1U2_WHEN_U3 BIT_ULL(37)
unsigned int num_active_eps;
unsigned int limit_active_eps;
-From 92f0ef51270b2961f63b2e985831f5e9a6251a2f Mon Sep 17 00:00:00 2001
+From 03ce521cd071706f755e3d2304ab1b8c47fd4910 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:29:03 +0800
-Subject: [PATCH 25/40] vfio: support layerscape
+Date: Wed, 17 Apr 2019 18:59:09 +0800
+Subject: [PATCH] vfio: support layerscape
+
This is an integrated patch of vfio for layerscape
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
drivers/vfio/Kconfig | 1 +
drivers/vfio/Makefile | 1 +
drivers/vfio/fsl-mc/Kconfig | 9 +
drivers/vfio/fsl-mc/Makefile | 2 +
- drivers/vfio/fsl-mc/vfio_fsl_mc.c | 751 ++++++++++++++++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc.c | 759 ++++++++++++++++++++++
drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++
- drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 ++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 57 ++
include/uapi/linux/vfio.h | 1 +
- 8 files changed, 1019 insertions(+)
+ 8 files changed, 1029 insertions(+)
create mode 100644 drivers/vfio/fsl-mc/Kconfig
create mode 100644 drivers/vfio/fsl-mc/Makefile
create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
+obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
-@@ -0,0 +1,751 @@
+@@ -0,0 +1,759 @@
+/*
+ * Freescale Management Complex (MC) device passthrough using VFIO
+ *
+ if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
+ vdev->regions[i].type |=
+ VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
++ if (mc_dev->regions[i].flags & IORESOURCE_MEM)
++ vdev->regions[i].type |=
++ VFIO_FSL_MC_REGION_TYPE_SHAREABLE;
++
+ vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
+ if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
+ u64 header;
+ struct mc_cmd_header *resp_hdr;
+
-+ __iormb();
-+ header = readq(ioaddr);
-+ __iormb();
++ header = cpu_to_le64(readq_relaxed(ioaddr));
+
+ resp_hdr = (struct mc_cmd_header *)&header;
+ status = (enum mc_cmd_status)resp_hdr->status;
+{
+ int i;
+
-+ /* Write at command header in the end */
-+ for (i = 7; i >= 0; i--)
-+ writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
++ /* Write command parameters into the portal */
++ for (i = 7; i >= 1; i--)
++ writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
++
++ /* Write the command header at the end */
++ writeq(cmd_data[0], ioaddr);
+
+ /* Wait for response before returning to user-space
+ * This can be optimized in future to even prepare response
+ * cache inhibited area of the portal to avoid coherency issues
+ * if a user migrates to another core.
+ */
-+ if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
-+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
-+ else
++ if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE) {
++ if (region.type & VFIO_FSL_MC_REGION_TYPE_SHAREABLE)
++ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
++ else
++ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
++ } else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
+}
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,57 @@
+/*
+ * Freescale Management Complex VFIO private declarations
+ *
+ u32 flags;
+#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
+#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
++#define VFIO_FSL_MC_REGION_TYPE_SHAREABLE 4
++
+ u32 type;
+ u64 addr;
+ resource_size_t size;
-From d94f8863307c0f7fb7aeb2084cc666c47991d78b Mon Sep 17 00:00:00 2001
+From a00c035c7b82f51716a1a30637b1bd276dee3c5a Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Mon, 19 Nov 2018 10:26:57 +0800
+Date: Wed, 17 Apr 2019 18:58:17 +0800
Subject: [PATCH] clock: support layerscape
+
This is an integrated patch of clock for layerscape
-Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
+Signed-off-by: Vabhav Sharma <vabhav.sharma@nxp.com>
+Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
---
- drivers/clk/clk-qoriq.c | 9 ++++++++-
- 1 file changed, 8 insertions(+), 1 deletion(-)
+ drivers/clk/clk-qoriq.c | 25 ++++++++++++++++++++++---
+ drivers/cpufreq/qoriq-cpufreq.c | 1 +
+ 2 files changed, 23 insertions(+), 3 deletions(-)
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
};
#define CLKSEL_VALID 1
-@@ -1127,6 +1127,13 @@ static void __init create_one_pll(struct
+@@ -79,7 +79,7 @@ struct clockgen_chipinfo {
+ const struct clockgen_muxinfo *cmux_groups[2];
+ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
+ void (*init_periph)(struct clockgen *cg);
+- int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
++ int cmux_to_group[NUM_CMUX+1]; /* array should be -1 terminated */
+ u32 pll_mask; /* 1 << n bit set if PLL n is valid */
+ u32 flags; /* CG_xxx */
+ };
+@@ -570,6 +570,17 @@ static const struct clockgen_chipinfo ch
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+ },
+ {
++ .compat = "fsl,lx2160a-clockgen",
++ .cmux_groups = {
++ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
++ },
++ .cmux_to_group = {
++ 0, 0, 0, 0, 1, 1, 1, 1, -1
++ },
++ .pll_mask = 0x37,
++ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
++ },
++ {
+ .compat = "fsl,p2041-clockgen",
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p2041_init_periph,
+@@ -601,7 +612,7 @@ static const struct clockgen_chipinfo ch
+ &p4080_cmux_grp1, &p4080_cmux_grp2
+ },
+ .cmux_to_group = {
+- 0, 0, 0, 0, 1, 1, 1, 1
++ 0, 0, 0, 0, 1, 1, 1, 1, -1
+ },
+ .pll_mask = 0x1f,
+ },
+@@ -1127,6 +1138,13 @@ static void __init create_one_pll(struct
struct clk *clk;
int ret;
snprintf(pll->div[i].name, sizeof(pll->div[i].name),
"cg-pll%d-div%d", idx, i + 1);
+@@ -1417,6 +1435,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "
+ CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
+ CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
++CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
+
+ /* Legacy nodes */
+ CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+--- a/drivers/cpufreq/qoriq-cpufreq.c
++++ b/drivers/cpufreq/qoriq-cpufreq.c
+@@ -320,6 +320,7 @@ static const struct of_device_id node_ma
+ { .compatible = "fsl,ls1046a-clockgen", },
+ { .compatible = "fsl,ls1088a-clockgen", },
+ { .compatible = "fsl,ls2080a-clockgen", },
++ { .compatible = "fsl,lx2160a-clockgen", },
+ { .compatible = "fsl,p4080-clockgen", },
+ { .compatible = "fsl,qoriq-clockgen-1.0", },
+ { .compatible = "fsl,qoriq-clockgen-2.0", },
-From 31d0f8f19246c9a2fbecb5ca0a03ef6bb70eee2d Mon Sep 17 00:00:00 2001
+From 9875df1e872eb2b0f9d2d72c9a761a5f03400d9f Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Fri, 9 Nov 2018 19:32:53 +0800
+Date: Fri, 19 Apr 2019 13:23:01 +0800
Subject: [PATCH] flexspi: support layerscape
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
This is an integrated patch of FlexSPI support for Layerscape
-Signed-off-by: Alistair Strachan <astrachan@google.com>
-Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
-Signed-off-by: Brandon Maier <brandon.maier@rockwellcollins.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Joe Thornber <ejt@redhat.com>
-Signed-off-by: Kirill Kapranov <kirill.kapranov@compulab.co.il>
-Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
-Signed-off-by: Marcel Ziswiler <marcel.ziswiler@toradex.com>
-Signed-off-by: Mark Brown <broonie@kernel.org>
-Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-Signed-off-by: Peng Li <lipeng321@huawei.com>
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
-Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
-Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Signed-off-by: Ashish Kumar <Ashish.Kumar@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Rajat Srivastava <rajat.srivastava@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
---
- drivers/md/dm-thin.c | 13 +++++++++++++
- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 7 +++++++
- drivers/net/phy/xilinx_gmii2rgmii.c | 5 +++++
- drivers/spi/spi-tegra20-slink.c | 18 ++++++++++++++++++
- drivers/staging/android/ashmem.c | 6 ++++++
- drivers/tty/serial/imx.c | 8 ++++++++
- kernel/events/core.c | 6 ++++++
- sound/soc/soc-dapm.c | 7 +++++++
- 8 files changed, 70 insertions(+)
+ .../devicetree/bindings/mtd/nxp-flexspi.txt | 41 +
+ drivers/mtd/spi-nor/Kconfig | 10 +
+ drivers/mtd/spi-nor/Makefile | 1 +
+ drivers/mtd/spi-nor/nxp-flexspi.c | 1404 +++++++++++++++++
+ drivers/mtd/spi-nor/spi-nor.c | 13 +-
+ include/linux/mtd/cfi.h | 1 +
+ include/linux/mtd/spi-nor.h | 3 +-
+ 7 files changed, 1470 insertions(+), 3 deletions(-)
+ create mode 100644 Documentation/devicetree/bindings/mtd/nxp-flexspi.txt
+ create mode 100644 drivers/mtd/spi-nor/nxp-flexspi.c
---- a/drivers/md/dm-thin.c
-+++ b/drivers/md/dm-thin.c
-@@ -3697,6 +3697,19 @@ static int process_create_thin_mesg(unsi
- return r;
- }
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/nxp-flexspi.txt
+@@ -0,0 +1,41 @@
++* NXP Flex Serial Peripheral Interface (FlexSPI)
++
++Required properties:
++ - compatible : Should be "nxp,lx2160a-fspi"
++ - reg : the first contains the register location and length,
++ the second contains the memory mapping address and length
++ - reg-names: Should contain the reg names "FSPI" and "FSPI-memory"
++ - interrupts : Should contain the interrupt for the device
++ - clocks : The clocks needed by the FlexSPI controller
++ - clock-names : Should contain the name of the clocks: "fspi_en" and "fspi"
++
++Optional properties:
++ - nxp,fspi-has-second-chip: The controller has two buses, bus A and bus B.
++ Each bus can be connected with two NOR flashes.
++ Most of the time, each bus only has one NOR flash
++ connected, this is the default case.
++ But if there are two NOR flashes connected to the
++ bus, you should enable this property.
++ (Please check the board's schematic.)
++Example:
++fspi0: flexspi@20c0000 {
++ compatible = "nxp,lx2160a-fspi";
++ reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>;
++ reg-names = "FSPI", "FSPI-memory";
++ interrupts = <0 25 0x4>; /* Level high type */
++ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
++ clock-names = "fspi_en", "fspi";
++
++ status = "okay";
++ nxp,fspi-has-second-chip;
++ flash0: mt35xu512aba@0 {
++ reg = <0>;
++ ....
++ };
++
++ flash1: mt35xu512aba@1 {
++ reg = <1>;
++ ....
++ };
++
++};
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -97,6 +97,16 @@ config SPI_NXP_SPIFI
+ Flash. Enable this option if you have a device with a SPIFI
+ controller and want to access the Flash as a mtd device.
-+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
-+ if (r) {
-+ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
-+ return r;
++config SPI_NXP_FLEXSPI
++ tristate "NXP Flex SPI controller"
++ help
++ This enables support for the Flex SPI controller in master mode.
++ Up to four slave devices can be connected on two buses with two
++ chipselects each.
++ This controller does not support generic SPI messages and only
++ supports the high-level SPI memory interface using SPI-NOR
++ framework.
++
+ config SPI_INTEL_SPI
+ tristate
+
+--- a/drivers/mtd/spi-nor/Makefile
++++ b/drivers/mtd/spi-nor/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-qua
+ obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
+ obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
+ obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
++obj-$(CONFIG_SPI_NXP_FLEXSPI) += nxp-flexspi.o
+ obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
+ obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
+ obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
+--- /dev/null
++++ b/drivers/mtd/spi-nor/nxp-flexspi.c
+@@ -0,0 +1,1404 @@
++/*
++ * NXP FSPI(FlexSPI controller) driver.
++ *
++ * Copyright 2018 NXP
++ * Author: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/errno.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/timer.h>
++#include <linux/jiffies.h>
++#include <linux/completion.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/spi-nor.h>
++#include <linux/mutex.h>
++#include <linux/pm_qos.h>
++#include <linux/pci.h>
++
++/* The registers */
++#define FSPI_MCR0 0x00
++#define FSPI_MCR0_AHB_TIMEOUT_SHIFT 24
++#define FSPI_MCR0_AHB_TIMEOUT_MASK (0xFF << FSPI_MCR0_AHB_TIMEOUT_SHIFT)
++#define FSPI_MCR0_IP_TIMEOUT_SHIFT 16
++#define FSPI_MCR0_IP_TIMEOUT_MASK (0xFF << FSPI_MCR0_IP_TIMEOUT_SHIFT)
++#define FSPI_MCR0_LEARN_EN_SHIFT 15
++#define FSPI_MCR0_LEARN_EN_MASK (1 << FSPI_MCR0_LEARN_EN_SHIFT)
++#define FSPI_MCR0_SCRFRUN_EN_SHIFT 14
++#define FSPI_MCR0_SCRFRUN_EN_MASK (1 << FSPI_MCR0_SCRFRUN_EN_SHIFT)
++#define FSPI_MCR0_OCTCOMB_EN_SHIFT 13
++#define FSPI_MCR0_OCTCOMB_EN_MASK (1 << FSPI_MCR0_OCTCOMB_EN_SHIFT)
++#define FSPI_MCR0_DOZE_EN_SHIFT 12
++#define FSPI_MCR0_DOZE_EN_MASK (1 << FSPI_MCR0_DOZE_EN_SHIFT)
++#define FSPI_MCR0_HSEN_SHIFT 11
++#define FSPI_MCR0_HSEN_MASK (1 << FSPI_MCR0_HSEN_SHIFT)
++#define FSPI_MCR0_SERCLKDIV_SHIFT 8
++#define FSPI_MCR0_SERCLKDIV_MASK (7 << FSPI_MCR0_SERCLKDIV_SHIFT)
++#define FSPI_MCR0_ATDF_EN_SHIFT 7
++#define FSPI_MCR0_ATDF_EN_MASK (1 << FSPI_MCR0_ATDF_EN_SHIFT)
++#define FSPI_MCR0_ARDF_EN_SHIFT 6
++#define FSPI_MCR0_ARDF_EN_MASK (1 << FSPI_MCR0_ARDF_EN_SHIFT)
++#define FSPI_MCR0_RXCLKSRC_SHIFT 4
++#define FSPI_MCR0_RXCLKSRC_MASK (3 << FSPI_MCR0_RXCLKSRC_SHIFT)
++#define FSPI_MCR0_END_CFG_SHIFT 2
++#define FSPI_MCR0_END_CFG_MASK (3 << FSPI_MCR0_END_CFG_SHIFT)
++#define FSPI_MCR0_MDIS_SHIFT 1
++#define FSPI_MCR0_MDIS_MASK (1 << FSPI_MCR0_MDIS_SHIFT)
++#define FSPI_MCR0_SWRST_SHIFT 0
++#define FSPI_MCR0_SWRST_MASK (1 << FSPI_MCR0_SWRST_SHIFT)
++
++#define FSPI_MCR1 0x04
++#define FSPI_MCR1_SEQ_TIMEOUT_SHIFT 16
++#define FSPI_MCR1_SEQ_TIMEOUT_MASK \
++ (0xFFFF << FSPI_MCR1_SEQ_TIMEOUT_SHIFT)
++#define FSPI_MCR1_AHB_TIMEOUT_SHIFT 0
++#define FSPI_MCR1_AHB_TIMEOUT_MASK \
++ (0xFFFF << FSPI_MCR1_AHB_TIMEOUT_SHIFT)
++
++#define FSPI_MCR2 0x08
++#define FSPI_MCR2_IDLE_WAIT_SHIFT 24
++#define FSPI_MCR2_IDLE_WAIT_MASK (0xFF << FSPI_MCR2_IDLE_WAIT_SHIFT)
++#define FSPI_MCR2_SAMEFLASH_SHIFT 15
++#define FSPI_MCR2_SAMEFLASH_MASK (1 << FSPI_MCR2_SAMEFLASH_SHIFT)
++#define FSPI_MCR2_CLRLRPHS_SHIFT 14
++#define FSPI_MCR2_CLRLRPHS_MASK (1 << FSPI_MCR2_CLRLRPHS_SHIFT)
++#define FSPI_MCR2_ABRDATSZ_SHIFT 8
++#define FSPI_MCR2_ABRDATSZ_MASK (1 << FSPI_MCR2_ABRDATSZ_SHIFT)
++#define FSPI_MCR2_ABRLEARN_SHIFT 7
++#define FSPI_MCR2_ABRLEARN_MASK (1 << FSPI_MCR2_ABRLEARN_SHIFT)
++#define FSPI_MCR2_ABR_READ_SHIFT 6
++#define FSPI_MCR2_ABR_READ_MASK (1 << FSPI_MCR2_ABR_READ_SHIFT)
++#define FSPI_MCR2_ABRWRITE_SHIFT 5
++#define FSPI_MCR2_ABRWRITE_MASK (1 << FSPI_MCR2_ABRWRITE_SHIFT)
++#define FSPI_MCR2_ABRDUMMY_SHIFT 4
++#define FSPI_MCR2_ABRDUMMY_MASK (1 << FSPI_MCR2_ABRDUMMY_SHIFT)
++#define FSPI_MCR2_ABR_MODE_SHIFT 3
++#define FSPI_MCR2_ABR_MODE_MASK (1 << FSPI_MCR2_ABR_MODE_SHIFT)
++#define FSPI_MCR2_ABRCADDR_SHIFT 2
++#define FSPI_MCR2_ABRCADDR_MASK (1 << FSPI_MCR2_ABRCADDR_SHIFT)
++#define FSPI_MCR2_ABRRADDR_SHIFT 1
++#define FSPI_MCR2_ABRRADDR_MASK (1 << FSPI_MCR2_ABRRADDR_SHIFT)
++#define FSPI_MCR2_ABR_CMD_SHIFT 0
++#define FSPI_MCR2_ABR_CMD_MASK (1 << FSPI_MCR2_ABR_CMD_SHIFT)
++
++#define FSPI_AHBCR 0x0c
++#define FSPI_AHBCR_RDADDROPT_SHIFT 6
++#define FSPI_AHBCR_RDADDROPT_MASK (1 << FSPI_AHBCR_RDADDROPT_SHIFT)
++#define FSPI_AHBCR_PREF_EN_SHIFT 5
++#define FSPI_AHBCR_PREF_EN_MASK (1 << FSPI_AHBCR_PREF_EN_SHIFT)
++#define FSPI_AHBCR_BUFF_EN_SHIFT 4
++#define FSPI_AHBCR_BUFF_EN_MASK (1 << FSPI_AHBCR_BUFF_EN_SHIFT)
++#define FSPI_AHBCR_CACH_EN_SHIFT 3
++#define FSPI_AHBCR_CACH_EN_MASK (1 << FSPI_AHBCR_CACH_EN_SHIFT)
++#define FSPI_AHBCR_CLRTXBUF_SHIFT 2
++#define FSPI_AHBCR_CLRTXBUF_MASK (1 << FSPI_AHBCR_CLRTXBUF_SHIFT)
++#define FSPI_AHBCR_CLRRXBUF_SHIFT 1
++#define FSPI_AHBCR_CLRRXBUF_MASK (1 << FSPI_AHBCR_CLRRXBUF_SHIFT)
++#define FSPI_AHBCR_PAR_EN_SHIFT 0
++#define FSPI_AHBCR_PAR_EN_MASK (1 << FSPI_AHBCR_PAR_EN_SHIFT)
++
++#define FSPI_INTEN 0x10
++#define FSPI_INTEN_SCLKSBWR_SHIFT 9
++#define FSPI_INTEN_SCLKSBWR_MASK (1 << FSPI_INTEN_SCLKSBWR_SHIFT)
++#define FSPI_INTEN_SCLKSBRD_SHIFT 8
++#define FSPI_INTEN_SCLKSBRD_MASK (1 << FSPI_INTEN_SCLKSBRD_SHIFT)
++#define FSPI_INTEN_DATALRNFL_SHIFT 7
++#define FSPI_INTEN_DATALRNFL_MASK (1 << FSPI_INTEN_DATALRNFL_SHIFT)
++#define FSPI_INTEN_IPTXWE_SHIFT 6
++#define FSPI_INTEN_IPTXWE_MASK (1 << FSPI_INTEN_IPTXWE_SHIFT)
++#define FSPI_INTEN_IPRXWA_SHIFT 5
++#define FSPI_INTEN_IPRXWA_MASK (1 << FSPI_INTEN_IPRXWA_SHIFT)
++#define FSPI_INTEN_AHBCMDERR_SHIFT 4
++#define FSPI_INTEN_AHBCMDERR_MASK (1 << FSPI_INTEN_AHBCMDERR_SHIFT)
++#define FSPI_INTEN_IPCMDERR_SHIFT 3
++#define FSPI_INTEN_IPCMDERR_MASK (1 << FSPI_INTEN_IPCMDERR_SHIFT)
++#define FSPI_INTEN_AHBCMDGE_SHIFT 2
++#define FSPI_INTEN_AHBCMDGE_MASK (1 << FSPI_INTEN_AHBCMDGE_SHIFT)
++#define FSPI_INTEN_IPCMDGE_SHIFT 1
++#define FSPI_INTEN_IPCMDGE_MASK (1 << FSPI_INTEN_IPCMDGE_SHIFT)
++#define FSPI_INTEN_IPCMDDONE_SHIFT 0
++#define FSPI_INTEN_IPCMDDONE_MASK (1 << FSPI_INTEN_IPCMDDONE_SHIFT)
++
++#define FSPI_INTR 0x14
++#define FSPI_INTR_SCLKSBWR_SHIFT 9
++#define FSPI_INTR_SCLKSBWR_MASK (1 << FSPI_INTR_SCLKSBWR_SHIFT)
++#define FSPI_INTR_SCLKSBRD_SHIFT 8
++#define FSPI_INTR_SCLKSBRD_MASK (1 << FSPI_INTR_SCLKSBRD_SHIFT)
++#define FSPI_INTR_DATALRNFL_SHIFT 7
++#define FSPI_INTR_DATALRNFL_MASK (1 << FSPI_INTR_DATALRNFL_SHIFT)
++#define FSPI_INTR_IPTXWE_SHIFT 6
++#define FSPI_INTR_IPTXWE_MASK (1 << FSPI_INTR_IPTXWE_SHIFT)
++#define FSPI_INTR_IPRXWA_SHIFT 5
++#define FSPI_INTR_IPRXWA_MASK (1 << FSPI_INTR_IPRXWA_SHIFT)
++#define FSPI_INTR_AHBCMDERR_SHIFT 4
++#define FSPI_INTR_AHBCMDERR_MASK (1 << FSPI_INTR_AHBCMDERR_SHIFT)
++#define FSPI_INTR_IPCMDERR_SHIFT 3
++#define FSPI_INTR_IPCMDERR_MASK (1 << FSPI_INTR_IPCMDERR_SHIFT)
++#define FSPI_INTR_AHBCMDGE_SHIFT 2
++#define FSPI_INTR_AHBCMDGE_MASK (1 << FSPI_INTR_AHBCMDGE_SHIFT)
++#define FSPI_INTR_IPCMDGE_SHIFT 1
++#define FSPI_INTR_IPCMDGE_MASK (1 << FSPI_INTR_IPCMDGE_SHIFT)
++#define FSPI_INTR_IPCMDDONE_SHIFT 0
++#define FSPI_INTR_IPCMDDONE_MASK (1 << FSPI_INTR_IPCMDDONE_SHIFT)
++
++#define FSPI_LUTKEY 0x18
++#define FSPI_LUTKEY_VALUE 0x5AF05AF0
++
++#define FSPI_LCKCR 0x1C
++#define FSPI_LCKER_LOCK 0x1
++#define FSPI_LCKER_UNLOCK 0x2
++
++#define FSPI_BUFXCR_INVALID_MSTRID 0xe
++#define FSPI_AHBRX_BUF0CR0 0x20
++#define FSPI_AHBRX_BUF1CR0 0x24
++#define FSPI_AHBRX_BUF2CR0 0x28
++#define FSPI_AHBRX_BUF3CR0 0x2C
++#define FSPI_AHBRX_BUF4CR0 0x30
++#define FSPI_AHBRX_BUF5CR0 0x34
++#define FSPI_AHBRX_BUF6CR0 0x38
++#define FSPI_AHBRX_BUF7CR0 0x3C
++#define FSPI_AHBRXBUF0CR7_PREF_SHIFT 31
++#define FSPI_AHBRXBUF0CR7_PREF_MASK (1 << FSPI_AHBRXBUF0CR7_PREF_SHIFT)
++
++#define FSPI_AHBRX_BUF0CR1 0x40
++#define FSPI_AHBRX_BUF1CR1 0x44
++#define FSPI_AHBRX_BUF2CR1 0x48
++#define FSPI_AHBRX_BUF3CR1 0x4C
++#define FSPI_AHBRX_BUF4CR1 0x50
++#define FSPI_AHBRX_BUF5CR1 0x54
++#define FSPI_AHBRX_BUF6CR1 0x58
++#define FSPI_AHBRX_BUF7CR1 0x5C
++#define FSPI_BUFXCR1_MSID_SHIFT 0
++#define FSPI_BUFXCR1_MSID_MASK (0xF << FSPI_BUFXCR1_MSID_SHIFT)
++#define FSPI_BUFXCR1_PRIO_SHIFT 8
++#define FSPI_BUFXCR1_PRIO_MASK (0x7 << FSPI_BUFXCR1_PRIO_SHIFT)
++
++#define FSPI_FLSHA1CR0 0x60
++#define FSPI_FLSHA2CR0 0x64
++#define FSPI_FLSHB1CR0 0x68
++#define FSPI_FLSHB2CR0 0x6C
++#define FSPI_FLSHXCR0_SZ_SHIFT 10
++#define FSPI_FLSHXCR0_SZ_MASK (0x3FFFFF << FSPI_FLSHXCR0_SZ_SHIFT)
++
++#define FSPI_FLSHA1CR1 0x70
++#define FSPI_FLSHA2CR1 0x74
++#define FSPI_FLSHB1CR1 0x78
++#define FSPI_FLSHB2CR1 0x7C
++#define FSPI_FLSHXCR1_CSINTR_SHIFT 16
++#define FSPI_FLSHXCR1_CSINTR_MASK \
++ (0xFFFF << FSPI_FLSHXCR1_CSINTR_SHIFT)
++#define FSPI_FLSHXCR1_CAS_SHIFT 11
++#define FSPI_FLSHXCR1_CAS_MASK (0xF << FSPI_FLSHXCR1_CAS_SHIFT)
++#define FSPI_FLSHXCR1_WA_SHIFT 10
++#define FSPI_FLSHXCR1_WA_MASK (1 << FSPI_FLSHXCR1_WA_SHIFT)
++#define FSPI_FLSHXCR1_TCSH_SHIFT 5
++#define FSPI_FLSHXCR1_TCSH_MASK (0x1F << FSPI_FLSHXCR1_TCSH_SHIFT)
++#define FSPI_FLSHXCR1_TCSS_SHIFT 0
++#define FSPI_FLSHXCR1_TCSS_MASK (0x1F << FSPI_FLSHXCR1_TCSS_SHIFT)
++
++#define FSPI_FLSHA1CR2 0x80
++#define FSPI_FLSHA2CR2 0x84
++#define FSPI_FLSHB1CR2 0x88
++#define FSPI_FLSHB2CR2 0x8C
++#define FSPI_FLSHXCR2_CLRINSP_SHIFT 24
++#define FSPI_FLSHXCR2_CLRINSP_MASK (1 << FSPI_FLSHXCR2_CLRINSP_SHIFT)
++#define FSPI_FLSHXCR2_AWRWAIT_SHIFT 16
++#define FSPI_FLSHXCR2_AWRWAIT_MASK (0xFF << FSPI_FLSHXCR2_AWRWAIT_SHIFT)
++#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
++#define FSPI_FLSHXCR2_AWRSEQN_MASK (0x7 << FSPI_FLSHXCR2_AWRSEQN_SHIFT)
++#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
++#define FSPI_FLSHXCR2_AWRSEQI_MASK (0xF << FSPI_FLSHXCR2_AWRSEQI_SHIFT)
++#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
++#define FSPI_FLSHXCR2_ARDSEQN_MASK (0x7 << FSPI_FLSHXCR2_ARDSEQN_SHIFT)
++#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
++#define FSPI_FLSHXCR2_ARDSEQI_MASK (0xF << FSPI_FLSHXCR2_ARDSEQI_SHIFT)
++
++#define FSPI_IPCR0 0xA0
++
++#define FSPI_IPCR1 0xA4
++#define FSPI_IPCR1_IPAREN_SHIFT 31
++#define FSPI_IPCR1_IPAREN_MASK (1 << FSPI_IPCR1_IPAREN_SHIFT)
++#define FSPI_IPCR1_SEQNUM_SHIFT 24
++#define FSPI_IPCR1_SEQNUM_MASK (0xF << FSPI_IPCR1_SEQNUM_SHIFT)
++#define FSPI_IPCR1_SEQID_SHIFT 16
++#define FSPI_IPCR1_SEQID_MASK (0xF << FSPI_IPCR1_SEQID_SHIFT)
++#define FSPI_IPCR1_IDATSZ_SHIFT 0
++#define FSPI_IPCR1_IDATSZ_MASK (0xFFFF << FSPI_IPCR1_IDATSZ_SHIFT)
++
++#define FSPI_IPCMD 0xB0
++#define FSPI_IPCMD_TRG_SHIFT 0
++#define FSPI_IPCMD_TRG_MASK (1 << FSPI_IPCMD_TRG_SHIFT)
++
++#define FSPI_DLPR 0xB4
++
++#define FSPI_IPRXFCR 0xB8
++#define FSPI_IPRXFCR_CLR_SHIFT 0
++#define FSPI_IPRXFCR_CLR_MASK (1 << FSPI_IPRXFCR_CLR_SHIFT)
++#define FSPI_IPRXFCR_DMA_EN_SHIFT 1
++#define FSPI_IPRXFCR_DMA_EN_MASK (1 << FSPI_IPRXFCR_DMA_EN_SHIFT)
++#define FSPI_IPRXFCR_WMRK_SHIFT 2
++#define FSPI_IPRXFCR_WMRK_MASK (0x1F << FSPI_IPRXFCR_WMRK_SHIFT)
++
++#define FSPI_IPTXFCR 0xBC
++#define FSPI_IPTXFCR_CLR_SHIFT 0
++#define FSPI_IPTXFCR_CLR_MASK (1 << FSPI_IPTXFCR_CLR_SHIFT)
++#define FSPI_IPTXFCR_DMA_EN_SHIFT 1
++#define FSPI_IPTXFCR_DMA_EN_MASK (1 << FSPI_IPTXFCR_DMA_EN_SHIFT)
++#define FSPI_IPTXFCR_WMRK_SHIFT 2
++#define FSPI_IPTXFCR_WMRK_MASK (0x1F << FSPI_IPTXFCR_WMRK_SHIFT)
++
++#define FSPI_DLLACR 0xC0
++#define FSPI_DLLACR_OVRDEN_SHIFT 8
++#define FSPI_DLLACR_OVRDEN_MASK (1 << FSPI_DLLACR_OVRDEN_SHIFT)
++
++#define FSPI_DLLBCR 0xC4
++#define FSPI_DLLBCR_OVRDEN_SHIFT 8
++#define FSPI_DLLBCR_OVRDEN_MASK (1 << FSPI_DLLBCR_OVRDEN_SHIFT)
++
++#define FSPI_STS0 0xE0
++#define FSPI_STS0_DLPHA_SHIFT 9
++#define FSPI_STS0_DLPHA_MASK (0x1F << FSPI_STS0_DLPHA_SHIFT)
++#define FSPI_STS0_DLPHB_SHIFT 4
++#define FSPI_STS0_DLPHB_MASK (0x1F << FSPI_STS0_DLPHB_SHIFT)
++#define FSPI_STS0_CMD_SRC_SHIFT 2
++#define FSPI_STS0_CMD_SRC_MASK (3 << FSPI_STS0_CMD_SRC_SHIFT)
++#define FSPI_STS0_ARB_IDLE_SHIFT 1
++#define FSPI_STS0_ARB_IDLE_MASK (1 << FSPI_STS0_ARB_IDLE_SHIFT)
++#define FSPI_STS0_SEQ_IDLE_SHIFT 0
++#define FSPI_STS0_SEQ_IDLE_MASK (1 << FSPI_STS0_SEQ_IDLE_SHIFT)
++
++#define FSPI_STS1 0xE4
++#define FSPI_STS1_IP_ERRCD_SHIFT 24
++#define FSPI_STS1_IP_ERRCD_MASK (0xF << FSPI_STS1_IP_ERRCD_SHIFT)
++#define FSPI_STS1_IP_ERRID_SHIFT 16
++#define FSPI_STS1_IP_ERRID_MASK (0xF << FSPI_STS1_IP_ERRID_SHIFT)
++#define FSPI_STS1_AHB_ERRCD_SHIFT 8
++#define FSPI_STS1_AHB_ERRCD_MASK (0xF << FSPI_STS1_AHB_ERRCD_SHIFT)
++#define FSPI_STS1_AHB_ERRID_SHIFT 0
++#define FSPI_STS1_AHB_ERRID_MASK (0xF << FSPI_STS1_AHB_ERRID_SHIFT)
++
++#define FSPI_AHBSPNST 0xEC
++#define FSPI_AHBSPNST_DATLFT_SHIFT 16
++#define FSPI_AHBSPNST_DATLFT_MASK \
++ (0xFFFF << FSPI_AHBSPNST_DATLFT_SHIFT)
++#define FSPI_AHBSPNST_BUFID_SHIFT 1
++#define FSPI_AHBSPNST_BUFID_MASK (7 << FSPI_AHBSPNST_BUFID_SHIFT)
++#define FSPI_AHBSPNST_ACTIVE_SHIFT 0
++#define FSPI_AHBSPNST_ACTIVE_MASK (1 << FSPI_AHBSPNST_ACTIVE_SHIFT)
++
++#define FSPI_IPRXFSTS 0xF0
++#define FSPI_IPRXFSTS_RDCNTR_SHIFT 16
++#define FSPI_IPRXFSTS_RDCNTR_MASK \
++ (0xFFFF << FSPI_IPRXFSTS_RDCNTR_SHIFT)
++#define FSPI_IPRXFSTS_FILL_SHIFT 0
++#define FSPI_IPRXFSTS_FILL_MASK (0xFF << FSPI_IPRXFSTS_FILL_SHIFT)
++
++#define FSPI_IPTXFSTS 0xF4
++#define FSPI_IPTXFSTS_WRCNTR_SHIFT 16
++#define FSPI_IPTXFSTS_WRCNTR_MASK \
++ (0xFFFF << FSPI_IPTXFSTS_WRCNTR_SHIFT)
++#define FSPI_IPTXFSTS_FILL_SHIFT 0
++#define FSPI_IPTXFSTS_FILL_MASK (0xFF << FSPI_IPTXFSTS_FILL_SHIFT)
++
++#define FSPI_RFDR 0x100
++#define FSPI_TFDR 0x180
++
++#define FSPI_LUT_BASE 0x200
++
++/* register map end */
++
++/*
++ * The definition of the LUT register shows below:
++ *
++ * ---------------------------------------------------
++ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
++ * ---------------------------------------------------
++ */
++#define OPRND0_SHIFT 0
++#define PAD0_SHIFT 8
++#define INSTR0_SHIFT 10
++#define OPRND1_SHIFT 16
++
++/* Instruction set for the LUT register. */
++
++#define LUT_STOP 0x00
++#define LUT_CMD 0x01
++#define LUT_ADDR 0x02
++#define LUT_CADDR_SDR 0x03
++#define LUT_MODE 0x04
++#define LUT_MODE2 0x05
++#define LUT_MODE4 0x06
++#define LUT_MODE8 0x07
++#define LUT_NXP_WRITE 0x08
++#define LUT_NXP_READ 0x09
++#define LUT_LEARN_SDR 0x0A
++#define LUT_DATSZ_SDR 0x0B
++#define LUT_DUMMY 0x0C
++#define LUT_DUMMY_RWDS_SDR 0x0D
++#define LUT_JMP_ON_CS 0x1F
++#define LUT_CMD_DDR 0x21
++#define LUT_ADDR_DDR 0x22
++#define LUT_CADDR_DDR 0x23
++#define LUT_MODE_DDR 0x24
++#define LUT_MODE2_DDR 0x25
++#define LUT_MODE4_DDR 0x26
++#define LUT_MODE8_DDR 0x27
++#define LUT_WRITE_DDR 0x28
++#define LUT_READ_DDR 0x29
++#define LUT_LEARN_DDR 0x2A
++#define LUT_DATSZ_DDR 0x2B
++#define LUT_DUMMY_DDR 0x2C
++#define LUT_DUMMY_RWDS_DDR 0x2D
++
++
++/*
++ * The PAD definitions for LUT register.
++ *
++ * The pad stands for the lines number of IO[0:3].
++ * For example, the Quad read need four IO lines, so you should
++ * set LUT_PAD4 which means we use four IO lines.
++ */
++#define LUT_PAD1 0
++#define LUT_PAD2 1
++#define LUT_PAD4 2
++#define LUT_PAD8 3
++
++/* Oprands for the LUT register. */
++#define ADDR24BIT 0x18
++#define ADDR32BIT 0x20
++
++/* Macros for constructing the LUT register. */
++#define LUT0(ins, pad, opr) \
++ (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \
++ ((LUT_##ins) << INSTR0_SHIFT))
++
++#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT)
++
++/* other macros for LUT register. */
++#define FSPI_LUT(x) (FSPI_LUT_BASE + (x) * 4)
++#define FSPI_LUT_NUM 128
++
++/* SEQID -- we can have 32 seqids at most. */
++#define SEQID_READ 0
++#define SEQID_WREN 1
++#define SEQID_WRDI 2
++#define SEQID_RDSR 3
++#define SEQID_SE 4
++#define SEQID_CHIP_ERASE 5
++#define SEQID_PP 6
++#define SEQID_RDID 7
++#define SEQID_WRSR 8
++#define SEQID_RDCR 9
++#define SEQID_EN4B 10
++#define SEQID_BRWR 11
++#define SEQID_RD_EVCR 12
++#define SEQID_WD_EVCR 13
++#define SEQID_RDFSR 14
++
++#define FSPI_MIN_IOMAP SZ_4M
++
++#define FSPI_RX_MAX_IPBUF_SIZE 0x200 /* 64 * 64bits */
++#define FSPI_TX_MAX_IPBUF_SIZE 0x400 /* 128 * 64bits */
++#define FSPI_RX_MAX_AHBBUF_SIZE 0x800 /* 256 * 64bits */
++#define FSPI_TX_MAX_AHBBUF_SIZE 0x40 /* 8 * 64bits */
++
++#define TX_IPBUF_SIZE FSPI_TX_MAX_IPBUF_SIZE
++#define RX_IPBUF_SIZE FSPI_RX_MAX_IPBUF_SIZE
++#define RX_AHBBUF_SIZE FSPI_RX_MAX_AHBBUF_SIZE
++#define TX_AHBBUF_SIZE FSPI_TX_MAX_AHBBUF_SIZE
++
++#define FSPI_SINGLE_MODE 1
++#define FSPI_OCTAL_MODE 8
++
++#define FSPINOR_OP_READ_1_1_8_4B 0x7c
++
++enum nxp_fspi_devtype {
++ NXP_FSPI_LX2160A,
++};
++
++struct nxp_fspi_devtype_data {
++ enum nxp_fspi_devtype devtype;
++ int rxfifo;
++ int txfifo;
++ int ahb_buf_size;
++ int driver_data;
++};
++
++static struct nxp_fspi_devtype_data lx2160a_data = {
++ .devtype = NXP_FSPI_LX2160A,
++ .rxfifo = RX_IPBUF_SIZE,
++ .txfifo = TX_IPBUF_SIZE,
++ .ahb_buf_size = RX_AHBBUF_SIZE,
++ .driver_data = 0,
++};
++
++#define NXP_FSPI_MAX_CHIP 4
++struct nxp_fspi {
++ struct mtd_info mtd[NXP_FSPI_MAX_CHIP];
++ struct spi_nor nor[NXP_FSPI_MAX_CHIP];
++ void __iomem *iobase;
++ void __iomem *ahb_addr;
++ u32 memmap_phy;
++ u32 memmap_offs;
++ u32 memmap_len;
++ struct clk *clk, *clk_en;
++ struct device *dev;
++ struct completion c;
++ struct nxp_fspi_devtype_data *devtype_data;
++ u32 nor_size;
++ u32 nor_num;
++ u32 clk_rate;
++ u32 spi_rx_bus_width;
++ u32 spi_tx_bus_width;
++ unsigned int chip_base_addr; /* We may support two chips. */
++ bool has_second_chip;
++ struct mutex lock;
++ struct pm_qos_request pm_qos_req;
++};
++
++static inline void nxp_fspi_unlock_lut(struct nxp_fspi *fspi)
++{
++ writel(FSPI_LUTKEY_VALUE, fspi->iobase + FSPI_LUTKEY);
++ writel(FSPI_LCKER_UNLOCK, fspi->iobase + FSPI_LCKCR);
++}
++
++static inline void nxp_fspi_lock_lut(struct nxp_fspi *fspi)
++{
++ writel(FSPI_LUTKEY_VALUE, fspi->iobase + FSPI_LUTKEY);
++ writel(FSPI_LCKER_LOCK, fspi->iobase + FSPI_LCKCR);
++}
++
++static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
++{
++ struct nxp_fspi *fspi = dev_id;
++ u32 reg;
++
++ reg = readl(fspi->iobase + FSPI_INTR);
++ writel(FSPI_INTR_IPCMDDONE_MASK, fspi->iobase + FSPI_INTR);
++ if (reg & FSPI_INTR_IPCMDDONE_MASK)
++ complete(&fspi->c);
++
++ return IRQ_HANDLED;
++}
++
++static void nxp_fspi_init_lut(struct nxp_fspi *fspi)
++{
++ void __iomem *base = fspi->iobase;
++ struct spi_nor *nor = &fspi->nor[0];
++ u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT;
++ u32 lut_base;
++ u8 op, dm;
++ int i;
++
++ nxp_fspi_unlock_lut(fspi);
++
++ /* Clear all the LUT table */
++ for (i = 0; i < FSPI_LUT_NUM; i++)
++ writel(0, base + FSPI_LUT_BASE + i * 4);
++
++ /* Read */
++ lut_base = SEQID_READ * 4;
++ op = nor->read_opcode;
++ dm = nor->read_dummy;
++
++ if (fspi->spi_rx_bus_width == FSPI_OCTAL_MODE) {
++ dm = 8;
++ op = FSPINOR_OP_READ_1_1_8_4B;
++ writel(LUT0(CMD, PAD1, op) | LUT1(ADDR, PAD1, addrlen),
++ base + FSPI_LUT(lut_base));
++ writel(LUT0(DUMMY, PAD8, dm) | LUT1(NXP_READ, PAD8, 0),
++ base + FSPI_LUT(lut_base + 1));
++ } else {
++ if ((op == SPINOR_OP_READ_FAST_4B) ||
++ (op == SPINOR_OP_READ_FAST) ||
++ (op == SPINOR_OP_READ) ||
++ (op == SPINOR_OP_READ_4B)) {
++ dm = 8;
++ writel(LUT0(CMD, PAD1, op) | LUT1(ADDR, PAD1, addrlen),
++ base + FSPI_LUT(lut_base));
++ writel(LUT0(DUMMY, PAD1, dm) | LUT1(NXP_READ, PAD1, 0),
++ base + FSPI_LUT(lut_base + 1));
++ } else if (nor->read_proto == SNOR_PROTO_1_4_4) {
++ dev_dbg(nor->dev, "Unsupported opcode : 0x%.2x\n", op);
++ /* TODO Add support for other Read ops. */
++ } else {
++ dev_dbg(nor->dev, "Unsupported opcode : 0x%.2x\n", op);
++ }
++ }
++
++ /* Write enable */
++ lut_base = SEQID_WREN * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + FSPI_LUT(lut_base));
++
++ /* Page Program */
++ lut_base = SEQID_PP * 4;
++ writel(LUT0(CMD, PAD1, nor->program_opcode) | LUT1(ADDR, PAD1, addrlen),
++ base + FSPI_LUT(lut_base));
++ writel(LUT0(NXP_WRITE, PAD1, 0), base + FSPI_LUT(lut_base + 1));
++
++ /* Read Status */
++ lut_base = SEQID_RDSR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(NXP_READ, PAD1, 0x1),
++ base + FSPI_LUT(lut_base));
++
++ /* Erase a sector */
++ lut_base = SEQID_SE * 4;
++ writel(LUT0(CMD, PAD1, nor->erase_opcode) | LUT1(ADDR, PAD1, addrlen),
++ base + FSPI_LUT(lut_base));
++
++ /* Erase the whole chip */
++ lut_base = SEQID_CHIP_ERASE * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
++ base + FSPI_LUT(lut_base));
++
++ /* READ ID */
++ lut_base = SEQID_RDID * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(NXP_READ, PAD1, 0x8),
++ base + FSPI_LUT(lut_base));
++
++ /* Write Register */
++ lut_base = SEQID_WRSR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(NXP_WRITE, PAD1, 0x2),
++ base + FSPI_LUT(lut_base));
++
++ /* Read Configuration Register */
++ lut_base = SEQID_RDCR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(NXP_READ, PAD1, 0x1),
++ base + FSPI_LUT(lut_base));
++
++ /* Write disable */
++ lut_base = SEQID_WRDI * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + FSPI_LUT(lut_base));
++
++ /* Enter 4 Byte Mode (Micron) */
++ lut_base = SEQID_EN4B * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + FSPI_LUT(lut_base));
++
++ /* Enter 4 Byte Mode (Spansion) */
++ lut_base = SEQID_BRWR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + FSPI_LUT(lut_base));
++
++ /* Read EVCR register */
++ lut_base = SEQID_RD_EVCR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_RD_EVCR),
++ base + FSPI_LUT(lut_base));
++
++ /* Write EVCR register */
++ lut_base = SEQID_WD_EVCR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_WD_EVCR),
++ base + FSPI_LUT(lut_base));
++
++ /* Read Flag Status */
++ lut_base = SEQID_RDFSR * 4;
++ writel(LUT0(CMD, PAD1, SPINOR_OP_RDFSR) | LUT1(NXP_READ, PAD1, 0x1),
++ base + FSPI_LUT(lut_base));
++
++ nxp_fspi_lock_lut(fspi);
++}
++
++/* Get the SEQID for the command */
++static int nxp_fspi_get_seqid(struct nxp_fspi *fspi, u8 cmd)
++{
++
++ switch (cmd) {
++ case SPINOR_OP_READ_1_1_4_4B:
++ case SPINOR_OP_READ_1_1_4:
++ case SPINOR_OP_READ:
++ case SPINOR_OP_READ_4B:
++ case SPINOR_OP_READ_FAST:
++ case SPINOR_OP_READ_FAST_4B:
++ return SEQID_READ;
++ case SPINOR_OP_WREN:
++ return SEQID_WREN;
++ case SPINOR_OP_WRDI:
++ return SEQID_WRDI;
++ case SPINOR_OP_RDSR:
++ return SEQID_RDSR;
++ case SPINOR_OP_RDFSR:
++ return SEQID_RDFSR;
++ case SPINOR_OP_BE_4K:
++ case SPINOR_OP_SE:
++ case SPINOR_OP_SE_4B:
++ case SPINOR_OP_BE_4K_4B:
++ return SEQID_SE;
++ case SPINOR_OP_CHIP_ERASE:
++ return SEQID_CHIP_ERASE;
++ case SPINOR_OP_PP:
++ case SPINOR_OP_PP_4B:
++ return SEQID_PP;
++ case SPINOR_OP_RDID:
++ return SEQID_RDID;
++ case SPINOR_OP_WRSR:
++ return SEQID_WRSR;
++ case SPINOR_OP_RDCR:
++ return SEQID_RDCR;
++ case SPINOR_OP_EN4B:
++ return SEQID_EN4B;
++ case SPINOR_OP_BRWR:
++ return SEQID_BRWR;
++ case SPINOR_OP_RD_EVCR:
++ return SEQID_RD_EVCR;
++ case SPINOR_OP_WD_EVCR:
++ return SEQID_WD_EVCR;
++ default:
++ dev_err(fspi->dev, "Unsupported cmd 0x%.2x\n", cmd);
++ break;
+ }
++ return -EINVAL;
++}
++
++static int
++nxp_fspi_runcmd(struct nxp_fspi *fspi, u8 cmd, unsigned int addr, int len)
++{
++ void __iomem *base = fspi->iobase;
++ int seqid;
++ int seqnum = 0;
++ u32 reg;
++ int err;
++ int iprxfcr = 0;
++
++ iprxfcr = readl(fspi->iobase + FSPI_IPRXFCR);
++ /* invalid RXFIFO first */
++ iprxfcr &= ~FSPI_IPRXFCR_DMA_EN_MASK;
++ iprxfcr = iprxfcr | FSPI_IPRXFCR_CLR_MASK;
++ writel(iprxfcr, fspi->iobase + FSPI_IPRXFCR);
++
++ init_completion(&fspi->c);
++ dev_dbg(fspi->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n",
++ fspi->chip_base_addr, addr, len, cmd);
++
++ /* write address */
++ writel(fspi->chip_base_addr + addr, base + FSPI_IPCR0);
++
++ seqid = nxp_fspi_get_seqid(fspi, cmd);
+
-+ if (!free_blocks) {
-+ /* Let's commit before we use up the metadata reserve. */
-+ r = commit(pool);
-+ if (r)
-+ return r;
++ writel((seqnum << FSPI_IPCR1_SEQNUM_SHIFT) |
++ (seqid << FSPI_IPCR1_SEQID_SHIFT) | len,
++ base + FSPI_IPCR1);
++
++ /* wait till controller is idle */
++ do {
++ reg = readl(base + FSPI_STS0);
++ if ((reg & FSPI_STS0_ARB_IDLE_MASK) &&
++ (reg & FSPI_STS0_SEQ_IDLE_MASK))
++ break;
++ udelay(1);
++ dev_dbg(fspi->dev, "The controller is busy, 0x%x\n", reg);
++ } while (1);
++
++ /* trigger the LUT now */
++ writel(1, base + FSPI_IPCMD);
++
++ /* Wait for the interrupt. */
++ if (!wait_for_completion_timeout(&fspi->c, msecs_to_jiffies(1000))) {
++ dev_err(fspi->dev,
++ "cmd 0x%.2x timeout, addr@%.8x, Status0:0x%.8x, Status1:0x%.8x\n",
++ cmd, addr, readl(base + FSPI_STS0),
++ readl(base + FSPI_STS1));
++ err = -ETIMEDOUT;
++ } else {
++ err = 0;
++ dev_dbg(fspi->dev, "FSPI Intr done,INTR:<0x%.8x>\n",
++ readl(base + FSPI_INTR));
+ }
+
- return 0;
- }
-
---- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
-+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
-@@ -616,6 +616,13 @@ static int qed_mcp_cmd_and_union(struct
- return -EBUSY;
- }
-
-+ if (p_hwfn->mcp_info->b_block_cmd) {
-+ DP_NOTICE(p_hwfn,
-+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
-+ p_mb_params->cmd, p_mb_params->param);
++ return err;
++}
++
++/* Read out the data from the FSPI_RBDR buffer registers. */
++static void nxp_fspi_read_data(struct nxp_fspi *fspi, int len, u8 *rxbuf)
++{
++ int i = 0, j = 0, tmp_size = 0;
++ int size;
++ u32 tmp = 0;
++
++ while (len > 0) {
++
++ size = len / 8;
++
++ for (i = 0; i < size; ++i) {
++ /* Wait for RXFIFO available*/
++ while (!(readl(fspi->iobase + FSPI_INTR)
++ & FSPI_INTR_IPRXWA_MASK))
++ ;
++
++ j = 0;
++ tmp_size = 8;
++ while (tmp_size > 0) {
++ tmp = 0;
++ tmp = readl(fspi->iobase + FSPI_RFDR + j * 4);
++ memcpy(rxbuf, &tmp, 4);
++ tmp_size -= 4;
++ j++;
++ rxbuf += 4;
++ }
++
++ /* move the FIFO pointer */
++ writel(FSPI_INTR_IPRXWA_MASK,
++ fspi->iobase + FSPI_INTR);
++ len -= 8;
++ }
++
++ size = len % 8;
++
++ j = 0;
++ if (size) {
++ /* Wait for RXFIFO available*/
++ while (!(readl(fspi->iobase + FSPI_INTR)
++ & FSPI_INTR_IPRXWA_MASK))
++ ;
++
++ while (len > 0) {
++ tmp = 0;
++ size = (len < 4) ? len : 4;
++ tmp = readl(fspi->iobase + FSPI_RFDR + j * 4);
++ memcpy(rxbuf, &tmp, size);
++ len -= size;
++ j++;
++ rxbuf += size;
++ }
++ }
++
++		/* invalidate the RXFIFO */
++ writel(FSPI_IPRXFCR_CLR_MASK,
++ fspi->iobase + FSPI_IPRXFCR);
++
++ writel(FSPI_INTR_IPRXWA_MASK,
++ fspi->iobase + FSPI_INTR);
++ }
++}
++
++static inline void nxp_fspi_invalid(struct nxp_fspi *fspi)
++{
++ u32 reg;
++
++ reg = readl(fspi->iobase + FSPI_MCR0);
++ writel(reg | FSPI_MCR0_SWRST_MASK, fspi->iobase + FSPI_MCR0);
++
++ /*
++ * The minimum delay : 1 AHB + 2 SFCK clocks.
++ * Delay 1 us is enough.
++ */
++ while (readl(fspi->iobase + FSPI_MCR0) & FSPI_MCR0_SWRST_MASK)
++ ;
++}
++
++static ssize_t nxp_fspi_nor_write(struct nxp_fspi *fspi,
++ struct spi_nor *nor, u8 opcode,
++ unsigned int to, u32 *txbuf,
++ unsigned int count)
++{
++ int ret, i, j;
++ int size, tmp_size;
++ u32 data = 0;
++
++ dev_dbg(fspi->dev, "nor write to 0x%.8x:0x%.8x, len : %d\n",
++ fspi->chip_base_addr, to, count);
++
++ /* clear the TX FIFO. */
++ writel(FSPI_IPTXFCR_CLR_MASK, fspi->iobase + FSPI_IPTXFCR);
++
++ size = count / 8;
++ for (i = 0; i < size; i++) {
++ /* Wait for TXFIFO empty*/
++ while (!(readl(fspi->iobase + FSPI_INTR)
++ & FSPI_INTR_IPTXWE_MASK))
++ ;
++ j = 0;
++ tmp_size = 8;
++ while (tmp_size > 0) {
++ data = 0;
++ memcpy(&data, txbuf, 4);
++ writel(data, fspi->iobase + FSPI_TFDR + j * 4);
++ tmp_size -= 4;
++ j++;
++ txbuf += 1;
++ }
++
++ writel(FSPI_INTR_IPTXWE_MASK, fspi->iobase + FSPI_INTR);
++ }
++
++ size = count % 8;
++ if (size) {
++ /* Wait for TXFIFO empty*/
++ while (!(readl(fspi->iobase + FSPI_INTR)
++ & FSPI_INTR_IPTXWE_MASK))
++ ;
++
++ j = 0;
++ tmp_size = 0;
++ while (size > 0) {
++ data = 0;
++ tmp_size = (size < 4) ? size : 4;
++ memcpy(&data, txbuf, tmp_size);
++ writel(data, fspi->iobase + FSPI_TFDR + j * 4);
++ size -= tmp_size;
++ j++;
++ txbuf += 1;
++ }
++
++ writel(FSPI_INTR_IPTXWE_MASK, fspi->iobase + FSPI_INTR);
++ }
++
++ /* Trigger it */
++ ret = nxp_fspi_runcmd(fspi, opcode, to, count);
++
++ if (ret == 0)
++ return count;
++
++ return ret;
++}
++
++static void nxp_fspi_set_map_addr(struct nxp_fspi *fspi)
++{
++ int nor_size = fspi->nor_size >> 10;
++ void __iomem *base = fspi->iobase;
++
++ /*
++ * Supporting same flash device as slaves on different chip-select.
++ * As SAMEDEVICEEN bit set, by default, in mcr2 reg then need not to
++ * configure FLSHA2CRx/FLSHB1CRx/FLSHB2CRx register as setting for
++ * these would be ignored.
++ * Need to Reset SAMEDEVICEEN bit in mcr2 reg, when require to add
++ * support for different flashes.
++ */
++ writel(nor_size, base + FSPI_FLSHA1CR0);
++ writel(0, base + FSPI_FLSHA2CR0);
++ writel(0, base + FSPI_FLSHB1CR0);
++ writel(0, base + FSPI_FLSHB2CR0);
++}
++
++static void nxp_fspi_init_ahb_read(struct nxp_fspi *fspi)
++{
++ void __iomem *base = fspi->iobase;
++ struct spi_nor *nor = &fspi->nor[0];
++ int i = 0;
++ int seqid;
++
++ /* AHB configuration for access buffer 0~7. */
++ for (i = 0; i < 7; i++)
++ writel(0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
++
++ /*
++ * Set ADATSZ with the maximum AHB buffer size to improve the read
++ * performance.
++ */
++ writel((fspi->devtype_data->ahb_buf_size / 8 |
++ FSPI_AHBRXBUF0CR7_PREF_MASK), base + FSPI_AHBRX_BUF7CR0);
++
++ /* prefetch and no start address alignment limitation */
++ writel(FSPI_AHBCR_PREF_EN_MASK | FSPI_AHBCR_RDADDROPT_MASK,
++ base + FSPI_AHBCR);
++
++
++ /* Set the default lut sequence for AHB Read. */
++ seqid = nxp_fspi_get_seqid(fspi, nor->read_opcode);
++ writel(seqid, base + FSPI_FLSHA1CR2);
++}
++
++/* This function was used to prepare and enable FSPI clock */
++static int nxp_fspi_clk_prep_enable(struct nxp_fspi *fspi)
++{
++ int ret;
++
++ ret = clk_prepare_enable(fspi->clk_en);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare_enable(fspi->clk);
++ if (ret) {
++ clk_disable_unprepare(fspi->clk_en);
++ return ret;
++ }
++
++ return 0;
++}
++
++/* This function was used to disable and unprepare FSPI clock */
++static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *fspi)
++{
++ clk_disable_unprepare(fspi->clk);
++ clk_disable_unprepare(fspi->clk_en);
++}
++
++/* We use this function to do some basic init for spi_nor_scan(). */
++static int nxp_fspi_nor_setup(struct nxp_fspi *fspi)
++{
++ void __iomem *base = fspi->iobase;
++ u32 reg;
++
++ /* Reset the module */
++ writel(FSPI_MCR0_SWRST_MASK, base + FSPI_MCR0);
++ do {
++ udelay(1);
++ } while (0x1 & readl(base + FSPI_MCR0));
++
++ /* Disable the module */
++ writel(FSPI_MCR0_MDIS_MASK, base + FSPI_MCR0);
++
++ /* Reset the DLL register to default value */
++ writel(FSPI_DLLACR_OVRDEN_MASK, base + FSPI_DLLACR);
++ writel(FSPI_DLLBCR_OVRDEN_MASK, base + FSPI_DLLBCR);
++
++ /* enable module */
++ writel(FSPI_MCR0_AHB_TIMEOUT_MASK | FSPI_MCR0_IP_TIMEOUT_MASK,
++ base + FSPI_MCR0);
++
++ /* Read the register value */
++ reg = readl(base + FSPI_MCR0);
++
++ /* Init the LUT table. */
++ nxp_fspi_init_lut(fspi);
++
++ /* enable the interrupt */
++ writel(FSPI_INTEN_IPCMDDONE_MASK, fspi->iobase + FSPI_INTEN);
++ return 0;
++}
++
++static int nxp_fspi_nor_setup_last(struct nxp_fspi *fspi)
++{
++ unsigned long rate = fspi->clk_rate;
++ int ret;
++
++ /* disable and unprepare clock to avoid glitch pass to controller */
++ nxp_fspi_clk_disable_unprep(fspi);
++
++ ret = clk_set_rate(fspi->clk, rate);
++ if (ret)
++ return ret;
++
++ ret = nxp_fspi_clk_prep_enable(fspi);
++ if (ret)
++ return ret;
++
++ /* Init the LUT table again. */
++ nxp_fspi_init_lut(fspi);
++
++ /* Init for AHB read */
++ nxp_fspi_init_ahb_read(fspi);
++
++ return 0;
++}
++
++static void nxp_fspi_set_base_addr(struct nxp_fspi *fspi,
++ struct spi_nor *nor)
++{
++ fspi->chip_base_addr = fspi->nor_size * (nor - fspi->nor);
++}
++
++static int nxp_fspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
++ int len)
++{
++ int ret;
++ struct nxp_fspi *fspi = nor->priv;
++
++ ret = nxp_fspi_runcmd(fspi, opcode, 0, len);
++ if (ret)
++ return ret;
++
++ nxp_fspi_read_data(fspi, len, buf);
++ return 0;
++}
++
++static int nxp_fspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
++ int len)
++{
++ struct nxp_fspi *fspi = nor->priv;
++ int ret;
++
++ if (!buf) {
++ ret = nxp_fspi_runcmd(fspi, opcode, 0, 1);
++ if (ret)
++ return ret;
++
++ if (opcode == SPINOR_OP_CHIP_ERASE)
++ nxp_fspi_invalid(fspi);
++
++ } else if (len > 0) {
++ ret = nxp_fspi_nor_write(fspi, nor, opcode, 0,
++ (u32 *)buf, len);
++ } else {
++ dev_err(fspi->dev, "invalid cmd %d\n", opcode);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static ssize_t nxp_fspi_write(struct spi_nor *nor, loff_t to,
++ size_t len, const u_char *buf)
++{
++ struct nxp_fspi *fspi = nor->priv;
++ ssize_t tx_size = 0, act_wrt = 0, ret = 0;
++
++ while (len > 0) {
++ tx_size = (len > TX_IPBUF_SIZE) ? TX_IPBUF_SIZE : len;
++
++ act_wrt = nxp_fspi_nor_write(fspi, nor, nor->program_opcode, to,
++ (u32 *)buf, tx_size);
++ len -= tx_size;
++ to += tx_size;
++ ret += act_wrt;
++ }
++
++	/* invalidate the data in the AHB buffer. */
++ nxp_fspi_invalid(fspi);
++ return ret;
++}
++
++static ssize_t nxp_fspi_read(struct spi_nor *nor, loff_t from,
++ size_t len, u_char *buf)
++{
++ struct nxp_fspi *fspi = nor->priv;
++
++ /* if necessary, ioremap buffer before AHB read, */
++ if (!fspi->ahb_addr) {
++ fspi->memmap_offs = fspi->chip_base_addr + from;
++ fspi->memmap_len = len > FSPI_MIN_IOMAP ?
++ len : FSPI_MIN_IOMAP;
++
++ fspi->ahb_addr = ioremap_nocache(
++ fspi->memmap_phy + fspi->memmap_offs,
++ fspi->memmap_len);
++ if (!fspi->ahb_addr) {
++ dev_err(fspi->dev, "ioremap failed\n");
++ return -ENOMEM;
++ }
++ /* ioremap if the data requested is out of range */
++ } else if (fspi->chip_base_addr + from < fspi->memmap_offs
++ || fspi->chip_base_addr + from + len >
++ fspi->memmap_offs + fspi->memmap_len) {
++ iounmap(fspi->ahb_addr);
++
++ fspi->memmap_offs = fspi->chip_base_addr + from;
++ fspi->memmap_len = len > FSPI_MIN_IOMAP ?
++ len : FSPI_MIN_IOMAP;
++ fspi->ahb_addr = ioremap_nocache(
++ fspi->memmap_phy + fspi->memmap_offs,
++ fspi->memmap_len);
++ if (!fspi->ahb_addr) {
++ dev_err(fspi->dev, "ioremap failed\n");
++ return -ENOMEM;
++ }
++ }
++
++ dev_dbg(fspi->dev, "cmd [%x],read from %p, len:%zd\n",
++ nor->read_opcode, fspi->ahb_addr + fspi->chip_base_addr
++ + from - fspi->memmap_offs, len);
++
++ /* Read out the data directly from the AHB buffer.*/
++ memcpy_toio(buf, fspi->ahb_addr + fspi->chip_base_addr
++ + from - fspi->memmap_offs, len);
++
++ return len;
++}
++
++static int nxp_fspi_erase(struct spi_nor *nor, loff_t offs)
++{
++ struct nxp_fspi *fspi = nor->priv;
++ int ret;
++
++ dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n",
++ nor->mtd.erasesize / 1024, fspi->chip_base_addr, (u32)offs);
++
++ ret = nxp_fspi_runcmd(fspi, nor->erase_opcode, offs, 0);
++ if (ret)
++ return ret;
++
++ nxp_fspi_invalid(fspi);
++ return 0;
++}
++
++static int nxp_fspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
++{
++ struct nxp_fspi *fspi = nor->priv;
++ int ret;
++
++ mutex_lock(&fspi->lock);
++
++ ret = nxp_fspi_clk_prep_enable(fspi);
++ if (ret)
++ goto err_mutex;
++
++ nxp_fspi_set_base_addr(fspi, nor);
++ return 0;
++
++err_mutex:
++ mutex_unlock(&fspi->lock);
++ return ret;
++}
++
++static void nxp_fspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
++{
++ struct nxp_fspi *fspi = nor->priv;
++
++ nxp_fspi_clk_disable_unprep(fspi);
++ mutex_unlock(&fspi->lock);
++}
++
++static const struct of_device_id nxp_fspi_dt_ids[] = {
++ { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
++
++static int nxp_fspi_probe(struct platform_device *pdev)
++{
++ struct spi_nor_hwcaps hwcaps = {
++ .mask = SPINOR_OP_READ_FAST_4B |
++ SPINOR_OP_READ_4B |
++ SNOR_HWCAPS_PP
++ };
++ struct device_node *np = pdev->dev.of_node;
++ struct device *dev = &pdev->dev;
++ struct nxp_fspi *fspi;
++ struct resource *res;
++ struct spi_nor *nor;
++ struct mtd_info *mtd;
++ int ret, i = 0;
++ int find_node = 0;
++
++ const struct of_device_id *of_id =
++ of_match_device(nxp_fspi_dt_ids, &pdev->dev);
++
++ fspi = devm_kzalloc(dev, sizeof(*fspi), GFP_KERNEL);
++ if (!fspi)
++ return -ENOMEM;
++
++ fspi->nor_num = of_get_child_count(dev->of_node);
++ if (!fspi->nor_num || fspi->nor_num > 4)
++ return -ENODEV;
++
++ fspi->dev = dev;
++ fspi->devtype_data = (struct nxp_fspi_devtype_data *)of_id->data;
++ platform_set_drvdata(pdev, fspi);
++
++ /* find the resources */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "FSPI");
++ if (!res) {
++ dev_err(dev, "FSPI get resource IORESOURCE_MEM failed\n");
++ return -ENODEV;
++ }
++
++ fspi->iobase = devm_ioremap_resource(dev, res);
++ if (IS_ERR(fspi->iobase))
++ return PTR_ERR(fspi->iobase);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "FSPI-memory");
++ if (!res) {
++ dev_err(dev,
++ "FSPI-memory get resource IORESOURCE_MEM failed\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(dev, res->start, resource_size(res),
++ res->name)) {
++ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
+
- if (p_mb_params->data_src_size > union_data_size ||
- p_mb_params->data_dst_size > union_data_size) {
- DP_ERR(p_hwfn,
---- a/drivers/net/phy/xilinx_gmii2rgmii.c
-+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
-@@ -92,6 +92,11 @@ static int xgmiitorgmii_probe(struct mdi
- return -EPROBE_DEFER;
- }
-
-+ if (!priv->phy_dev->drv) {
-+ dev_info(dev, "Attached phy not ready\n");
-+ return -EPROBE_DEFER;
-+ }
-+
- priv->addr = mdiodev->addr;
- priv->phy_drv = priv->phy_dev->drv;
- memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
---- a/drivers/spi/spi-tegra20-slink.c
-+++ b/drivers/spi/spi-tegra20-slink.c
-@@ -1081,6 +1081,24 @@ static int tegra_slink_probe(struct plat
- goto exit_free_master;
- }
-
-+ /* disabled clock may cause interrupt storm upon request */
-+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
-+ if (IS_ERR(tspi->clk)) {
-+ ret = PTR_ERR(tspi->clk);
-+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
-+ goto exit_free_master;
-+ }
-+ ret = clk_prepare(tspi->clk);
-+ if (ret < 0) {
-+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
-+ goto exit_free_master;
++ fspi->memmap_phy = res->start;
++
++ /* find the clocks */
++ fspi->clk_en = devm_clk_get(dev, "fspi_en");
++ if (IS_ERR(fspi->clk_en))
++ return PTR_ERR(fspi->clk_en);
++
++ fspi->clk = devm_clk_get(dev, "fspi");
++ if (IS_ERR(fspi->clk))
++ return PTR_ERR(fspi->clk);
++
++ ret = nxp_fspi_clk_prep_enable(fspi);
++ if (ret) {
++ dev_err(dev, "can not enable the clock\n");
++ goto clk_failed;
+ }
-+ ret = clk_enable(tspi->clk);
++
++ /* find the irq */
++ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
-+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
-+ goto exit_free_master;
-+ }
-+
- spi_irq = platform_get_irq(pdev, 0);
- tspi->irq = spi_irq;
- ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
---- a/drivers/staging/android/ashmem.c
-+++ b/drivers/staging/android/ashmem.c
-@@ -380,6 +380,12 @@ static int ashmem_mmap(struct file *file
- goto out;
- }
-
-+ /* requested mapping size larger than object size */
-+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
- /* requested protection bits must match our allowed protection mask */
- if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
- calc_vm_prot_bits(PROT_MASK, 0))) {
---- a/drivers/tty/serial/imx.c
-+++ b/drivers/tty/serial/imx.c
-@@ -2221,6 +2221,14 @@ static int serial_imx_probe(struct platf
- ret);
- return ret;
- }
-+
-+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
-+ dev_name(&pdev->dev), sport);
-+ if (ret) {
-+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
-+ ret);
-+ return ret;
++ dev_err(dev, "failed to get the irq: %d\n", ret);
++ goto irq_failed;
++ }
++
++ ret = devm_request_irq(dev, ret,
++ nxp_fspi_irq_handler, 0, pdev->name, fspi);
++ if (ret) {
++ dev_err(dev, "failed to request irq: %d\n", ret);
++ goto irq_failed;
++ }
++
++ ret = nxp_fspi_nor_setup(fspi);
++ if (ret)
++ goto irq_failed;
++
++ if (of_get_property(np, "nxp,fspi-has-second-chip", NULL))
++ fspi->has_second_chip = true;
++
++ mutex_init(&fspi->lock);
++
++ find_node = 0;
++ /* iterate the subnodes. */
++ for_each_available_child_of_node(dev->of_node, np) {
++ /* skip the holes */
++ if (!fspi->has_second_chip)
++ i *= 2;
++
++ nor = &fspi->nor[i];
++ mtd = &nor->mtd;
++
++ nor->dev = dev;
++ spi_nor_set_flash_node(nor, np);
++ nor->priv = fspi;
++
++ /* fill the hooks */
++ nor->read_reg = nxp_fspi_read_reg;
++ nor->write_reg = nxp_fspi_write_reg;
++ nor->read = nxp_fspi_read;
++ nor->write = nxp_fspi_write;
++ nor->erase = nxp_fspi_erase;
++
++ nor->prepare = nxp_fspi_prep;
++ nor->unprepare = nxp_fspi_unprep;
++
++ ret = of_property_read_u32(np, "spi-max-frequency",
++ &fspi->clk_rate);
++ if (ret < 0)
++ goto next_node;
++
++ /* set the chip address for READID */
++ nxp_fspi_set_base_addr(fspi, nor);
++
++ ret = of_property_read_u32(np, "spi-rx-bus-width",
++ &fspi->spi_rx_bus_width);
++ if (ret < 0)
++ fspi->spi_rx_bus_width = FSPI_SINGLE_MODE;
++
++ ret = of_property_read_u32(np, "spi-tx-bus-width",
++ &fspi->spi_tx_bus_width);
++ if (ret < 0)
++ fspi->spi_tx_bus_width = FSPI_SINGLE_MODE;
++
++ ret = spi_nor_scan(nor, NULL, &hwcaps);
++ if (ret)
++ goto next_node;
++
++ ret = mtd_device_register(mtd, NULL, 0);
++ if (ret)
++ goto next_node;
++
++ /* Set the correct NOR size now. */
++ if (fspi->nor_size == 0) {
++ fspi->nor_size = mtd->size;
++
++			/* Map the SPI NOR to accessible address */
++ nxp_fspi_set_map_addr(fspi);
+ }
- } else {
- ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
- dev_name(&pdev->dev), sport);
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -3763,6 +3763,12 @@ int perf_event_read_local(struct perf_ev
- goto out;
- }
++
++ /*
++ * The write is working in the unit of the TX FIFO,
++ * not in the unit of the SPI NOR's page size.
++ *
++		 * So shrink the spi_nor->page_size if it is larger than the
++ * TX FIFO.
++ */
++ if (nor->page_size > fspi->devtype_data->txfifo)
++ nor->page_size = fspi->devtype_data->txfifo;
++
++ find_node++;
++next_node:
++ i++;
++ }
++
++ if (find_node == 0)
++ goto mutex_failed;
++
++ /* finish the rest init. */
++ ret = nxp_fspi_nor_setup_last(fspi);
++ if (ret)
++ goto last_init_failed;
++
++ nxp_fspi_clk_disable_unprep(fspi);
++ return 0;
++
++last_init_failed:
++ for (i = 0; i < fspi->nor_num; i++) {
++ /* skip the holes */
++ if (!fspi->has_second_chip)
++ i *= 2;
++ mtd_device_unregister(&fspi->mtd[i]);
++ }
++mutex_failed:
++ mutex_destroy(&fspi->lock);
++irq_failed:
++ nxp_fspi_clk_disable_unprep(fspi);
++clk_failed:
++ dev_err(dev, "NXP FSPI probe failed\n");
++ return ret;
++}
++
++static int nxp_fspi_remove(struct platform_device *pdev)
++{
++ struct nxp_fspi *fspi = platform_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < fspi->nor_num; i++) {
++ /* skip the holes */
++ if (!fspi->has_second_chip)
++ i *= 2;
++ mtd_device_unregister(&fspi->nor[i].mtd);
++ }
++
++ /* disable the hardware */
++ writel(FSPI_MCR0_MDIS_MASK, fspi->iobase + FSPI_MCR0);
++
++ mutex_destroy(&fspi->lock);
++
++ if (fspi->ahb_addr)
++ iounmap(fspi->ahb_addr);
++
++ return 0;
++}
++
++static int nxp_fspi_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ return 0;
++}
++
++static int nxp_fspi_resume(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static struct platform_driver nxp_fspi_driver = {
++ .driver = {
++ .name = "nxp-fspi",
++ .bus = &platform_bus_type,
++ .of_match_table = nxp_fspi_dt_ids,
++ },
++ .probe = nxp_fspi_probe,
++ .remove = nxp_fspi_remove,
++ .suspend = nxp_fspi_suspend,
++ .resume = nxp_fspi_resume,
++};
++module_platform_driver(nxp_fspi_driver);
++
++MODULE_DESCRIPTION("NXP FSPI Controller Driver");
++MODULE_AUTHOR("NXP Semiconductor");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -269,6 +269,7 @@ static inline int set_4byte(struct spi_n
+ u8 cmd;
-+ /* If this is a pinned event it must be running on this CPU */
-+ if (event->attr.pinned && event->oncpu != smp_processor_id()) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
- /*
- * If the event is currently on this CPU, its either a per-task event,
- * or local to this CPU. Furthermore it means its ACTIVE (otherwise
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -4036,6 +4036,13 @@ int snd_soc_dapm_link_dai_widgets(struct
- continue;
- }
+ switch (JEDEC_MFR(info)) {
++ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+ /* Some Micron need WREN command; all will accept it */
+ need_wren = true;
+@@ -1039,7 +1040,7 @@ static const struct flash_info spi_nor_i
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-+ /* let users know there is no DAI to link */
-+ if (!dai_w->priv) {
-+ dev_dbg(card->dev, "dai widget %s has no DAI\n",
-+ dai_w->name);
-+ continue;
-+ }
+- /* Micron */
++ /* Micron <--> ST Micro */
+ { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+@@ -1054,6 +1055,12 @@ static const struct flash_info spi_nor_i
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+
++ /* Micron */
++ {
++ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
++ SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
++ },
+
- dai = dai_w->priv;
+ /* PMC */
+ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+@@ -2431,6 +2438,7 @@ static int spi_nor_init_params(struct sp
+ params->quad_enable = macronix_quad_enable;
+ break;
+
++ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+ break;
+
+@@ -2749,7 +2757,8 @@ int spi_nor_scan(struct spi_nor *nor, co
+ mtd->_read = spi_nor_read;
+
+ /* NOR protection support for STmicro/Micron chips and similar */
+- if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
++ if (JEDEC_MFR(info) == SNOR_MFR_ST ||
++ JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ nor->flash_lock = stm_lock;
+--- a/include/linux/mtd/cfi.h
++++ b/include/linux/mtd/cfi.h
+@@ -377,6 +377,7 @@ struct cfi_fixup {
+ #define CFI_MFR_SHARP 0x00B0
+ #define CFI_MFR_SST 0x00BF
+ #define CFI_MFR_ST 0x0020 /* STMicroelectronics */
++#define CFI_MFR_MICRON 0x002C /* Micron */
+ #define CFI_MFR_TOSHIBA 0x0098
+ #define CFI_MFR_WINBOND 0x00DA
- /* ...find all widgets with the same stream and link them */
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -23,7 +23,8 @@
+ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL
+ #define SNOR_MFR_GIGADEVICE 0xc8
+ #define SNOR_MFR_INTEL CFI_MFR_INTEL
+-#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
++#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
++#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
+ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
+ #define SNOR_MFR_SPANSION CFI_MFR_AMD
+ #define SNOR_MFR_SST CFI_MFR_SST
-From c503e92983f2e18c1e18e21b15d5bedd6d0be8ac Mon Sep 17 00:00:00 2001
-From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:26:56 +0800
-Subject: [PATCH 33/40] pcie: support layerscape
+From c54a010fe105281259b996d318ed85efc4103fee Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Mon, 6 May 2019 15:18:05 +0800
+Subject: [PATCH] pcie: support layerscape
+
This is an integrated patch of pcie for layerscape
Signed-off-by: Bao Xiaowei <xiaowei.bao@nxp.com>
+Signed-off-by: Bhumika Goyal <bhumirks@gmail.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
+Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
Signed-off-by: Po Liu <po.liu@nxp.com>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Rolf Evers-Fischer <rolf.evers.fischer@aptiv.com>
+Signed-off-by: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com>
Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- .../bindings/pci/layerscape-pci.txt | 14 +-
- arch/arm/kernel/bios32.c | 43 ++++++
- arch/arm64/kernel/pci.c | 43 ++++++
- drivers/misc/pci_endpoint_test.c | 20 +--
- drivers/pci/dwc/Kconfig | 8 ++
- drivers/pci/dwc/pci-layerscape.c | 132 +++++++++++++++++-
- drivers/pci/endpoint/functions/pci-epf-test.c | 2 +-
- drivers/pci/endpoint/pci-epf-core.c | 8 +-
- drivers/pci/pcie/portdrv_core.c | 29 ++++
- drivers/pci/quirks.c | 15 ++
+ arch/arm/kernel/bios32.c | 43 ++
+ arch/arm64/kernel/pci.c | 43 ++
+ drivers/misc/pci_endpoint_test.c | 332 ++++++++++---
+ drivers/pci/Kconfig | 1 +
+ drivers/pci/dwc/Kconfig | 39 +-
+ drivers/pci/dwc/Makefile | 2 +-
+ drivers/pci/dwc/pci-dra7xx.c | 9 -
+ drivers/pci/dwc/pci-layerscape-ep.c | 146 ++++++
+ drivers/pci/dwc/pci-layerscape.c | 12 +
+ drivers/pci/dwc/pcie-designware-ep.c | 338 ++++++++++++--
+ drivers/pci/dwc/pcie-designware-host.c | 5 +-
+ drivers/pci/dwc/pcie-designware-plat.c | 159 ++++++-
+ drivers/pci/dwc/pcie-designware.c | 5 +-
+ drivers/pci/dwc/pcie-designware.h | 57 ++-
+ drivers/pci/endpoint/Kconfig | 1 +
+ drivers/pci/endpoint/Makefile | 1 +
+ drivers/pci/endpoint/functions/Kconfig | 1 +
+ drivers/pci/endpoint/functions/Makefile | 1 +
+ drivers/pci/endpoint/functions/pci-epf-test.c | 191 +++++---
+ drivers/pci/endpoint/pci-ep-cfs.c | 95 +++-
+ drivers/pci/endpoint/pci-epc-core.c | 159 +++++--
+ drivers/pci/endpoint/pci-epc-mem.c | 13 +-
+ drivers/pci/endpoint/pci-epf-core.c | 116 +++--
+ drivers/pci/host/pci-host-common.c | 8 -
+ drivers/pci/host/pcie-xilinx-nwl.c | 9 -
+ drivers/pci/host/pcie-xilinx.c | 7 -
+ drivers/pci/mobiveil/Kconfig | 50 ++
+ drivers/pci/mobiveil/Makefile | 7 +
+ drivers/pci/mobiveil/pci-layerscape-gen4-ep.c | 178 +++++++
+ drivers/pci/mobiveil/pci-layerscape-gen4.c | 292 ++++++++++++
+ drivers/pci/mobiveil/pcie-mobiveil-ep.c | 512 +++++++++++++++++++++
+ drivers/pci/mobiveil/pcie-mobiveil-host.c | 640 ++++++++++++++++++++++++++
+ drivers/pci/mobiveil/pcie-mobiveil-plat.c | 54 +++
+ drivers/pci/mobiveil/pcie-mobiveil.c | 334 ++++++++++++++
+ drivers/pci/mobiveil/pcie-mobiveil.h | 296 ++++++++++++
+ drivers/pci/pcie/portdrv_core.c | 29 ++
+ drivers/pci/quirks.c | 15 +
+ include/linux/pci-ep-cfs.h | 5 +-
+ include/linux/pci-epc.h | 73 +--
+ include/linux/pci-epf.h | 12 +-
include/linux/pci.h | 1 +
- 11 files changed, 292 insertions(+), 23 deletions(-)
+ include/uapi/linux/pcitest.h | 3 +
+ tools/pci/pcitest.c | 51 +-
+ tools/pci/pcitest.sh | 15 +
+ 44 files changed, 3917 insertions(+), 443 deletions(-)
+ create mode 100644 drivers/pci/dwc/pci-layerscape-ep.c
+ create mode 100644 drivers/pci/mobiveil/Kconfig
+ create mode 100644 drivers/pci/mobiveil/Makefile
+ create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
+ create mode 100644 drivers/pci/mobiveil/pci-layerscape-gen4.c
+ create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-ep.c
+ create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-host.c
+ create mode 100644 drivers/pci/mobiveil/pcie-mobiveil-plat.c
+ create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.c
+ create mode 100644 drivers/pci/mobiveil/pcie-mobiveil.h
---- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
-+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
-@@ -18,11 +18,16 @@ Required properties:
- "fsl,ls2088a-pcie"
- "fsl,ls1088a-pcie"
- "fsl,ls1046a-pcie"
-+ "fsl,ls1012a-pcie"
- - reg: base addresses and lengths of the PCIe controller register blocks.
- - interrupts: A list of interrupt outputs of the controller. Must contain an
- entry for each entry in the interrupt-names property.
--- interrupt-names: Must include the following entries:
-- "intr": The interrupt that is asserted for controller interrupts
-+- interrupt-names: It could include the following entries:
-+ "aer": Asserted for aer interrupt when chip support the aer interrupt with
-+ none MSI/MSI-X/INTx mode,but there is interrupt line for aer.
-+ "pme": Asserted for pme interrupt when chip support the pme interrupt with
-+ none MSI/MSI-X/INTx mode,but there is interrupt line for pme.
-+ ......
- - fsl,pcie-scfg: Must include two entries.
- The first entry must be a link to the SCFG device node
- The second entry must be '0' or '1' based on physical PCIe controller index.
-@@ -38,8 +43,9 @@ Example:
- reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
- 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
-- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* controller interrupt */
-- interrupt-names = "intr";
-+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>, /* aer interrupt */
-+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>; /* pme interrupt */
-+ interrupt-names = "aer", "pme";
- fsl,pcie-scfg = <&scfg 0>;
- #address-cells = <3>;
- #size-cells = <2>;
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -12,6 +12,8 @@
int raw_pci_read(unsigned int domain, unsigned int bus,
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
-@@ -97,6 +97,8 @@ struct pci_endpoint_test {
+@@ -35,38 +35,45 @@
+
+ #include <uapi/linux/pcitest.h>
+
+-#define DRV_MODULE_NAME "pci-endpoint-test"
++#define DRV_MODULE_NAME "pci-endpoint-test"
+
+-#define PCI_ENDPOINT_TEST_MAGIC 0x0
++#define IRQ_TYPE_UNDEFINED -1
++#define IRQ_TYPE_LEGACY 0
++#define IRQ_TYPE_MSI 1
++#define IRQ_TYPE_MSIX 2
++
++#define PCI_ENDPOINT_TEST_MAGIC 0x0
++
++#define PCI_ENDPOINT_TEST_COMMAND 0x4
++#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
++#define COMMAND_RAISE_MSI_IRQ BIT(1)
++#define COMMAND_RAISE_MSIX_IRQ BIT(2)
++#define COMMAND_READ BIT(3)
++#define COMMAND_WRITE BIT(4)
++#define COMMAND_COPY BIT(5)
++
++#define PCI_ENDPOINT_TEST_STATUS 0x8
++#define STATUS_READ_SUCCESS BIT(0)
++#define STATUS_READ_FAIL BIT(1)
++#define STATUS_WRITE_SUCCESS BIT(2)
++#define STATUS_WRITE_FAIL BIT(3)
++#define STATUS_COPY_SUCCESS BIT(4)
++#define STATUS_COPY_FAIL BIT(5)
++#define STATUS_IRQ_RAISED BIT(6)
++#define STATUS_SRC_ADDR_INVALID BIT(7)
++#define STATUS_DST_ADDR_INVALID BIT(8)
+
+-#define PCI_ENDPOINT_TEST_COMMAND 0x4
+-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+-#define COMMAND_RAISE_MSI_IRQ BIT(1)
+-#define MSI_NUMBER_SHIFT 2
+-/* 6 bits for MSI number */
+-#define COMMAND_READ BIT(8)
+-#define COMMAND_WRITE BIT(9)
+-#define COMMAND_COPY BIT(10)
+-
+-#define PCI_ENDPOINT_TEST_STATUS 0x8
+-#define STATUS_READ_SUCCESS BIT(0)
+-#define STATUS_READ_FAIL BIT(1)
+-#define STATUS_WRITE_SUCCESS BIT(2)
+-#define STATUS_WRITE_FAIL BIT(3)
+-#define STATUS_COPY_SUCCESS BIT(4)
+-#define STATUS_COPY_FAIL BIT(5)
+-#define STATUS_IRQ_RAISED BIT(6)
+-#define STATUS_SRC_ADDR_INVALID BIT(7)
+-#define STATUS_DST_ADDR_INVALID BIT(8)
+-
+-#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
++#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
+ #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
+
+ #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
+ #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
+
+-#define PCI_ENDPOINT_TEST_SIZE 0x1c
+-#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
++#define PCI_ENDPOINT_TEST_SIZE 0x1c
++#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
++
++#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
++#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
+
+ static DEFINE_IDA(pci_endpoint_test_ida);
+
+@@ -77,6 +84,10 @@ static bool no_msi;
+ module_param(no_msi, bool, 0444);
+ MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
+
++static int irq_type = IRQ_TYPE_MSI;
++module_param(irq_type, int, 0444);
++MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
++
+ enum pci_barno {
+ BAR_0,
+ BAR_1,
+@@ -92,6 +103,7 @@ struct pci_endpoint_test {
+ void __iomem *bar[6];
+ struct completion irq_raised;
+ int last_irq;
++ int num_irqs;
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
struct miscdevice miscdev;
+@@ -102,7 +114,7 @@ struct pci_endpoint_test {
+ struct pci_endpoint_test_data {
enum pci_barno test_reg_bar;
size_t alignment;
-+ char name[20];
-+ int irq_num;
+- bool no_msi;
++ int irq_type;
};
- struct pci_endpoint_test_data {
-@@ -454,9 +456,7 @@ static int pci_endpoint_test_probe(struc
+ static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
+@@ -146,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irq
+ return IRQ_HANDLED;
+ }
+
++static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
++{
++ struct pci_dev *pdev = test->pdev;
++
++ pci_free_irq_vectors(pdev);
++}
++
++static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
++ int type)
++{
++ int irq = -1;
++ struct pci_dev *pdev = test->pdev;
++ struct device *dev = &pdev->dev;
++ bool res = true;
++
++ switch (type) {
++ case IRQ_TYPE_LEGACY:
++ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
++ if (irq < 0)
++ dev_err(dev, "Failed to get Legacy interrupt\n");
++ break;
++ case IRQ_TYPE_MSI:
++ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
++ if (irq < 0)
++ dev_err(dev, "Failed to get MSI interrupts\n");
++ break;
++ case IRQ_TYPE_MSIX:
++ irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
++ if (irq < 0)
++ dev_err(dev, "Failed to get MSI-X interrupts\n");
++ break;
++ default:
++ dev_err(dev, "Invalid IRQ type selected\n");
++ }
++
++ if (irq < 0) {
++ irq = 0;
++ res = false;
++ }
++ test->num_irqs = irq;
++
++ return res;
++}
++
++static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
++{
++ int i;
++ struct pci_dev *pdev = test->pdev;
++ struct device *dev = &pdev->dev;
++
++ for (i = 0; i < test->num_irqs; i++)
++ devm_free_irq(dev, pci_irq_vector(pdev, i), test);
++
++ test->num_irqs = 0;
++}
++
++static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
++{
++ int i;
++ int err;
++ struct pci_dev *pdev = test->pdev;
++ struct device *dev = &pdev->dev;
++
++ for (i = 0; i < test->num_irqs; i++) {
++ err = devm_request_irq(dev, pci_irq_vector(pdev, i),
++ pci_endpoint_test_irqhandler,
++ IRQF_SHARED, DRV_MODULE_NAME, test);
++ if (err)
++ goto fail;
++ }
++
++ return true;
++
++fail:
++ switch (irq_type) {
++ case IRQ_TYPE_LEGACY:
++ dev_err(dev, "Failed to request IRQ %d for Legacy\n",
++ pci_irq_vector(pdev, i));
++ break;
++ case IRQ_TYPE_MSI:
++ dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
++ pci_irq_vector(pdev, i),
++ i + 1);
++ break;
++ case IRQ_TYPE_MSIX:
++ dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
++ pci_irq_vector(pdev, i),
++ i + 1);
++ break;
++ }
++
++ return false;
++}
++
+ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+ {
+@@ -178,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq
+ {
+ u32 val;
+
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
++ IRQ_TYPE_LEGACY);
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_RAISE_LEGACY_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+@@ -189,20 +298,24 @@ static bool pci_endpoint_test_legacy_irq
+ }
+
+ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+- u8 msi_num)
++ u16 msi_num, bool msix)
+ {
+ u32 val;
+ struct pci_dev *pdev = test->pdev;
+
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
++ msix == false ? IRQ_TYPE_MSI :
++ IRQ_TYPE_MSIX);
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+- msi_num << MSI_NUMBER_SHIFT |
+- COMMAND_RAISE_MSI_IRQ);
++ msix == false ? COMMAND_RAISE_MSI_IRQ :
++ COMMAND_RAISE_MSIX_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+ msecs_to_jiffies(1000));
+ if (!val)
+ return false;
+
+- if (test->last_irq - pdev->irq == msi_num - 1)
++ if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
+ return true;
+
+ return false;
+@@ -226,10 +339,18 @@ static bool pci_endpoint_test_copy(struc
+ u32 src_crc32;
+ u32 dst_crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
++ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Invalid IRQ type option\n");
++ goto err;
++ }
++
+ orig_src_addr = dma_alloc_coherent(dev, size + alignment,
+ &orig_src_phys_addr, GFP_KERNEL);
+ if (!orig_src_addr) {
+- dev_err(dev, "failed to allocate source buffer\n");
++ dev_err(dev, "Failed to allocate source buffer\n");
+ ret = false;
+ goto err;
+ }
+@@ -255,7 +376,7 @@ static bool pci_endpoint_test_copy(struc
+ orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
+ &orig_dst_phys_addr, GFP_KERNEL);
+ if (!orig_dst_addr) {
+- dev_err(dev, "failed to allocate destination address\n");
++ dev_err(dev, "Failed to allocate destination address\n");
+ ret = false;
+ goto err_orig_src_addr;
+ }
+@@ -277,8 +398,10 @@ static bool pci_endpoint_test_copy(struc
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
+ size);
+
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+- 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
++ COMMAND_COPY);
+
+ wait_for_completion(&test->irq_raised);
+
+@@ -311,10 +434,18 @@ static bool pci_endpoint_test_write(stru
+ size_t alignment = test->alignment;
+ u32 crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
++ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Invalid IRQ type option\n");
++ goto err;
++ }
++
+ orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
+ GFP_KERNEL);
+ if (!orig_addr) {
+- dev_err(dev, "failed to allocate address\n");
++ dev_err(dev, "Failed to allocate address\n");
+ ret = false;
+ goto err;
+ }
+@@ -341,8 +472,10 @@ static bool pci_endpoint_test_write(stru
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+- 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
++ COMMAND_READ);
+
+ wait_for_completion(&test->irq_raised);
+
+@@ -369,10 +502,18 @@ static bool pci_endpoint_test_read(struc
+ size_t alignment = test->alignment;
+ u32 crc32;
+
++ if (size > SIZE_MAX - alignment)
++ goto err;
++
++ if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Invalid IRQ type option\n");
++ goto err;
++ }
++
+ orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
+ GFP_KERNEL);
+ if (!orig_addr) {
+- dev_err(dev, "failed to allocate destination address\n");
++ dev_err(dev, "Failed to allocate destination address\n");
+ ret = false;
+ goto err;
+ }
+@@ -393,8 +534,10 @@ static bool pci_endpoint_test_read(struc
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
++ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+- 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
++ COMMAND_WRITE);
+
+ wait_for_completion(&test->irq_raised);
+
+@@ -407,6 +550,38 @@ err:
+ return ret;
+ }
+
++static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
++ int req_irq_type)
++{
++ struct pci_dev *pdev = test->pdev;
++ struct device *dev = &pdev->dev;
++
++ if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Invalid IRQ type option\n");
++ return false;
++ }
++
++ if (irq_type == req_irq_type)
++ return true;
++
++ pci_endpoint_test_release_irq(test);
++ pci_endpoint_test_free_irq_vectors(test);
++
++ if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
++ goto err;
++
++ if (!pci_endpoint_test_request_irq(test))
++ goto err;
++
++ irq_type = req_irq_type;
++ return true;
++
++err:
++ pci_endpoint_test_free_irq_vectors(test);
++ irq_type = IRQ_TYPE_UNDEFINED;
++ return false;
++}
++
+ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+ {
+@@ -426,7 +601,8 @@ static long pci_endpoint_test_ioctl(stru
+ ret = pci_endpoint_test_legacy_irq(test);
+ break;
+ case PCITEST_MSI:
+- ret = pci_endpoint_test_msi_irq(test, arg);
++ case PCITEST_MSIX:
++ ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
+ break;
+ case PCITEST_WRITE:
+ ret = pci_endpoint_test_write(test, arg);
+@@ -437,6 +613,12 @@ static long pci_endpoint_test_ioctl(stru
+ case PCITEST_COPY:
+ ret = pci_endpoint_test_copy(test, arg);
+ break;
++ case PCITEST_SET_IRQTYPE:
++ ret = pci_endpoint_test_set_irq(test, arg);
++ break;
++ case PCITEST_GET_IRQTYPE:
++ ret = irq_type;
++ break;
+ }
+
+ ret:
+@@ -452,9 +634,7 @@ static const struct file_operations pci_
+ static int pci_endpoint_test_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- int i;
+- int i;
int err;
- int irq = 0;
int id;
-- char name[20];
+ char name[20];
enum pci_barno bar;
- void __iomem *base;
- struct device *dev = &pdev->dev;
-@@ -501,19 +501,19 @@ static int pci_endpoint_test_probe(struc
+@@ -476,11 +656,14 @@ static int pci_endpoint_test_probe(struc
+ test->alignment = 0;
+ test->pdev = pdev;
+
++ if (no_msi)
++ irq_type = IRQ_TYPE_LEGACY;
++
+ data = (struct pci_endpoint_test_data *)ent->driver_data;
+ if (data) {
+ test_reg_bar = data->test_reg_bar;
+ test->alignment = data->alignment;
+- no_msi = data->no_msi;
++ irq_type = data->irq_type;
+ }
+
+ init_completion(&test->irq_raised);
+@@ -500,35 +683,21 @@ static int pci_endpoint_test_probe(struc
+
pci_set_master(pdev);
- if (!no_msi) {
+- if (!no_msi) {
- irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (irq < 0)
-+ test->irq_num = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
-+ if (test->irq_num < 0)
- dev_err(dev, "failed to get MSI interrupts\n");
- }
+- dev_err(dev, "failed to get MSI interrupts\n");
+- }
++ if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
++ goto err_disable_irq;
- err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
-+ err = request_irq(pdev->irq, pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
- if (err) {
- dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
- goto err_disable_msi;
+- IRQF_SHARED, DRV_MODULE_NAME, test);
+- if (err) {
+- dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+- goto err_disable_msi;
+- }
+-
+- for (i = 1; i < irq; i++) {
+- err = devm_request_irq(dev, pdev->irq + i,
+- pci_endpoint_test_irqhandler,
+- IRQF_SHARED, DRV_MODULE_NAME, test);
+- if (err)
+- dev_err(dev, "failed to request IRQ %d for MSI %d\n",
+- pdev->irq + i, i + 1);
+- }
++ if (!pci_endpoint_test_request_irq(test))
++ goto err_disable_irq;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+- base = pci_ioremap_bar(pdev, bar);
+- if (!base) {
+- dev_err(dev, "failed to read BAR%d\n", bar);
+- WARN_ON(bar == test_reg_bar);
++ if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
++ base = pci_ioremap_bar(pdev, bar);
++ if (!base) {
++ dev_err(dev, "Failed to read BAR%d\n", bar);
++ WARN_ON(bar == test_reg_bar);
++ }
++ test->bar[bar] = base;
+ }
+- test->bar[bar] = base;
}
-- for (i = 1; i < irq; i++) {
-+ for (i = 1; i < test->irq_num; i++) {
- err = devm_request_irq(dev, pdev->irq + i,
- pci_endpoint_test_irqhandler,
- IRQF_SHARED, DRV_MODULE_NAME, test);
-@@ -548,10 +548,10 @@ static int pci_endpoint_test_probe(struc
+ test->base = test->bar[test_reg_bar];
+@@ -544,24 +713,31 @@ static int pci_endpoint_test_probe(struc
+ id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ err = id;
+- dev_err(dev, "unable to get id\n");
++ dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
-- snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
-+ snprintf(test->name, sizeof(test->name), DRV_MODULE_NAME ".%d", id);
+ snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
- misc_device->name = name;
-+ misc_device->name = test->name;
++ misc_device->name = kstrdup(name, GFP_KERNEL);
++ if (!misc_device->name) {
++ err = -ENOMEM;
++ goto err_ida_remove;
++ }
misc_device->fops = &pci_endpoint_test_fops,
err = misc_register(misc_device);
-@@ -584,6 +584,7 @@ err_disable_pdev:
- static void pci_endpoint_test_remove(struct pci_dev *pdev)
- {
- int id;
-+ int i;
- enum pci_barno bar;
- struct pci_endpoint_test *test = pci_get_drvdata(pdev);
- struct miscdevice *misc_device = &test->miscdev;
-@@ -599,6 +600,8 @@ static void pci_endpoint_test_remove(str
+ if (err) {
+- dev_err(dev, "failed to register device\n");
+- goto err_ida_remove;
++ dev_err(dev, "Failed to register device\n");
++ goto err_kfree_name;
+ }
+
+ return 0;
+
++err_kfree_name:
++ kfree(misc_device->name);
++
+ err_ida_remove:
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+
+@@ -570,9 +746,10 @@ err_iounmap:
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
++ pci_endpoint_test_release_irq(test);
+
+-err_disable_msi:
+- pci_disable_msi(pdev);
++err_disable_irq:
++ pci_endpoint_test_free_irq_vectors(test);
+ pci_release_regions(pdev);
+
+ err_disable_pdev:
+@@ -594,12 +771,16 @@ static void pci_endpoint_test_remove(str
+ return;
+
+ misc_deregister(&test->miscdev);
++ kfree(misc_device->name);
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
-+ for (i = 0; i < test->irq_num; i++)
-+ free_irq(pdev->irq + i, test);
- pci_disable_msi(pdev);
+- pci_disable_msi(pdev);
++
++ pci_endpoint_test_release_irq(test);
++ pci_endpoint_test_free_irq_vectors(test);
++
pci_release_regions(pdev);
pci_disable_device(pdev);
-@@ -607,6 +610,7 @@ static void pci_endpoint_test_remove(str
+ }
+@@ -607,6 +788,7 @@ static void pci_endpoint_test_remove(str
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -142,6 +142,7 @@ config PCI_HYPERV
+
+ source "drivers/pci/hotplug/Kconfig"
+ source "drivers/pci/dwc/Kconfig"
++source "drivers/pci/mobiveil/Kconfig"
+ source "drivers/pci/host/Kconfig"
+ source "drivers/pci/endpoint/Kconfig"
+ source "drivers/pci/switch/Kconfig"
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
-@@ -111,6 +111,14 @@ config PCI_LAYERSCAPE
- help
- Say Y here if you want PCIe controller support on Layerscape SoCs.
+@@ -50,17 +50,36 @@ config PCI_DRA7XX_EP
+ endif
-+config PCI_LAYERSCAPE_EP
-+ bool "PCI layerscape Endpoint Mode"
+ config PCIE_DW_PLAT
+- bool "Platform bus based DesignWare PCIe Controller"
+- depends on PCI
+- depends on PCI_MSI_IRQ_DOMAIN
+- select PCIE_DW_HOST
+- ---help---
+- This selects the DesignWare PCIe controller support. Select this if
+- you have a PCIe controller on Platform bus.
++ bool
+
+- If you have a controller with this interface, say Y or M here.
++config PCIE_DW_PLAT_HOST
++ bool "Platform bus based DesignWare PCIe Controller - Host mode"
++ depends on PCI && PCI_MSI_IRQ_DOMAIN
++ select PCIE_DW_HOST
++ select PCIE_DW_PLAT
++ help
++ Enables support for the PCIe controller in the Designware IP to
++ work in host mode. There are two instances of PCIe controller in
++ Designware IP.
++ This controller can work either as EP or RC. In order to enable
++ host-specific features PCIE_DW_PLAT_HOST must be selected and in
++ order to enable device-specific features PCI_DW_PLAT_EP must be
++ selected.
+
+- If unsure, say N.
++config PCIE_DW_PLAT_EP
++ bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
++ depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
++ select PCIE_DW_PLAT
+ help
-+ Enables support for the PCIe controller in the layerscape SoC to work in
-+ endpoint mode.
++ Enables support for the PCIe controller in the Designware IP to
++ work in endpoint mode. There are two instances of PCIe controller
++ in Designware IP.
++ This controller can work either as EP or RC. In order to enable
++ host-specific features PCIE_DW_PLAT_HOST must be selected and in
++ order to enable device-specific features PCI_DW_PLAT_EP must be
++ selected.
+
+ config PCI_EXYNOS
+ bool "Samsung Exynos PCIe controller"
+--- a/drivers/pci/dwc/Makefile
++++ b/drivers/pci/dwc/Makefile
+@@ -10,7 +10,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+-obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
++obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o
+ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+ obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+ obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+--- a/drivers/pci/dwc/pci-dra7xx.c
++++ b/drivers/pci/dwc/pci-dra7xx.c
+@@ -337,15 +337,6 @@ static irqreturn_t dra7xx_pcie_irq_handl
+ return IRQ_HANDLED;
+ }
+
+-static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+-{
+- u32 reg;
+-
+- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+- dw_pcie_writel_dbi2(pci, reg, 0x0);
+- dw_pcie_writel_dbi(pci, reg, 0x0);
+-}
+-
+ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+ {
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+--- /dev/null
++++ b/drivers/pci/dwc/pci-layerscape-ep.c
+@@ -0,0 +1,146 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe controller EP driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2018 NXP Semiconductor.
++ *
++ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++
++#include "pcie-designware.h"
++
++#define PCIE_DBI2_OFFSET		0x1000	/* DBI2 base address */
++
++struct ls_pcie_ep {
++ struct dw_pcie *pci;
++};
++
++#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
++
++static int ls_pcie_establish_link(struct dw_pcie *pci)
++{
++ return 0;
++}
++
++static const struct dw_pcie_ops ls_pcie_ep_ops = {
++ .start_link = ls_pcie_establish_link,
++};
++
++static const struct of_device_id ls_pcie_ep_of_match[] = {
++ { .compatible = "fsl,ls-pcie-ep",},
++ { },
++};
++
++static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ enum pci_barno bar;
++
++ for (bar = BAR_0; bar <= BAR_5; bar++)
++ dw_pcie_ep_reset_bar(pci, bar);
++
++ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
++}
++
++static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++
++ switch (type) {
++ case PCI_EPC_IRQ_LEGACY:
++ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
++ case PCI_EPC_IRQ_MSI:
++ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
++ case PCI_EPC_IRQ_MSIX:
++ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
++ default:
++ dev_err(pci->dev, "UNKNOWN IRQ type\n");
++ return -EINVAL;
++ }
++}
++
++static struct dw_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = ls_pcie_ep_init,
++ .raise_irq = ls_pcie_ep_raise_irq,
++};
++
++static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
++ struct platform_device *pdev)
++{
++ struct dw_pcie *pci = pcie->pci;
++ struct device *dev = pci->dev;
++ struct dw_pcie_ep *ep;
++ struct resource *res;
++ int ret;
++
++ ep = &pci->ep;
++ ep->ops = &pcie_ep_ops;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
++ if (!res)
++ return -EINVAL;
++
++ ep->phys_base = res->start;
++ ep->addr_size = resource_size(res);
++
++ ret = dw_pcie_ep_init(ep);
++ if (ret) {
++ dev_err(dev, "failed to initialize endpoint\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int __init ls_pcie_ep_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct dw_pcie *pci;
++ struct ls_pcie_ep *pcie;
++ struct resource *dbi_base;
++ int ret;
++
++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
++ if (!pci)
++ return -ENOMEM;
++
++ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
++ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
++ if (IS_ERR(pci->dbi_base))
++ return PTR_ERR(pci->dbi_base);
++
++ pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
++ pci->dev = dev;
++ pci->ops = &ls_pcie_ep_ops;
++ pcie->pci = pci;
++
++ platform_set_drvdata(pdev, pcie);
+
- config PCI_HISI
- depends on OF && ARM64
- bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
++ ret = ls_add_pcie_ep(pcie, pdev);
++
++ return ret;
++}
++
++static struct platform_driver ls_pcie_ep_driver = {
++ .driver = {
++ .name = "layerscape-pcie-ep",
++ .of_match_table = ls_pcie_ep_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
-@@ -33,8 +33,15 @@
+@@ -33,6 +33,8 @@
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
-+#define PCIE_DBI2_BASE 0x1000 /* DBI2 base address*/
-+#define PCIE_MSI_MSG_DATA_OFF 0x5c /* MSI Data register address*/
-+#define PCIE_MSI_OB_SIZE 4096
-+#define PCIE_MSI_ADDR_OFFSET (1024 * 1024)
#define PCIE_IATU_NUM 6
-+#define PCIE_EP_ADDR_SPACE_SIZE 0x100000000
-
- struct ls_pcie_drvdata {
- u32 lut_offset;
-@@ -44,12 +51,20 @@ struct ls_pcie_drvdata {
- const struct dw_pcie_ops *dw_pcie_ops;
- };
-
-+struct ls_pcie_ep {
-+ dma_addr_t msi_phys_addr;
-+ void __iomem *msi_virt_addr;
-+ u64 msi_msg_addr;
-+ u16 msi_msg_data;
-+};
-+
- struct ls_pcie {
- struct dw_pcie *pci;
- void __iomem *lut;
- struct regmap *scfg;
- const struct ls_pcie_drvdata *drvdata;
- int index;
-+ struct ls_pcie_ep *pcie_ep;
- };
- #define to_ls_pcie(x) dev_get_drvdata((x)->dev)
-@@ -124,6 +139,14 @@ static int ls_pcie_link_up(struct dw_pci
+@@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pci
return 1;
}
static int ls_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-@@ -135,6 +158,7 @@ static int ls_pcie_host_init(struct pcie
+@@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie
* dw_pcie_setup_rc() will reconfigure the outbound windows.
*/
ls_pcie_disable_outbound_atus(pcie);
dw_pcie_dbi_ro_wr_en(pci);
ls_pcie_clear_multifunction(pcie);
-@@ -253,6 +277,7 @@ static struct ls_pcie_drvdata ls2088_drv
+@@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drv
};
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
-@@ -263,6 +288,99 @@ static const struct of_device_id ls_pcie
- { },
- };
+--- a/drivers/pci/dwc/pcie-designware-ep.c
++++ b/drivers/pci/dwc/pcie-designware-ep.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
-+static void ls_pcie_raise_msi_irq(struct ls_pcie_ep *pcie_ep)
+ #include <linux/of.h>
+@@ -30,7 +19,8 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep
+ pci_epc_linkup(epc);
+ }
+
+-static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
++static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
++ int flags)
+ {
+ u32 reg;
+
+@@ -38,10 +28,52 @@ static void dw_pcie_ep_reset_bar(struct
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi2(pci, reg, 0x0);
+ dw_pcie_writel_dbi(pci, reg, 0x0);
++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
++ dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
++ dw_pcie_writel_dbi(pci, reg + 4, 0x0);
++ }
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+-static int dw_pcie_ep_write_header(struct pci_epc *epc,
++void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
-+ iowrite32(pcie_ep->msi_msg_data, pcie_ep->msi_virt_addr);
++ __dw_pcie_ep_reset_bar(pci, bar, 0);
+}
+
-+static int ls_pcie_raise_irq(struct dw_pcie_ep *ep,
-+ enum pci_epc_irq_type type, u8 interrupt_num)
++static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
++ u8 cap)
+{
-+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-+ struct ls_pcie *pcie = to_ls_pcie(pci);
-+ struct ls_pcie_ep *pcie_ep = pcie->pcie_ep;
-+ u32 free_win;
-+
-+ /* get the msi message address and msi message data */
-+ pcie_ep->msi_msg_addr = ioread32(pci->dbi_base + MSI_MESSAGE_ADDR_L32) |
-+ (((u64)ioread32(pci->dbi_base + MSI_MESSAGE_ADDR_U32)) << 32);
-+ pcie_ep->msi_msg_data = ioread16(pci->dbi_base + PCIE_MSI_MSG_DATA_OFF);
-+
-+ /* request and config the outband window for msi */
-+ free_win = find_first_zero_bit(&ep->ob_window_map,
-+ sizeof(ep->ob_window_map));
-+ if (free_win >= ep->num_ob_windows) {
-+ dev_err(pci->dev, "no free outbound window\n");
-+ return -ENOMEM;
-+ }
-+
-+ dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
-+ pcie_ep->msi_phys_addr,
-+ pcie_ep->msi_msg_addr,
-+ PCIE_MSI_OB_SIZE);
++ u8 cap_id, next_cap_ptr;
++ u16 reg;
+
-+ set_bit(free_win, &ep->ob_window_map);
++ reg = dw_pcie_readw_dbi(pci, cap_ptr);
++ next_cap_ptr = (reg & 0xff00) >> 8;
++ cap_id = (reg & 0x00ff);
+
-+ /* generate the msi interrupt */
-+ ls_pcie_raise_msi_irq(pcie_ep);
++ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
++ return 0;
+
-+ /* release the outband window of msi */
-+ dw_pcie_disable_atu(pci, free_win, DW_PCIE_REGION_OUTBOUND);
-+ clear_bit(free_win, &ep->ob_window_map);
++ if (cap_id == cap)
++ return cap_ptr;
+
-+ return 0;
++ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
-+static struct dw_pcie_ep_ops pcie_ep_ops = {
-+ .raise_irq = ls_pcie_raise_irq,
-+};
-+
-+static int __init ls_add_pcie_ep(struct ls_pcie *pcie,
-+ struct platform_device *pdev)
++static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+{
-+ struct dw_pcie *pci = pcie->pci;
-+ struct device *dev = pci->dev;
-+ struct dw_pcie_ep *ep;
-+ struct ls_pcie_ep *pcie_ep;
-+ struct resource *cfg_res;
-+ int ret;
++ u8 next_cap_ptr;
++ u16 reg;
+
-+ ep = &pci->ep;
-+ ep->ops = &pcie_ep_ops;
++ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
++ next_cap_ptr = (reg & 0x00ff);
+
-+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
-+ if (!pcie_ep)
-+ return -ENOMEM;
++ if (!next_cap_ptr)
++ return 0;
+
-+ pcie->pcie_ep = pcie_ep;
++ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
++}
+
-+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
-+ if (cfg_res) {
-+ ep->phys_base = cfg_res->start;
-+ ep->addr_size = PCIE_EP_ADDR_SPACE_SIZE;
-+ } else {
-+ dev_err(dev, "missing *config* space\n");
++static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_header *hdr)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+@@ -74,8 +106,7 @@ static int dw_pcie_ep_inbound_atu(struct
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+- free_win = find_first_zero_bit(&ep->ib_window_map,
+- sizeof(ep->ib_window_map));
++ free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+ if (free_win >= ep->num_ib_windows) {
+ dev_err(pci->dev, "no free inbound window\n");
+ return -EINVAL;
+@@ -89,7 +120,7 @@ static int dw_pcie_ep_inbound_atu(struct
+ }
+
+ ep->bar_to_atu[bar] = free_win;
+- set_bit(free_win, &ep->ib_window_map);
++ set_bit(free_win, ep->ib_window_map);
+
+ return 0;
+ }
+@@ -100,8 +131,7 @@ static int dw_pcie_ep_outbound_atu(struc
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+- free_win = find_first_zero_bit(&ep->ob_window_map,
+- sizeof(ep->ob_window_map));
++ free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
+ if (free_win >= ep->num_ob_windows) {
+ dev_err(pci->dev, "no free outbound window\n");
+ return -EINVAL;
+@@ -110,30 +140,35 @@ static int dw_pcie_ep_outbound_atu(struc
+ dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+
+- set_bit(free_win, &ep->ob_window_map);
++ set_bit(free_win, ep->ob_window_map);
+ ep->outbound_addr[free_win] = phys_addr;
+
+ return 0;
+ }
+
+-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
++static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+- dw_pcie_ep_reset_bar(pci, bar);
++ __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+- clear_bit(atu_index, &ep->ib_window_map);
++ clear_bit(atu_index, ep->ib_window_map);
+ }
+
+-static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags)
++static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++ size_t size = epf_bar->size;
++ int flags = epf_bar->flags;
+ enum dw_pcie_as_type as_type;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+@@ -142,13 +177,20 @@ static int dw_pcie_ep_set_bar(struct pci
+ else
+ as_type = DW_PCIE_AS_IO;
+
+- ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
++ ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+- dw_pcie_writel_dbi2(pci, reg, size - 1);
++
++ dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+ dw_pcie_writel_dbi(pci, reg, flags);
++
++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
++ dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
++ dw_pcie_writel_dbi(pci, reg + 4, 0);
++ }
++
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+@@ -169,7 +211,8 @@ static int dw_pcie_find_index(struct dw_
+ return -EINVAL;
+ }
+
+-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
++static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr)
+ {
+ int ret;
+ u32 atu_index;
+@@ -181,10 +224,11 @@ static void dw_pcie_ep_unmap_addr(struct
+ return;
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+- clear_bit(atu_index, &ep->ob_window_map);
++ clear_bit(atu_index, ep->ob_window_map);
+ }
+
+-static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
++static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr,
+ u64 pci_addr, size_t size)
+ {
+ int ret;
+@@ -200,45 +244,93 @@ static int dw_pcie_ep_map_addr(struct pc
+ return 0;
+ }
+
+-static int dw_pcie_ep_get_msi(struct pci_epc *epc)
++static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+ {
+- int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
+
+- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+- if (!(val & MSI_CAP_MSI_EN_MASK))
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ if (!(val & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+- val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
++ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
++
+ return val;
+ }
+
+-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
++static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+ {
+- int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
+
+- val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+- val &= ~MSI_CAP_MMC_MASK;
+- val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ val &= ~PCI_MSI_FLAGS_QMASK;
++ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ dw_pcie_dbi_ro_wr_en(pci);
+- dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
++ dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+ }
+
+-static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+- enum pci_epc_irq_type type, u8 interrupt_num)
++static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
++{
++ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ if (!(val & PCI_MSIX_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val &= PCI_MSIX_FLAGS_QSIZE;
++
++ return val;
++}
++
++static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
++{
++ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = dw_pcie_readw_dbi(pci, reg);
++ val &= ~PCI_MSIX_FLAGS_QSIZE;
++ val |= interrupts;
++ dw_pcie_dbi_ro_wr_en(pci);
++ dw_pcie_writew_dbi(pci, reg, val);
++ dw_pcie_dbi_ro_wr_dis(pci);
++
++ return 0;
++}
++
++static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num)
+ {
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+- return ep->ops->raise_irq(ep, type, interrupt_num);
++ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+ }
+
+ static void dw_pcie_ep_stop(struct pci_epc *epc)
+@@ -271,15 +363,130 @@ static const struct pci_epc_ops epc_ops
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
++ .set_msix = dw_pcie_ep_set_msix,
++ .get_msix = dw_pcie_ep_get_msix,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+ };
+
++int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct device *dev = pci->dev;
++
++ dev_err(dev, "EP cannot trigger legacy IRQs\n");
++
++ return -EINVAL;
++}
++
++int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 msg_ctrl, msg_data;
++ u32 msg_addr_lower, msg_addr_upper, reg;
++ u64 msg_addr;
++ bool has_upper;
++ int ret;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ msg_ctrl = dw_pcie_readw_dbi(pci, reg);
++ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
++ msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
++ if (has_upper) {
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
++ msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
++ reg = ep->msi_cap + PCI_MSI_DATA_64;
++ msg_data = dw_pcie_readw_dbi(pci, reg);
++ } else {
++ msg_addr_upper = 0;
++ reg = ep->msi_cap + PCI_MSI_DATA_32;
++ msg_data = dw_pcie_readw_dbi(pci, reg);
++ }
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
++ epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
++
++ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 tbl_offset, bir;
++ u32 bar_addr_upper, bar_addr_lower;
++ u32 msg_addr_upper, msg_addr_lower;
++ u32 reg, msg_data, vec_ctrl;
++ u64 tbl_addr, msg_addr, reg_u64;
++ void __iomem *msix_tbl;
++ int ret;
++
++ reg = ep->msix_cap + PCI_MSIX_TABLE;
++ tbl_offset = dw_pcie_readl_dbi(pci, reg);
++ bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
++ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
++
++ reg = PCI_BASE_ADDRESS_0 + (4 * bir);
++ bar_addr_upper = 0;
++ bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
++ reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
++ if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
++ bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
++
++ tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
++ tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
++ tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
++
++ msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
++ PCI_MSIX_ENTRY_SIZE);
++ if (!msix_tbl)
++ return -EINVAL;
++
++ msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
++ msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
++ vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
++
++ iounmap(msix_tbl);
++
++ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
++ return -EPERM;
++
++ ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
++ epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data, ep->msi_mem);
++
++ dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
+ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+ {
+ struct pci_epc *epc = ep->epc;
+
++ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
++ epc->mem->page_size);
++
+ pci_epc_mem_exit(epc);
+ }
+
+@@ -293,7 +500,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ struct device_node *np = dev->of_node;
+
+ if (!pci->dbi_base || !pci->dbi_base2) {
+- dev_err(dev, "dbi_base/deb_base2 is not populated\n");
++ dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
+ return -EINVAL;
+ }
+
+@@ -302,12 +509,32 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ dev_err(dev, "unable to read *num-ib-windows* property\n");
+ return ret;
+ }
++ if (ep->num_ib_windows > MAX_IATU_IN) {
++ dev_err(dev, "invalid *num-ib-windows*\n");
++ return -EINVAL;
++ }
+
+ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-ob-windows* property\n");
+ return ret;
+ }
++ if (ep->num_ob_windows > MAX_IATU_OUT) {
++ dev_err(dev, "invalid *num-ob-windows*\n");
++ return -EINVAL;
++ }
++
++ ep->ib_window_map = devm_kzalloc(dev, sizeof(long) *
++ BITS_TO_LONGS(ep->num_ib_windows),
++ GFP_KERNEL);
++ if (!ep->ib_window_map)
++ return -ENOMEM;
++
++ ep->ob_window_map = devm_kzalloc(dev, sizeof(long) *
++ BITS_TO_LONGS(ep->num_ob_windows),
++ GFP_KERNEL);
++ if (!ep->ob_window_map)
++ return -ENOMEM;
+
+ addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+ GFP_KERNEL);
+@@ -315,15 +542,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+- if (ep->ops->ep_init)
+- ep->ops->ep_init(ep);
+-
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
++ ep->epc = epc;
++ epc_set_drvdata(epc, ep);
++
++ if (ep->ops->ep_init)
++ ep->ops->ep_init(ep);
++
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+@@ -335,8 +565,16 @@ int dw_pcie_ep_init(struct dw_pcie_ep *e
+ return ret;
+ }
+
+- ep->epc = epc;
+- epc_set_drvdata(epc, ep);
++ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
++ epc->mem->page_size);
++ if (!ep->msi_mem) {
++ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
++ return -ENOMEM;
++ }
++ ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
++
++ ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
++
+ dw_pcie_setup(pci);
+
+ return 0;
+--- a/drivers/pci/dwc/pcie-designware-host.c
++++ b/drivers/pci/dwc/pcie-designware-host.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #include <linux/irqdomain.h>
+--- a/drivers/pci/dwc/pcie-designware-plat.c
++++ b/drivers/pci/dwc/pcie-designware-plat.c
+@@ -1,13 +1,10 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+@@ -15,19 +12,29 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/of_device.h>
+ #include <linux/of_gpio.h>
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/resource.h>
+ #include <linux/signal.h>
+ #include <linux/types.h>
++#include <linux/regmap.h>
+
+ #include "pcie-designware.h"
+
+ struct dw_plat_pcie {
+- struct dw_pcie *pci;
++ struct dw_pcie *pci;
++ struct regmap *regmap;
++ enum dw_pcie_device_mode mode;
++};
++
++struct dw_plat_pcie_of_data {
++ enum dw_pcie_device_mode mode;
+ };
+
++static const struct of_device_id dw_plat_pcie_of_match[];
++
+ static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
+ {
+ struct pcie_port *pp = arg;
+@@ -52,9 +59,58 @@ static const struct dw_pcie_host_ops dw_
+ .host_init = dw_plat_pcie_host_init,
+ };
+
+-static int dw_plat_add_pcie_port(struct pcie_port *pp,
++static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
++{
++ return 0;
++}
++
++static const struct dw_pcie_ops dw_pcie_ops = {
++ .start_link = dw_plat_pcie_establish_link,
++};
++
++static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ enum pci_barno bar;
++
++ for (bar = BAR_0; bar <= BAR_5; bar++)
++ dw_pcie_ep_reset_bar(pci, bar);
++
++ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
++ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
++}
++
++static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
++
++ switch (type) {
++ case PCI_EPC_IRQ_LEGACY:
++ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
++ case PCI_EPC_IRQ_MSI:
++ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
++ case PCI_EPC_IRQ_MSIX:
++ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
++ default:
++ dev_err(pci->dev, "UNKNOWN IRQ type\n");
++ }
++
++ return 0;
++}
++
++static struct dw_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = dw_plat_pcie_ep_init,
++ .raise_irq = dw_plat_pcie_ep_raise_irq,
++};
++
++static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+ struct platform_device *pdev)
+ {
++ struct dw_pcie *pci = dw_plat_pcie->pci;
++ struct pcie_port *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+@@ -82,15 +138,44 @@ static int dw_plat_add_pcie_port(struct
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+- dev_err(dev, "failed to initialize host\n");
++ dev_err(dev, "Failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+-static const struct dw_pcie_ops dw_pcie_ops = {
+-};
++static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
++ struct platform_device *pdev)
++{
++ int ret;
++ struct dw_pcie_ep *ep;
++ struct resource *res;
++ struct device *dev = &pdev->dev;
++ struct dw_pcie *pci = dw_plat_pcie->pci;
++
++ ep = &pci->ep;
++ ep->ops = &pcie_ep_ops;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
++ pci->dbi_base2 = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pci->dbi_base2))
++ return PTR_ERR(pci->dbi_base2);
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
++ if (!res)
++ return -EINVAL;
++
++ ep->phys_base = res->start;
++ ep->addr_size = resource_size(res);
++
++ ret = dw_pcie_ep_init(ep);
++ if (ret) {
++ dev_err(dev, "Failed to initialize endpoint\n");
++ return ret;
++ }
++ return 0;
++}
+
+ static int dw_plat_pcie_probe(struct platform_device *pdev)
+ {
+@@ -99,6 +184,16 @@ static int dw_plat_pcie_probe(struct pla
+ struct dw_pcie *pci;
+ struct resource *res; /* Resource from DT */
+ int ret;
++ const struct of_device_id *match;
++ const struct dw_plat_pcie_of_data *data;
++ enum dw_pcie_device_mode mode;
++
++ match = of_match_device(dw_plat_pcie_of_match, dev);
++ if (!match)
++ return -EINVAL;
++
++ data = (struct dw_plat_pcie_of_data *)match->data;
++ mode = (enum dw_pcie_device_mode)data->mode;
+
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+ if (!dw_plat_pcie)
+@@ -112,23 +207,59 @@ static int dw_plat_pcie_probe(struct pla
+ pci->ops = &dw_pcie_ops;
+
+ dw_plat_pcie->pci = pci;
++ dw_plat_pcie->mode = mode;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
++ if (!res)
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ platform_set_drvdata(pdev, dw_plat_pcie);
+
+- ret = dw_plat_add_pcie_port(&pci->pp, pdev);
+- if (ret < 0)
+- return ret;
++ switch (dw_plat_pcie->mode) {
++ case DW_PCIE_RC_TYPE:
++ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
++ return -ENODEV;
++
++ ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
++ if (ret < 0)
++ return ret;
++ break;
++ case DW_PCIE_EP_TYPE:
++ if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
++ return -ENODEV;
++
++ ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
++ if (ret < 0)
++ return ret;
++ break;
++ default:
++ dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
++ }
+
+ return 0;
+ }
+
++static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
++ .mode = DW_PCIE_RC_TYPE,
++};
++
++static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
++ .mode = DW_PCIE_EP_TYPE,
++};
++
+ static const struct of_device_id dw_plat_pcie_of_match[] = {
+- { .compatible = "snps,dw-pcie", },
++ {
++ .compatible = "snps,dw-pcie",
++ .data = &dw_plat_pcie_rc_of_data,
++ },
++ {
++ .compatible = "snps,dw-pcie-ep",
++ .data = &dw_plat_pcie_ep_of_data,
++ },
+ {},
+ };
+
+--- a/drivers/pci/dwc/pcie-designware.c
++++ b/drivers/pci/dwc/pcie-designware.c
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #include <linux/delay.h>
+--- a/drivers/pci/dwc/pcie-designware.h
++++ b/drivers/pci/dwc/pcie-designware.h
+@@ -1,3 +1,4 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+@@ -5,10 +6,6 @@
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+ */
+
+ #ifndef _PCIE_DESIGNWARE_H
+@@ -97,15 +94,6 @@
+ #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+
+-#define MSI_MESSAGE_CONTROL 0x52
+-#define MSI_CAP_MMC_SHIFT 1
+-#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
+-#define MSI_CAP_MME_SHIFT 4
+-#define MSI_CAP_MSI_EN_MASK 0x1
+-#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
+-#define MSI_MESSAGE_ADDR_L32 0x54
+-#define MSI_MESSAGE_ADDR_U32 0x58
+-
+ /*
+ * Maximum number of MSI IRQs can be 256 per controller. But keep
+ * it 32 as of now. Probably we will never need more than 32. If needed,
+@@ -114,6 +102,10 @@
+ #define MAX_MSI_IRQS 32
+ #define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+
++/* Maximum number of inbound/outbound iATUs */
++#define MAX_IATU_IN 256
++#define MAX_IATU_OUT 256
++
+ struct pcie_port;
+ struct dw_pcie;
+ struct dw_pcie_ep;
+@@ -181,8 +173,8 @@ enum dw_pcie_as_type {
+
+ struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+- int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
+- u8 interrupt_num);
++ int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num);
+ };
+
+ struct dw_pcie_ep {
+@@ -193,10 +185,14 @@ struct dw_pcie_ep {
+ size_t page_size;
+ u8 bar_to_atu[6];
+ phys_addr_t *outbound_addr;
+- unsigned long ib_window_map;
+- unsigned long ob_window_map;
++ unsigned long *ib_window_map;
++ unsigned long *ob_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
++ void __iomem *msi_mem;
++ phys_addr_t msi_mem_phys;
++ u8 msi_cap; /* MSI capability offset */
++ u8 msix_cap; /* MSI-X capability offset */
+ };
+
+ struct dw_pcie_ops {
+@@ -335,6 +331,12 @@ static inline int dw_pcie_host_init(stru
+ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+ int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+ void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
++int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
++int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num);
++int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num);
++void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+ #else
+ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+ {
+@@ -348,5 +350,26 @@ static inline int dw_pcie_ep_init(struct
+ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+ {
+ }
++
++static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
++{
++ return 0;
++}
++
++static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ return 0;
++}
++
++static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ return 0;
++}
++
++static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
++{
++}
+ #endif
+ #endif /* _PCIE_DESIGNWARE_H */
+--- a/drivers/pci/endpoint/Kconfig
++++ b/drivers/pci/endpoint/Kconfig
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # PCI Endpoint Support
+ #
+--- a/drivers/pci/endpoint/Makefile
++++ b/drivers/pci/endpoint/Makefile
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Makefile for PCI Endpoint Support
+ #
+--- a/drivers/pci/endpoint/functions/Kconfig
++++ b/drivers/pci/endpoint/functions/Kconfig
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # PCI Endpoint Functions
+ #
+--- a/drivers/pci/endpoint/functions/Makefile
++++ b/drivers/pci/endpoint/functions/Makefile
+@@ -1,3 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Makefile for PCI Endpoint Functions
+ #
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/crc32.h>
+@@ -29,13 +18,16 @@
+ #include <linux/pci-epf.h>
+ #include <linux/pci_regs.h>
+
++#define IRQ_TYPE_LEGACY 0
++#define IRQ_TYPE_MSI 1
++#define IRQ_TYPE_MSIX 2
++
+ #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+ #define COMMAND_RAISE_MSI_IRQ BIT(1)
+-#define MSI_NUMBER_SHIFT 2
+-#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
+-#define COMMAND_READ BIT(8)
+-#define COMMAND_WRITE BIT(9)
+-#define COMMAND_COPY BIT(10)
++#define COMMAND_RAISE_MSIX_IRQ BIT(2)
++#define COMMAND_READ BIT(3)
++#define COMMAND_WRITE BIT(4)
++#define COMMAND_COPY BIT(5)
+
+ #define STATUS_READ_SUCCESS BIT(0)
+ #define STATUS_READ_FAIL BIT(1)
+@@ -56,6 +48,7 @@ struct pci_epf_test {
+ struct pci_epf *epf;
+ enum pci_barno test_reg_bar;
+ bool linkup_notifier;
++ bool msix_available;
+ struct delayed_work cmd_handler;
+ };
+
+@@ -67,6 +60,8 @@ struct pci_epf_test_reg {
+ u64 dst_addr;
+ u32 size;
+ u32 checksum;
++ u32 irq_type;
++ u32 irq_number;
+ } __packed;
+
+ static struct pci_epf_header test_header = {
+@@ -81,7 +76,7 @@ struct pci_epf_test_data {
+ bool linkup_notifier;
+ };
+
+-static int bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
++static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
+
+ static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+ {
+@@ -98,43 +93,45 @@ static int pci_epf_test_copy(struct pci_
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+ if (!src_addr) {
+- dev_err(dev, "failed to allocate source address\n");
++ dev_err(dev, "Failed to allocate source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map source address\n");
++ dev_err(dev, "Failed to map source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_src_addr;
+ }
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+ if (!dst_addr) {
+- dev_err(dev, "failed to allocate destination address\n");
++ dev_err(dev, "Failed to allocate destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err_src_map_addr;
+ }
+
+- ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map destination address\n");
++ dev_err(dev, "Failed to map destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_dst_addr;
+ }
+
+ memcpy(dst_addr, src_addr, reg->size);
+
+- pci_epc_unmap_addr(epc, dst_phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);
+
+ err_dst_addr:
+ pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+ err_src_map_addr:
+- pci_epc_unmap_addr(epc, src_phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);
+
+ err_src_addr:
+ pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+@@ -158,15 +155,16 @@ static int pci_epf_test_read(struct pci_
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!src_addr) {
+- dev_err(dev, "failed to allocate address\n");
++ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map address\n");
++ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_addr;
+ }
+@@ -186,7 +184,7 @@ static int pci_epf_test_read(struct pci_
+ kfree(buf);
+
+ err_map_addr:
+- pci_epc_unmap_addr(epc, phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+
+ err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+@@ -209,15 +207,16 @@ static int pci_epf_test_write(struct pci
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!dst_addr) {
+- dev_err(dev, "failed to allocate address\n");
++ dev_err(dev, "Failed to allocate address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+- ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
++ ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
++ reg->size);
+ if (ret) {
+- dev_err(dev, "failed to map address\n");
++ dev_err(dev, "Failed to map address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_addr;
+ }
+@@ -237,12 +236,12 @@ static int pci_epf_test_write(struct pci
+ * wait 1ms inorder for the write to complete. Without this delay L3
+ * error in observed in the host system.
+ */
+- mdelay(1);
++ usleep_range(1000, 2000);
+
+ kfree(buf);
+
+ err_map_addr:
+- pci_epc_unmap_addr(epc, phys_addr);
++ pci_epc_unmap_addr(epc, epf->func_no, phys_addr);
+
+ err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+@@ -251,31 +250,42 @@ err:
+ return ret;
+ }
+
+-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
++static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
++ u16 irq)
+ {
+- u8 msi_count;
+ struct pci_epf *epf = epf_test->epf;
++ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+
+ reg->status |= STATUS_IRQ_RAISED;
+- msi_count = pci_epc_get_msi(epc);
+- if (irq > msi_count || msi_count <= 0)
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+- else
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
++
++ switch (irq_type) {
++ case IRQ_TYPE_LEGACY:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
++ break;
++ case IRQ_TYPE_MSI:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
++ break;
++ case IRQ_TYPE_MSIX:
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
++ break;
++ default:
++ dev_err(dev, "Failed to raise IRQ, unknown type\n");
++ break;
++ }
+ }
+
+ static void pci_epf_test_cmd_handler(struct work_struct *work)
+ {
+ int ret;
+- u8 irq;
+- u8 msi_count;
++ int count;
+ u32 command;
+ struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+ cmd_handler.work);
+ struct pci_epf *epf = epf_test->epf;
++ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+@@ -287,11 +297,14 @@ static void pci_epf_test_cmd_handler(str
+ reg->command = 0;
+ reg->status = 0;
+
+- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
++ if (reg->irq_type > IRQ_TYPE_MSIX) {
++ dev_err(dev, "Failed to detect IRQ type\n");
++ goto reset_handler;
++ }
+
+ if (command & COMMAND_RAISE_LEGACY_IRQ) {
+ reg->status = STATUS_IRQ_RAISED;
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
+ goto reset_handler;
+ }
+
+@@ -301,7 +314,8 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_WRITE_FAIL;
+ else
+ reg->status |= STATUS_WRITE_SUCCESS;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -311,7 +325,8 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_READ_SUCCESS;
+ else
+ reg->status |= STATUS_READ_FAIL;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -321,16 +336,28 @@ static void pci_epf_test_cmd_handler(str
+ reg->status |= STATUS_COPY_SUCCESS;
+ else
+ reg->status |= STATUS_COPY_FAIL;
+- pci_epf_test_raise_irq(epf_test, irq);
++ pci_epf_test_raise_irq(epf_test, reg->irq_type,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+ if (command & COMMAND_RAISE_MSI_IRQ) {
+- msi_count = pci_epc_get_msi(epc);
+- if (irq > msi_count || msi_count <= 0)
++ count = pci_epc_get_msi(epc, epf->func_no);
++ if (reg->irq_number > count || count <= 0)
++ goto reset_handler;
++ reg->status = STATUS_IRQ_RAISED;
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
++ reg->irq_number);
++ goto reset_handler;
++ }
++
++ if (command & COMMAND_RAISE_MSIX_IRQ) {
++ count = pci_epc_get_msix(epc, epf->func_no);
++ if (reg->irq_number > count || count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+- pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
++ pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
++ reg->irq_number);
+ goto reset_handler;
+ }
+
+@@ -351,21 +378,23 @@ static void pci_epf_test_unbind(struct p
+ {
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
++ struct pci_epf_bar *epf_bar;
+ int bar;
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epc_stop(epc);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
++ epf_bar = &epf->bar[bar];
++
+ if (epf_test->reg[bar]) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+- pci_epc_clear_bar(epc, bar);
++ pci_epc_clear_bar(epc, epf->func_no, epf_bar);
+ }
+ }
+ }
+
+ static int pci_epf_test_set_bar(struct pci_epf *epf)
+ {
+- int flags;
+ int bar;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+@@ -374,20 +403,27 @@ static int pci_epf_test_set_bar(struct p
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+
+- flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+- if (sizeof(dma_addr_t) == 0x8)
+- flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+-
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
+- ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+- epf_bar->size, flags);
++
++ epf_bar->flags |= upper_32_bits(epf_bar->size) ?
++ PCI_BASE_ADDRESS_MEM_TYPE_64 :
++ PCI_BASE_ADDRESS_MEM_TYPE_32;
++
++ ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
+ if (ret) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+- dev_err(dev, "failed to set BAR%d\n", bar);
++ dev_err(dev, "Failed to set BAR%d\n", bar);
+ if (bar == test_reg_bar)
+ return ret;
+ }
++ /*
++ * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
++ * if the specific implementation required a 64-bit BAR,
++ * even if we only requested a 32-bit BAR.
++ */
++ if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ bar++;
+ }
+
+ return 0;
+@@ -404,7 +440,7 @@ static int pci_epf_test_alloc_space(stru
+ base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+ test_reg_bar);
+ if (!base) {
+- dev_err(dev, "failed to allocated register space\n");
++ dev_err(dev, "Failed to allocate register space\n");
+ return -ENOMEM;
+ }
+ epf_test->reg[test_reg_bar] = base;
+@@ -414,7 +450,7 @@ static int pci_epf_test_alloc_space(stru
+ continue;
+ base = pci_epf_alloc_space(epf, bar_size[bar], bar);
+ if (!base)
+- dev_err(dev, "failed to allocate space for BAR%d\n",
++ dev_err(dev, "Failed to allocate space for BAR%d\n",
+ bar);
+ epf_test->reg[bar] = base;
+ }
+@@ -433,9 +469,18 @@ static int pci_epf_test_bind(struct pci_
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+- ret = pci_epc_write_header(epc, header);
++ if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER)
++ epf_test->linkup_notifier = false;
++ else
++ epf_test->linkup_notifier = true;
++
++ epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
++
++ epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
++
++ ret = pci_epc_write_header(epc, epf->func_no, header);
+ if (ret) {
+- dev_err(dev, "configuration header write failed\n");
++ dev_err(dev, "Configuration header write failed\n");
+ return ret;
+ }
+
+@@ -447,9 +492,19 @@ static int pci_epf_test_bind(struct pci_
+ if (ret)
+ return ret;
+
+- ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+- if (ret)
++ ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
++ if (ret) {
++ dev_err(dev, "MSI configuration failed\n");
+ return ret;
++ }
++
++ if (epf_test->msix_available) {
++ ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
++ if (ret) {
++ dev_err(dev, "MSI-X configuration failed\n");
++ return ret;
++ }
++ }
+
+ if (!epf_test->linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+@@ -517,7 +572,7 @@ static int __init pci_epf_test_init(void
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&test_driver);
+ if (ret) {
+- pr_err("failed to register pci epf test driver --> %d\n", ret);
++ pr_err("Failed to register pci epf test driver --> %d\n", ret);
+ return ret;
+ }
+
+--- a/drivers/pci/endpoint/pci-ep-cfs.c
++++ b/drivers/pci/endpoint/pci-ep-cfs.c
+@@ -1,35 +1,28 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/module.h>
++#include <linux/idr.h>
+ #include <linux/slab.h>
+
+ #include <linux/pci-epc.h>
+ #include <linux/pci-epf.h>
+ #include <linux/pci-ep-cfs.h>
+
++static DEFINE_IDR(functions_idr);
++static DEFINE_MUTEX(functions_mutex);
+ static struct config_group *functions_group;
+ static struct config_group *controllers_group;
+
+ struct pci_epf_group {
+ struct config_group group;
+ struct pci_epf *epf;
++ int index;
+ };
+
+ struct pci_epc_group {
+@@ -151,7 +144,7 @@ static struct configfs_item_operations p
+ .drop_link = pci_epc_epf_unlink,
+ };
+
+-static struct config_item_type pci_epc_type = {
++static const struct config_item_type pci_epc_type = {
+ .ct_item_ops = &pci_epc_item_ops,
+ .ct_attrs = pci_epc_attrs,
+ .ct_owner = THIS_MODULE,
+@@ -293,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_sh
+ to_pci_epf_group(item)->epf->msi_interrupts);
+ }
+
++static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
++ const char *page, size_t len)
++{
++ u16 val;
++ int ret;
++
++ ret = kstrtou16(page, 0, &val);
++ if (ret)
++ return ret;
++
++ to_pci_epf_group(item)->epf->msix_interrupts = val;
++
++ return len;
++}
++
++static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
++ char *page)
++{
++ return sprintf(page, "%d\n",
++ to_pci_epf_group(item)->epf->msix_interrupts);
++}
++
+ PCI_EPF_HEADER_R(vendorid)
+ PCI_EPF_HEADER_W_u16(vendorid)
+
+@@ -334,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id
+ CONFIGFS_ATTR(pci_epf_, subsys_id);
+ CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+ CONFIGFS_ATTR(pci_epf_, msi_interrupts);
++CONFIGFS_ATTR(pci_epf_, msix_interrupts);
+
+ static struct configfs_attribute *pci_epf_attrs[] = {
+ &pci_epf_attr_vendorid,
+@@ -347,6 +363,7 @@ static struct configfs_attribute *pci_ep
+ &pci_epf_attr_subsys_id,
+ &pci_epf_attr_interrupt_pin,
+ &pci_epf_attr_msi_interrupts,
++ &pci_epf_attr_msix_interrupts,
+ NULL,
+ };
+
+@@ -354,6 +371,9 @@ static void pci_epf_release(struct confi
+ {
+ struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
++ mutex_lock(&functions_mutex);
++ idr_remove(&functions_idr, epf_group->index);
++ mutex_unlock(&functions_mutex);
+ pci_epf_destroy(epf_group->epf);
+ kfree(epf_group);
+ }
+@@ -362,7 +382,7 @@ static struct configfs_item_operations p
+ .release = pci_epf_release,
+ };
+
+-static struct config_item_type pci_epf_type = {
++static const struct config_item_type pci_epf_type = {
+ .ct_item_ops = &pci_epf_ops,
+ .ct_attrs = pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+@@ -373,22 +393,57 @@ static struct config_group *pci_epf_make
+ {
+ struct pci_epf_group *epf_group;
+ struct pci_epf *epf;
++ char *epf_name;
++ int index, err;
+
+ epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+ if (!epf_group)
+ return ERR_PTR(-ENOMEM);
+
++ mutex_lock(&functions_mutex);
++ index = idr_alloc(&functions_idr, epf_group, 0, 0, GFP_KERNEL);
++ mutex_unlock(&functions_mutex);
++ if (index < 0) {
++ err = index;
++ goto free_group;
++ }
++
++ epf_group->index = index;
++
+ config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+- epf = pci_epf_create(group->cg_item.ci_name);
++ epf_name = kasprintf(GFP_KERNEL, "%s.%d",
++ group->cg_item.ci_name, epf_group->index);
++ if (!epf_name) {
++ err = -ENOMEM;
++ goto remove_idr;
++ }
++
++ epf = pci_epf_create(epf_name);
+ if (IS_ERR(epf)) {
+ pr_err("failed to create endpoint function device\n");
+- return ERR_PTR(-EINVAL);
++ err = -EINVAL;
++ goto free_name;
+ }
+
+ epf_group->epf = epf;
+
++ kfree(epf_name);
++
+ return &epf_group->group;
++
++free_name:
++ kfree(epf_name);
++
++remove_idr:
++ mutex_lock(&functions_mutex);
++ idr_remove(&functions_idr, epf_group->index);
++ mutex_unlock(&functions_mutex);
++
++free_group:
++ kfree(epf_group);
++
++ return ERR_PTR(err);
+ }
+
+ static void pci_epf_drop(struct config_group *group, struct config_item *item)
+@@ -401,7 +456,7 @@ static struct configfs_group_operations
+ .drop_item = &pci_epf_drop,
+ };
+
+-static struct config_item_type pci_epf_group_type = {
++static const struct config_item_type pci_epf_group_type = {
+ .ct_group_ops = &pci_epf_group_ops,
+ .ct_owner = THIS_MODULE,
+ };
+@@ -429,15 +484,15 @@ void pci_ep_cfs_remove_epf_group(struct
+ }
+ EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+-static struct config_item_type pci_functions_type = {
++static const struct config_item_type pci_functions_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+-static struct config_item_type pci_controllers_type = {
++static const struct config_item_type pci_controllers_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+-static struct config_item_type pci_ep_type = {
++static const struct config_item_type pci_ep_type = {
+ .ct_owner = THIS_MODULE,
+ };
+
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/device.h>
+@@ -141,25 +130,26 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
+ /**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+- * @type: specify the type of interrupt; legacy or MSI
+- * @interrupt_num: the MSI interrupt number
++ * @func_no: the endpoint function number in the EPC device
++ * @type: specify the type of interrupt; legacy, MSI or MSI-X
++ * @interrupt_num: the MSI or MSI-X interrupt number
+ *
+- * Invoke to raise an MSI or legacy interrupt
++ * Invoke to raise an legacy, MSI or MSI-X interrupt
+ */
+-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+- u8 interrupt_num)
++int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->raise_irq(epc, type, interrupt_num);
++ ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -169,22 +159,23 @@ EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
+ /**
+ * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
+ * @epc: the EPC device to which MSI interrupts was requested
++ * @func_no: the endpoint function number in the EPC device
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+-int pci_epc_get_msi(struct pci_epc *epc)
++int pci_epc_get_msi(struct pci_epc *epc, u8 func_no)
+ {
+ int interrupt;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return 0;
+
+ if (!epc->ops->get_msi)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- interrupt = epc->ops->get_msi(epc);
++ interrupt = epc->ops->get_msi(epc, func_no);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+@@ -199,17 +190,19 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+ /**
+ * pci_epc_set_msi() - set the number of MSI interrupt numbers required
+ * @epc: the EPC device on which MSI has to be configured
++ * @func_no: the endpoint function number in the EPC device
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+-int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
++int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+ {
+ int ret;
+ u8 encode_int;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ interrupts > 32)
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+@@ -218,7 +211,7 @@ int pci_epc_set_msi(struct pci_epc *epc,
+ encode_int = order_base_2(interrupts);
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->set_msi(epc, encode_int);
++ ret = epc->ops->set_msi(epc, func_no, encode_int);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -226,24 +219,83 @@ int pci_epc_set_msi(struct pci_epc *epc,
+ EXPORT_SYMBOL_GPL(pci_epc_set_msi);
+
+ /**
++ * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
++ * @epc: the EPC device to which MSI-X interrupts was requested
++ * @func_no: the endpoint function number in the EPC device
++ *
++ * Invoke to get the number of MSI-X interrupts allocated by the RC
++ */
++int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
++{
++ int interrupt;
++ unsigned long flags;
++
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
++ return 0;
++
++ if (!epc->ops->get_msix)
++ return 0;
++
++ spin_lock_irqsave(&epc->lock, flags);
++ interrupt = epc->ops->get_msix(epc, func_no);
++ spin_unlock_irqrestore(&epc->lock, flags);
++
++ if (interrupt < 0)
++ return 0;
++
++ return interrupt + 1;
++}
++EXPORT_SYMBOL_GPL(pci_epc_get_msix);
++
++/**
++ * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
++ * @epc: the EPC device on which MSI-X has to be configured
++ * @func_no: the endpoint function number in the EPC device
++ * @interrupts: number of MSI-X interrupts required by the EPF
++ *
++ * Invoke to set the required number of MSI-X interrupts.
++ */
++int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
++{
++ int ret;
++ unsigned long flags;
++
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ interrupts < 1 || interrupts > 2048)
++ return -EINVAL;
++
++ if (!epc->ops->set_msix)
++ return 0;
++
++ spin_lock_irqsave(&epc->lock, flags);
++ ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
++ spin_unlock_irqrestore(&epc->lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(pci_epc_set_msix);
++
++/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
++ * @func_no: the endpoint function number in the EPC device
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+-void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
++void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr)
+ {
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return;
+
+ if (!epc->ops->unmap_addr)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- epc->ops->unmap_addr(epc, phys_addr);
++ epc->ops->unmap_addr(epc, func_no, phys_addr);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+@@ -251,26 +303,27 @@ EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+ /**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
++ * @func_no: the endpoint function number in the EPC device
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+-int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+- u64 pci_addr, size_t size)
++int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr, u64 pci_addr, size_t size)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
++ ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+@@ -280,22 +333,26 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+ /**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+- * @bar: the BAR number that has to be reset
++ * @func_no: the endpoint function number in the EPC device
++ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+-void pci_epc_clear_bar(struct pci_epc *epc, int bar)
++void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ (epf_bar->barno == BAR_5 &&
++ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ return;
+
+ if (!epc->ops->clear_bar)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- epc->ops->clear_bar(epc, bar);
++ epc->ops->clear_bar(epc, func_no, epf_bar);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+@@ -303,26 +360,32 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+ /**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+- * @bar: the BAR number that has to be configured
+- * @size: the size of the addr space
+- * @flags: specify memory allocation/io allocation/32bit address/64 bit address
++ * @func_no: the endpoint function number in the EPC device
++ * @epf_bar: the struct epf_bar that contains the BAR information
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+-int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags)
++int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
+ {
+ int ret;
+ unsigned long irq_flags;
++ int flags = epf_bar->flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
++ (epf_bar->barno == BAR_5 &&
++ flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
++ (flags & PCI_BASE_ADDRESS_SPACE_IO &&
++ flags & PCI_BASE_ADDRESS_IO_MASK) ||
++ (upper_32_bits(epf_bar->size) &&
++ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
+ return -EINVAL;
+
+ if (!epc->ops->set_bar)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, irq_flags);
+- ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
++ ret = epc->ops->set_bar(epc, func_no, epf_bar);
+ spin_unlock_irqrestore(&epc->lock, irq_flags);
+
+ return ret;
+@@ -332,6 +395,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+ /**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
++ * @func_no: the endpoint function number in the EPC device
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+@@ -339,19 +403,20 @@ EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+-int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
++int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_header *header)
+ {
+ int ret;
+ unsigned long flags;
+
+- if (IS_ERR(epc))
++ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->write_header)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+- ret = epc->ops->write_header(epc, header);
++ ret = epc->ops->write_header(epc, func_no, header);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+--- a/drivers/pci/endpoint/pci-epc-mem.c
++++ b/drivers/pci/endpoint/pci-epc-mem.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/io.h>
+--- a/drivers/pci/endpoint/pci-epf-core.c
++++ b/drivers/pci/endpoint/pci-epf-core.c
+@@ -1,20 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+ #include <linux/device.h>
+@@ -26,6 +15,8 @@
+ #include <linux/pci-epf.h>
+ #include <linux/pci-ep-cfs.h>
+
++static DEFINE_MUTEX(pci_epf_mutex);
++
+ static struct bus_type pci_epf_bus_type;
+ static const struct device_type pci_epf_type;
+
+@@ -109,6 +100,8 @@ void pci_epf_free_space(struct pci_epf *
+
+ epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].size = 0;
++ epf->bar[bar].barno = 0;
++ epf->bar[bar].flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+@@ -137,11 +130,27 @@ void *pci_epf_alloc_space(struct pci_epf
+
+ epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].size = size;
++ epf->bar[bar].barno = bar;
++ epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ return space;
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+
++static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
++{
++ struct config_group *group, *tmp;
++
++ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
++ return;
++
++ mutex_lock(&pci_epf_mutex);
++ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
++ pci_ep_cfs_remove_epf_group(group);
++ list_del(&driver->epf_group);
++ mutex_unlock(&pci_epf_mutex);
++}
++
+ /**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+@@ -150,11 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+ */
+ void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+ {
+- pci_ep_cfs_remove_epf_group(driver->group);
++ pci_epf_remove_cfs(driver);
+ driver_unregister(&driver->driver);
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
++static int pci_epf_add_cfs(struct pci_epf_driver *driver)
++{
++ struct config_group *group;
++ const struct pci_epf_device_id *id;
++
++ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
++ return 0;
++
++ INIT_LIST_HEAD(&driver->epf_group);
++
++ id = driver->id_table;
++ while (id->name[0]) {
++ group = pci_ep_cfs_add_epf_group(id->name);
++ if (IS_ERR(group)) {
++ pci_epf_remove_cfs(driver);
++ return PTR_ERR(group);
++ }
++
++ mutex_lock(&pci_epf_mutex);
++ list_add_tail(&group->group_entry, &driver->epf_group);
++ mutex_unlock(&pci_epf_mutex);
++ id++;
++ }
++
++ return 0;
++}
++
+ /**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+@@ -180,7 +216,7 @@ int __pci_epf_register_driver(struct pci
+ if (ret)
+ return ret;
+
+- driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
++ pci_epf_add_cfs(driver);
+
+ return 0;
+ }
+@@ -211,29 +247,17 @@ struct pci_epf *pci_epf_create(const cha
+ int ret;
+ struct pci_epf *epf;
+ struct device *dev;
+- char *func_name;
+- char *buf;
++ int len;
+
+ epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+- if (!epf) {
+- ret = -ENOMEM;
+- goto err_ret;
+- }
++ if (!epf)
++ return ERR_PTR(-ENOMEM);
+
+- buf = kstrdup(name, GFP_KERNEL);
+- if (!buf) {
+- ret = -ENOMEM;
+- goto free_epf;
+- }
+-
+- func_name = buf;
+- buf = strchrnul(buf, '.');
+- *buf = '\0';
+-
+- epf->name = kstrdup(func_name, GFP_KERNEL);
++ len = strchrnul(name, '.') - name;
++ epf->name = kstrndup(name, len, GFP_KERNEL);
+ if (!epf->name) {
+- ret = -ENOMEM;
+- goto free_func_name;
++ kfree(epf);
++ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = &epf->dev;
+@@ -242,28 +266,18 @@ struct pci_epf *pci_epf_create(const cha
+ dev->type = &pci_epf_type;
+
+ ret = dev_set_name(dev, "%s", name);
+- if (ret)
+- goto put_dev;
++ if (ret) {
++ put_device(dev);
++ return ERR_PTR(ret);
++ }
+
+ ret = device_add(dev);
+- if (ret)
+- goto put_dev;
++ if (ret) {
++ put_device(dev);
++ return ERR_PTR(ret);
++ }
+
+- kfree(func_name);
+ return epf;
+-
+-put_dev:
+- put_device(dev);
+- kfree(epf->name);
+-
+-free_func_name:
+- kfree(func_name);
+-
+-free_epf:
+- kfree(epf);
+-
+-err_ret:
+- return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL_GPL(pci_epf_create);
+
+--- a/drivers/pci/host/pci-host-common.c
++++ b/drivers/pci/host/pci-host-common.c
+@@ -113,9 +113,7 @@ err_out:
+ int pci_host_common_probe(struct platform_device *pdev,
+ struct pci_ecam_ops *ops)
+ {
+- const char *type;
+ struct device *dev = &pdev->dev;
+- struct device_node *np = dev->of_node;
+ struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+@@ -126,12 +124,6 @@ int pci_host_common_probe(struct platfor
+ if (!bridge)
+ return -ENOMEM;
+
+- type = of_get_property(np, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+-
+ of_pci_check_probe_only();
+
+ /* Parse and map our Configuration Space windows */
+--- a/drivers/pci/host/pcie-xilinx-nwl.c
++++ b/drivers/pci/host/pcie-xilinx-nwl.c
+@@ -779,16 +779,7 @@ static int nwl_pcie_parse_dt(struct nwl_
+ struct platform_device *pdev)
+ {
+ struct device *dev = pcie->dev;
+- struct device_node *node = dev->of_node;
+ struct resource *res;
+- const char *type;
+-
+- /* Check for device type */
+- type = of_get_property(node, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ pcie->breg_base = devm_ioremap_resource(dev, res);
+--- a/drivers/pci/host/pcie-xilinx.c
++++ b/drivers/pci/host/pcie-xilinx.c
+@@ -576,15 +576,8 @@ static int xilinx_pcie_parse_dt(struct x
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+- const char *type;
+ int err;
+
+- type = of_get_property(node, "device_type", NULL);
+- if (!type || strcmp(type, "pci")) {
+- dev_err(dev, "invalid \"device_type\" %s\n", type);
+- return -EINVAL;
+- }
+-
+ err = of_address_to_resource(node, 0, ®s);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+--- /dev/null
++++ b/drivers/pci/mobiveil/Kconfig
+@@ -0,0 +1,50 @@
++# SPDX-License-Identifier: GPL-2.0
++
++menu "Mobiveil PCIe Core Support"
++ depends on PCI
++
++config PCIE_MOBIVEIL
++ bool
++
++config PCIE_MOBIVEIL_HOST
++ bool
++ depends on PCI_MSI_IRQ_DOMAIN
++ select PCIE_MOBIVEIL
++
++config PCIE_MOBIVEIL_EP
++ bool
++ depends on PCI_ENDPOINT
++ select PCIE_MOBIVEIL
++
++config PCIE_MOBIVEIL_PLAT
++ bool "Mobiveil AXI PCIe controller"
++ depends on ARCH_ZYNQMP || COMPILE_TEST
++ depends on OF
++ select PCIE_MOBIVEIL_HOST
++ help
++ Say Y here if you want to enable support for the Mobiveil AXI PCIe
++ Soft IP. It has up to 8 outbound and inbound windows
++ for address translation and it is a PCIe Gen4 IP.
++
++config PCI_LAYERSCAPE_GEN4
++ bool "Freescale Layerscape PCIe Gen4 controller in RC mode"
++ depends on PCI
++ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
++ depends on PCI_MSI_IRQ_DOMAIN
++ select PCIE_MOBIVEIL_HOST
++ help
++ Say Y here if you want PCIe Gen4 controller support on
++ Layerscape SoCs. And the PCIe controller work in RC mode
++ by setting the RCW[HOST_AGT_PEX] to 0.
++
++config PCI_LAYERSCAPE_GEN4_EP
++ bool "Freescale Layerscape PCIe Gen4 controller in EP mode"
++ depends on PCI
++ depends on OF && (ARM64 || ARCH_LAYERSCAPE)
++ depends on PCI_ENDPOINT
++ select PCIE_MOBIVEIL_EP
++ help
++ Say Y here if you want PCIe Gen4 controller support on
++ Layerscape SoCs. The PCIe controller works in EP mode
++ by setting the RCW[HOST_AGT_PEX] to 1.
++endmenu
+--- /dev/null
++++ b/drivers/pci/mobiveil/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
++obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o
++obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o
++obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o
++obj-$(CONFIG_PCI_LAYERSCAPE_GEN4) += pci-layerscape-gen4.o
++obj-$(CONFIG_PCI_LAYERSCAPE_GEN4_EP) += pci-layerscape-gen4-ep.o
+--- /dev/null
++++ b/drivers/pci/mobiveil/pci-layerscape-gen4-ep.c
+@@ -0,0 +1,178 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe controller EP driver for Freescale Layerscape SoCs
++ *
++ * Copyright (C) 2018 NXP Semiconductor.
++ *
++ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++
++#include "pcie-mobiveil.h"
++
++struct ls_pcie_g4_ep {
++ struct mobiveil_pcie *mv_pci;
++};
++
++#define to_ls_pcie_g4_ep(x) dev_get_drvdata((x)->dev)
++
++static const struct of_device_id ls_pcie_g4_ep_of_match[] = {
++ { .compatible = "fsl,lx2160a-pcie-ep",},
++ { },
++};
++
++static void ls_pcie_g4_get_bar_num(struct mobiveil_pcie_ep *ep)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++ u32 type, reg;
++ u8 bar;
++
++ ep->bar_num = BAR_5 + 1;
++
++ for (bar = BAR_0; bar <= BAR_5; bar++) {
++ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
++ type = csr_readl(mv_pci, reg) &
++ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
++ if (type & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ ep->bar_num--;
++ }
++}
++
++static void ls_pcie_g4_ep_init(struct mobiveil_pcie_ep *ep)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ enum pci_barno bar;
++ int win_idx, val;
++
++ /*
++ * Errata: unsupported request error on inbound posted write
++ * transaction, PCIe controller reports advisory error instead
++ * of uncorrectable error message to RC.
++ * workaround: set the bit20(unsupported_request_Error_severity) with
++ * value 1 in uncorrectable_Error_Severity_Register, make the
++ * unsupported request error generate the fatal error.
++ */
++ val = csr_readl(mv_pci, CFG_UNCORRECTABLE_ERROR_SEVERITY);
++ val |= 1 << UNSUPPORTED_REQUEST_ERROR_SHIFT;
++ csr_writel(mv_pci, val, CFG_UNCORRECTABLE_ERROR_SEVERITY);
++
++ ls_pcie_g4_get_bar_num(ep);
++
++ for (bar = BAR_0; bar < (ep->bar_num * ep->pf_num); bar++)
++ mobiveil_pcie_ep_reset_bar(mv_pci, bar);
++
++ for (win_idx = 0; win_idx < MAX_IATU_OUT; win_idx++)
++ mobiveil_pcie_disable_ob_win(mv_pci, win_idx);
++
++ epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
++ epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
++}
++
++static int ls_pcie_g4_ep_raise_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie *mv_pci = to_mobiveil_pcie_from_ep(ep);
++
++ switch (type) {
++ case PCI_EPC_IRQ_LEGACY:
++ return mobiveil_pcie_ep_raise_legacy_irq(ep, func_no);
++ case PCI_EPC_IRQ_MSI:
++ return mobiveil_pcie_ep_raise_msi_irq(ep, func_no,
++ interrupt_num);
++ case PCI_EPC_IRQ_MSIX:
++ return mobiveil_pcie_ep_raise_msix_irq(ep, func_no,
++ interrupt_num);
++ default:
++ dev_err(&mv_pci->pdev->dev, "UNKNOWN IRQ type\n");
++ }
++
++ return 0;
++}
++
++static struct mobiveil_pcie_ep_ops pcie_ep_ops = {
++ .ep_init = ls_pcie_g4_ep_init,
++ .raise_irq = ls_pcie_g4_ep_raise_irq,
++};
++
++static int __init ls_pcie_gen4_add_pcie_ep(struct ls_pcie_g4_ep *ls_pcie_g4_ep,
++ struct platform_device *pdev)
++{
++ struct mobiveil_pcie *mv_pci = ls_pcie_g4_ep->mv_pci;
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie_ep *ep;
++ struct resource *res;
++ int ret;
++ struct device_node *np = dev->of_node;
++
++ ep = &mv_pci->ep;
++ ep->ops = &pcie_ep_ops;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
++ if (!res)
++ return -EINVAL;
++
++ ep->phys_base = res->start;
++ ep->addr_size = resource_size(res);
++
++ ret = of_property_read_u32(np, "max-functions", &ep->pf_num);
++ if (ret < 0)
++ ep->pf_num = 1;
++
++ ret = mobiveil_pcie_ep_init(ep);
++ if (ret) {
++ dev_err(dev, "failed to initialize endpoint\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int __init ls_pcie_g4_ep_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie *mv_pci;
++ struct ls_pcie_g4_ep *ls_pcie_g4_ep;
++ struct resource *res;
++ int ret;
++
++ ls_pcie_g4_ep = devm_kzalloc(dev, sizeof(*ls_pcie_g4_ep), GFP_KERNEL);
++ if (!ls_pcie_g4_ep)
++ return -ENOMEM;
++
++ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
++ if (!mv_pci)
++ return -ENOMEM;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
++ mv_pci->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(mv_pci->csr_axi_slave_base))
++ return PTR_ERR(mv_pci->csr_axi_slave_base);
++
++ mv_pci->pdev = pdev;
++ ls_pcie_g4_ep->mv_pci = mv_pci;
++
++ platform_set_drvdata(pdev, ls_pcie_g4_ep);
++
++ ret = ls_pcie_gen4_add_pcie_ep(ls_pcie_g4_ep, pdev);
++
++ return ret;
++}
++
++static struct platform_driver ls_pcie_g4_ep_driver = {
++ .driver = {
++ .name = "layerscape-pcie-gen4-ep",
++ .of_match_table = ls_pcie_g4_ep_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++builtin_platform_driver_probe(ls_pcie_g4_ep_driver, ls_pcie_g4_ep_probe);
+--- /dev/null
++++ b/drivers/pci/mobiveil/pci-layerscape-gen4.c
+@@ -0,0 +1,292 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for NXP Layerscape SoCs
++ *
++ * Copyright 2018 NXP
++ *
++ * Author: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/of_pci.h>
++#include <linux/of_platform.h>
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/resource.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++
++#include "pcie-mobiveil.h"
++
++/* LUT and PF control registers */
++#define PCIE_LUT_OFF (0x80000)
++#define PCIE_LUT_GCR (0x28)
++#define PCIE_LUT_GCR_RRE (0)
++
++#define PCIE_PF_OFF (0xc0000)
++#define PCIE_PF_INT_STAT (0x18)
++#define PF_INT_STAT_PABRST (31)
++
++#define PCIE_PF_DBG (0x7fc)
++#define PF_DBG_LTSSM_MASK (0x3f)
++#define PF_DBG_WE (31)
++#define PF_DBG_PABR (27)
++
++#define LS_PCIE_G4_LTSSM_L0 0x2d /* L0 state */
++
++#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
++
++struct ls_pcie_g4 {
++ struct mobiveil_pcie *pci;
++ struct delayed_work dwork;
++ int irq;
++};
++
++static inline u32 ls_pcie_g4_lut_readl(struct ls_pcie_g4 *pcie, u32 off)
++{
++ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
++}
++
++static inline void ls_pcie_g4_lut_writel(struct ls_pcie_g4 *pcie,
++ u32 off, u32 val)
++{
++ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_LUT_OFF + off);
++}
++
++static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
++{
++ return ioread32(pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
++}
++
++static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
++ u32 off, u32 val)
++{
++ iowrite32(val, pcie->pci->csr_axi_slave_base + PCIE_PF_OFF + off);
++}
++
++static bool ls_pcie_g4_is_bridge(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 header_type;
++
++ header_type = csr_readb(mv_pci, PCI_HEADER_TYPE);
++ header_type &= 0x7f;
++
++ return header_type == PCI_HEADER_TYPE_BRIDGE;
++}
++
++static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
++{
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
++ u32 state;
++
++ state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ state = state & PF_DBG_LTSSM_MASK;
++
++ if (state == LS_PCIE_G4_LTSSM_L0)
++ return 1;
++
++ return 0;
++}
++
++static void ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val, act_stat;
++ int to = 100;
++
++ /* Poll for pab_csb_reset to set and PAB activity to clear */
++ do {
++ usleep_range(10, 15);
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
++ act_stat = csr_readl(mv_pci, PAB_ACTIVITY_STAT);
++ } while (((val & 1 << PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
++ if (to < 0) {
++ dev_err(&mv_pci->pdev->dev, "poll PABRST&PABACT timeout\n");
++ return;
++ }
++
++ /* clear PEX_RESET bit in PEX_PF0_DBG register */
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val |= 1 << PF_DBG_WE;
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val |= 1 << PF_DBG_PABR;
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
++ val &= ~(1 << PF_DBG_WE);
++ ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
++
++ mobiveil_host_init(mv_pci, true);
++
++ to = 100;
++ while (!ls_pcie_g4_link_up(mv_pci) && to--)
++ usleep_range(200, 250);
++ if (to < 0)
++		dev_err(&mv_pci->pdev->dev, "PCIe link training timeout\n");
++}
++
++static irqreturn_t ls_pcie_g4_handler(int irq, void *dev_id)
++{
++ struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val;
++
++ val = csr_readl(mv_pci, PAB_INTP_AMBA_MISC_STAT);
++ if (!val)
++ return IRQ_NONE;
++
++ if (val & PAB_INTP_RESET)
++ schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
++
++ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_STAT);
++
++ return IRQ_HANDLED;
++}
++
++static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
++{
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
++ u32 val;
++ int ret;
++
++ pcie->irq = platform_get_irq_byname(mv_pci->pdev, "intr");
++ if (pcie->irq < 0) {
++ dev_err(&mv_pci->pdev->dev, "Can't get 'intr' irq.\n");
++ return pcie->irq;
++ }
++ ret = devm_request_irq(&mv_pci->pdev->dev, pcie->irq,
++ ls_pcie_g4_handler, IRQF_SHARED,
++ mv_pci->pdev->name, pcie);
++ if (ret) {
++ dev_err(&mv_pci->pdev->dev, "Can't register PCIe IRQ.\n");
++ return ret;
++ }
++
++ /* Enable interrupts */
++ val = PAB_INTP_INTX_MASK | PAB_INTP_MSI | PAB_INTP_RESET |
++ PAB_INTP_PCIE_UE | PAB_INTP_IE_PMREDI | PAB_INTP_IE_EC;
++ csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
++
++ return 0;
++}
++
++static void ls_pcie_g4_reset(struct work_struct *work)
++{
++ struct delayed_work *dwork = container_of(work, struct delayed_work,
++ work);
++ struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u16 ctrl;
++
++ ctrl = csr_readw(mv_pci, PCI_BRIDGE_CONTROL);
++ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
++ csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
++ ls_pcie_g4_reinit_hw(pcie);
++}
++
++static int ls_pcie_g4_read_other_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct mobiveil_pcie *pci = bus->sysdata;
++ struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
++ int ret;
++
++ if (where == PCI_VENDOR_ID)
++ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
++ 0 << PCIE_LUT_GCR_RRE);
++
++ ret = pci_generic_config_read(bus, devfn, where, size, val);
++
++ if (where == PCI_VENDOR_ID)
++ ls_pcie_g4_lut_writel(pcie, PCIE_LUT_GCR,
++ 1 << PCIE_LUT_GCR_RRE);
++
++ return ret;
++}
++
++static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
++ .interrupt_init = ls_pcie_g4_interrupt_init,
++ .read_other_conf = ls_pcie_g4_read_other_conf,
++};
++
++static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
++ .link_up = ls_pcie_g4_link_up,
++};
++
++static void workaround_tkt381274(struct ls_pcie_g4 *pcie)
++{
++ struct mobiveil_pcie *mv_pci = pcie->pci;
++ u32 val;
++
++ /* Set ACK latency timeout */
++ val = csr_readl(mv_pci, GPEX_ACK_REPLAY_TO);
++ val &= ~(ACK_LAT_TO_VAL_MASK << ACK_LAT_TO_VAL_SHIFT);
++ val |= (4 << ACK_LAT_TO_VAL_SHIFT);
++ csr_writel(mv_pci, val, GPEX_ACK_REPLAY_TO);
++}
++
++static int __init ls_pcie_g4_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mobiveil_pcie *mv_pci;
++ struct ls_pcie_g4 *pcie;
++ struct device_node *np = dev->of_node;
++ int ret;
++
++ if (!of_parse_phandle(np, "msi-parent", 0)) {
++ dev_err(dev, "failed to find msi-parent\n");
++ return -EINVAL;
++ }
++
++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ mv_pci = devm_kzalloc(dev, sizeof(*mv_pci), GFP_KERNEL);
++ if (!mv_pci)
++ return -ENOMEM;
++
++ mv_pci->pdev = pdev;
++ mv_pci->ops = &ls_pcie_g4_pab_ops;
++ mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
++ pcie->pci = mv_pci;
++
++ platform_set_drvdata(pdev, pcie);
++
++ INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
++
++ ret = mobiveil_pcie_host_probe(mv_pci);
++ if (ret) {
++ dev_err(dev, "fail to probe!\n");
++ return ret;
++ }
++
++ if (!ls_pcie_g4_is_bridge(pcie))
++ return -ENODEV;
++
++ workaround_tkt381274(pcie);
++
++ return 0;
++}
++
++static const struct of_device_id ls_pcie_g4_of_match[] = {
++ { .compatible = "fsl,lx2160a-pcie", },
++ { },
++};
++
++static struct platform_driver ls_pcie_g4_driver = {
++ .driver = {
++ .name = "layerscape-pcie-gen4",
++ .of_match_table = ls_pcie_g4_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-ep.c
+@@ -0,0 +1,512 @@
++// SPDX-License-Identifier: GPL-2.0
++/**
++ * Mobiveil PCIe Endpoint controller driver
++ *
++ * Copyright (C) 2018 NXP Semiconductor.
++ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
++ */
++
++#include <linux/of.h>
++#include <linux/pci-epc.h>
++#include <linux/pci-epf.h>
++#include <linux/platform_device.h>
++#include "pcie-mobiveil.h"
++
++void mobiveil_pcie_ep_linkup(struct mobiveil_pcie_ep *ep)
++{
++ struct pci_epc *epc = ep->epc;
++
++ pci_epc_linkup(epc);
++}
++
++static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
++ enum pci_barno bar)
++{
++ csr_writel(pcie, bar, GPEX_BAR_SELECT);
++ csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW);
++ csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW);
++}
++
++void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie,
++ enum pci_barno bar)
++{
++ __mobiveil_pcie_ep_reset_bar(pcie, bar);
++}
++
++static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie,
++ u8 cap_ptr, u8 cap)
++{
++ u8 cap_id, next_cap_ptr;
++ u16 reg;
++
++ reg = csr_readw(pcie, cap_ptr);
++ next_cap_ptr = (reg & 0xff00) >> 8;
++ cap_id = (reg & 0x00ff);
++
++ if (cap_id == cap)
++ return cap_ptr;
++
++ if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
++ return 0;
++
++ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
++}
++
++static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie *pcie,
++ u8 cap)
++{
++ u8 next_cap_ptr;
++ u16 reg;
++
++ reg = csr_readw(pcie, PCI_CAPABILITY_LIST);
++ next_cap_ptr = (reg & 0x00ff);
++
++ if (!next_cap_ptr)
++ return 0;
++
++ return __mobiveil_pcie_ep_find_next_cap(pcie, next_cap_ptr, cap);
++}
++
++static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_header *hdr)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID);
++ csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID);
++ csr_writeb(pcie, hdr->revid, PCI_REVISION_ID);
++ csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG);
++ csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8,
++ PCI_CLASS_DEVICE);
++ csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE);
++ csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID);
++ csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID);
++ csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_inbound_atu(struct mobiveil_pcie_ep *ep,
++ u8 func_no, enum pci_barno bar,
++ dma_addr_t cpu_addr)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ program_ib_windows_ep(pcie, func_no, bar, cpu_addr);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_outbound_atu(struct mobiveil_pcie_ep *ep,
++ phys_addr_t phys_addr,
++ u64 pci_addr, u8 func_no,
++ size_t size)
++{
++ int ret;
++ u32 free_win;
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
++ if (free_win >= ep->num_ob_windows) {
++ dev_err(&pcie->pdev->dev, "No free outbound window\n");
++ return -EINVAL;
++ }
++
++ ret = program_ob_windows_ep(pcie, free_win, MEM_WINDOW_TYPE,
++ phys_addr, pci_addr, func_no, size);
++ if (ret < 0) {
++		dev_err(&pcie->pdev->dev, "Failed to program OB window\n");
++ return ret;
++ }
++
++ set_bit(free_win, ep->ob_window_map);
++ ep->outbound_addr[free_win] = phys_addr;
++
++ return 0;
++}
++
++static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++
++ if (bar < ep->bar_num) {
++ __mobiveil_pcie_ep_reset_bar(pcie,
++ func_no * ep->bar_num + bar);
++
++ mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar);
++ }
++}
++
++static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar)
++{
++ int ret;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ enum pci_barno bar = epf_bar->barno;
++ size_t size = epf_bar->size;
++
++ if (bar < ep->bar_num) {
++ ret = mobiveil_pcie_ep_inbound_atu(ep, func_no, bar,
++ epf_bar->phys_addr);
++ if (ret)
++ return ret;
++
++ csr_writel(pcie, func_no * ep->bar_num + bar,
++ GPEX_BAR_SELECT);
++ csr_writel(pcie, lower_32_bits(~(size - 1)),
++ GPEX_BAR_SIZE_LDW);
++ csr_writel(pcie, upper_32_bits(~(size - 1)),
++ GPEX_BAR_SIZE_UDW);
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep,
++ phys_addr_t addr,
++ u32 *atu_index)
++{
++ u32 index;
++
++ for (index = 0; index < ep->num_ob_windows; index++) {
++ if (ep->outbound_addr[index] != addr)
++ continue;
++ *atu_index = index;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr)
++{
++ int ret;
++ u32 atu_index;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ ret = mobiveil_pcie_find_index(ep, addr, &atu_index);
++ if (ret < 0)
++ return;
++
++ mobiveil_pcie_disable_ob_win(pcie, atu_index);
++ clear_bit(atu_index, ep->ob_window_map);
++}
++
++static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr,
++ u64 pci_addr, size_t size)
++{
++ int ret;
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ ret = mobiveil_pcie_ep_outbound_atu(ep, addr, pci_addr, func_no, size);
++ if (ret) {
++ dev_err(&pcie->pdev->dev, "Failed to enable address\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = csr_readw(pcie, reg);
++ if (!(val & PCI_MSI_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
++
++ return val;
++}
++
++static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc,
++ u8 func_no, u8 interrupts)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ val = csr_readw(pcie, reg);
++ val &= ~PCI_MSI_FLAGS_QMASK;
++ val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
++ csr_writew(pcie, val, reg);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = csr_readw(pcie, reg);
++ if (!(val & PCI_MSIX_FLAGS_ENABLE))
++ return -EINVAL;
++
++ val &= PCI_MSIX_FLAGS_QSIZE;
++
++ return val;
++}
++
++static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no,
++ u16 interrupts)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ u32 val, reg;
++
++ if (!ep->msix_cap)
++ return -EINVAL;
++
++ reg = ep->msix_cap + PCI_MSIX_FLAGS;
++ val = csr_readw(pcie, reg);
++ val &= ~PCI_MSIX_FLAGS_QSIZE;
++ val |= interrupts;
++ csr_writew(pcie, val, reg);
++
++ return 0;
++}
++
++static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc);
++
++ if (!ep->ops->raise_irq)
++ return -EINVAL;
++
++ return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
++}
++
++static const struct pci_epc_ops epc_ops = {
++ .write_header = mobiveil_pcie_ep_write_header,
++ .set_bar = mobiveil_pcie_ep_set_bar,
++ .clear_bar = mobiveil_pcie_ep_clear_bar,
++ .map_addr = mobiveil_pcie_ep_map_addr,
++ .unmap_addr = mobiveil_pcie_ep_unmap_addr,
++ .set_msi = mobiveil_pcie_ep_set_msi,
++ .get_msi = mobiveil_pcie_ep_get_msi,
++ .set_msix = mobiveil_pcie_ep_set_msix,
++ .get_msix = mobiveil_pcie_ep_get_msix,
++ .raise_irq = mobiveil_pcie_ep_raise_irq,
++};
++
++int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++
++ dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n");
++
++ return -EINVAL;
++}
++
++int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u16 msg_ctrl, msg_data;
++ u32 msg_addr_lower, msg_addr_upper, reg;
++ u64 msg_addr;
++ u32 func_num;
++ bool has_upper;
++ int ret;
++
++ if (!ep->msi_cap)
++ return -EINVAL;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
++ reg = ep->msi_cap + PCI_MSI_FLAGS;
++ msg_ctrl = csr_readw(pcie, reg);
++ has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
++ msg_addr_lower = csr_readl(pcie, reg);
++ if (has_upper) {
++ reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
++ msg_addr_upper = csr_readl(pcie, reg);
++ reg = ep->msi_cap + PCI_MSI_DATA_64;
++ msg_data = csr_readw(pcie, reg);
++ } else {
++ msg_addr_upper = 0;
++ reg = ep->msi_cap + PCI_MSI_DATA_32;
++ msg_data = csr_readw(pcie, reg);
++ }
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
++ msg_addr, epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data | (interrupt_num - 1), ep->msi_mem);
++
++ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num)
++{
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct pci_epc *epc = ep->epc;
++ u32 msg_addr_upper, msg_addr_lower;
++ u32 msg_data;
++ u64 msg_addr;
++ u32 func_num;
++ int ret;
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT;
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_LOWER_ADDR +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++ msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_UPPER_ADDR +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++ msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++ msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS +
++ PCI_MSIX_ENTRY_DATA +
++ (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE);
++
++ func_num = csr_readl(pcie, PAB_CTRL);
++ func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT);
++ csr_writel(pcie, func_num, PAB_CTRL);
++
++ ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys,
++ msg_addr, epc->mem->page_size);
++ if (ret)
++ return ret;
++
++ writel(msg_data, ep->msi_mem);
++
++ mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
++
++ return 0;
++}
++
++void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep)
++{
++ struct pci_epc *epc = ep->epc;
++
++ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
++ epc->mem->page_size);
++
++ pci_epc_mem_exit(epc);
++}
++
++int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep)
++{
++ int ret;
++ void *addr;
++ struct pci_epc *epc;
++ struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep);
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *np = dev->of_node;
++
++ if (!pcie->csr_axi_slave_base) {
++ dev_err(dev, "csr_base is not populated\n");
++ return -EINVAL;
++ }
++
++ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
++ if (ret < 0) {
++ dev_err(dev, "Unable to read *num-ob-windows* property\n");
++ return ret;
++ }
++
++ if (ep->num_ob_windows > MAX_IATU_OUT) {
++ dev_err(dev, "Invalid *num-ob-windows*\n");
++ return -EINVAL;
++ }
++ ep->ob_window_map = devm_kcalloc(dev,
++ BITS_TO_LONGS(ep->num_ob_windows),
++ sizeof(long),
++ GFP_KERNEL);
++ if (!ep->ob_window_map)
++ return -ENOMEM;
++
++ addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
++ GFP_KERNEL);
++ if (!addr)
++ return -ENOMEM;
++ ep->outbound_addr = addr;
++
++ mobiveil_pcie_enable_bridge_pio(pcie);
++ mobiveil_pcie_enable_engine_apio(pcie);
++ mobiveil_pcie_enable_engine_ppio(pcie);
++ mobiveil_pcie_enable_msi_ep(pcie);
++
++ epc = devm_pci_epc_create(dev, &epc_ops);
++ if (IS_ERR(epc)) {
++ dev_err(dev, "Failed to create epc device\n");
++ return PTR_ERR(epc);
++ }
++
++ ep->epc = epc;
++ epc_set_drvdata(epc, ep);
++
++ ep->msi_cap = mobiveil_pcie_ep_find_capability(pcie, PCI_CAP_ID_MSI);
++
++ ep->msix_cap = mobiveil_pcie_ep_find_capability(pcie,
++ PCI_CAP_ID_MSIX);
++
++ if (ep->ops->ep_init)
++ ep->ops->ep_init(ep);
++
++ epc->max_functions = ep->pf_num;
++
++ ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
++ ep->page_size);
++ if (ret < 0) {
++ dev_err(dev, "Failed to initialize address space\n");
++ return ret;
++ }
++
++ ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
++ epc->mem->page_size);
++ if (!ep->msi_mem) {
++ dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-host.c
+@@ -0,0 +1,640 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/msi.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_platform.h>
++#include <linux/of_pci.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++
++#include "pcie-mobiveil.h"
++
++static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++
++ /* If there is no link, then there is no device */
++ if (bus->number > pcie->rp.root_bus_nr && !mobiveil_pcie_link_up(pcie))
++ return false;
++
++ /* Only one device down on each root port */
++ if ((bus->number == pcie->rp.root_bus_nr) && (devfn > 0))
++ return false;
++
++ /*
++ * Do not read more than one device on the bus directly
++ * attached to RC
++ */
++ if ((bus->primary == pcie->rp.root_bus_nr) && (PCI_SLOT(devfn) > 0))
++ return false;
++
++ return true;
++}
++
++/*
++ * mobiveil_pcie_map_bus - routine to get the configuration base of either
++ * root port or endpoint
++ */
++static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
++ unsigned int devfn, int where)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++ u32 value;
++
++ if (!mobiveil_pcie_valid_device(bus, devfn))
++ return NULL;
++
++ /* RC config access */
++ if (bus->number == pcie->rp.root_bus_nr)
++ return pcie->csr_axi_slave_base + where;
++
++ /*
++ * EP config access (in Config/APIO space)
++ * Program PEX Address base (31..16 bits) with appropriate value
++ * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
++ * Relies on pci_lock serialization
++ */
++ value = bus->number << PAB_BUS_SHIFT |
++ PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
++ PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
++
++ csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
++
++ return pcie->rp.config_axi_slave_base + where;
++}
++
++static int mobiveil_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct mobiveil_pcie *pcie = bus->sysdata;
++ struct root_port *rp = &pcie->rp;
++
++ if (bus->number > rp->root_bus_nr && rp->ops->read_other_conf)
++ return rp->ops->read_other_conf(bus, devfn, where, size, val);
++
++ return pci_generic_config_read(bus, devfn, where, size, val);
++}
++static struct pci_ops mobiveil_pcie_ops = {
++ .map_bus = mobiveil_pcie_map_bus,
++ .read = mobiveil_pcie_config_read,
++ .write = pci_generic_config_write,
++};
++
++static void mobiveil_pcie_isr(struct irq_desc *desc)
++{
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++ struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
++ struct device *dev = &pcie->pdev->dev;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++ u32 msi_data, msi_addr_lo, msi_addr_hi;
++ u32 intr_status, msi_status;
++ unsigned long shifted_status;
++ u32 bit, virq, val, mask;
++
++ /*
++ * The core provides a single interrupt for both INTx/MSI messages.
++ * So we'll read both INTx and MSI status
++ */
++
++ chained_irq_enter(chip, desc);
++
++ /* read INTx status */
++ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
++ mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ intr_status = val & mask;
++
++ /* Handle INTx */
++ if (intr_status & PAB_INTP_INTX_MASK) {
++ shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
++ shifted_status &= PAB_INTP_INTX_MASK;
++ shifted_status >>= PAB_INTX_START;
++ do {
++ for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
++ virq = irq_find_mapping(pcie->rp.intx_domain,
++ bit + 1);
++ if (virq)
++ generic_handle_irq(virq);
++ else
++ dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
++ bit);
++
++ /* clear interrupt handled */
++ csr_writel(pcie, 1 << (PAB_INTX_START + bit),
++ PAB_INTP_AMBA_MISC_STAT);
++ }
++
++ shifted_status = csr_readl(pcie,
++ PAB_INTP_AMBA_MISC_STAT);
++ shifted_status &= PAB_INTP_INTX_MASK;
++ shifted_status >>= PAB_INTX_START;
++ } while (shifted_status != 0);
++ }
++
++ /* read extra MSI status register */
++ msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
++
++ /* handle MSI interrupts */
++ while (msi_status & 1) {
++ msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
++
++ /*
++ * MSI_STATUS_OFFSET register gets updated to zero
++ * once we pop not only the MSI data but also address
++ * from MSI hardware FIFO. So keeping these following
++ * two dummy reads.
++ */
++ msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
++ MSI_ADDR_L_OFFSET);
++ msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
++ MSI_ADDR_H_OFFSET);
++ dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
++ msi_data, msi_addr_hi, msi_addr_lo);
++
++ virq = irq_find_mapping(msi->dev_domain, msi_data);
++ if (virq)
++ generic_handle_irq(virq);
++
++ msi_status = readl_relaxed(pcie->apb_csr_base +
++ MSI_STATUS_OFFSET);
++ }
++
++ /* Clear the interrupt status */
++ csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
++ chained_irq_exit(chip, desc);
++}
++
++static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct platform_device *pdev = pcie->pdev;
++ struct device_node *node = dev->of_node;
++ struct resource *res;
++
++ /* map config resource */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "config_axi_slave");
++ pcie->rp.config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->rp.config_axi_slave_base))
++ return PTR_ERR(pcie->rp.config_axi_slave_base);
++ pcie->rp.ob_io_res = res;
++
++ /* map csr resource */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
++ "csr_axi_slave");
++ pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->csr_axi_slave_base))
++ return PTR_ERR(pcie->csr_axi_slave_base);
++ pcie->pcie_reg_base = res->start;
++
++ /* read the number of windows requested */
++ if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
++ pcie->apio_wins = MAX_PIO_WINDOWS;
++
++ if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
++ pcie->ppio_wins = MAX_PIO_WINDOWS;
++
++ return 0;
++}
++
++static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
++{
++ phys_addr_t msg_addr = pcie->pcie_reg_base;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ msi->num_of_vectors = PCI_NUM_MSI;
++ msi->msi_pages_phys = (phys_addr_t)msg_addr;
++
++ writel_relaxed(lower_32_bits(msg_addr),
++ pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
++ writel_relaxed(upper_32_bits(msg_addr),
++ pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
++ writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
++ writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
++}
++
++int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
++{
++ u32 value, pab_ctrl, type;
++ struct resource_entry *win;
++ int i;
++
++ /* Disable all inbound/outbound windows */
++ for (i = 0; i < pcie->apio_wins; i++)
++ mobiveil_pcie_disable_ob_win(pcie, i);
++ for (i = 0; i < pcie->ppio_wins; i++)
++ mobiveil_pcie_disable_ib_win(pcie, i);
++
++ pcie->ib_wins_configured = 0;
++ pcie->ob_wins_configured = 0;
++
++ if (!reinit) {
++ /* setup bus numbers */
++ value = csr_readl(pcie, PCI_PRIMARY_BUS);
++ value &= 0xff000000;
++ value |= 0x00ff0100;
++ csr_writel(pcie, value, PCI_PRIMARY_BUS);
++ }
++
++ /*
++ * program Bus Master Enable Bit in Command Register in PAB Config
++ * Space
++ */
++ value = csr_readl(pcie, PCI_COMMAND);
++ value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
++ csr_writel(pcie, value, PCI_COMMAND);
++
++ /*
++ * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
++ * register
++ */
++ pab_ctrl = csr_readl(pcie, PAB_CTRL);
++ pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
++ csr_writel(pcie, pab_ctrl, PAB_CTRL);
++
++ /*
++ * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
++ * PAB_AXI_PIO_CTRL Register
++ */
++ value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
++ value |= APIO_EN_MASK;
++ csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
++
++ /* Enable PCIe PIO master */
++ value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
++ value |= 1 << PIO_ENABLE_SHIFT;
++ csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
++
++ /*
++ * we'll program one outbound window for config reads and
++ * another default inbound window for all the upstream traffic
++ * rest of the outbound windows will be configured according to
++ * the "ranges" field defined in device tree
++ */
++
++ /* config outbound translation window */
++ program_ob_windows(pcie, WIN_NUM_0, pcie->rp.ob_io_res->start, 0,
++ CFG_WINDOW_TYPE, resource_size(pcie->rp.ob_io_res));
++
++ /* memory inbound translation window */
++ program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
++
++ /* Get the I/O and memory ranges from DT */
++ resource_list_for_each_entry(win, pcie->resources) {
++ if (resource_type(win->res) == IORESOURCE_MEM) {
++ type = MEM_WINDOW_TYPE;
++ } else if (resource_type(win->res) == IORESOURCE_IO) {
++ type = IO_WINDOW_TYPE;
++ } else if (resource_type(win->res) == IORESOURCE_BUS) {
++ pcie->rp.root_bus_nr = win->res->start;
++ continue;
++ } else {
++ continue;
++ }
++
++ /* configure outbound translation window */
++ program_ob_windows(pcie, pcie->ob_wins_configured,
++ win->res->start,
++ win->res->start - win->offset,
++ type, resource_size(win->res));
++ }
++
++ /* fixup for PCIe class register */
++ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
++ value &= 0xff;
++ value |= (PCI_CLASS_BRIDGE_PCI << 16);
++ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
++
++ return 0;
++}
++
++static void mobiveil_mask_intx_irq(struct irq_data *data)
++{
++ struct irq_desc *desc = irq_to_desc(data->irq);
++ struct mobiveil_pcie *pcie;
++ unsigned long flags;
++ u32 mask, shifted_val;
++
++ pcie = irq_desc_get_chip_data(desc);
++ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
++ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
++ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ shifted_val &= ~mask;
++ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
++ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
++}
++
++static void mobiveil_unmask_intx_irq(struct irq_data *data)
++{
++ struct irq_desc *desc = irq_to_desc(data->irq);
++ struct mobiveil_pcie *pcie;
++ unsigned long flags;
++ u32 shifted_val, mask;
++
++ pcie = irq_desc_get_chip_data(desc);
++ mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
++ raw_spin_lock_irqsave(&pcie->rp.intx_mask_lock, flags);
++ shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ shifted_val |= mask;
++ csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
++ raw_spin_unlock_irqrestore(&pcie->rp.intx_mask_lock, flags);
++}
++
++static struct irq_chip intx_irq_chip = {
++ .name = "mobiveil_pcie:intx",
++ .irq_enable = mobiveil_unmask_intx_irq,
++ .irq_disable = mobiveil_mask_intx_irq,
++ .irq_mask = mobiveil_mask_intx_irq,
++ .irq_unmask = mobiveil_unmask_intx_irq,
++};
++
++/* routine to setup the INTx related data */
++static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
++ irq_hw_number_t hwirq)
++{
++ irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
++ irq_set_chip_data(irq, domain->host_data);
++
++ return 0;
++}
++
++/* INTx domain operations structure */
++static const struct irq_domain_ops intx_domain_ops = {
++ .map = mobiveil_pcie_intx_map,
++};
++
++static struct irq_chip mobiveil_msi_irq_chip = {
++ .name = "Mobiveil PCIe MSI",
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++};
++
++static struct msi_domain_info mobiveil_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
++ MSI_FLAG_PCI_MSIX),
++ .chip = &mobiveil_msi_irq_chip,
++};
++
++static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
++{
++ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
++ phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
++
++ msg->address_lo = lower_32_bits(addr);
++ msg->address_hi = upper_32_bits(addr);
++ msg->data = data->hwirq;
++
++ dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
++ (int)data->hwirq, msg->address_hi, msg->address_lo);
++}
++
++static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
++ const struct cpumask *mask, bool force)
++{
++ return -EINVAL;
++}
++
++static struct irq_chip mobiveil_msi_bottom_irq_chip = {
++ .name = "Mobiveil MSI",
++ .irq_compose_msi_msg = mobiveil_compose_msi_msg,
++ .irq_set_affinity = mobiveil_msi_set_affinity,
++};
++
++static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
++ unsigned int virq,
++ unsigned int nr_irqs, void *args)
++{
++ struct mobiveil_pcie *pcie = domain->host_data;
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++ unsigned long bit;
++
++ WARN_ON(nr_irqs != 1);
++ mutex_lock(&msi->lock);
++
++ bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
++ if (bit >= msi->num_of_vectors) {
++ mutex_unlock(&msi->lock);
++ return -ENOSPC;
++ }
++
++ set_bit(bit, msi->msi_irq_in_use);
++
++ mutex_unlock(&msi->lock);
++
++ irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
++ domain->host_data, handle_level_irq, NULL, NULL);
++ return 0;
++}
++
++static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
++ unsigned int virq,
++ unsigned int nr_irqs)
++{
++ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
++ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ mutex_lock(&msi->lock);
++
++ if (!test_bit(d->hwirq, msi->msi_irq_in_use))
++ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
++ d->hwirq);
++ else
++ __clear_bit(d->hwirq, msi->msi_irq_in_use);
++
++ mutex_unlock(&msi->lock);
++}
++static const struct irq_domain_ops msi_domain_ops = {
++ .alloc = mobiveil_irq_msi_domain_alloc,
++ .free = mobiveil_irq_msi_domain_free,
++};
++
++static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
++ struct mobiveil_msi *msi = &pcie->rp.msi;
++
++ mutex_init(&msi->lock);
++ msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
++ &msi_domain_ops, pcie);
++ if (!msi->dev_domain) {
++ dev_err(dev, "failed to create IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
++ &mobiveil_msi_domain_info,
++ msi->dev_domain);
++ if (!msi->msi_domain) {
++ dev_err(dev, "failed to create MSI domain\n");
++ irq_domain_remove(msi->dev_domain);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *node = dev->of_node;
++ int ret;
++
++ /* setup INTx */
++ pcie->rp.intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
++ &intx_domain_ops, pcie);
++
++ if (!pcie->rp.intx_domain) {
++ dev_err(dev, "Failed to get a INTx IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ raw_spin_lock_init(&pcie->rp.intx_mask_lock);
++
++ /* setup MSI */
++ ret = mobiveil_allocate_msi_domains(pcie);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
++{
++ struct device *dev = &pcie->pdev->dev;
++ struct resource *res;
++ int ret;
++
++ if (pcie->rp.ops->interrupt_init)
++ return pcie->rp.ops->interrupt_init(pcie);
++
++ /* map MSI config resource */
++ res = platform_get_resource_byname(pcie->pdev, IORESOURCE_MEM,
++ "apb_csr");
++ pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
++ if (IS_ERR(pcie->apb_csr_base))
++ return PTR_ERR(pcie->apb_csr_base);
++
++ /* setup MSI hardware registers */
++ mobiveil_pcie_enable_msi(pcie);
++
++ pcie->rp.irq = platform_get_irq(pcie->pdev, 0);
++ if (pcie->rp.irq <= 0) {
++ dev_err(dev, "failed to map IRQ: %d\n", pcie->rp.irq);
+ return -ENODEV;
+ }
+
-+ pcie_ep->msi_phys_addr = ep->phys_base + PCIE_MSI_ADDR_OFFSET;
++ /* initialize the IRQ domains */
++ ret = mobiveil_pcie_init_irq_domain(pcie);
++ if (ret) {
++ dev_err(dev, "Failed creating IRQ Domain\n");
++ return ret;
++ }
++
++ irq_set_chained_handler_and_data(pcie->rp.irq,
++ mobiveil_pcie_isr, pcie);
++
++ /* Enable interrupts */
++ csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
++ PAB_INTP_AMBA_MISC_ENB);
++
++ return 0;
++}
++
++int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
++{
++ struct pci_bus *bus;
++ struct pci_bus *child;
++ struct pci_host_bridge *bridge;
++ struct device *dev = &pcie->pdev->dev;
++ struct device_node *np = dev->of_node;
++ resource_size_t iobase;
++ int ret;
++
++ ret = mobiveil_pcie_parse_dt(pcie);
++ if (ret) {
++ dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
++ return ret;
++ }
++
++ /* allocate the PCIe port */
++ bridge = devm_pci_alloc_host_bridge(dev, 0);
++ if (!bridge)
++ return -ENOMEM;
++
++ /* parse the host bridge base addresses from the device tree file */
++ ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
++ &bridge->windows, &iobase);
++ if (ret) {
++ dev_err(dev, "Getting bridge resources failed\n");
++ return ret;
++ }
++
++ pcie->resources = &bridge->windows;
++
++ /*
++ * configure all inbound and outbound windows and prepare the RC for
++ * config access
++ */
++ ret = mobiveil_host_init(pcie, false);
++ if (ret) {
++ dev_err(dev, "Failed to initialize host\n");
++ goto error;
++ }
++
++ ret = mobiveil_pcie_interrupt_init(pcie);
++ if (ret) {
++ dev_err(dev, "Interrupt init failed\n");
++ goto error;
++ }
++
++ ret = devm_request_pci_bus_resources(dev, pcie->resources);
++ if (ret)
++ goto error;
++
++ /* Initialize bridge */
++ bridge->dev.parent = dev;
++ bridge->sysdata = pcie;
++ bridge->busnr = pcie->rp.root_bus_nr;
++ bridge->ops = &mobiveil_pcie_ops;
++ bridge->map_irq = of_irq_parse_and_map_pci;
++ bridge->swizzle_irq = pci_common_swizzle;
++
++ ret = mobiveil_bringup_link(pcie);
++ if (ret) {
++ dev_info(dev, "link bring-up failed\n");
++ }
++
++ /* setup the kernel resources for the newly added PCIe root bus */
++ ret = pci_scan_root_bus_bridge(bridge);
++ if (ret)
++ goto error;
++
++ bus = bridge->bus;
++
++ pci_assign_unassigned_bus_resources(bus);
++ list_for_each_entry(child, &bus->children, node)
++ pcie_bus_configure_settings(child);
++ pci_bus_add_devices(bus);
++
++ return 0;
++error:
++ pci_free_resource_list(pcie->resources);
++ return ret;
++}
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil-plat.c
+@@ -0,0 +1,54 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_pci.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++
++#include "pcie-mobiveil.h"
++
++static int mobiveil_pcie_probe(struct platform_device *pdev)
++{
++ struct mobiveil_pcie *pcie;
++ struct device *dev = &pdev->dev;
++
++ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
++ if (!pcie)
++ return -ENOMEM;
++
++ pcie->pdev = pdev;
++
++ return mobiveil_pcie_host_probe(pcie);
++}
++
++static const struct of_device_id mobiveil_pcie_of_match[] = {
++ {.compatible = "mbvl,gpex40-pcie",},
++ {},
++};
++
++MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
++
++static struct platform_driver mobiveil_pcie_driver = {
++ .probe = mobiveil_pcie_probe,
++ .driver = {
++ .name = "mobiveil-pcie",
++ .of_match_table = mobiveil_pcie_of_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++builtin_platform_driver(mobiveil_pcie_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
++MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil.c
+@@ -0,0 +1,334 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++
++#include "pcie-mobiveil.h"
++
++/*
++ * mobiveil_pcie_sel_page - routine to access paged register
++ *
++ * Registers whose address greater than PAGED_ADDR_BNDRY (0xc00) are paged,
++ * for this scheme to work extracted higher 6 bits of the offset will be
++ * written to pg_sel field of PAB_CTRL register and rest of the lower 10
++ * bits enabled with PAGED_ADDR_BNDRY are used as offset of the register.
++ */
++static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
++{
++ u32 val;
++
++ val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
++ val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
++ val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
++
++ writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
++}
++
++static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
++{
++ if (off < PAGED_ADDR_BNDRY) {
++ /* For directly accessed registers, clear the pg_sel field */
++ mobiveil_pcie_sel_page(pcie, 0);
++ return pcie->csr_axi_slave_base + off;
++ }
++
++ mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
++ return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
++}
+
-+ pcie_ep->msi_virt_addr = ioremap(pcie_ep->msi_phys_addr,
-+ PCIE_MSI_OB_SIZE);
-+ if (!pcie_ep->msi_virt_addr) {
-+ dev_err(dev, "failed to map MSI outbound region\n");
-+ return -ENOMEM;
++static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
++{
++ if ((uintptr_t)addr & (size - 1)) {
++ *val = 0;
++ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
-+ ret = dw_pcie_ep_init(ep);
-+ if (ret) {
-+ dev_err(dev, "failed to initialize endpoint\n");
-+ return ret;
++ switch (size) {
++ case 4:
++ *val = readl(addr);
++ break;
++ case 2:
++ *val = readw(addr);
++ break;
++ case 1:
++ *val = readb(addr);
++ break;
++ default:
++ *val = 0;
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++ }
++
++ return PCIBIOS_SUCCESSFUL;
++}
++
++static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
++{
++ if ((uintptr_t)addr & (size - 1))
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++
++ switch (size) {
++ case 4:
++ writel(val, addr);
++ break;
++ case 2:
++ writew(val, addr);
++ break;
++ case 1:
++ writeb(val, addr);
++ break;
++ default:
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++ }
++
++ return PCIBIOS_SUCCESSFUL;
++}
++
++u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
++{
++ void *addr;
++ u32 val;
++ int ret;
++
++ addr = mobiveil_pcie_comp_addr(pcie, off);
++
++ ret = mobiveil_pcie_read(addr, size, &val);
++ if (ret)
++ dev_err(&pcie->pdev->dev, "read CSR address failed\n");
++
++ return val;
++}
++
++void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
++{
++ void *addr;
++ int ret;
++
++ addr = mobiveil_pcie_comp_addr(pcie, off);
++
++ ret = mobiveil_pcie_write(addr, size, val);
++ if (ret)
++ dev_err(&pcie->pdev->dev, "write CSR address failed\n");
++}
++
++bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
++{
++ if (pcie->ops->link_up)
++ return pcie->ops->link_up(pcie);
++
++ return (csr_readl(pcie, LTSSM_STATUS) &
++ LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
++}
++
++void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
++ u64 pci_addr, u32 type, u64 size)
++{
++ u32 value;
++ u64 size64 = ~(size - 1);
++
++ if (win_num >= pcie->ppio_wins) {
++ dev_err(&pcie->pdev->dev,
++ "ERROR: max inbound windows reached !\n");
++ return;
++ }
++
++ value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
++ value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT |
++ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
++ value |= (type << AMAP_CTRL_TYPE_SHIFT) | (1 << AMAP_CTRL_EN_SHIFT) |
++ (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
++ csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
++
++ csr_writel(pcie, upper_32_bits(size64),
++ PAB_EXT_PEX_AMAP_SIZEN(win_num));
++
++ csr_writel(pcie, lower_32_bits(cpu_addr),
++ PAB_PEX_AMAP_AXI_WIN(win_num));
++ csr_writel(pcie, upper_32_bits(cpu_addr),
++ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
++
++ csr_writel(pcie, lower_32_bits(pci_addr),
++ PAB_PEX_AMAP_PEX_WIN_L(win_num));
++ csr_writel(pcie, upper_32_bits(pci_addr),
++ PAB_PEX_AMAP_PEX_WIN_H(win_num));
++
++ pcie->ib_wins_configured++;
++}
++
++/*
++ * routine to program the outbound windows
++ */
++void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
++ u64 pci_addr, u32 type, u64 size)
++{
++
++ u32 value;
++ u64 size64 = ~(size - 1);
++
++ if (win_num >= pcie->apio_wins) {
++ dev_err(&pcie->pdev->dev,
++ "ERROR: max outbound windows reached !\n");
++ return;
+ }
+
++ /*
++ * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
++ * to 4 KB in PAB_AXI_AMAP_CTRL register
++ */
++ value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
++ value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
++ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
++ value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
++ (lower_32_bits(size64) & WIN_SIZE_MASK << WIN_SIZE_SHIFT);
++ csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
++
++ csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
++
++ /*
++ * program AXI window base with appropriate value in
++ * PAB_AXI_AMAP_AXI_WIN0 register
++ */
++ csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
++ PAB_AXI_AMAP_AXI_WIN(win_num));
++ csr_writel(pcie, upper_32_bits(cpu_addr),
++ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
++
++ csr_writel(pcie, lower_32_bits(pci_addr),
++ PAB_AXI_AMAP_PEX_WIN_L(win_num));
++ csr_writel(pcie, upper_32_bits(pci_addr),
++ PAB_AXI_AMAP_PEX_WIN_H(win_num));
++
++ pcie->ob_wins_configured++;
++}
++
++int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
++ u64 phys, u64 bus_addr, u8 func, u64 size)
++{
++ u32 val;
++ u32 size_h, size_l;
++
++ if (size & (size - 1))
++ size = 1 << (1 + ilog2(size));
++
++ size_h = upper_32_bits(~(size - 1));
++ size_l = lower_32_bits(~(size - 1));
++
++ val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
++ val &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT |
++ WIN_SIZE_MASK << WIN_SIZE_SHIFT);
++ val |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
++ (size_l & (WIN_SIZE_MASK << WIN_SIZE_SHIFT));
++ csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
++
++ csr_writel(pcie, func, PAB_AXI_AMAP_PCI_HDR_PARAM(win_num));
++ csr_writel(pcie, lower_32_bits(phys), PAB_AXI_AMAP_AXI_WIN(win_num));
++ csr_writel(pcie, upper_32_bits(phys),
++ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
++ csr_writel(pcie, lower_32_bits(bus_addr),
++ PAB_AXI_AMAP_PEX_WIN_L(win_num));
++ csr_writel(pcie, upper_32_bits(bus_addr),
++ PAB_AXI_AMAP_PEX_WIN_H(win_num));
++ csr_writel(pcie, size_h, PAB_EXT_AXI_AMAP_SIZE(win_num));
++
+ return 0;
+}
+
- static int __init ls_add_pcie_port(struct ls_pcie *pcie)
- {
- struct dw_pcie *pci = pcie->pci;
-@@ -309,18 +427,18 @@ static int __init ls_pcie_probe(struct p
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
-- pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
-+ pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_BASE;
-
-- if (!ls_pcie_is_bridge(pcie))
-- return -ENODEV;
-+ pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
-
- platform_set_drvdata(pdev, pcie);
-
-- ret = ls_add_pcie_port(pcie);
-- if (ret < 0)
-- return ret;
-+ if (!ls_pcie_is_bridge(pcie))
-+ ret = ls_add_pcie_ep(pcie, pdev);
-+ else
-+ ret = ls_add_pcie_port(pcie);
-
-- return 0;
-+ return ret;
- }
-
- static struct platform_driver ls_pcie_driver = {
---- a/drivers/pci/endpoint/functions/pci-epf-test.c
-+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
-@@ -471,7 +471,7 @@ static int pci_epf_test_probe(struct pci
- const struct pci_epf_device_id *match;
- struct pci_epf_test_data *data;
- enum pci_barno test_reg_bar = BAR_0;
-- bool linkup_notifier = true;
-+ bool linkup_notifier = false;
-
- match = pci_epf_match_device(pci_epf_test_ids, epf);
- data = (struct pci_epf_test_data *)match->driver_data;
---- a/drivers/pci/endpoint/pci-epf-core.c
-+++ b/drivers/pci/endpoint/pci-epf-core.c
-@@ -104,8 +104,8 @@ void pci_epf_free_space(struct pci_epf *
- if (!addr)
- return;
-
-- dma_free_coherent(dev, epf->bar[bar].size, addr,
-- epf->bar[bar].phys_addr);
-+ free_pages((unsigned long)addr,
-+ get_order(epf->bar[bar].size));
-
- epf->bar[bar].phys_addr = 0;
- epf->bar[bar].size = 0;
-@@ -129,7 +129,9 @@ void *pci_epf_alloc_space(struct pci_epf
- size = 128;
- size = roundup_pow_of_two(size);
-
-- space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
-+ space = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-+ get_order(size));
-+ phys_addr = virt_to_phys(space);
- if (!space) {
- dev_err(dev, "failed to allocate mem space\n");
- return NULL;
++void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
++ int bar, u64 phys)
++{
++ csr_writel(pcie, upper_32_bits(phys),
++ PAB_EXT_PEX_BAR_AMAP(func_no, bar));
++ csr_writel(pcie, lower_32_bits(phys) | PEX_BAR_AMAP_EN,
++ PAB_PEX_BAR_AMAP(func_no, bar));
++}
++
++void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pcie,
++ u8 func_no, u8 bar)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_PEX_BAR_AMAP(func_no, bar));
++ val &= ~(1 << 0);
++ csr_writel(pcie, val, PAB_PEX_BAR_AMAP(func_no, bar));
++}
++
++int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
++{
++ int retries;
++
++ /* check if the link is up or not */
++ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
++ if (mobiveil_pcie_link_up(pcie))
++ return 0;
++
++ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
++ }
++
++ dev_info(&pcie->pdev->dev, "link never came up\n");
++
++ return -ETIMEDOUT;
++}
++
++void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pcie, int win_num)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
++ val &= ~(1 << AMAP_CTRL_EN_SHIFT);
++ csr_writel(pcie, val, PAB_PEX_AMAP_CTRL(win_num));
++}
++
++void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
++ val &= ~(1 << WIN_ENABLE_SHIFT);
++ csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num));
++}
++
++void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pcie)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_CTRL);
++ val |= 1 << AMBA_PIO_ENABLE_SHIFT;
++ val |= 1 << PEX_PIO_ENABLE_SHIFT;
++ csr_writel(pcie, val, PAB_CTRL);
++}
++
++void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pcie)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_AXI_PIO_CTRL);
++ val |= APIO_EN_MASK;
++ csr_writel(pcie, val, PAB_AXI_PIO_CTRL);
++}
++
++void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pcie)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
++ val |= 1 << PIO_ENABLE_SHIFT;
++ csr_writel(pcie, val, PAB_PEX_PIO_CTRL);
++}
++
++void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pcie)
++{
++ u32 val;
++
++ val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
++ val |= 1 << 0;
++ csr_writel(pcie, val, PAB_INTP_AMBA_MISC_ENB);
++}
+--- /dev/null
++++ b/drivers/pci/mobiveil/pcie-mobiveil.h
+@@ -0,0 +1,296 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * PCIe host controller driver for Mobiveil PCIe Host controller
++ *
++ * Copyright (c) 2018 Mobiveil Inc.
++ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
++ * Refactor: Zhiqiang Hou <Zhiqiang.Hou@nxp.com>
++ */
++
++#ifndef _PCIE_MOBIVEIL_H
++#define _PCIE_MOBIVEIL_H
++
++#include <linux/pci.h>
++#include <linux/irq.h>
++#include <linux/msi.h>
++#include "../pci.h"
++
++#include <linux/pci-epc.h>
++#include <linux/pci-epf.h>
++
++#define MAX_IATU_OUT 256
++/* register offsets and bit positions */
++
++/*
++ * translation tables are grouped into windows, each window registers are
++ * grouped into blocks of 4 or 16 registers each
++ */
++#define PAB_REG_BLOCK_SIZE 16
++#define PAB_EXT_REG_BLOCK_SIZE 4
++
++#define PAB_REG_ADDR(offset, win) \
++ (offset + (win * PAB_REG_BLOCK_SIZE))
++#define PAB_EXT_REG_ADDR(offset, win) \
++ (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
++
++#define LTSSM_STATUS 0x0404
++#define LTSSM_STATUS_L0_MASK 0x3f
++#define LTSSM_STATUS_L0 0x2d
++
++#define PAB_CTRL 0x0808
++#define AMBA_PIO_ENABLE_SHIFT 0
++#define PEX_PIO_ENABLE_SHIFT 1
++#define PAGE_SEL_SHIFT 13
++#define PAGE_SEL_MASK 0x3f
++#define PAGE_LO_MASK 0x3ff
++#define PAGE_SEL_OFFSET_SHIFT 10
++#define FUNC_SEL_SHIFT 19
++#define FUNC_SEL_MASK 0x1ff
++#define MSI_SW_CTRL_EN (1 << 29)
++
++#define PAB_ACTIVITY_STAT 0x81c
++
++#define PAB_AXI_PIO_CTRL 0x0840
++#define APIO_EN_MASK 0xf
++
++#define PAB_PEX_PIO_CTRL 0x08c0
++#define PIO_ENABLE_SHIFT 0
++
++#define PAB_INTP_AMBA_MISC_ENB 0x0b0c
++#define PAB_INTP_AMBA_MISC_STAT 0x0b1c
++#define PAB_INTP_RESET (0x1 << 1)
++#define PAB_INTP_MSI (0x1 << 3)
++#define PAB_INTP_INTA (0x1 << 5)
++#define PAB_INTP_INTB (0x1 << 6)
++#define PAB_INTP_INTC (0x1 << 7)
++#define PAB_INTP_INTD (0x1 << 8)
++#define PAB_INTP_PCIE_UE (0x1 << 9)
++#define PAB_INTP_IE_PMREDI (0x1 << 29)
++#define PAB_INTP_IE_EC (0x1 << 30)
++#define PAB_INTP_MSI_MASK PAB_INTP_MSI
++#define PAB_INTP_INTX_MASK (PAB_INTP_INTA | PAB_INTP_INTB |\
++ PAB_INTP_INTC | PAB_INTP_INTD)
++
++#define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win)
++#define WIN_ENABLE_SHIFT 0
++#define WIN_TYPE_SHIFT 1
++#define WIN_TYPE_MASK 0x3
++#define WIN_SIZE_SHIFT 10
++#define WIN_SIZE_MASK 0x3fffff
++
++#define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win)
++
++#define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win)
++#define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win)
++#define AXI_WINDOW_ALIGN_MASK 3
++
++#define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win)
++#define PAB_BUS_SHIFT 24
++#define PAB_DEVICE_SHIFT 19
++#define PAB_FUNCTION_SHIFT 16
++
++#define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win)
++#define PAB_INTP_AXI_PIO_CLASS 0x474
++
++#define GPEX_ACK_REPLAY_TO 0x438
++#define ACK_LAT_TO_VAL_MASK 0x1fff
++#define ACK_LAT_TO_VAL_SHIFT 0
++
++#define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win)
++#define AMAP_CTRL_EN_SHIFT 0
++#define AMAP_CTRL_TYPE_SHIFT 1
++#define AMAP_CTRL_TYPE_MASK 3
++
++#define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win)
++#define PAB_EXT_PEX_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0xb4a0, win)
++#define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win)
++#define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win)
++#define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win)
++
++/* PPIO WINs EP mode */
++#define PAB_PEX_BAR_AMAP(func, bar) (0x1ba0 + 0x20 * func + 4 * bar)
++#define PAB_EXT_PEX_BAR_AMAP(func, bar) (0x84a0 + 0x20 * func + 4 * bar)
++#define PEX_BAR_AMAP_EN (1 << 0)
++
++#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x04 * idx)
++#define PAB_MSIX_TABLE_PBA_ACCESS 0xD000
++
++#define GPEX_BAR_ENABLE 0x4D4
++#define GPEX_BAR_SIZE_LDW 0x4D8
++#define GPEX_BAR_SIZE_UDW 0x4DC
++#define GPEX_BAR_SELECT 0x4E0
++
++#define CFG_UNCORRECTABLE_ERROR_SEVERITY 0x10c
++#define UNSUPPORTED_REQUEST_ERROR_SHIFT 20
++#define CFG_UNCORRECTABLE_ERROR_MASK 0x108
++
++/* starting offset of INTX bits in status register */
++#define PAB_INTX_START 5
++
++/* supported number of MSI interrupts */
++#define PCI_NUM_MSI 16
++
++/* MSI registers */
++#define MSI_BASE_LO_OFFSET 0x04
++#define MSI_BASE_HI_OFFSET 0x08
++#define MSI_SIZE_OFFSET 0x0c
++#define MSI_ENABLE_OFFSET 0x14
++#define MSI_STATUS_OFFSET 0x18
++#define MSI_DATA_OFFSET 0x20
++#define MSI_ADDR_L_OFFSET 0x24
++#define MSI_ADDR_H_OFFSET 0x28
++
++/* outbound and inbound window definitions */
++#define WIN_NUM_0 0
++#define WIN_NUM_1 1
++#define CFG_WINDOW_TYPE 0
++#define IO_WINDOW_TYPE 1
++#define MEM_WINDOW_TYPE 2
++#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
++#define MAX_PIO_WINDOWS 8
++
++/* Parameters for the waiting for link up routine */
++#define LINK_WAIT_MAX_RETRIES 10
++#define LINK_WAIT_MIN 90000
++#define LINK_WAIT_MAX 100000
++
++#define PAGED_ADDR_BNDRY 0xc00
++#define OFFSET_TO_PAGE_ADDR(off) \
++ ((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
++#define OFFSET_TO_PAGE_IDX(off) \
++ ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
++
++struct mobiveil_pcie;
++struct mobiveil_pcie_ep;
++
++struct mobiveil_msi { /* MSI information */
++ struct mutex lock; /* protect bitmap variable */
++ struct irq_domain *msi_domain;
++ struct irq_domain *dev_domain;
++ phys_addr_t msi_pages_phys;
++ int num_of_vectors;
++ DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
++};
++
++struct mobiveil_rp_ops {
++ int (*interrupt_init)(struct mobiveil_pcie *pcie);
++ int (*read_other_conf)(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val);
++};
++
++struct root_port {
++ u8 root_bus_nr;
++ void __iomem *config_axi_slave_base; /* endpoint config base */
++ struct resource *ob_io_res;
++ struct mobiveil_rp_ops *ops;
++ int irq;
++ raw_spinlock_t intx_mask_lock;
++ struct irq_domain *intx_domain;
++ struct mobiveil_msi msi;
++};
++
++struct mobiveil_pab_ops {
++ int (*link_up)(struct mobiveil_pcie *pcie);
++};
++
++struct mobiveil_pcie_ep_ops {
++ void (*ep_init)(struct mobiveil_pcie_ep *ep);
++ int (*raise_irq)(struct mobiveil_pcie_ep *ep, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num);
++};
++
++struct mobiveil_pcie_ep {
++ struct pci_epc *epc;
++ struct mobiveil_pcie_ep_ops *ops;
++ phys_addr_t phys_base;
++ size_t addr_size;
++ size_t page_size;
++ phys_addr_t *outbound_addr;
++ unsigned long *ob_window_map;
++ u32 num_ob_windows;
++ void __iomem *msi_mem;
++ phys_addr_t msi_mem_phys;
++ u8 msi_cap; /* MSI capability offset */
++ u8 msix_cap; /* MSI-X capability offset */
++ u8 bar_num;
++ u32 pf_num;
++};
++
++struct mobiveil_pcie {
++ struct platform_device *pdev;
++ struct list_head *resources;
++ void __iomem *csr_axi_slave_base; /* PAB registers base */
++ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
++ void __iomem *apb_csr_base; /* MSI register base */
++ u32 apio_wins;
++ u32 ppio_wins;
++ u32 ob_wins_configured; /* configured outbound windows */
++ u32 ib_wins_configured; /* configured inbound windows */
++ const struct mobiveil_pab_ops *ops;
++ struct root_port rp;
++ struct mobiveil_pcie_ep ep;
++};
++#define to_mobiveil_pcie_from_ep(endpoint) \
++ container_of((endpoint), struct mobiveil_pcie, ep)
++
++int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie);
++int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit);
++bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie);
++int mobiveil_bringup_link(struct mobiveil_pcie *pcie);
++void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
++ u64 pci_addr, u32 type, u64 size);
++void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr,
++ u64 pci_addr, u32 type, u64 size);
++void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pci, int win_num);
++void mobiveil_pcie_disable_ib_win(struct mobiveil_pcie *pci, int win_num);
++u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size);
++void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size);
++
++static inline u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
++{
++ return csr_read(pcie, off, 0x4);
++}
++
++static inline u32 csr_readw(struct mobiveil_pcie *pcie, u32 off)
++{
++ return csr_read(pcie, off, 0x2);
++}
++
++static inline u32 csr_readb(struct mobiveil_pcie *pcie, u32 off)
++{
++ return csr_read(pcie, off, 0x1);
++}
++
++static inline void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
++{
++ csr_write(pcie, val, off, 0x4);
++}
++
++static inline void csr_writew(struct mobiveil_pcie *pcie, u32 val, u32 off)
++{
++ csr_write(pcie, val, off, 0x2);
++}
++
++static inline void csr_writeb(struct mobiveil_pcie *pcie, u32 val, u32 off)
++{
++ csr_write(pcie, val, off, 0x1);
++}
++
++void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no,
++ int bar, u64 phys);
++int program_ob_windows_ep(struct mobiveil_pcie *pcie, int win_num, int type,
++ u64 phys, u64 bus_addr, u8 func, u64 size);
++void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pci,
++ u8 func_no, u8 bar);
++int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep);
++int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no);
++int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u8 interrupt_num);
++int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no,
++ u16 interrupt_num);
++void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pci, enum pci_barno bar);
++void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pci);
++void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pci);
++void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pci);
++void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pci);
++#endif /* _PCIE_MOBIVEIL_H */
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -45,6 +45,20 @@ static void release_pcie_device(struct d
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
+--- a/include/linux/pci-ep-cfs.h
++++ b/include/linux/pci-ep-cfs.h
+@@ -1,12 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+ /**
+ * PCI Endpoint ConfigFS header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+ */
+
+ #ifndef __LINUX_PCI_EP_CFS_H
+--- a/include/linux/pci-epc.h
++++ b/include/linux/pci-epc.h
+@@ -1,12 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+ /**
+ * PCI Endpoint *Controller* (EPC) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+ */
+
+ #ifndef __LINUX_PCI_EPC_H
+@@ -20,6 +17,7 @@ enum pci_epc_irq_type {
+ PCI_EPC_IRQ_UNKNOWN,
+ PCI_EPC_IRQ_LEGACY,
+ PCI_EPC_IRQ_MSI,
++ PCI_EPC_IRQ_MSIX,
+ };
+
+ /**
+@@ -33,24 +31,32 @@ enum pci_epc_irq_type {
+ * capability register
+ * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
+ * the MSI capability register
+- * @raise_irq: ops to raise a legacy or MSI interrupt
++ * @set_msix: ops to set the requested number of MSI-X interrupts in the
++ * MSI-X capability register
++ * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
++ * from the MSI-X capability register
++ * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @start: ops to start the PCI link
+ * @stop: ops to stop the PCI link
+ * @owner: the module owner containing the ops
+ */
+ struct pci_epc_ops {
+- int (*write_header)(struct pci_epc *pci_epc,
++ int (*write_header)(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_header *hdr);
+- int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags);
+- void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
+- int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
+- u64 pci_addr, size_t size);
+- void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
+- int (*set_msi)(struct pci_epc *epc, u8 interrupts);
+- int (*get_msi)(struct pci_epc *epc);
+- int (*raise_irq)(struct pci_epc *pci_epc,
+- enum pci_epc_irq_type type, u8 interrupt_num);
++ int (*set_bar)(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar);
++ void (*clear_bar)(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar);
++ int (*map_addr)(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr, u64 pci_addr, size_t size);
++ void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
++ phys_addr_t addr);
++ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
++ int (*get_msi)(struct pci_epc *epc, u8 func_no);
++ int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
++ int (*get_msix)(struct pci_epc *epc, u8 func_no);
++ int (*raise_irq)(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num);
+ int (*start)(struct pci_epc *epc);
+ void (*stop)(struct pci_epc *epc);
+ struct module *owner;
+@@ -91,8 +97,17 @@ struct pci_epc {
+ struct config_group *group;
+ /* spinlock to protect against concurrent access of EP controller */
+ spinlock_t lock;
++ unsigned int features;
+ };
+
++#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
++#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
++#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
++#define EPC_FEATURE_SET_BAR(features, bar) \
++ (features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
++#define EPC_FEATURE_GET_BAR(features) \
++ ((features & EPC_FEATURE_BAR_MASK) >> 1)
++
+ #define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+
+ #define pci_epc_create(dev, ops) \
+@@ -124,17 +139,23 @@ void pci_epc_destroy(struct pci_epc *epc
+ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+ void pci_epc_linkup(struct pci_epc *epc);
+ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+-int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
+-int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+- dma_addr_t bar_phys, size_t size, int flags);
+-void pci_epc_clear_bar(struct pci_epc *epc, int bar);
+-int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
++int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_header *hdr);
++int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar);
++void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
++ struct pci_epf_bar *epf_bar);
++int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr,
+ u64 pci_addr, size_t size);
+-void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
+-int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
+-int pci_epc_get_msi(struct pci_epc *epc);
+-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+- u8 interrupt_num);
++void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
++ phys_addr_t phys_addr);
++int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
++int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
++int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
++int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
++int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
++ enum pci_epc_irq_type type, u16 interrupt_num);
+ int pci_epc_start(struct pci_epc *epc);
+ void pci_epc_stop(struct pci_epc *epc);
+ struct pci_epc *pci_epc_get(const char *epc_name);
+--- a/include/linux/pci-epf.h
++++ b/include/linux/pci-epf.h
+@@ -1,12 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+ /**
+ * PCI Endpoint *Function* (EPF) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+- *
+- * This program is free software: you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 of
+- * the License as published by the Free Software Foundation.
+ */
+
+ #ifndef __LINUX_PCI_EPF_H
+@@ -75,7 +72,7 @@ struct pci_epf_ops {
+ * @driver: PCI EPF driver
+ * @ops: set of function pointers for performing EPF operations
+ * @owner: the owner of the module that registers the PCI EPF driver
+- * @group: configfs group corresponding to the PCI EPF driver
++ * @epf_group: list of configfs group corresponding to the PCI EPF driver
+ * @id_table: identifies EPF devices for probing
+ */
+ struct pci_epf_driver {
+@@ -85,7 +82,7 @@ struct pci_epf_driver {
+ struct device_driver driver;
+ struct pci_epf_ops *ops;
+ struct module *owner;
+- struct config_group *group;
++ struct list_head epf_group;
+ const struct pci_epf_device_id *id_table;
+ };
+
+@@ -100,6 +97,8 @@ struct pci_epf_driver {
+ struct pci_epf_bar {
+ dma_addr_t phys_addr;
+ size_t size;
++ enum pci_barno barno;
++ int flags;
+ };
+
+ /**
+@@ -120,6 +119,7 @@ struct pci_epf {
+ struct pci_epf_header *header;
+ struct pci_epf_bar bar[6];
+ u8 msi_interrupts;
++ u16 msix_interrupts;
+ u8 func_no;
+
+ struct pci_epc *epc;
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1946,6 +1946,7 @@ void pcibios_release_device(struct pci_d
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern struct dev_pm_ops pcibios_pm_ops;
+--- a/include/uapi/linux/pcitest.h
++++ b/include/uapi/linux/pcitest.h
+@@ -16,5 +16,8 @@
+ #define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
+ #define PCITEST_READ _IOW('P', 0x5, unsigned long)
+ #define PCITEST_COPY _IOW('P', 0x6, unsigned long)
++#define PCITEST_MSIX _IOW('P', 0x7, int)
++#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
++#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
+
+ #endif /* __UAPI_LINUX_PCITEST_H */
+--- a/tools/pci/pcitest.c
++++ b/tools/pci/pcitest.c
+@@ -31,12 +31,17 @@
+ #define BILLION 1E9
+
+ static char *result[] = { "NOT OKAY", "OKAY" };
++static char *irq[] = { "LEGACY", "MSI", "MSI-X" };
+
+ struct pci_test {
+ char *device;
+ char barnum;
+ bool legacyirq;
+ unsigned int msinum;
++ unsigned int msixnum;
++ int irqtype;
++ bool set_irqtype;
++ bool get_irqtype;
+ bool read;
+ bool write;
+ bool copy;
+@@ -65,6 +70,24 @@ static int run_test(struct pci_test *tes
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
++ if (test->set_irqtype) {
++ ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype);
++ fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]);
++ if (ret < 0)
++ fprintf(stdout, "FAILED\n");
++ else
++ fprintf(stdout, "%s\n", result[ret]);
++ }
++
++ if (test->get_irqtype) {
++ ret = ioctl(fd, PCITEST_GET_IRQTYPE);
++ fprintf(stdout, "GET IRQ TYPE:\t\t");
++ if (ret < 0)
++ fprintf(stdout, "FAILED\n");
++ else
++ fprintf(stdout, "%s\n", irq[ret]);
++ }
++
+ if (test->legacyirq) {
+ ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
+ fprintf(stdout, "LEGACY IRQ:\t");
+@@ -83,6 +106,15 @@ static int run_test(struct pci_test *tes
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
++ if (test->msixnum > 0 && test->msixnum <= 2048) {
++ ret = ioctl(fd, PCITEST_MSIX, test->msixnum);
++ fprintf(stdout, "MSI-X%d:\t\t", test->msixnum);
++ if (ret < 0)
++ fprintf(stdout, "TEST FAILED\n");
++ else
++ fprintf(stdout, "%s\n", result[ret]);
++ }
++
+ if (test->write) {
+ ret = ioctl(fd, PCITEST_WRITE, test->size);
+ fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
+@@ -133,7 +165,7 @@ int main(int argc, char **argv)
+ /* set default endpoint device */
+ test->device = "/dev/pci-endpoint-test.0";
+
+- while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
++ while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF)
+ switch (c) {
+ case 'D':
+ test->device = optarg;
+@@ -151,6 +183,20 @@ int main(int argc, char **argv)
+ if (test->msinum < 1 || test->msinum > 32)
+ goto usage;
+ continue;
++ case 'x':
++ test->msixnum = atoi(optarg);
++ if (test->msixnum < 1 || test->msixnum > 2048)
++ goto usage;
++ continue;
++ case 'i':
++ test->irqtype = atoi(optarg);
++ if (test->irqtype < 0 || test->irqtype > 2)
++ goto usage;
++ test->set_irqtype = true;
++ continue;
++ case 'I':
++ test->get_irqtype = true;
++ continue;
+ case 'r':
+ test->read = true;
+ continue;
+@@ -173,6 +219,9 @@ usage:
+ "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
+ "\t-b <bar num> BAR test (bar number between 0..5)\n"
+ "\t-m <msi num> MSI test (msi number between 1..32)\n"
++ "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n"
++ "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n"
++ "\t-I Get current IRQ type configured\n"
+ "\t-l Legacy IRQ test\n"
+ "\t-r Read buffer test\n"
+ "\t-w Write buffer test\n"
+--- a/tools/pci/pcitest.sh
++++ b/tools/pci/pcitest.sh
+@@ -16,7 +16,10 @@ echo
+ echo "Interrupt tests"
+ echo
+
++pcitest -i 0
+ pcitest -l
++
++pcitest -i 1
+ msi=1
+
+ while [ $msi -lt 33 ]
+@@ -26,9 +29,21 @@ do
+ done
+ echo
+
++pcitest -i 2
++msix=1
++
++while [ $msix -lt 2049 ]
++do
++ pcitest -x $msix
++ msix=`expr $msix + 1`
++done
++echo
++
+ echo "Read Tests"
+ echo
+
++pcitest -i 1
++
+ pcitest -r -s 1
+ pcitest -r -s 1024
+ pcitest -r -s 1025
-From 60eee49f37b77bc2d5f46c5db5a5c24d0c31bd02 Mon Sep 17 00:00:00 2001
+From fe21ef44284a3aa6fd80448e4ab2e1e8a55fb926 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 20 Nov 2018 15:36:57 +0800
+Date: Wed, 17 Apr 2019 18:58:59 +0800
Subject: [PATCH] qspi: support layerscape
This is an integrated patch of qspi for layerscape
Signed-off-by: Abhimanyu Saini <abhimanyu.saini@nxp.com>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Chuanhua Han <chuanhua.han@nxp.com>
Signed-off-by: Cyrille Pitchen <cyrille.pitchen@wedev4u.fr>
+Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
Signed-off-by: Suresh Gupta <suresh.gupta@nxp.com>
-Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
---
- .../devicetree/bindings/mtd/fsl-quadspi.txt | 31 ++
- drivers/mtd/spi-nor/fsl-quadspi.c | 444 +++++++++++-------
- drivers/mtd/spi-nor/spi-nor.c | 5 +
- 3 files changed, 320 insertions(+), 160 deletions(-)
+ drivers/mtd/spi-nor/fsl-quadspi.c | 444 +++++++++++++++++++-----------
+ drivers/mtd/spi-nor/spi-nor.c | 5 +
+ drivers/spi/spi-fsl-dspi.c | 4 +-
+ 3 files changed, 291 insertions(+), 162 deletions(-)
---- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
-+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
-@@ -7,6 +7,7 @@ Required properties:
- or
- "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi",
- "fsl,ls1043a-qspi" followed by "fsl,ls1021a-qspi"
-+ "fsl,ls2080a-qspi" followed by "fsl,ls1088a-qspi",
- - reg : the first contains the register location and length,
- the second contains the memory mapping address and length
- - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
-@@ -39,3 +40,33 @@ qspi0: quadspi@40044000 {
- ....
- };
- };
-+
-+qspi1: quadspi@20c0000 {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ reg = <0x0 0x20c0000 0x0 0x10000>,
-+ <0x0 0x20000000 0x0 0x10000000>;
-+ reg-names = "QuadSPI", "QuadSPI-memory";
-+ interrupts = <0 25 0x4>; /* Level high type */
-+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
-+ clock-names = "qspi_en", "qspi";
-+ status = "okay";
-+
-+ qflash0: s25fs512s@0 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <0>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+
-+ qflash1: s25fs512s@1 {
-+ #address-cells = <1>;
-+ #size-cells = <1>;
-+ spi-max-frequency = <20000000>;
-+ reg = <1>;
-+ spi-rx-bus-width = <4>;
-+ spi-tx-bus-width = <4>;
-+ };
-+};
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -41,6 +41,7 @@
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -1147,6 +1147,11 @@ static const struct flash_info spi_nor_i
+@@ -1154,6 +1154,11 @@ static const struct flash_info spi_nor_i
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1024,8 +1024,8 @@ static int dspi_probe(struct platform_de
+ goto out_clk_put;
+ }
+
+- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+- pdev->name, dspi);
++ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
++ IRQF_SHARED, pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ goto out_clk_put;
-From f901f791d07deaeba6310ac070769575a0bb790a Mon Sep 17 00:00:00 2001
+From 6ca94d2e7dc72b21703e6d9be4e8ec3ad4a26f41 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:54 +0800
-Subject: [PATCH 36/40] sdhc: support layerscape
+Date: Wed, 17 Apr 2019 18:59:02 +0800
+Subject: [PATCH] sdhc: support layerscape
+
This is an integrated patch of sdhc for layerscape
-Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Mathew McBride <matt@traverse.com.au>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
---
- drivers/mmc/host/sdhci-of-esdhc.c | 85 +++++++++++++++++++++----------
- 1 file changed, 57 insertions(+), 28 deletions(-)
+ drivers/mmc/core/mmc.c | 3 +
+ drivers/mmc/host/sdhci-esdhc.h | 25 +++
+ drivers/mmc/host/sdhci-of-esdhc.c | 270 ++++++++++++++++++++++++++----
+ drivers/mmc/host/sdhci.c | 9 +-
+ drivers/mmc/host/sdhci.h | 1 +
+ include/linux/mmc/card.h | 1 +
+ include/linux/mmc/host.h | 2 +
+ 7 files changed, 272 insertions(+), 39 deletions(-)
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1174,6 +1174,9 @@ static int mmc_select_hs400(struct mmc_c
+ goto out_err;
+
+ /* Switch card to DDR */
++ if (host->ops->prepare_ddr_to_hs400)
++ host->ops->prepare_ddr_to_hs400(host);
++
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ EXT_CSD_DDR_BUS_WIDTH_8,
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -59,7 +59,32 @@
+
+ /* Tuning Block Control Register */
+ #define ESDHC_TBCTL 0x120
++#define ESDHC_HS400_WNDW_ADJUST 0x00000040
++#define ESDHC_HS400_MODE 0x00000010
+ #define ESDHC_TB_EN 0x00000004
++#define ESDHC_TBPTR 0x128
++
++/* SD Clock Control Register */
++#define ESDHC_SDCLKCTL 0x144
++#define ESDHC_LPBK_CLK_SEL 0x80000000
++#define ESDHC_CMD_CLK_CTL 0x00008000
++
++/* SD Timing Control Register */
++#define ESDHC_SDTIMNGCTL 0x148
++#define ESDHC_FLW_CTL_BG 0x00008000
++
++/* DLL Config 0 Register */
++#define ESDHC_DLLCFG0 0x160
++#define ESDHC_DLL_ENABLE 0x80000000
++#define ESDHC_DLL_FREQ_SEL 0x08000000
++
++/* DLL Config 1 Register */
++#define ESDHC_DLLCFG1 0x164
++#define ESDHC_DLL_PD_PULSE_STRETCH_SEL 0x80000000
++
++/* DLL Status 0 Register */
++#define ESDHC_DLLSTAT0 0x170
++#define ESDHC_DLL_STS_SLV_LOCK 0x08000000
+
+ /* Control Register for DMA transfer */
+ #define ESDHC_DMA_SYSCTL 0x40c
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
-@@ -30,11 +30,56 @@
+@@ -30,11 +30,61 @@
#define VENDOR_V_22 0x12
#define VENDOR_V_23 0x13
u8 vendor_ver;
u8 spec_ver;
bool quirk_incorrect_hostver;
++ bool quirk_limited_clk_division;
++ bool quirk_unreliable_pulse_detection;
++ bool quirk_fixup_tuning;
++ bool quirk_incorrect_delay_chain;
unsigned int peripheral_clock;
+ const struct esdhc_clk_fixup *clk_fixup;
++ u32 div_ratio;
};
/**
-@@ -502,6 +547,7 @@ static void esdhc_of_set_clock(struct sd
+@@ -495,13 +545,20 @@ static void esdhc_clock_enable(struct sd
+ }
+ }
+
++static struct soc_device_attribute soc_incorrect_delay_chain[] = {
++ { .family = "QorIQ LX2160A", .revision = "1.0", },
++ { },
++};
++
+ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int pre_div = 1;
int div = 1;
++ int division;
ktime_t timeout;
+ long fixup = 0;
u32 temp;
host->mmc->actual_clock = 0;
-@@ -515,27 +561,14 @@ static void esdhc_of_set_clock(struct sd
+@@ -515,27 +572,14 @@ static void esdhc_of_set_clock(struct sd
if (esdhc->vendor_ver < VENDOR_V_23)
pre_div = 2;
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
-@@ -800,6 +833,7 @@ static struct soc_device_attribute soc_i
+@@ -548,9 +592,30 @@ static void esdhc_of_set_clock(struct sd
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
++ if (esdhc->quirk_limited_clk_division &&
++ clock == MMC_HS200_MAX_DTR &&
++ (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
++ host->flags & SDHCI_HS400_TUNING)) {
++ division = pre_div * div;
++ if (division <= 4) {
++ pre_div = 4;
++ div = 1;
++ } else if (division <= 8) {
++ pre_div = 4;
++ div = 2;
++ } else if (division <= 12) {
++ pre_div = 4;
++ div = 3;
++ } else {
++ pr_warn("%s: using upsupported clock division.\n",
++ mmc_hostname(host->mmc));
++ }
++ }
++
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+ host->mmc->actual_clock = host->max_clk / pre_div / div;
++ esdhc->div_ratio = pre_div * div;
+ pre_div >>= 1;
+ div--;
+
+@@ -560,6 +625,29 @@ static void esdhc_of_set_clock(struct sd
+ | (pre_div << ESDHC_PREDIV_SHIFT));
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
++ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
++ clock == MMC_HS200_MAX_DTR) {
++ temp = sdhci_readl(host, ESDHC_TBCTL);
++ sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
++ temp = sdhci_readl(host, ESDHC_SDCLKCTL);
++ sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
++ esdhc_clock_enable(host, true);
++
++ temp = sdhci_readl(host, ESDHC_DLLCFG0);
++ temp |= ESDHC_DLL_ENABLE;
++ if (host->mmc->actual_clock == MMC_HS200_MAX_DTR ||
++ esdhc->quirk_incorrect_delay_chain == false)
++ temp |= ESDHC_DLL_FREQ_SEL;
++ sdhci_writel(host, temp, ESDHC_DLLCFG0);
++ temp = sdhci_readl(host, ESDHC_TBCTL);
++ sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
++
++ esdhc_clock_enable(host, false);
++ temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
++ temp |= ESDHC_FLUSH_ASYNC_FIFO;
++ sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
++ }
++
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+@@ -575,6 +663,7 @@ static void esdhc_of_set_clock(struct sd
+ udelay(10);
+ }
+
++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp |= ESDHC_CLOCK_SDCLKEN;
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+ }
+@@ -603,6 +692,8 @@ static void esdhc_pltfm_set_bus_width(st
+
+ static void esdhc_reset(struct sdhci_host *host, u8 mask)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+
+ sdhci_reset(host, mask);
+@@ -617,6 +708,12 @@ static void esdhc_reset(struct sdhci_hos
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
++
++ if (esdhc->quirk_unreliable_pulse_detection) {
++ val = sdhci_readl(host, ESDHC_DLLCFG1);
++ val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
++ sdhci_writel(host, val, ESDHC_DLLCFG1);
++ }
+ }
+ }
+
+@@ -628,6 +725,7 @@ static void esdhc_reset(struct sdhci_hos
+ static const struct of_device_id scfg_device_ids[] = {
+ { .compatible = "fsl,t1040-scfg", },
+ { .compatible = "fsl,ls1012a-scfg", },
++ { .compatible = "fsl,ls1043a-scfg", },
+ { .compatible = "fsl,ls1046a-scfg", },
+ {}
+ };
+@@ -690,23 +788,91 @@ static int esdhc_signal_voltage_switch(s
+ }
+ }
+
+-static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
++static struct soc_device_attribute soc_fixup_tuning[] = {
++ { .family = "QorIQ T1040", .revision = "1.0", },
++ { .family = "QorIQ T2080", .revision = "1.0", },
++ { .family = "QorIQ T1023", .revision = "1.0", },
++ { .family = "QorIQ LS1021A", .revision = "1.0", },
++ { .family = "QorIQ LS1080A", .revision = "1.0", },
++ { .family = "QorIQ LS2080A", .revision = "1.0", },
++ { .family = "QorIQ LS1012A", .revision = "1.0", },
++ { .family = "QorIQ LS1043A", .revision = "1.*", },
++ { .family = "QorIQ LS1046A", .revision = "1.0", },
++ { },
++};
++
++static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
+ {
+- struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+- /* Use tuning block for tuning procedure */
+ esdhc_clock_enable(host, false);
++
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ val |= ESDHC_FLUSH_ASYNC_FIFO;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+
+ val = sdhci_readl(host, ESDHC_TBCTL);
+- val |= ESDHC_TB_EN;
++ if (enable)
++ val |= ESDHC_TB_EN;
++ else
++ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
++
+ esdhc_clock_enable(host, true);
++}
++
++static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
++ bool hs400_tuning;
++ u32 val;
++ int ret;
++
++ if (esdhc->quirk_limited_clk_division &&
++ host->flags & SDHCI_HS400_TUNING)
++ esdhc_of_set_clock(host, host->clock);
++
++ esdhc_tuning_block_enable(host, true);
++
++ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
++ ret = sdhci_execute_tuning(mmc, opcode);
++
++ if (hs400_tuning) {
++ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
++ val |= ESDHC_FLW_CTL_BG;
++ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
++ }
+
+- return sdhci_execute_tuning(mmc, opcode);
++ if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
++
++ /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
++ * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
++ */
++ val = sdhci_readl(host, ESDHC_TBPTR);
++ val = (val & ~((0x7f << 8) | 0x7f)) |
++ (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
++ sdhci_writel(host, val, ESDHC_TBPTR);
++
++ /* program the software tuning mode by setting
++ * TBCTL[TB_MODE]=2'h3
++ */
++ val = sdhci_readl(host, ESDHC_TBCTL);
++ val |= 0x3;
++ sdhci_writel(host, val, ESDHC_TBCTL);
++ sdhci_execute_tuning(mmc, opcode);
++ }
++ return ret;
++}
++
++static void esdhc_set_uhs_signaling(struct sdhci_host *host,
++ unsigned int timing)
++{
++ if (timing == MMC_TIMING_MMC_HS400)
++ esdhc_tuning_block_enable(host, true);
++ else
++ sdhci_set_uhs_signaling(host, timing);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -755,7 +921,7 @@ static const struct sdhci_ops sdhci_esdh
+ .adma_workaround = esdhc_of_adma_workaround,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = esdhc_reset,
+- .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_uhs_signaling = esdhc_set_uhs_signaling,
+ };
+
+ static const struct sdhci_ops sdhci_esdhc_le_ops = {
+@@ -772,7 +938,7 @@ static const struct sdhci_ops sdhci_esdh
+ .adma_workaround = esdhc_of_adma_workaround,
+ .set_bus_width = esdhc_pltfm_set_bus_width,
+ .reset = esdhc_reset,
+- .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_uhs_signaling = esdhc_set_uhs_signaling,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
+@@ -798,8 +964,20 @@ static struct soc_device_attribute soc_i
+ { },
+ };
++static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
++ { .family = "QorIQ LX2160A", .revision = "1.0", },
++ { .family = "QorIQ LX2160A", .revision = "2.0", },
++ { },
++};
++
++static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
++ { .family = "QorIQ LX2160A", .revision = "1.0", },
++ { },
++};
++
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
+ const struct of_device_id *match;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_esdhc *esdhc;
struct device_node *np;
-@@ -819,6 +853,9 @@ static void esdhc_init(struct platform_d
+@@ -819,6 +997,24 @@ static void esdhc_init(struct platform_d
else
esdhc->quirk_incorrect_hostver = false;
++ if (soc_device_match(soc_fixup_sdhc_clkdivs))
++ esdhc->quirk_limited_clk_division = true;
++ else
++ esdhc->quirk_limited_clk_division = false;
++
++ if (soc_device_match(soc_unreliable_pulse_detection))
++ esdhc->quirk_unreliable_pulse_detection = true;
++ else
++ esdhc->quirk_unreliable_pulse_detection = false;
++
++ if (soc_device_match(soc_incorrect_delay_chain))
++ esdhc->quirk_incorrect_delay_chain = true;
++ else
++ esdhc->quirk_incorrect_delay_chain = false;
++
+ match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
+ if (match)
+ esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
-@@ -923,14 +960,6 @@ static int sdhci_esdhc_probe(struct plat
+@@ -846,6 +1042,12 @@ static void esdhc_init(struct platform_d
+ }
+ }
+
++static int esdhc_prepare_ddr_to_hs400(struct mmc_host *mmc)
++{
++ esdhc_tuning_block_enable(mmc_priv(mmc), false);
++ return 0;
++}
++
+ static int sdhci_esdhc_probe(struct platform_device *pdev)
+ {
+ struct sdhci_host *host;
+@@ -869,6 +1071,7 @@ static int sdhci_esdhc_probe(struct plat
+ host->mmc_host_ops.start_signal_voltage_switch =
+ esdhc_signal_voltage_switch;
+ host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
++ host->mmc_host_ops.prepare_ddr_to_hs400 = esdhc_prepare_ddr_to_hs400;
+ host->tuning_delay = 1;
+
+ esdhc_init(pdev, host);
+@@ -877,6 +1080,11 @@ static int sdhci_esdhc_probe(struct plat
+
+ pltfm_host = sdhci_priv(host);
+ esdhc = sdhci_pltfm_priv(pltfm_host);
++ if (soc_device_match(soc_fixup_tuning))
++ esdhc->quirk_fixup_tuning = true;
++ else
++ esdhc->quirk_fixup_tuning = false;
++
+ if (esdhc->vendor_ver == VENDOR_V_22)
+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
+
+@@ -923,14 +1131,6 @@ static int sdhci_esdhc_probe(struct plat
return ret;
}
static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2148,7 +2148,7 @@ static void sdhci_send_tuning(struct sdh
+
+ }
+
+-static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
++static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+ int i;
+
+@@ -2165,13 +2165,13 @@ static void __sdhci_execute_tuning(struc
+ pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_abort_tuning(host, opcode);
+- return;
++ return -ETIMEDOUT;
+ }
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+ if (ctrl & SDHCI_CTRL_TUNED_CLK)
+- return; /* Success! */
++ return 0; /* Success! */
+ break;
+ }
+
+@@ -2183,6 +2183,7 @@ static void __sdhci_execute_tuning(struc
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_reset_tuning(host);
++ return -EAGAIN;
+ }
+
+ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+@@ -2244,7 +2245,7 @@ int sdhci_execute_tuning(struct mmc_host
+
+ sdhci_start_tuning(host);
+
+- __sdhci_execute_tuning(host, opcode);
++ host->tuning_err = __sdhci_execute_tuning(host, opcode);
+
+ sdhci_end_tuning(host);
+ out:
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -545,6 +545,7 @@ struct sdhci_host {
+
+ unsigned int tuning_count; /* Timer count for re-tuning */
+ unsigned int tuning_mode; /* Re-tuning mode supported by host */
++ unsigned int tuning_err; /* Error code for re-tuning */
+ #define SDHCI_TUNING_MODE_1 0
+ #define SDHCI_TUNING_MODE_2 1
+ #define SDHCI_TUNING_MODE_3 2
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -156,6 +156,7 @@ struct sd_switch_caps {
+ #define UHS_DDR50_MAX_DTR 50000000
+ #define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
+ #define UHS_SDR12_MAX_DTR 25000000
++#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR
+ unsigned int sd3_bus_mode;
+ #define UHS_SDR12_BUS_SPEED 0
+ #define HIGH_SPEED_BUS_SPEED 1
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -145,6 +145,8 @@ struct mmc_host_ops {
+
+ /* Prepare HS400 target operating frequency depending host driver */
+ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
++ int (*prepare_ddr_to_hs400)(struct mmc_host *host);
++
+ /* Prepare enhanced strobe depending host driver */
+ void (*hs400_enhanced_strobe)(struct mmc_host *host,
+ struct mmc_ios *ios);
-From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
+From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:28:03 +0800
-Subject: [PATCH 37/40] sec: support layerscape
+Date: Tue, 23 Apr 2019 17:41:43 +0800
+Subject: [PATCH] sec: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of sec for layerscape
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Horia Geantă horia.geanta@nxp.com
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
crypto/Kconfig | 20 +
crypto/Makefile | 1 +
+ crypto/chacha20poly1305.c | 2 -
crypto/tcrypt.c | 27 +-
crypto/testmgr.c | 244 ++
crypto/testmgr.h | 219 ++
- crypto/tls.c | 607 +++
+ crypto/tls.c | 607 ++++
drivers/crypto/Makefile | 2 +-
- drivers/crypto/caam/Kconfig | 57 +-
- drivers/crypto/caam/Makefile | 10 +-
- drivers/crypto/caam/caamalg.c | 131 +-
- drivers/crypto/caam/caamalg_desc.c | 761 +++-
- drivers/crypto/caam/caamalg_desc.h | 47 +-
- drivers/crypto/caam/caamalg_qi.c | 927 ++++-
- drivers/crypto/caam/caamalg_qi2.c | 5691 +++++++++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 274 ++
- drivers/crypto/caam/caamhash.c | 132 +-
+ drivers/crypto/caam/Kconfig | 85 +-
+ drivers/crypto/caam/Makefile | 26 +-
+ drivers/crypto/caam/caamalg.c | 468 +++-
+ drivers/crypto/caam/caamalg_desc.c | 903 +++++-
+ drivers/crypto/caam/caamalg_desc.h | 52 +-
+ drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
+ drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 276 ++
+ drivers/crypto/caam/caamhash.c | 192 +-
drivers/crypto/caam/caamhash_desc.c | 108 +
drivers/crypto/caam/caamhash_desc.h | 49 +
- drivers/crypto/caam/compat.h | 2 +
- drivers/crypto/caam/ctrl.c | 23 +-
- drivers/crypto/caam/desc.h | 62 +-
- drivers/crypto/caam/desc_constr.h | 52 +-
- drivers/crypto/caam/dpseci.c | 865 ++++
- drivers/crypto/caam/dpseci.h | 433 ++
+ drivers/crypto/caam/caampkc.c | 52 +-
+ drivers/crypto/caam/caamrng.c | 52 +-
+ drivers/crypto/caam/compat.h | 4 +
+ drivers/crypto/caam/ctrl.c | 194 +-
+ drivers/crypto/caam/desc.h | 89 +-
+ drivers/crypto/caam/desc_constr.h | 59 +-
+ drivers/crypto/caam/dpseci.c | 865 ++++++
+ drivers/crypto/caam/dpseci.h | 433 +++
drivers/crypto/caam/dpseci_cmd.h | 287 ++
- drivers/crypto/caam/error.c | 75 +-
+ drivers/crypto/caam/error.c | 81 +-
drivers/crypto/caam/error.h | 6 +-
- drivers/crypto/caam/intern.h | 1 +
- drivers/crypto/caam/jr.c | 42 +
+ drivers/crypto/caam/intern.h | 102 +-
+ drivers/crypto/caam/jr.c | 84 +
drivers/crypto/caam/jr.h | 2 +
drivers/crypto/caam/key_gen.c | 30 -
drivers/crypto/caam/key_gen.h | 30 +
- drivers/crypto/caam/qi.c | 85 +-
+ drivers/crypto/caam/qi.c | 134 +-
drivers/crypto/caam/qi.h | 2 +-
- drivers/crypto/caam/regs.h | 2 +
+ drivers/crypto/caam/regs.h | 76 +-
drivers/crypto/caam/sg_sw_qm.h | 46 +-
drivers/crypto/talitos.c | 8 +
- 37 files changed, 11006 insertions(+), 354 deletions(-)
+ include/crypto/chacha20.h | 1 +
+ 41 files changed, 12088 insertions(+), 733 deletions(-)
create mode 100644 crypto/tls.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.h
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -22,8 +22,6 @@
+
+ #include "internal.h"
+
+-#define CHACHAPOLY_IV_SIZE 12
+-
+ struct chachapoly_instance_ctx {
+ struct crypto_skcipher_spawn chacha;
+ struct crypto_ahash_spawn poly;
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,7 +76,7 @@ static char *check[] = {
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
-@@ -1,7 +1,11 @@
+@@ -1,7 +1,17 @@
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
++
++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
++ tristate
++
++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
++ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
-@@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
+@@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
default y
help
Enables the driver module for Job Rings which are part of
-@@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
+@@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
range 2 9
default "9"
help
-@@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+@@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
help
Enable the Job Ring's interrupt coalescing feature.
-@@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
+@@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
+ threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
+- tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register algorithm implementations with the Crypto API"
default y
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
-@@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ select CRYPTO_BLKCIPHER
+@@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ scatterlist crypto API (such as the linux native IPSec
+ stack) to the SEC4 via job ring.
+- To compile this as a module, choose M here: the module
+- will be called caamalg.
+-
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
+- tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
++ bool "Queue Interface as Crypto API backend"
+ depends on FSL_SDK_DPA && NET
default y
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
-@@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ help
+@@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ assigned to the kernel should also be more than the number of
+ job rings.
+- To compile this as a module, choose M here: the module
+- will be called caamalg_qi.
+-
config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
+- tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register hash algorithm implementations with Crypto API"
default y
++ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_HASH
help
-@@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ Selecting this will offload ahash for users of the
+ scatterlist crypto API to the SEC4 via job ring.
+- To compile this as a module, choose M here: the module
+- will be called caamhash.
+-
config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
+- tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register public key cryptography implementations with Crypto API"
default y
select CRYPTO_RSA
help
-@@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
+- To compile this as a module, choose M here: the module
+- will be called caam_pkc.
config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
+- tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register caam device for hwrng API"
default y
select CRYPTO_RNG
select HW_RANDOM
-@@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
- To compile this as a module, choose M here: the module
- will be called caamrng.
+@@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+ Selecting this will register the SEC4 hardware rng to
+ the hw_random API for suppying the kernel entropy pool.
+
+- To compile this as a module, choose M here: the module
+- will be called caamrng.
++endif # CRYPTO_DEV_FSL_CAAM_JR
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
-+endif # CRYPTO_DEV_FSL_CAAM_JR
-+
+endif # CRYPTO_DEV_FSL_CAAM
-+
+
+-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ select CRYPTO_DEV_FSL_CAAM_COMMON
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
++ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
-- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
-+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
-+ CRYPTO_DEV_FSL_DPAA2_CAAM)
-+
-+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
-+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
-+ CRYPTO_DEV_FSL_DPAA2_CAAM)
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
-
- caam-objs := ctrl.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+-
+-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
-+caam_jr-objs := jr.o key_gen.o
- caam_pkc-y := caampkc.o pkc_desc.o
+-caam_pkc-y := caampkc.o pkc_desc.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
++
++caam-y := ctrl.o
++caam_jr-y := jr.o key_gen.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
++
++caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
+- caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
-@@ -108,6 +108,7 @@ struct caam_ctx {
+@@ -71,6 +71,8 @@
+ #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 5)
+
++#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
++
+ #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+ #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+@@ -108,6 +110,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
-@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
+@@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
-@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
+@@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
/*
* Job Descriptor and Shared Descriptors
-@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
+@@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
return 0;
}
-@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
-@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
+@@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
skip_enc:
/*
-@@ -266,9 +271,9 @@ skip_enc:
+@@ -266,9 +273,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
if (!alg->caam.geniv)
goto skip_givenc;
-@@ -300,9 +305,9 @@ skip_enc:
+@@ -300,9 +307,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
skip_givenc:
return 0;
-@@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
+@@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
+@@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
}
desc = ctx->sh_desc_enc;
/*
* Job Descriptor and Shared Descriptors
-@@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
+@@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
}
desc = ctx->sh_desc_dec;
return 0;
}
-@@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
}
desc = ctx->sh_desc_enc;
/*
* Job Descriptor and Shared Descriptors
-@@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
}
desc = ctx->sh_desc_dec;
return 0;
}
-@@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
}
desc = ctx->sh_desc_enc;
/*
* Job Descriptor and Shared Descriptors
-@@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
}
desc = ctx->sh_desc_dec;
return 0;
}
-@@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
+@@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
+ return 0;
+ }
+
++static int chachapoly_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ u32 *desc;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ desc = ctx->sh_desc_enc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, true, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), ctx->dir);
++
++ desc = ctx->sh_desc_dec;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, false, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), ctx->dir);
++
++ return 0;
++}
++
++static int chachapoly_setauthsize(struct crypto_aead *aead,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++
++ if (authsize != POLY1305_DIGEST_SIZE)
++ return -EINVAL;
++
++ ctx->authsize = authsize;
++ return chachapoly_set_sh_desc(aead);
++}
++
++static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
++
++ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ ctx->cdata.key_virt = key;
++ ctx->cdata.keylen = keylen - saltlen;
++
++ return chachapoly_set_sh_desc(aead);
++}
++
+ static int aead_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
int ret = 0;
-@@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
+@@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
-@@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
+@@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
-@@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
+@@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
#endif
memcpy(ctx->key, key, keylen);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
-@@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
+@@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
return rfc4106_set_sh_desc(aead);
}
-@@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
+@@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
return rfc4543_set_sh_desc(aead);
}
-@@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
+@@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
return 0;
}
-@@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
+@@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
return 0;
}
-@@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
+@@ -987,9 +1080,6 @@ static void init_aead_job(struct aead_re
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
}
static void init_gcm_job(struct aead_request *req,
-@@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
+@@ -1004,6 +1094,7 @@ static void init_gcm_job(struct aead_req
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
/* BUG This should not be specific to generic GCM. */
last = 0;
-@@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
+@@ -1021,6 +1112,40 @@ static void init_gcm_job(struct aead_req
+ /* End of blank commands */
+ }
+
++static void init_chachapoly_job(struct aead_request *req,
++ struct aead_edesc *edesc, bool all_contig,
++ bool encrypt)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int assoclen = req->assoclen;
++ u32 *desc = edesc->hw_desc;
++ u32 ctx_iv_off = 4;
++
++ init_aead_job(req, edesc, all_contig, encrypt);
++
++ if (ivsize != CHACHAPOLY_IV_SIZE) {
++ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
++ ctx_iv_off += 4;
++
++ /*
++ * The associated data comes already with the IV but we need
++ * to skip it when we authenticate or encrypt...
++ */
++ assoclen -= ivsize;
++ }
++
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
++
++ /*
++ * For IPsec load the IV further in the same register.
++ * For RFC7539 simply load the 12 bytes nonce in a single operation
++ */
++ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ctx_iv_off << LDST_OFFSET_SHIFT);
++}
++
+ static void init_authenc_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
+@@ -1030,6 +1155,7 @@ static void init_authenc_job(struct aead
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
+@@ -1053,6 +1179,15 @@ static void init_authenc_job(struct aead
init_aead_job(req, edesc, all_contig, encrypt);
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
-@@ -3204,9 +3248,11 @@ struct caam_crypto_alg {
+@@ -1225,8 +1360,16 @@ static struct aead_edesc *aead_edesc_all
+ }
+ }
+
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries.
++ */
+ sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ if (mapped_dst_nents > 1)
++ sec4_sg_len += ALIGN(mapped_dst_nents, 4);
++ else
++ sec4_sg_len = ALIGN(sec4_sg_len, 4);
++
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+@@ -1307,6 +1450,72 @@ static int gcm_encrypt(struct aead_reque
+ return ret;
+ }
+
++static int chachapoly_encrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ bool all_contig;
++ u32 *desc;
++ int ret;
++
++ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
++ true);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ desc = edesc->hw_desc;
++
++ init_chachapoly_job(req, edesc, all_contig, true);
++ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++
++ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ aead_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ }
++
++ return ret;
++}
++
++static int chachapoly_decrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ bool all_contig;
++ u32 *desc;
++ int ret;
++
++ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
++ false);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ desc = edesc->hw_desc;
++
++ init_chachapoly_job(req, edesc, all_contig, false);
++ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++
++ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ aead_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ }
++
++ return ret;
++}
++
+ static int ipsec_gcm_encrypt(struct aead_request *req)
+ {
+ if (req->assoclen < 8)
+@@ -1494,7 +1703,25 @@ static struct ablkcipher_edesc *ablkciph
+
+ sec4_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = sec4_sg_ents;
+- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
++ */
++ if (mapped_dst_nents > 1)
++ sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
++ 1 + ALIGN(mapped_src_nents, 4));
++ else
++ sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
++
+ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+ /*
+@@ -3196,6 +3423,50 @@ static struct caam_aead_alg driver_aeads
+ .geniv = true,
+ },
+ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539-chacha20-poly1305-"
++ "caam",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = chachapoly_encrypt,
++ .decrypt = chachapoly_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539esp(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539esp-chacha20-"
++ "poly1305-caam",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = chachapoly_encrypt,
++ .decrypt = chachapoly_decrypt,
++ .ivsize = 8,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
+ };
+
+ struct caam_crypto_alg {
+@@ -3204,9 +3475,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
-@@ -3214,10 +3260,16 @@ static int caam_init_common(struct caam_
+@@ -3214,10 +3487,16 @@ static int caam_init_common(struct caam_
return PTR_ERR(ctx->jrdev);
}
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
-@@ -3245,7 +3297,7 @@ static int caam_cra_init(struct crypto_t
+@@ -3245,7 +3524,7 @@ static int caam_cra_init(struct crypto_t
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
}
static int caam_aead_init(struct crypto_aead *tfm)
-@@ -3255,14 +3307,15 @@ static int caam_aead_init(struct crypto_
+@@ -3255,14 +3534,15 @@ static int caam_aead_init(struct crypto_
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
caam_jr_free(ctx->jrdev);
}
+@@ -3276,7 +3556,7 @@ static void caam_aead_exit(struct crypto
+ caam_exit_common(crypto_aead_ctx(tfm));
+ }
+
+-static void __exit caam_algapi_exit(void)
++void caam_algapi_exit(void)
+ {
+
+ struct caam_crypto_alg *t_alg, *n;
+@@ -3355,56 +3635,52 @@ static void caam_aead_alg_init(struct ca
+ alg->exit = caam_aead_exit;
+ }
+
+-static int __init caam_algapi_init(void)
++int caam_algapi_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int i = 0, err = 0;
+- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
++ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+-
+ INIT_LIST_HEAD(&alg_list);
+
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ if (priv->era < 10) {
++ u32 cha_vid, cha_inst;
++
++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
++ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
++ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++
++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
++ CHA_ID_LS_DES_SHIFT;
++ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ ccha_inst = 0;
++ ptha_inst = 0;
++ } else {
++ u32 aesa, mdha;
++
++ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
++ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++
++ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
++ aes_inst = aesa & CHA_VER_NUM_MASK;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
++ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
++ }
+
+ /* If MD is present, limit digest size based on LP256 */
+- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
++ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+@@ -3426,10 +3702,10 @@ static int __init caam_algapi_init(void)
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_XTS)
+- continue;
++ if (aes_vid == CHA_VER_VID_AES_LP &&
++ (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_XTS)
++ continue;
+
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+@@ -3468,21 +3744,28 @@ static int __init caam_algapi_init(void)
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
++ continue;
++
++ /* Skip POLY1305 algorithms if not supported by device */
++ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
++ continue;
++
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+- if (alg_aai == OP_ALG_AAI_GCM)
+- continue;
++ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
++ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+- if (c2_alg_sel &&
+- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+- continue;
++ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
++ (!md_inst || t_alg->aead.maxauthsize > md_limit))
++ continue;
+
+ caam_aead_alg_init(t_alg);
+
+@@ -3502,10 +3785,3 @@ static int __init caam_algapi_init(void)
+
+ return err;
+ }
+-
+-module_init(caam_algapi_init);
+-module_exit(caam_algapi_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
/* assoclen + cryptlen = seqinlen */
append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+@@ -931,7 +1507,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
++ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
/* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-@@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
-@@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
-@@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
-- u8 *nonce = cdata->key_virt + cdata->keylen;
-+ const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
---- a/drivers/crypto/caam/caamalg_desc.h
-+++ b/drivers/crypto/caam/caamalg_desc.h
-@@ -17,6 +17,9 @@
- #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
- #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
+@@ -1001,7 +1592,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
++ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
-+#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
-+#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
-+
- /* Note: Nonce is counted in cdata.keylen */
- #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+@@ -1035,6 +1626,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
+ }
+ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-@@ -27,14 +30,20 @@
- #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
++/**
++ * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
++ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
++ * descriptor (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
++ * OP_ALG_AAI_AEAD.
++ * @adata: pointer to authentication transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
++ * OP_ALG_AAI_AEAD.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @encap: true if encapsulation, false if decapsulation
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool encap,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *wait_cmd;
++ u32 nfifo;
++ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
++
++ /* Note: Context registers are saved. */
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
++ CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* For IPsec load the salt from keymat in the context register */
++ if (is_ipsec)
++ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
++ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
++ 4 << LDST_OFFSET_SHIFT);
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 2 and 1 operations: Poly & ChaCha */
++ if (encap) {
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++ } else {
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ }
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ 4 << LDST_OFFSET_SHIFT);
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ctx1_iv_off << LDST_OFFSET_SHIFT);
++ }
++
++ /*
++ * MAGIC with NFIFO
++ * Read associated data from the input and send them to class1 and
++ * class2 alignment blocks. From class1 send data to output fifo and
++ * then write it to memory since we don't need to encrypt AD.
++ */
++ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
++ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
++ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
++
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
++ FIFOLD_CLASS_CLASS1 | LDST_VLF);
++ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
++ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
++
++ /* IPsec - copy IV at the output */
++ if (is_ipsec)
++ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
++ 0x2 << 25);
++
++ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
++ JUMP_COND_NOP | JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, wait_cmd);
++
++ if (encap) {
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
++ CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++ } else {
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
++ CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
++
++ /* Load ICV for verification */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
++ }
++
++ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++}
++EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
++
+ /*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+@@ -1053,7 +1776,8 @@ static inline void ablkcipher_append_src
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
++ * - OP_ALG_ALGSEL_CHACHA20
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+@@ -1075,7 +1799,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+- u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+@@ -1118,7 +1842,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
++ * - OP_ALG_ALGSEL_CHACHA20
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+@@ -1140,7 +1865,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+- u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+@@ -1209,7 +1934,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+- u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+--- a/drivers/crypto/caam/caamalg_desc.h
++++ b/drivers/crypto/caam/caamalg_desc.h
+@@ -17,6 +17,9 @@
+ #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+ #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
+
++#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
++#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
++
+ /* Note: Nonce is counted in cdata.keylen */
+ #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+@@ -27,14 +30,20 @@
+ #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
-@@ -43,46 +52,62 @@
+@@ -43,46 +52,67 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
++
++void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool encap,
++ const bool is_qi);
void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
-@@ -258,6 +284,468 @@ badkey:
+@@ -258,55 +284,139 @@ badkey:
return -EINVAL;
}
+-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+- const u8 *key, unsigned int keylen)
+static int tls_set_sh_desc(struct crypto_aead *tls)
-+{
+ {
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct device *jrdev = ctx->jrdev;
+ struct device *jrdev = ctx->jrdev;
+- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- u32 ctx1_iv_off = 0;
+- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_CTR_MOD128);
+- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
-+ int ret = 0;
-+
+ int ret = 0;
+
+- memcpy(ctx->key, key, keylen);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
-+#ifdef DEBUG
+ #ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ /*
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ #endif
+- /*
+- * AES-CTR needs to load IV in CONTEXT1 reg
+- * at an offset of 128bits (16bytes)
+- * CONTEXT1[255:128] = IV
+- */
+- if (ctr_mode)
+- ctx1_iv_off = 16;
+
+ /*
+- * RFC3686 specific:
+- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+- * | *key = {KEY, NONCE}
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
-+ */
+ */
+- if (is_rfc3686) {
+- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+- keylen -= CTR_RFC3686_NONCE_SIZE;
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
-+ }
-+
+ }
+
+- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+- ctx->cdata.keylen = keylen;
+- ctx->cdata.key_virt = ctx->key;
+- ctx->cdata.key_inline = true;
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
-+
+
+- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+- is_rfc3686, ctx1_iv_off);
+- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+- is_rfc3686, ctx1_iv_off);
+- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+- ivsize, is_rfc3686, ctx1_iv_off);
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+@@ -327,42 +437,84 @@ static int ablkcipher_setkey(struct cryp
+ }
+ }
+
+- if (ctx->drv_ctx[GIVENCRYPT]) {
+- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+- ctx->sh_desc_givenc);
+- if (ret) {
+- dev_err(jrdev, "driver givenc context update failed\n");
+- goto badkey;
+- }
+- }
+-
+ return ret;
+ badkey:
+- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
+ return -EINVAL;
+ }
+
+-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+- const u8 *key, unsigned int keylen)
+static int gcm_set_sh_desc(struct crypto_aead *aead)
-+{
+ {
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-+
+
+- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+- dev_err(jrdev, "key size mismatch\n");
+- goto badkey;
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
+ }
+
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ memcpy(ctx->key, key, keylen);
+ memcpy(ctx->key, key, keylen);
+- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
-+ ctx->cdata.keylen = keylen;
-+
+ ctx->cdata.keylen = keylen;
+- ctx->cdata.key_virt = ctx->key;
+- ctx->cdata.key_inline = true;
+
+- /* xts ablkcipher encrypt, decrypt shared descriptors */
+- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+@@ -370,7 +522,7 @@ static int xts_ablkcipher_setkey(struct
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+- goto badkey;
+ return ret;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
+ }
+ }
+
+@@ -379,151 +531,829 @@ static int xts_ablkcipher_setkey(struct
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+- goto badkey;
+ return ret;
-+ }
-+ }
-+
+ }
+ }
+
+- return ret;
+-badkey:
+- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return -EINVAL;
+ return 0;
-+}
-+
+ }
+
+-/*
+- * aead_edesc - s/w-extended aead descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @qm_sg_bytes: length of dma mapped h/w link table
+- * @qm_sg_dma: bus physical mapped address of h/w link table
+- * @assoclen: associated data length, in CAAM endianness
+- * @assoclen_dma: bus physical mapped address of req->assoclen
+- * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table, followed by IV
+- */
+-struct aead_edesc {
+- int src_nents;
+- int dst_nents;
+- dma_addr_t iv_dma;
+- int qm_sg_bytes;
+- dma_addr_t qm_sg_dma;
+- unsigned int assoclen;
+- dma_addr_t assoclen_dma;
+- struct caam_drv_req drv_req;
+- struct qm_sg_entry sgt[0];
+-};
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-+
+
+-/*
+- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @qm_sg_bytes: length of dma mapped h/w link table
+- * @qm_sg_dma: bus physical mapped address of h/w link table
+- * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table, followed by IV
+- */
+-struct ablkcipher_edesc {
+- int src_nents;
+- int dst_nents;
+- dma_addr_t iv_dma;
+- int qm_sg_bytes;
+- dma_addr_t qm_sg_dma;
+- struct caam_drv_req drv_req;
+- struct qm_sg_entry sgt[0];
+-};
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
+
+-static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+- enum optype type)
+-{
+ /*
+- * This function is called on the fast path with values of 'type'
+- * known at compile time. Invalid arguments are not expected and
+- * thus no checks are made.
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
+ */
+- struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+- u32 *desc;
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-+
+
+- if (unlikely(!drv_ctx)) {
+- spin_lock(&ctx->lock);
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
-+
+
+- /* Read again to check if some other core init drv_ctx */
+- drv_ctx = ctx->drv_ctx[type];
+- if (!drv_ctx) {
+- int cpu;
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-+
+
+- if (type == ENCRYPT)
+- desc = ctx->sh_desc_enc;
+- else if (type == DECRYPT)
+- desc = ctx->sh_desc_dec;
+- else /* (type == GIVENCRYPT) */
+- desc = ctx->sh_desc_givenc;
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
- static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
- {
-@@ -414,6 +902,29 @@ struct aead_edesc {
- };
-
- /*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct tls_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct scatterlist tmp[2];
-+ struct scatterlist *dst;
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++ const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ u32 ctx1_iv_off = 0;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ int ret = 0;
++
++ memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++ * at an offset of 128bits (16bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ * | *key = {KEY, NONCE}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ keylen -= CTR_RFC3686_NONCE_SIZE;
++ }
++
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++ ivsize, is_rfc3686, ctx1_iv_off);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[GIVENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++ ctx->sh_desc_givenc);
++ if (ret) {
++ dev_err(jrdev, "driver givenc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *jrdev = ctx->jrdev;
++ int ret = 0;
++
++ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++ dev_err(jrdev, "key size mismatch\n");
++ goto badkey;
++ }
++
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* xts ablkcipher encrypt, decrypt shared descriptors */
++ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table, followed by IV
++ */
++struct aead_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
++ dma_addr_t assoclen_dma;
+ struct caam_drv_req drv_req;
+ struct qm_sg_entry sgt[0];
+};
+
+/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
-@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- }
-
++ * tls_edesc - s/w-extended tls descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table, followed by IV
++ */
++struct tls_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct scatterlist tmp[2];
++ struct scatterlist *dst;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
++
++/*
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table, followed by IV
++ */
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
++
++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
++ enum optype type)
++{
++ /*
++ * This function is called on the fast path with values of 'type'
++ * known at compile time. Invalid arguments are not expected and
++ * thus no checks are made.
++ */
++ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
++ u32 *desc;
++
++ if (unlikely(!drv_ctx)) {
++ spin_lock(&ctx->lock);
++
++ /* Read again to check if some other core init drv_ctx */
++ drv_ctx = ctx->drv_ctx[type];
++ if (!drv_ctx) {
++ int cpu;
++
++ if (type == ENCRYPT)
++ desc = ctx->sh_desc_enc;
++ else if (type == DECRYPT)
++ desc = ctx->sh_desc_dec;
++ else /* (type == GIVENCRYPT) */
++ desc = ctx->sh_desc_givenc;
++
++ cpu = smp_processor_id();
++ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
++ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
++ drv_ctx->op_type = type;
++
++ ctx->drv_ctx[type] = drv_ctx;
++ }
++
++ spin_unlock(&ctx->lock);
++ }
++
++ return drv_ctx;
++}
++
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++ struct scatterlist *dst, int src_nents,
++ int dst_nents, dma_addr_t iv_dma, int ivsize,
++ enum optype op_type, dma_addr_t qm_sg_dma,
++ int qm_sg_bytes)
++{
++ if (dst != src) {
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
++ } else {
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
++ }
++
++ if (iv_dma)
++ dma_unmap_single(dev, iv_dma, ivsize,
++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++ if (qm_sg_bytes)
++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++}
++
++static void aead_unmap(struct device *dev,
++ struct aead_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++}
++
+static void tls_unmap(struct device *dev,
+ struct tls_edesc *edesc,
+ struct aead_request *req)
+ edesc->qm_sg_bytes);
+}
+
- static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
- qidev = caam_ctx->qidev;
-
- if (unlikely(status)) {
-+ u32 ssrc = status & JRSTA_SSRC_MASK;
-+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-+
- caam_jr_strstatus(qidev, status);
-- ecode = -EIO;
-+ /*
-+ * verify hw auth check passed else return -EBADMSG
-+ */
-+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
-@@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
- return aead_crypt(req, false);
- }
-
-+static int ipsec_gcm_encrypt(struct aead_request *req)
++static void ablkcipher_unmap(struct device *dev,
++ struct ablkcipher_edesc *edesc,
++ struct ablkcipher_request *req)
+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
-+ return aead_crypt(req, true);
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
-+static int ipsec_gcm_decrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_crypt(req, false);
-+}
-+
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
++static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
++ /*
++ * verify hw auth check passed else return -EBADMSG
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ tls_unmap(qidev, edesc, aead_req);
++ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
-+ * allocate and map the tls extended descriptor
++ * allocate and map the aead extended descriptor
+ */
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct tls_edesc *edesc;
-+ dma_addr_t qm_sg_dma, iv_dma = 0;
-+ int ivsize = 0;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
-+ int in_len, out_len;
-+ struct qm_sg_entry *sg_table, *fd_sgt;
-+ struct caam_drv_ctx *drv_ctx;
-+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
-+ }
++ struct aead_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ unsigned int authsize = ctx->authsize;
++ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ int in_len, out_len;
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ struct caam_drv_ctx *drv_ctx;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct tls_edesc *)drv_ctx;
++ return (struct aead_edesc *)drv_ctx;
+
-+ /* allocate space for base edesc, link tables and IV */
++ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
-+ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+ mapped_src_nents = 0;
+ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ }
+ }
+
++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
++ ivsize = crypto_aead_ivsize(aead);
++
+ /*
-+ * Create S/G table: IV, src, dst.
++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
+ */
-+ qm_sg_ents = 1 + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0, 0);
++ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
++ CAAM_QI_MEMCACHE_SIZE)) {
++ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++ qm_sg_ents, ivsize);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
++ if (ivsize) {
++ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
++
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents,
++ dst_nents, 0, 0, 0, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = tls_done;
++ edesc->drv_req.cbk = aead_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
++ dev_err(qidev, "unable to map assoclen\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
+
++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++ qm_sg_index++;
++ if (ivsize) {
++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++ qm_sg_index++;
++ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
++ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
++ out_len = req->assoclen + req->cryptlen +
++ (encrypt ? ctx->authsize : (-ctx->authsize));
++ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
-+
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
++ if (req->dst == req->src) {
++ if (mapped_src_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (1 + !!ivsize) * sizeof(*sg_table),
++ out_len, 0);
++ } else if (mapped_dst_nents == 1) {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
++ 0);
++ } else {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
++ }
+
+ return edesc;
+}
-+
-+static int tls_crypt(struct aead_request *req, bool encrypt)
+
+- cpu = smp_processor_id();
+- drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+- drv_ctx->op_type = type;
++static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
-+
+
+- ctx->drv_ctx[type] = drv_ctx;
+- }
+ if (unlikely(caam_congested))
+ return -EAGAIN;
-+
-+ edesc = tls_edesc_alloc(req, encrypt);
+
+- spin_unlock(&ctx->lock);
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
++ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
-+ tls_unmap(ctx->qidev, edesc, req);
++ aead_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
-+ }
-+
+ }
+
+- return drv_ctx;
+ return ret;
+ }
+
+-static void caam_unmap(struct device *dev, struct scatterlist *src,
+- struct scatterlist *dst, int src_nents,
+- int dst_nents, dma_addr_t iv_dma, int ivsize,
+- enum optype op_type, dma_addr_t qm_sg_dma,
+- int qm_sg_bytes)
++static int aead_encrypt(struct aead_request *req)
+ {
+- if (dst != src) {
+- if (src_nents)
+- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+- } else {
+- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+- }
++ return aead_crypt(req, true);
+}
+
+- if (iv_dma)
+- dma_unmap_single(dev, iv_dma, ivsize,
+- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+- DMA_TO_DEVICE);
+- if (qm_sg_bytes)
+- dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++static int aead_decrypt(struct aead_request *req)
++{
++ return aead_crypt(req, false);
+ }
+
+-static void aead_unmap(struct device *dev,
+- struct aead_edesc *edesc,
+- struct aead_request *req)
++static int ipsec_gcm_encrypt(struct aead_request *req)
+ {
+- struct crypto_aead *aead = crypto_aead_reqtfm(req);
+- int ivsize = crypto_aead_ivsize(aead);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
+- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ return aead_crypt(req, true);
+ }
+
+-static void ablkcipher_unmap(struct device *dev,
+- struct ablkcipher_edesc *edesc,
+- struct ablkcipher_request *req)
++static int ipsec_gcm_decrypt(struct aead_request *req)
+ {
+- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
+- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+- edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ return aead_crypt(req, false);
+ }
+
+-static void aead_done(struct caam_drv_req *drv_req, u32 status)
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
+ {
+ struct device *qidev;
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+@@ -537,41 +1367,51 @@ static void aead_done(struct caam_drv_re
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+- aead_unmap(qidev, edesc, aead_req);
++ tls_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+ }
+
+ /*
+- * allocate and map the aead extended descriptor
++ * allocate and map the tls extended descriptor
+ */
+-static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+- bool encrypt)
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+ {
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+- GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+- unsigned int authsize = ctx->authsize;
+- int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ u8 *iv;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+- return (struct aead_edesc *)drv_ctx;
++ return (struct tls_edesc *)drv_ctx;
+
+- /* allocate space for base edesc and hw desc commands, link tables */
++ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+@@ -581,7 +1421,7 @@ static struct aead_edesc *aead_edesc_all
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+- (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+@@ -597,6 +1437,7 @@ static struct aead_edesc *aead_edesc_all
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
++ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+@@ -607,14 +1448,13 @@ static struct aead_edesc *aead_edesc_all
+ return ERR_PTR(src_nents);
+ }
+
+- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+- req->cryptlen +
+- (encrypt ? authsize :
+- (-authsize)));
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+- req->assoclen + req->cryptlen +
+- (encrypt ? authsize : (-authsize)));
++ req->cryptlen +
++ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+@@ -631,7 +1471,7 @@ static struct aead_edesc *aead_edesc_all
+ mapped_src_nents = 0;
+ }
+
+- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+@@ -641,80 +1481,51 @@ static struct aead_edesc *aead_edesc_all
+ }
+ }
+
+- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+- ivsize = crypto_aead_ivsize(aead);
+-
+ /*
+- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
+ */
+- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
++ qm_sg_ents = 1 + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+- CAAM_QI_MEMCACHE_SIZE)) {
+- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+- qm_sg_ents, ivsize);
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+- 0, 0, 0, 0);
++
++ ivsize = crypto_aead_ivsize(aead);
++ iv = (u8 *)(sg_table + qm_sg_ents);
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
++ 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+- if (ivsize) {
+- u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+-
+- /* Make sure IV is located in a DMAable area */
+- memcpy(iv, req->iv, ivsize);
+-
+- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(qidev, iv_dma)) {
+- dev_err(qidev, "unable to map IV\n");
+- caam_unmap(qidev, req->src, req->dst, src_nents,
+- dst_nents, 0, 0, 0, 0, 0);
+- qi_cache_free(edesc);
+- return ERR_PTR(-ENOMEM);
+- }
+- }
+-
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
+- edesc->drv_req.cbk = aead_done;
++ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+- edesc->assoclen = cpu_to_caam32(req->assoclen);
+- edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+- dev_err(qidev, "unable to map assoclen\n");
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+- iv_dma, ivsize, op_type, 0, 0);
+- qi_cache_free(edesc);
+- return ERR_PTR(-ENOMEM);
+- }
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
+
+- dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+- qm_sg_index++;
+- if (ivsize) {
+- dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+- qm_sg_index++;
+- }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+- dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+- iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+@@ -722,35 +1533,29 @@ static struct aead_edesc *aead_edesc_all
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+- out_len = req->assoclen + req->cryptlen +
+- (encrypt ? ctx->authsize : (-ctx->authsize));
+- in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+- if (req->dst == req->src) {
+- if (mapped_src_nents == 1)
+- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+- out_len, 0);
+- else
+- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+- (1 + !!ivsize) * sizeof(*sg_table),
+- out_len, 0);
+- } else if (mapped_dst_nents == 1) {
+- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+- 0);
+- } else {
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
+- }
+
+ return edesc;
+ }
+
+-static inline int aead_crypt(struct aead_request *req, bool encrypt)
++static int tls_crypt(struct aead_request *req, bool encrypt)
+ {
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+@@ -758,31 +1563,29 @@ static inline int aead_crypt(struct aead
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+- /* allocate extended descriptor */
+- edesc = aead_edesc_alloc(req, encrypt);
++ edesc = tls_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
+- /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+- aead_unmap(ctx->qidev, edesc, req);
++ tls_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+ }
+
+-static int aead_encrypt(struct aead_request *req)
+static int tls_encrypt(struct aead_request *req)
-+{
+ {
+- return aead_crypt(req, true);
+ return tls_crypt(req, true);
-+}
-+
+ }
+
+-static int aead_decrypt(struct aead_request *req)
+static int tls_decrypt(struct aead_request *req)
-+{
+ {
+- return aead_crypt(req, false);
+ return tls_crypt(req, false);
-+}
-+
+ }
+
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct ablkcipher_edesc *edesc;
-@@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
+@@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
++ */
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+@@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
};
static struct caam_aead_alg driver_aeads[] = {
/* single-pass ipsec_esp descriptor */
{
.aead = {
-@@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
+@@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
.geniv = true,
}
},
};
struct caam_crypto_alg {
-@@ -2126,9 +2989,20 @@ struct caam_crypto_alg {
+@@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
+ bool uses_dkp)
{
struct caam_drv_private *priv;
++ struct device *dev;
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
/*
* distribute tfms across job rings to ensure in-order
-@@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_
+@@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
return PTR_ERR(ctx->jrdev);
}
+- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+- dev_err(ctx->jrdev, "unable to map key\n");
+ priv = dev_get_drvdata(ctx->jrdev->parent);
-+ if (priv->era >= 6 && uses_dkp)
++ if (priv->era >= 6 && uses_dkp) {
+ ctx->dir = DMA_BIDIRECTIONAL;
-+ else
++ dev = ctx->jrdev->parent;
++ } else {
+ ctx->dir = DMA_TO_DEVICE;
++ dev = ctx->jrdev;
++ }
+
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-- DMA_TO_DEVICE);
++ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
+ ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
-@@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_
+ return -ENOMEM;
+ }
+@@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
+- ctx->qidev = priv->qidev;
+ if (ctx->adata.algtype) {
+ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
+ >> OP_ALG_ALGSEL_SHIFT;
+ ctx->authsize = 0;
+ }
+
- ctx->qidev = priv->qidev;
++ ctx->qidev = ctx->jrdev->parent;
spin_lock_init(&ctx->lock);
-@@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t
+ ctx->drv_ctx[ENCRYPT] = NULL;
+@@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
}
static int caam_aead_init(struct crypto_aead *tfm)
-@@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_
+@@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
}
static void caam_exit_common(struct caam_ctx *ctx)
-@@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam
+ {
++ struct device *dev;
++
+ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
-+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
++ if (ctx->dir == DMA_BIDIRECTIONAL)
++ dev = ctx->jrdev->parent;
++ else
++ dev = ctx->jrdev;
++
++ dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-@@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo
- if (!priv || !priv->qi_present)
- return -ENODEV;
+@@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
+ }
-+ if (caam_dpaa2) {
-+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
-+ return -ENODEV;
-+ }
-+
+ static struct list_head alg_list;
+-static void __exit caam_qi_algapi_exit(void)
++void caam_qi_algapi_exit(void)
+ {
+ struct caam_crypto_alg *t_alg, *n;
+ int i;
+@@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
+ alg->exit = caam_aead_exit;
+ }
+
+-static int __init caam_qi_algapi_init(void)
++int caam_qi_algapi_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int i = 0, err = 0;
+- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
++ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- of_node_put(dev_node);
+- if (!pdev)
+- return -ENODEV;
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv || !priv->qi_present)
+- return -ENODEV;
+-
INIT_LIST_HEAD(&alg_list);
/*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ if (priv->era < 10) {
++ u32 cha_vid, cha_inst;
++
++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
++ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
++ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++
++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
++ CHA_ID_LS_DES_SHIFT;
++ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ } else {
++ u32 aesa, mdha;
++
++ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
++ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++
++ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
++ aes_inst = aesa & CHA_VER_NUM_MASK;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ }
+
+ /* If MD is present, limit digest size based on LP256 */
+- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
++ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+@@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+- dev_warn(priv->qidev, "%s alg allocation failed\n",
++ dev_warn(ctrldev, "%s alg allocation failed\n",
+ alg->driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+- dev_warn(priv->qidev, "%s alg registration failed\n",
++ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ continue;
+@@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+- (alg_aai == OP_ALG_AAI_GCM))
++ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ continue;
+
+ /*
+@@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
+ }
+
+ if (registered)
+- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
++ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
+
+ return err;
+ }
+-
+-module_init(caam_qi_algapi_init);
+-module_exit(caam_qi_algapi_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+-MODULE_AUTHOR("Freescale Semiconductor");
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,5691 @@
+@@ -0,0 +1,5843 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
-+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
-+bool caam_little_end;
-+EXPORT_SYMBOL(caam_little_end);
-+bool caam_imx;
-+EXPORT_SYMBOL(caam_imx);
-+#endif
-+
+/*
+ * This is a a cache of buffers, from which the users of CAAM QI driver
+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
++ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
++ /*
++ * The associated data comes already with the IV but we need
++ * to skip it when we authenticate or encrypt...
++ */
++ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
++ else
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ return edesc;
+}
+
++static int chachapoly_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ u32 *desc;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, true, true);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, false, true);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
++
++ return 0;
++}
++
++static int chachapoly_setauthsize(struct crypto_aead *aead,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++
++ if (authsize != POLY1305_DIGEST_SIZE)
++ return -EINVAL;
++
++ ctx->authsize = authsize;
++ return chachapoly_set_sh_desc(aead);
++}
++
++static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
++
++ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ ctx->cdata.key_virt = key;
++ ctx->cdata.keylen = keylen - saltlen;
++
++ return chachapoly_set_sh_desc(aead);
++}
++
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
++ OP_ALG_AAI_CTR_MOD128) &&
++ ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
++ OP_ALG_ALGSEL_CHACHA20);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+#ifdef DEBUG
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
-+ }
++ },
++ {
++ .skcipher = {
++ .base = {
++ .cra_name = "chacha20",
++ .cra_driver_name = "chacha20-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = skcipher_setkey,
++ .encrypt = skcipher_encrypt,
++ .decrypt = skcipher_decrypt,
++ .min_keysize = CHACHA20_KEY_SIZE,
++ .max_keysize = CHACHA20_KEY_SIZE,
++ .ivsize = CHACHA20_IV_SIZE,
++ },
++ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
++ },
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
++ .cra_name = "rfc7539(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539-chacha20-poly1305-"
++ "caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539esp(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539esp-chacha20-"
++ "poly1305-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = 8,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
-+ err = dpaa2_io_service_register(NULL, nctx);
++ ppriv->dpio = dpaa2_io_service_select(cpu);
++ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
++ err = -ENOMEM;
+ goto err;
+ }
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
+ }
+
+ for_each_online_cpu(cpu) {
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
++ struct device *dev = priv->dev;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+
+ /* Retry while portal is busy */
+ do {
-+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
++ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
-+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
++ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+
+ i = 0;
+ for_each_online_cpu(cpu) {
-+ dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
-+ priv->rx_queue_attr[i].fqid,
-+ priv->tx_queue_attr[i].fqid);
++ u8 j;
++
++ j = i % priv->num_pairs;
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
-+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
-+ ppriv->prio = i;
++ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
++
++ /*
++ * Allow all cores to enqueue, while only some of them
++ * will take part in dequeuing.
++ */
++ if (++i > priv->num_pairs)
++ continue;
++
++ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
++ ppriv->prio = j;
++
++ dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
++ priv->rx_queue_attr[j].fqid,
++ priv->tx_queue_attr[j].fqid);
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
-+ if (++i == priv->num_pairs)
-+ break;
+ }
+
+ return 0;
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
++ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
-+ if (err < 0) {
++ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
-+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
++ if (err != -EPROBE_DEFER)
++ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
++ !priv->sec_attr.ccha_acc_num)
++ continue;
++
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
++ !priv->sec_attr.ccha_acc_num)
++ continue;
++
++ /* Skip POLY1305 algorithms if not supported by device */
++ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
++ !priv->sec_attr.ptha_acc_num)
++ continue;
++
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
-+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
++ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
++ !priv->sec_attr.md_acc_num)
+ continue;
+
+ t_alg->caam.dev = dev;
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-+ int err = 0, i, id;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err = 0, i;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
-+ /*
-+ * There is no guarantee that preemption is disabled here,
-+ * thus take action.
-+ */
-+ preempt_disable();
-+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
++ ppriv = this_cpu_ptr(priv->ppriv);
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
-+ err = dpaa2_io_service_enqueue_fq(NULL,
-+ priv->tx_queue_attr[id].fqid,
++ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
++
++ cpu_relax();
+ }
-+ preempt_enable();
+
-+ if (unlikely(err < 0)) {
-+ dev_err(dev, "Error enqueuing frame: %d\n", err);
++ if (unlikely(err)) {
++ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+module_fsl_mc_driver(dpaa2_caam_driver);
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,274 @@
+@@ -0,0 +1,276 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
++ * @dpio: portal used for data path operations
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
++ struct dpaa2_io *dpio;
+};
+
+/*
+#endif /* _CAAMALG_QI2_H_ */
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
-@@ -62,6 +62,7 @@
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#define CAAM_CRA_PRIORITY 3000
-@@ -71,14 +72,6 @@
+@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
-@@ -107,6 +100,7 @@ struct caam_hash_ctx {
+@@ -107,6 +101,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
-@@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str
+@@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
}
/* Map state->caam_ctx, and add it to link table */
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
-@@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
+@@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
return 0;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
-@@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah
+@@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
int ret;
u8 *hashed_key = NULL;
-@@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah
+@@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
key = hashed_key;
}
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
-@@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash
+@@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
-@@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_
+@@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
-@@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
edesc->src_nents = src_nents;
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
-@@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha
+@@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-@@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
-@@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha
+@@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
}
edesc->src_nents = src_nents;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
-@@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
/*
* Get a Job ring from Job Ring driver to ensure in-order
-@@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry
+@@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
return PTR_ERR(ctx->jrdev);
}
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
-@@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr
+@@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
caam_jr_free(ctx->jrdev);
}
+-static void __exit caam_algapi_hash_exit(void)
++void caam_algapi_hash_exit(void)
+ {
+ struct caam_hash_alg *t_alg, *n;
+
+@@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
+ return t_alg;
+ }
+
+-static int __init caam_algapi_hash_init(void)
++int caam_algapi_hash_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+ int i = 0, err = 0;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+- u32 cha_inst, cha_vid;
+-
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
++ u32 md_inst, md_vid;
+
+ /*
+ * Register crypto algorithms the device supports. First, identify
+ * presence and attributes of MD block.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ if (priv->era < 10) {
++ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
++ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ } else {
++ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ }
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
++ if (!md_inst)
+ return -ENODEV;
+
+ /* Limit digest size based on LP256 */
+- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
++ if (md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ INIT_LIST_HEAD(&hash_list);
+@@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
+
+ return err;
+ }
+-
+-module_init(caam_algapi_hash_init);
+-module_exit(caam_algapi_hash_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG");
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,108 @@
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
-+#endif /* _CAAMHASH_DESC_H_ */
++#endif /* _CAAMHASH_DESC_H_ */
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+@@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
+ };
+
+ /* Public Key Cryptography module initialization handler */
+-static int __init caam_pkc_init(void)
++int caam_pkc_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
+- u32 cha_inst, pk_inst;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
++ u32 pk_inst;
+ int err;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+ /* Determine public key hardware accelerator presence. */
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
++ if (priv->era < 10)
++ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
++ else
++ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
+
+ /* Do not register algorithms if PKHA is not present. */
+ if (!pk_inst)
+- return -ENODEV;
++ return 0;
+
+ err = crypto_register_akcipher(&caam_rsa);
+ if (err)
+@@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
+ return err;
+ }
+
+-static void __exit caam_pkc_exit(void)
++void caam_pkc_exit(void)
+ {
+ crypto_unregister_akcipher(&caam_rsa);
+ }
+-
+-module_init(caam_pkc_init);
+-module_exit(caam_pkc_exit);
+-
+-MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor");
+--- a/drivers/crypto/caam/caamrng.c
++++ b/drivers/crypto/caam/caamrng.c
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
+ .read = caam_read,
+ };
+
+-static void __exit caam_rng_exit(void)
++void caam_rng_exit(void)
+ {
+ caam_jr_free(rng_ctx->jrdev);
+ hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
+ }
+
+-static int __init caam_rng_init(void)
++int caam_rng_init(struct device *ctrldev)
+ {
+ struct device *dev;
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ u32 rng_inst;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int err;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+ /* Check for an instantiated RNG before registration */
+- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+- return -ENODEV;
++ if (priv->era < 10)
++ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
++ else
++ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
++
++ if (!rng_inst)
++ return 0;
+
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+@@ -361,10 +342,3 @@ free_caam_alloc:
+ caam_jr_free(dev);
+ return err;
+ }
+-
+-module_init(caam_rng_init);
+-module_exit(caam_rng_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG");
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
-@@ -38,6 +39,7 @@
+@@ -34,10 +35,13 @@
+ #include <crypto/des.h>
+ #include <crypto/sha.h>
+ #include <crypto/md5.h>
++#include <crypto/chacha20.h>
++#include <crypto/poly1305.h>
+ #include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/rsa.h>
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
-@@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx);
+@@ -2,6 +2,7 @@
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #include <linux/device.h>
+@@ -16,17 +17,15 @@
+ #include "desc_constr.h"
+ #include "ctrl.h"
+
+-bool caam_little_end;
+-EXPORT_SYMBOL(caam_little_end);
+ bool caam_dpaa2;
+ EXPORT_SYMBOL(caam_dpaa2);
+-bool caam_imx;
+-EXPORT_SYMBOL(caam_imx);
+
+ #ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
-@@ -332,6 +334,9 @@ static int caam_remove(struct platform_d
+@@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ struct caam_deco __iomem *deco = ctrlpriv->deco;
+ unsigned int timeout = 100000;
+- u32 deco_dbg_reg, flags;
++ u32 deco_dbg_reg, deco_state, flags;
+ int i;
+
+
+@@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
+ timeout = 10000000;
+ do {
+ deco_dbg_reg = rd_reg32(&deco->desc_dbg);
++
++ if (ctrlpriv->era < 10)
++ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
++ DESC_DBG_DECO_STAT_SHIFT;
++ else
++ deco_state = (rd_reg32(&deco->dbg_exec) &
++ DESC_DER_DECO_STAT_MASK) >>
++ DESC_DER_DECO_STAT_SHIFT;
++
+ /*
+ * If an error occured in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+- DESC_DBG_DECO_STAT_HOST_ERR)
++ if (deco_state == DECO_STAT_HOST_ERR)
+ break;
++
+ cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+@@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
+ of_platform_depopulate(ctrldev);
+
+ #ifdef CONFIG_CAAM_QI
+- if (ctrlpriv->qidev)
+- caam_qi_shutdown(ctrlpriv->qidev);
++ if (ctrlpriv->qi_init)
++ caam_qi_shutdown(ctrldev);
+ #endif
+
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+- * In case of DPAA 2.x, RNG is managed by MC firmware.
++ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+- if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
++ if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+@@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
/* Unmap controller region */
iounmap(ctrl);
-@@ -433,6 +438,10 @@ static int caam_probe(struct platform_de
+@@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
{.family = "Freescale i.MX"},
{},
};
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
-@@ -615,6 +624,8 @@ static int caam_probe(struct platform_de
- goto iounmap_ctrl;
+@@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
+ struct caam_perfmon *perfmon;
+ #endif
+ u32 scfgr, comp_params;
+- u32 cha_vid_ls;
++ u8 rng_vid;
+ int pg_size;
+ int BLOCK_OFFSET = 0;
+
+@@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
+ dev_set_drvdata(dev, ctrlpriv);
+ nprop = pdev->dev.of_node;
+
++ /* Get configuration properties from device tree */
++ /* First, get register page */
++ ctrl = of_iomap(nprop, 0);
++ if (!ctrl) {
++ dev_err(dev, "caam: of_iomap() failed\n");
++ return -ENOMEM;
++ }
++
++ caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
++ (CSTA_PLEND | CSTA_ALT_PLEND));
+ caam_imx = (bool)soc_device_match(imx_soc);
+
++ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
++ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
++ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
++
++#ifdef CONFIG_CAAM_QI
++ /* If (DPAA 1.x) QI present, check whether dependencies are available */
++ if (ctrlpriv->qi_present && !caam_dpaa2) {
++ ret = qman_is_probed();
++ if (!ret) {
++ ret = -EPROBE_DEFER;
++ goto iounmap_ctrl;
++ } else if (ret < 0) {
++ dev_err(dev, "failing probe due to qman probe error\n");
++ ret = -ENODEV;
++ goto iounmap_ctrl;
++ }
++
++ ret = qman_portals_probed();
++ if (!ret) {
++ ret = -EPROBE_DEFER;
++ goto iounmap_ctrl;
++ } else if (ret < 0) {
++ dev_err(dev, "failing probe due to qman portals probe error\n");
++ ret = -ENODEV;
++ goto iounmap_ctrl;
++ }
++ }
++#endif
++
+ /* Enable clocking */
+ clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM ipg clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_ipg = clk;
+
+@@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_mem = clk;
+
+@@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM aclk clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_aclk = clk;
+
+@@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_emi_slow = clk;
+ }
+@@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
+ ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+@@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
+ }
+ }
+
+- /* Get configuration properties from device tree */
+- /* First, get register page */
+- ctrl = of_iomap(nprop, 0);
+- if (ctrl == NULL) {
+- dev_err(dev, "caam: of_iomap() failed\n");
+- ret = -ENOMEM;
+- goto disable_caam_emi_slow;
+- }
+-
+- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+- (CSTA_PLEND | CSTA_ALT_PLEND));
+-
+- /* Finding the page size for using the CTPR_MS register */
+- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+-
+ /* Allocating the BLOCK_OFFSET based on the supported page size on
+ * the platform
+ */
++ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+ if (pg_size == 0)
+ BLOCK_OFFSET = PG_SIZE_4K;
+ else
+@@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+ * long pointers in master configuration register.
+- * In case of DPAA 2.x, Management Complex firmware performs
++ * In case of SoCs with Management Complex, MC f/w performs
+ * the configuration.
+ */
+- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+- if (!caam_dpaa2)
++ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
++ ctrlpriv->mc_en = !!np;
++ of_node_put(np);
++
++ if (!ctrlpriv->mc_en)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+@@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
+ }
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+- goto iounmap_ctrl;
++ goto disable_caam_emi_slow;
}
+- ret = of_platform_populate(nprop, caam_match, NULL, dev);
+- if (ret) {
+- dev_err(dev, "JR platform devices creation error\n");
+- goto iounmap_ctrl;
+- }
+ ctrlpriv->era = caam_get_era();
++ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
+
+ #ifdef CONFIG_DEBUG_FS
+ /*
+@@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+ #endif
+
+- ring = 0;
+- for_each_available_child_of_node(nprop, np)
+- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+- ((__force uint8_t *)ctrl +
+- (ring + JR_BLOCK_NUMBER) *
+- BLOCK_OFFSET
+- );
+- ctrlpriv->total_jobrs++;
+- ring++;
+- }
+-
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
+@@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
+ #endif
+ }
+
++ ret = of_platform_populate(nprop, caam_match, NULL, dev);
++ if (ret) {
++ dev_err(dev, "JR platform devices creation error\n");
++ goto shutdown_qi;
++ }
++
++ ring = 0;
++ for_each_available_child_of_node(nprop, np)
++ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
++ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
++ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
++ ((__force uint8_t *)ctrl +
++ (ring + JR_BLOCK_NUMBER) *
++ BLOCK_OFFSET
++ );
++ ctrlpriv->total_jobrs++;
++ ring++;
++ }
+
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
-@@ -671,6 +682,16 @@ static int caam_probe(struct platform_de
+ /* If no QI and no rings specified, quit and go home */
+ if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+ dev_err(dev, "no queues configured, terminating\n");
+@@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
goto caam_remove;
}
+- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ caam_dma_pdev_info.parent = dev;
+ caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
+ caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
+ set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
+ }
+
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
++ if (ctrlpriv->era < 10)
++ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
++ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
++ else
++ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
++ CHA_VER_VID_SHIFT;
/*
-@@ -746,7 +767,7 @@ static int caam_probe(struct platform_de
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
+- * In case of DPAA 2.x, RNG is managed by MC firmware.
++ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+- if (!caam_dpaa2 &&
+- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
++ if (!ctrlpriv->mc_en && rng_vid >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+@@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+- caam_dpaa2 ? "yes" : "no");
+ ctrlpriv->era);
- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
- caam_dpaa2 ? "yes" : "no");
++ dev_info(dev, "job rings = %d, qi = %d\n",
++ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+ #ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+@@ -816,8 +873,11 @@ caam_remove:
+ caam_remove(pdev);
+ return ret;
+
+-iounmap_ctrl:
+- iounmap(ctrl);
++shutdown_qi:
++#ifdef CONFIG_CAAM_QI
++ if (ctrlpriv->qi_init)
++ caam_qi_shutdown(dev);
++#endif
+ disable_caam_emi_slow:
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+@@ -827,6 +887,8 @@ disable_caam_mem:
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+ disable_caam_ipg:
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
++iounmap_ctrl:
++ iounmap(ctrl);
+ return ret;
+ }
+
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
-@@ -42,6 +42,7 @@
+@@ -4,6 +4,7 @@
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #ifndef DESC_H
+@@ -42,6 +43,7 @@
#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
#define CMD_STORE (0x0a << CMD_SHIFT)
#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-@@ -355,6 +356,7 @@
+@@ -242,6 +244,7 @@
+ #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
++#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+ /* Offset in source/destination */
+@@ -284,6 +287,12 @@
+ #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+ #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
++/* Special Length definitions when dst=sm, nfifo-{sm,m} */
++#define LDLEN_MATH0 0
++#define LDLEN_MATH1 1
++#define LDLEN_MATH2 2
++#define LDLEN_MATH3 3
++
+ /*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+@@ -355,6 +364,7 @@
#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
/* Other types. Need to OR in last/flush bits as desired */
#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-@@ -408,6 +410,7 @@
+@@ -408,6 +418,7 @@
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
-@@ -444,6 +447,18 @@
+@@ -444,6 +455,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-@@ -1093,6 +1108,22 @@
+@@ -1093,6 +1116,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
-@@ -1440,10 +1471,11 @@
+@@ -1105,6 +1144,12 @@
+ #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+ #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+
++/* version register fields */
++#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
++#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
++#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
++#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
++
+ #define OP_ALG_ALGSEL_SHIFT 16
+ #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+@@ -1124,6 +1169,8 @@
+ #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
++#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
++#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
+
+ #define OP_ALG_AAI_SHIFT 4
+ #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
+@@ -1171,6 +1218,11 @@
+ #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+ #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
++/* Chacha20 AAI set */
++#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
++
+ /* hmac/smac AAI set */
+ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+ #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+@@ -1359,6 +1411,7 @@
+ #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+ #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+ #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
++#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
+
+ #define MOVE_DEST_SHIFT 16
+ #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+@@ -1385,6 +1438,10 @@
+
+ #define MOVELEN_MRSEL_SHIFT 0
+ #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
++#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+ /*
+ * MATH Command Constructs
+@@ -1440,10 +1497,11 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
/* Destination selectors */
#define MATH_DEST_SHIFT 8
-@@ -1452,6 +1484,7 @@
+@@ -1452,6 +1510,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-@@ -1624,4 +1657,31 @@
+@@ -1560,6 +1619,7 @@
+ #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
++#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+@@ -1624,4 +1684,31 @@
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
-@@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co
+@@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(moveb, MOVEB)
++APPEND_CMD_RET(move_len, MOVE_LEN)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
-@@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
+@@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
-@@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
+@@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
-@@ -452,7 +453,7 @@ struct alginfo {
+@@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
+ u32 options) \
+ { \
+ PRINT_POS; \
+- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
++ if (options & LDST_LEN_MASK) \
++ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
++ else \
++ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
++ sizeof(type)); \
+ append_cmd(desc, immediate); \
+ }
+ APPEND_CMD_RAW_IMM(load, LOAD, u32);
+@@ -452,7 +458,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
};
bool key_inline;
};
-@@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi
+@@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
return (rem_bytes >= 0) ? 0 : -1;
}
+#endif /* _DPSECI_CMD_H_ */
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
-@@ -108,6 +108,54 @@ static const struct {
+@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
+ #endif /* DEBUG */
+ EXPORT_SYMBOL(caam_dump_sg);
+
++bool caam_little_end;
++EXPORT_SYMBOL(caam_little_end);
++
++bool caam_imx;
++EXPORT_SYMBOL(caam_imx);
++
+ static const struct {
+ u8 value;
+ const char *error_text;
+@@ -108,6 +114,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
static const char * const cha_id_list[] = {
"",
"AES",
-@@ -236,6 +284,27 @@ static void report_deco_status(struct de
+@@ -236,6 +290,27 @@ static void report_deco_status(struct de
status, error, idx_str, idx, err_str, err_err_code);
}
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
-@@ -250,7 +319,7 @@ static void report_cond_code_status(stru
+@@ -250,7 +325,7 @@ static void report_cond_code_status(stru
status, error, __func__);
}
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
-@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr
+@@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
-@@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr
+@@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
int rowsize, int groupsize, struct scatterlist *sg,
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
-@@ -84,6 +84,7 @@ struct caam_drv_private {
+@@ -65,10 +65,6 @@ struct caam_drv_private_jr {
+ * Driver-private storage for a single CAAM block instance
+ */
+ struct caam_drv_private {
+-#ifdef CONFIG_CAAM_QI
+- struct device *qidev;
+-#endif
+-
+ /* Physical-presence section */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+ struct caam_deco __iomem *deco; /* DECO/CCB views */
+@@ -76,14 +72,21 @@ struct caam_drv_private {
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+
++ struct iommu_domain *domain;
++
+ /*
+ * Detected geometry block. Filled in from device tree if powerpc,
+ * or from register-based version detection code
+ */
+ u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
++#ifdef CONFIG_CAAM_QI
++ u8 qi_init; /* Nonzero if QI has been initialized */
++#endif
++ u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
+@@ -108,8 +111,95 @@ struct caam_drv_private {
+ #endif
+ };
+
+-void caam_jr_algapi_init(struct device *dev);
+-void caam_jr_algapi_remove(struct device *dev);
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
++
++int caam_algapi_init(struct device *dev);
++void caam_algapi_exit(void);
++
++#else
++
++static inline int caam_algapi_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_algapi_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
++
++int caam_algapi_hash_init(struct device *dev);
++void caam_algapi_hash_exit(void);
++
++#else
++
++static inline int caam_algapi_hash_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_algapi_hash_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
++
++int caam_pkc_init(struct device *dev);
++void caam_pkc_exit(void);
++
++#else
++
++static inline int caam_pkc_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_pkc_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
++
++int caam_rng_init(struct device *dev);
++void caam_rng_exit(void);
++
++#else
++
++static inline int caam_rng_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_rng_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
++
++#ifdef CONFIG_CAAM_QI
++
++int caam_qi_algapi_init(struct device *dev);
++void caam_qi_algapi_exit(void);
++
++#else
++
++static inline int caam_qi_algapi_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_qi_algapi_exit(void)
++{
++}
++
++#endif /* CONFIG_CAAM_QI */
+
+ #ifdef CONFIG_DEBUG_FS
+ static int caam_debugfs_u64_get(void *data, u64 *val)
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
-@@ -23,6 +23,14 @@ struct jr_driver_data {
+@@ -23,6 +23,52 @@ struct jr_driver_data {
static struct jr_driver_data driver_data;
+ return jr_driver_probed;
+}
+EXPORT_SYMBOL(caam_jr_driver_probed);
++
++static DEFINE_MUTEX(algs_lock);
++static unsigned int active_devs;
++
++static void register_algs(struct device *dev)
++{
++ mutex_lock(&algs_lock);
++
++ if (++active_devs != 1)
++ goto algs_unlock;
++
++ caam_algapi_init(dev);
++ caam_algapi_hash_init(dev);
++ caam_pkc_init(dev);
++ caam_rng_init(dev);
++ caam_qi_algapi_init(dev);
++
++algs_unlock:
++ mutex_unlock(&algs_lock);
++}
++
++static void unregister_algs(void)
++{
++ mutex_lock(&algs_lock);
++
++ if (--active_devs != 0)
++ goto algs_unlock;
++
++ caam_qi_algapi_exit();
++
++ caam_rng_exit();
++ caam_pkc_exit();
++ caam_algapi_hash_exit();
++ caam_algapi_exit();
++
++algs_unlock:
++ mutex_unlock(&algs_lock);
++}
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-@@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor
+@@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
+ return -EBUSY;
+ }
+
++ /* Unregister JR-based RNG & crypto algorithms */
++ unregister_algs();
++
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+@@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
return ret;
}
-@@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void)
+@@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
EXPORT_SYMBOL(caam_jr_alloc);
/**
* caam_jr_free() - Free the Job Ring
* @rdev - points to the dev that identifies the Job ring to
* be released.
-@@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform
+@@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
atomic_set(&jrpriv->tfm_count, 0);
++ register_algs(jrdev->parent);
+ jr_driver_probed++;
+
return 0;
#include "regs.h"
#include "qi.h"
-@@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache;
+@@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
+ /*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+- * @qi_pdev: platform device for QI backend
+ */
+ struct caam_qi_priv {
+ struct qman_cgr cgr;
+- struct platform_device *qi_pdev;
+ };
+
+ static struct caam_qi_priv qipriv ____cacheline_aligned;
+@@ -102,26 +100,34 @@ static int mod_init_cpu;
+ */
+ static struct kmem_cache *qi_cache;
+
++static void *caam_iova_to_virt(struct iommu_domain *domain,
++ dma_addr_t iova_addr)
++{
++ phys_addr_t phys_addr;
++
++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++
++ return phys_to_virt(phys_addr);
++}
++
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
if (likely(!ret))
return 0;
-@@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev
+@@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
EXPORT_SYMBOL(caam_qi_enqueue);
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
{
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
-@@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++ struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
dev_err(qidev, "Non-compound FD from CAAM\n");
return;
}
-@@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f
+
+- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
++ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
+ if (!drv_req) {
+ dev_err(qidev,
+ "Can't find original request for CAAM response\n");
+@@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
req_fq->cb.fqs = NULL;
ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
opts.fqd.cgid = qipriv.cgr.cgrid;
ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
-@@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f
+@@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
return req_fq;
init_req_fq_fail:
create_req_fq_fail:
kfree(req_fq);
return ERR_PTR(ret);
-@@ -275,7 +275,7 @@ empty_fq:
+@@ -275,7 +284,7 @@ empty_fq:
if (ret)
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
kfree(fq);
return ret;
-@@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq
+@@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
if (ret)
return ret;
break;
msleep(20);
-@@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp
+@@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
+ int caam_qi_shutdown(struct device *qidev)
+ {
+ int i, ret;
+- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
++ struct caam_qi_priv *priv = &qipriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+
+@@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
+ /* Now that we're done with the CGRs, restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+
+- platform_device_unregister(priv->qi_pdev);
+ return ret;
+ }
+
+@@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
- u32 status;
++ struct caam_drv_private *priv = dev_get_drvdata(qidev);
if (caam_qi_napi_schedule(p, caam_napi))
return qman_cb_dqrr_stop;
+ if (unlikely(fd->status)) {
+ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
+ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
-+
+
+- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(qidev, "Error: %#x in CAAM response FD\n",
+ fd->status);
+ }
-
-- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
++
+ if (unlikely(fd->format != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return qman_cb_dqrr_consume;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
-+ drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
++ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
-@@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp
+@@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
return qman_cb_dqrr_consume;
}
-@@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic
+@@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
return -ENODEV;
}
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret) {
-@@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev
+@@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
+ {
+ int ret;
+ struct qm_mcc_initcgr opts;
+- const u64 cpus = *(u64 *)qman_affine_cpus();
+- const int num_cpus = hweight64(cpus);
+- const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
++ const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
++ MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+ ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+ if (ret) {
+@@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
qipriv.cgr.cb = cgr_cb;
memset(&opts, 0, sizeof(opts));
opts.cgr.cscn_en = QM_CGR_EN;
opts.cgr.mode = QMAN_CGR_MODE_FRAME;
qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+@@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
+ int caam_qi_init(struct platform_device *caam_pdev)
+ {
+ int err, i;
+- struct platform_device *qi_pdev;
+ struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct caam_drv_private *ctrlpriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+- static struct platform_device_info qi_pdev_info = {
+- .name = "caam_qi",
+- .id = PLATFORM_DEVID_NONE
+- };
+
+ /*
+ * QMAN requires CGRs to be removed from same CPU+portal from where it
+@@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
+ mod_init_cpu = cpumask_first(cpus);
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+- qi_pdev_info.parent = ctrldev;
+- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+- qi_pdev = platform_device_register_full(&qi_pdev_info);
+- if (IS_ERR(qi_pdev))
+- return PTR_ERR(qi_pdev);
+- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
+-
+ ctrlpriv = dev_get_drvdata(ctrldev);
+- qidev = &qi_pdev->dev;
+-
+- qipriv.qi_pdev = qi_pdev;
+- dev_set_drvdata(qidev, &qipriv);
++ qidev = ctrldev;
+
+ /* Initialize the congestion detection */
+ err = init_cgr(qidev);
+ if (err) {
+ dev_err(qidev, "CGR initialization failed: %d\n", err);
+- platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+@@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
+ if (err) {
+ dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+ free_rsp_fqs();
+- platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+@@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
+ napi_enable(irqtask);
+ }
+
+- /* Hook up QI device to parent controlling caam device */
+- ctrlpriv->qidev = qidev;
+-
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(qidev, "Can't allocate CAAM cache\n");
+ free_rsp_fqs();
+- platform_device_unregister(qi_pdev);
+ return -ENOMEM;
+ }
+
+@@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ ×_congested, &caam_fops_u64_ro);
+ #endif
++
++ ctrlpriv->qi_init = 1;
+ dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+ return 0;
+ }
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,7 +9,7 @@
#include "desc_constr.h"
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
-@@ -627,6 +627,8 @@ struct caam_job_ring {
+@@ -3,6 +3,7 @@
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #ifndef REGS_H
+@@ -211,6 +212,47 @@ struct jr_outentry {
+ u32 jrstatus; /* Status for completed descriptor */
+ } __packed;
+
++/* Version registers (Era 10+) e80-eff */
++struct version_regs {
++ u32 crca; /* CRCA_VERSION */
++ u32 afha; /* AFHA_VERSION */
++ u32 kfha; /* KFHA_VERSION */
++ u32 pkha; /* PKHA_VERSION */
++ u32 aesa; /* AESA_VERSION */
++ u32 mdha; /* MDHA_VERSION */
++ u32 desa; /* DESA_VERSION */
++ u32 snw8a; /* SNW8A_VERSION */
++ u32 snw9a; /* SNW9A_VERSION */
++ u32 zuce; /* ZUCE_VERSION */
++ u32 zuca; /* ZUCA_VERSION */
++ u32 ccha; /* CCHA_VERSION */
++ u32 ptha; /* PTHA_VERSION */
++ u32 rng; /* RNG_VERSION */
++ u32 trng; /* TRNG_VERSION */
++ u32 aaha; /* AAHA_VERSION */
++ u32 rsvd[10];
++ u32 sr; /* SR_VERSION */
++ u32 dma; /* DMA_VERSION */
++ u32 ai; /* AI_VERSION */
++ u32 qi; /* QI_VERSION */
++ u32 jr; /* JR_VERSION */
++ u32 deco; /* DECO_VERSION */
++};
++
++/* Version registers bitfields */
++
++/* Number of CHAs instantiated */
++#define CHA_VER_NUM_MASK 0xffull
++/* CHA Miscellaneous Information */
++#define CHA_VER_MISC_SHIFT 8
++#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
++/* CHA Revision Number */
++#define CHA_VER_REV_SHIFT 16
++#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
++/* CHA Version ID */
++#define CHA_VER_VID_SHIFT 24
++#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
++
+ /*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ * CAAM Global Status/Component Version IDs
+@@ -223,15 +265,13 @@ struct jr_outentry {
+ #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+
+ /*
+- * CHA version IDs / instantiation bitfields
++ * CHA version IDs / instantiation bitfields (< Era 10)
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
+ #define CHA_ID_LS_AES_SHIFT 0
+ #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+-#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
+-#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
+
+ #define CHA_ID_LS_DES_SHIFT 4
+ #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+@@ -241,9 +281,6 @@ struct jr_outentry {
+
+ #define CHA_ID_LS_MD_SHIFT 12
+ #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
+
+ #define CHA_ID_LS_RNG_SHIFT 16
+ #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
+@@ -269,6 +306,13 @@ struct jr_outentry {
+ #define CHA_ID_MS_JR_SHIFT 28
+ #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+
++/* Specific CHA version IDs */
++#define CHA_VER_VID_AES_LP 0x3ull
++#define CHA_VER_VID_AES_HP 0x4ull
++#define CHA_VER_VID_MD_LP256 0x0ull
++#define CHA_VER_VID_MD_LP512 0x1ull
++#define CHA_VER_VID_MD_HP 0x2ull
++
+ struct sec_vid {
+ u16 ip_id;
+ u8 maj_rev;
+@@ -473,8 +517,10 @@ struct caam_ctrl {
+ struct rng4tst r4tst[2];
+ };
+
+- u32 rsvd9[448];
++ u32 rsvd9[416];
+
++ /* Version registers - introduced with era 10 e80-eff */
++ struct version_regs vreg;
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+ };
+@@ -564,8 +610,10 @@ struct caam_job_ring {
+ u32 rsvd11;
+ u32 jrcommand; /* JRCRx - JobR command */
+
+- u32 rsvd12[932];
++ u32 rsvd12[900];
+
++ /* Version registers - introduced with era 10 e80-eff */
++ struct version_regs vreg;
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+ };
+@@ -627,6 +675,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
+@@ -870,13 +920,19 @@ struct caam_deco {
+ u32 rsvd29[48];
+ u32 descbuf[64]; /* DxDESB - Descriptor buffer */
+ u32 rscvd30[193];
+-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+ #define DESC_DBG_DECO_STAT_VALID 0x80000000
+ #define DESC_DBG_DECO_STAT_MASK 0x00F00000
++#define DESC_DBG_DECO_STAT_SHIFT 20
+ u32 desc_dbg; /* DxDDR - DECO Debug Register */
+- u32 rsvd31[126];
++ u32 rsvd31[13];
++#define DESC_DER_DECO_STAT_MASK 0x000F0000
++#define DESC_DER_DECO_STAT_SHIFT 16
++ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
++ u32 rsvd32[112];
+ };
+
++#define DECO_STAT_HOST_ERR 0xD
++
+ #define DECO_JQCR_WHL 0x20000000
+ #define DECO_JQCR_FOUR 0x10000000
+
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -34,46 +34,61 @@
if (ret > 1) {
tbl_off += ret;
sync_needed = true;
+--- a/include/crypto/chacha20.h
++++ b/include/crypto/chacha20.h
+@@ -13,6 +13,7 @@
+ #define CHACHA20_IV_SIZE 16
+ #define CHACHA20_KEY_SIZE 32
+ #define CHACHA20_BLOCK_SIZE 64
++#define CHACHAPOLY_IV_SIZE 12
+
+ struct chacha20_ctx {
+ u32 key[8];
-From aded309f403c4202b9c6f61ea6a635e0c736eb77 Mon Sep 17 00:00:00 2001
+From 62ac0c4fda3b40a8994f2abfdc52784ced80c83b Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:27:07 +0800
-Subject: [PATCH 40/40] pm: support layerscape
+Date: Wed, 17 Apr 2019 18:58:51 +0800
+Subject: [PATCH] pm: support layerscape
+
This is an integrated patch of pm for layerscape
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Hongbo Zhang <hongbo.zhang@freescale.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
Signed-off-by: Tang Yuantian <andy.tang@nxp.com>
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
Signed-off-by: Zhao Chenhui <chenhui.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
- .../devicetree/bindings/powerpc/fsl/pmc.txt | 59 ++--
- drivers/firmware/psci.c | 16 +-
- drivers/soc/fsl/rcpm.c | 158 ++++++++++
- drivers/soc/fsl/sleep_fsm.c | 279 ++++++++++++++++++
- drivers/soc/fsl/sleep_fsm.h | 130 ++++++++
- 5 files changed, 615 insertions(+), 27 deletions(-)
+ drivers/firmware/psci.c | 16 ++-
+ drivers/soc/fsl/rcpm.c | 156 ++++++++++++++++++++
+ drivers/soc/fsl/sleep_fsm.c | 279 ++++++++++++++++++++++++++++++++++++
+ drivers/soc/fsl/sleep_fsm.h | 130 +++++++++++++++++
+ 4 files changed, 579 insertions(+), 2 deletions(-)
create mode 100644 drivers/soc/fsl/rcpm.c
create mode 100644 drivers/soc/fsl/sleep_fsm.c
create mode 100644 drivers/soc/fsl/sleep_fsm.h
---- a/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
-+++ b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
-@@ -9,15 +9,20 @@ Properties:
-
- "fsl,mpc8548-pmc" should be listed for any chip whose PMC is
- compatible. "fsl,mpc8536-pmc" should also be listed for any chip
-- whose PMC is compatible, and implies deep-sleep capability.
-+ whose PMC is compatible, and implies deep-sleep capability and
-+ wake on user defined packet(wakeup on ARP).
-+
-+ "fsl,p1022-pmc" should be listed for any chip whose PMC is
-+ compatible, and implies lossless Ethernet capability during sleep.
-
- "fsl,mpc8641d-pmc" should be listed for any chip whose PMC is
- compatible; all statements below that apply to "fsl,mpc8548-pmc" also
- apply to "fsl,mpc8641d-pmc".
-
- Compatibility does not include bit assignments in SCCR/PMCDR/DEVDISR; these
-- bit assignments are indicated via the sleep specifier in each device's
-- sleep property.
-+ bit assignments are indicated via the clock nodes. Device which has a
-+ controllable clock source should have a "fsl,pmc-handle" property pointing
-+ to the clock node.
-
- - reg: For devices compatible with "fsl,mpc8349-pmc", the first resource
- is the PMC block, and the second resource is the Clock Configuration
-@@ -33,31 +38,35 @@ Properties:
- this is a phandle to an "fsl,gtm" node on which timer 4 can be used as
- a wakeup source from deep sleep.
-
--Sleep specifiers:
--
-- fsl,mpc8349-pmc: Sleep specifiers consist of one cell. For each bit
-- that is set in the cell, the corresponding bit in SCCR will be saved
-- and cleared on suspend, and restored on resume. This sleep controller
-- supports disabling and resuming devices at any time.
--
-- fsl,mpc8536-pmc: Sleep specifiers consist of three cells, the third of
-- which will be ORed into PMCDR upon suspend, and cleared from PMCDR
-- upon resume. The first two cells are as described for fsl,mpc8578-pmc.
-- This sleep controller only supports disabling devices during system
-- sleep, or permanently.
--
-- fsl,mpc8548-pmc: Sleep specifiers consist of one or two cells, the
-- first of which will be ORed into DEVDISR (and the second into
-- DEVDISR2, if present -- this cell should be zero or absent if the
-- hardware does not have DEVDISR2) upon a request for permanent device
-- disabling. This sleep controller does not support configuring devices
-- to disable during system sleep (unless supported by another compatible
-- match), or dynamically.
-+Clock nodes:
-+The clock nodes are to describe the masks in PM controller registers for each
-+soc clock.
-+- fsl,pmcdr-mask: For "fsl,mpc8548-pmc"-compatible devices, the mask will be
-+ ORed into PMCDR before suspend if the device using this clock is the wake-up
-+ source and need to be running during low power mode; clear the mask if
-+ otherwise.
-+
-+- fsl,sccr-mask: For "fsl,mpc8349-pmc"-compatible devices, the corresponding
-+ bit specified by the mask in SCCR will be saved and cleared on suspend, and
-+ restored on resume.
-+
-+- fsl,devdisr-mask: Contain one or two cells, depending on the availability of
-+ DEVDISR2 register. For compatible devices, the mask will be ORed into DEVDISR
-+ or DEVDISR2 when the clock should be permenently disabled.
-
- Example:
-
-- power@b00 {
-- compatible = "fsl,mpc8313-pmc", "fsl,mpc8349-pmc";
-- reg = <0xb00 0x100 0xa00 0x100>;
-- interrupts = <80 8>;
-+ power@e0070 {
-+ compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
-+ reg = <0xe0070 0x20>;
-+
-+ etsec1_clk: soc-clk@24 {
-+ fsl,pmcdr-mask = <0x00000080>;
-+ };
-+ etsec2_clk: soc-clk@25 {
-+ fsl,pmcdr-mask = <0x00000040>;
-+ };
-+ etsec3_clk: soc-clk@26 {
-+ fsl,pmcdr-mask = <0x00000020>;
-+ };
- };
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -437,8 +437,18 @@ CPUIDLE_METHOD_OF_DECLARE(psci, "psci",
/*
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
-@@ -0,0 +1,158 @@
+@@ -0,0 +1,156 @@
+/*
+ * Run Control and Power Management (RCPM) driver
+ *
+ struct device_node *np;
+
+ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
-+ if (!np) {
-+ pr_err("Can't find the RCPM node.\n");
++ if (!np)
+ return -EINVAL;
-+ }
+
+ if (match->data)
+ rcpm = (struct rcpm_config *)match->data;
+++ /dev/null
-From bb7412794db9b48dc4cc041c75d380e512dfff2a Mon Sep 17 00:00:00 2001
-From: Mathew McBride <matt@traverse.com.au>
-Date: Tue, 20 Nov 2018 14:36:54 +0800
-Subject: [PATCH] mmc: sdhci-of-esdhc: add voltage switch support for ls1043a
-
-Added voltage switch support for ls1043a.
-
-Signed-off-by: Mathew McBride <matt@traverse.com.au>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/mmc/host/sdhci-of-esdhc.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/mmc/host/sdhci-of-esdhc.c
-+++ b/drivers/mmc/host/sdhci-of-esdhc.c
-@@ -661,6 +661,7 @@ static void esdhc_reset(struct sdhci_hos
- static const struct of_device_id scfg_device_ids[] = {
- { .compatible = "fsl,t1040-scfg", },
- { .compatible = "fsl,ls1012a-scfg", },
-+ { .compatible = "fsl,ls1043a-scfg", },
- { .compatible = "fsl,ls1046a-scfg", },
- {}
- };
--- /dev/null
+From bba7af6efb0aad1d52ee5e7d80f9e2ab59d85e20 Mon Sep 17 00:00:00 2001
+From: Biwen Li <biwen.li@nxp.com>
+Date: Wed, 17 Apr 2019 18:58:52 +0800
+Subject: [PATCH] ptp: support layerscape
+
+This is an integrated patch of ptp for layerscape
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/net/ethernet/freescale/Makefile | 1 -
+ drivers/net/ethernet/freescale/gianfar_ptp.c | 572 ------------------
+ drivers/ptp/Makefile | 1 +
+ drivers/ptp/ptp_chardev.c | 4 +-
+ drivers/ptp/ptp_qoriq.c | 589 +++++++++++++++++++
+ include/linux/fsl/ptp_qoriq.h | 169 ++++++
+ 6 files changed, 761 insertions(+), 575 deletions(-)
+ delete mode 100644 drivers/net/ethernet/freescale/gianfar_ptp.c
+ create mode 100644 drivers/ptp/ptp_qoriq.c
+ create mode 100644 include/linux/fsl/ptp_qoriq.h
+
+--- a/drivers/net/ethernet/freescale/Makefile
++++ b/drivers/net/ethernet/freescale/Makefile
+@@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
+ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
+ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+-obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+ gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o
+ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ /dev/null
+@@ -1,572 +0,0 @@
+-/*
+- * PTP 1588 clock using the eTSEC
+- *
+- * Copyright (C) 2010 OMICRON electronics GmbH
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include <linux/device.h>
+-#include <linux/hrtimer.h>
+-#include <linux/interrupt.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_platform.h>
+-#include <linux/timex.h>
+-#include <linux/io.h>
+-
+-#include <linux/ptp_clock_kernel.h>
+-
+-#include "gianfar.h"
+-
+-/*
+- * gianfar ptp registers
+- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
+- */
+-struct gianfar_ptp_registers {
+- u32 tmr_ctrl; /* Timer control register */
+- u32 tmr_tevent; /* Timestamp event register */
+- u32 tmr_temask; /* Timer event mask register */
+- u32 tmr_pevent; /* Timestamp event register */
+- u32 tmr_pemask; /* Timer event mask register */
+- u32 tmr_stat; /* Timestamp status register */
+- u32 tmr_cnt_h; /* Timer counter high register */
+- u32 tmr_cnt_l; /* Timer counter low register */
+- u32 tmr_add; /* Timer drift compensation addend register */
+- u32 tmr_acc; /* Timer accumulator register */
+- u32 tmr_prsc; /* Timer prescale */
+- u8 res1[4];
+- u32 tmroff_h; /* Timer offset high */
+- u32 tmroff_l; /* Timer offset low */
+- u8 res2[8];
+- u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+- u32 tmr_alarm1_l; /* Timer alarm 1 high register */
+- u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+- u32 tmr_alarm2_l; /* Timer alarm 2 high register */
+- u8 res3[48];
+- u32 tmr_fiper1; /* Timer fixed period interval */
+- u32 tmr_fiper2; /* Timer fixed period interval */
+- u32 tmr_fiper3; /* Timer fixed period interval */
+- u8 res4[20];
+- u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+- u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+- u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+- u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+-};
+-
+-/* Bit definitions for the TMR_CTRL register */
+-#define ALM1P (1<<31) /* Alarm1 output polarity */
+-#define ALM2P (1<<30) /* Alarm2 output polarity */
+-#define FIPERST (1<<28) /* FIPER start indication */
+-#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+-#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+-#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+-#define TCLK_PERIOD_MASK (0x3ff)
+-#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+-#define FRD (1<<14) /* FIPER Realignment Disable */
+-#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+-#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+-#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+-#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+-#define COPH (1<<7) /* Generated clock output phase. */
+-#define CIPH (1<<6) /* External oscillator input clock phase */
+-#define TMSR (1<<5) /* Timer soft reset. */
+-#define BYP (1<<3) /* Bypass drift compensated clock */
+-#define TE (1<<2) /* 1588 timer enable. */
+-#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+-#define CKSEL_MASK (0x3)
+-
+-/* Bit definitions for the TMR_TEVENT register */
+-#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+-#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+-#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+-#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+-#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+-#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+-#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+-
+-/* Bit definitions for the TMR_TEMASK register */
+-#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+-#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+-#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+-#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+-#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+-#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+-
+-/* Bit definitions for the TMR_PEVENT register */
+-#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
+-#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+-#define RXP (1<<0) /* PTP frame has been received */
+-
+-/* Bit definitions for the TMR_PEMASK register */
+-#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+-#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+-#define RXPEN (1<<0) /* Receive PTP packet event enable */
+-
+-/* Bit definitions for the TMR_STAT register */
+-#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+-#define STAT_VEC_MASK (0x3f)
+-
+-/* Bit definitions for the TMR_PRSC register */
+-#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+-#define PRSC_OCK_MASK (0xffff)
+-
+-
+-#define DRIVER "gianfar_ptp"
+-#define DEFAULT_CKSEL 1
+-#define N_EXT_TS 2
+-#define REG_SIZE sizeof(struct gianfar_ptp_registers)
+-
+-struct etsects {
+- struct gianfar_ptp_registers __iomem *regs;
+- spinlock_t lock; /* protects regs */
+- struct ptp_clock *clock;
+- struct ptp_clock_info caps;
+- struct resource *rsrc;
+- int irq;
+- u64 alarm_interval; /* for periodic alarm */
+- u64 alarm_value;
+- u32 tclk_period; /* nanoseconds */
+- u32 tmr_prsc;
+- u32 tmr_add;
+- u32 cksel;
+- u32 tmr_fiper1;
+- u32 tmr_fiper2;
+-};
+-
+-/*
+- * Register access functions
+- */
+-
+-/* Caller must hold etsects->lock. */
+-static u64 tmr_cnt_read(struct etsects *etsects)
+-{
+- u64 ns;
+- u32 lo, hi;
+-
+- lo = gfar_read(&etsects->regs->tmr_cnt_l);
+- hi = gfar_read(&etsects->regs->tmr_cnt_h);
+- ns = ((u64) hi) << 32;
+- ns |= lo;
+- return ns;
+-}
+-
+-/* Caller must hold etsects->lock. */
+-static void tmr_cnt_write(struct etsects *etsects, u64 ns)
+-{
+- u32 hi = ns >> 32;
+- u32 lo = ns & 0xffffffff;
+-
+- gfar_write(&etsects->regs->tmr_cnt_l, lo);
+- gfar_write(&etsects->regs->tmr_cnt_h, hi);
+-}
+-
+-/* Caller must hold etsects->lock. */
+-static void set_alarm(struct etsects *etsects)
+-{
+- u64 ns;
+- u32 lo, hi;
+-
+- ns = tmr_cnt_read(etsects) + 1500000000ULL;
+- ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
+- ns -= etsects->tclk_period;
+- hi = ns >> 32;
+- lo = ns & 0xffffffff;
+- gfar_write(&etsects->regs->tmr_alarm1_l, lo);
+- gfar_write(&etsects->regs->tmr_alarm1_h, hi);
+-}
+-
+-/* Caller must hold etsects->lock. */
+-static void set_fipers(struct etsects *etsects)
+-{
+- set_alarm(etsects);
+- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+-}
+-
+-/*
+- * Interrupt service routine
+- */
+-
+-static irqreturn_t isr(int irq, void *priv)
+-{
+- struct etsects *etsects = priv;
+- struct ptp_clock_event event;
+- u64 ns;
+- u32 ack = 0, lo, hi, mask, val;
+-
+- val = gfar_read(&etsects->regs->tmr_tevent);
+-
+- if (val & ETS1) {
+- ack |= ETS1;
+- hi = gfar_read(&etsects->regs->tmr_etts1_h);
+- lo = gfar_read(&etsects->regs->tmr_etts1_l);
+- event.type = PTP_CLOCK_EXTTS;
+- event.index = 0;
+- event.timestamp = ((u64) hi) << 32;
+- event.timestamp |= lo;
+- ptp_clock_event(etsects->clock, &event);
+- }
+-
+- if (val & ETS2) {
+- ack |= ETS2;
+- hi = gfar_read(&etsects->regs->tmr_etts2_h);
+- lo = gfar_read(&etsects->regs->tmr_etts2_l);
+- event.type = PTP_CLOCK_EXTTS;
+- event.index = 1;
+- event.timestamp = ((u64) hi) << 32;
+- event.timestamp |= lo;
+- ptp_clock_event(etsects->clock, &event);
+- }
+-
+- if (val & ALM2) {
+- ack |= ALM2;
+- if (etsects->alarm_value) {
+- event.type = PTP_CLOCK_ALARM;
+- event.index = 0;
+- event.timestamp = etsects->alarm_value;
+- ptp_clock_event(etsects->clock, &event);
+- }
+- if (etsects->alarm_interval) {
+- ns = etsects->alarm_value + etsects->alarm_interval;
+- hi = ns >> 32;
+- lo = ns & 0xffffffff;
+- spin_lock(&etsects->lock);
+- gfar_write(&etsects->regs->tmr_alarm2_l, lo);
+- gfar_write(&etsects->regs->tmr_alarm2_h, hi);
+- spin_unlock(&etsects->lock);
+- etsects->alarm_value = ns;
+- } else {
+- gfar_write(&etsects->regs->tmr_tevent, ALM2);
+- spin_lock(&etsects->lock);
+- mask = gfar_read(&etsects->regs->tmr_temask);
+- mask &= ~ALM2EN;
+- gfar_write(&etsects->regs->tmr_temask, mask);
+- spin_unlock(&etsects->lock);
+- etsects->alarm_value = 0;
+- etsects->alarm_interval = 0;
+- }
+- }
+-
+- if (val & PP1) {
+- ack |= PP1;
+- event.type = PTP_CLOCK_PPS;
+- ptp_clock_event(etsects->clock, &event);
+- }
+-
+- if (ack) {
+- gfar_write(&etsects->regs->tmr_tevent, ack);
+- return IRQ_HANDLED;
+- } else
+- return IRQ_NONE;
+-}
+-
+-/*
+- * PTP clock operations
+- */
+-
+-static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+-{
+- u64 adj, diff;
+- u32 tmr_add;
+- int neg_adj = 0;
+- struct etsects *etsects = container_of(ptp, struct etsects, caps);
+-
+- if (scaled_ppm < 0) {
+- neg_adj = 1;
+- scaled_ppm = -scaled_ppm;
+- }
+- tmr_add = etsects->tmr_add;
+- adj = tmr_add;
+-
+- /* calculate diff as adj*(scaled_ppm/65536)/1000000
+- * and round() to the nearest integer
+- */
+- adj *= scaled_ppm;
+- diff = div_u64(adj, 8000000);
+- diff = (diff >> 13) + ((diff >> 12) & 1);
+-
+- tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+-
+- gfar_write(&etsects->regs->tmr_add, tmr_add);
+-
+- return 0;
+-}
+-
+-static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
+-{
+- s64 now;
+- unsigned long flags;
+- struct etsects *etsects = container_of(ptp, struct etsects, caps);
+-
+- spin_lock_irqsave(&etsects->lock, flags);
+-
+- now = tmr_cnt_read(etsects);
+- now += delta;
+- tmr_cnt_write(etsects, now);
+- set_fipers(etsects);
+-
+- spin_unlock_irqrestore(&etsects->lock, flags);
+-
+- return 0;
+-}
+-
+-static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
+- struct timespec64 *ts)
+-{
+- u64 ns;
+- unsigned long flags;
+- struct etsects *etsects = container_of(ptp, struct etsects, caps);
+-
+- spin_lock_irqsave(&etsects->lock, flags);
+-
+- ns = tmr_cnt_read(etsects);
+-
+- spin_unlock_irqrestore(&etsects->lock, flags);
+-
+- *ts = ns_to_timespec64(ns);
+-
+- return 0;
+-}
+-
+-static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
+- const struct timespec64 *ts)
+-{
+- u64 ns;
+- unsigned long flags;
+- struct etsects *etsects = container_of(ptp, struct etsects, caps);
+-
+- ns = timespec64_to_ns(ts);
+-
+- spin_lock_irqsave(&etsects->lock, flags);
+-
+- tmr_cnt_write(etsects, ns);
+- set_fipers(etsects);
+-
+- spin_unlock_irqrestore(&etsects->lock, flags);
+-
+- return 0;
+-}
+-
+-static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
+- struct ptp_clock_request *rq, int on)
+-{
+- struct etsects *etsects = container_of(ptp, struct etsects, caps);
+- unsigned long flags;
+- u32 bit, mask;
+-
+- switch (rq->type) {
+- case PTP_CLK_REQ_EXTTS:
+- switch (rq->extts.index) {
+- case 0:
+- bit = ETS1EN;
+- break;
+- case 1:
+- bit = ETS2EN;
+- break;
+- default:
+- return -EINVAL;
+- }
+- spin_lock_irqsave(&etsects->lock, flags);
+- mask = gfar_read(&etsects->regs->tmr_temask);
+- if (on)
+- mask |= bit;
+- else
+- mask &= ~bit;
+- gfar_write(&etsects->regs->tmr_temask, mask);
+- spin_unlock_irqrestore(&etsects->lock, flags);
+- return 0;
+-
+- case PTP_CLK_REQ_PPS:
+- spin_lock_irqsave(&etsects->lock, flags);
+- mask = gfar_read(&etsects->regs->tmr_temask);
+- if (on)
+- mask |= PP1EN;
+- else
+- mask &= ~PP1EN;
+- gfar_write(&etsects->regs->tmr_temask, mask);
+- spin_unlock_irqrestore(&etsects->lock, flags);
+- return 0;
+-
+- default:
+- break;
+- }
+-
+- return -EOPNOTSUPP;
+-}
+-
+-static const struct ptp_clock_info ptp_gianfar_caps = {
+- .owner = THIS_MODULE,
+- .name = "gianfar clock",
+- .max_adj = 512000,
+- .n_alarm = 0,
+- .n_ext_ts = N_EXT_TS,
+- .n_per_out = 0,
+- .n_pins = 0,
+- .pps = 1,
+- .adjfine = ptp_gianfar_adjfine,
+- .adjtime = ptp_gianfar_adjtime,
+- .gettime64 = ptp_gianfar_gettime,
+- .settime64 = ptp_gianfar_settime,
+- .enable = ptp_gianfar_enable,
+-};
+-
+-static int gianfar_ptp_probe(struct platform_device *dev)
+-{
+- struct device_node *node = dev->dev.of_node;
+- struct etsects *etsects;
+- struct timespec64 now;
+- int err = -ENOMEM;
+- u32 tmr_ctrl;
+- unsigned long flags;
+-
+- etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
+- if (!etsects)
+- goto no_memory;
+-
+- err = -ENODEV;
+-
+- etsects->caps = ptp_gianfar_caps;
+-
+- if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
+- etsects->cksel = DEFAULT_CKSEL;
+-
+- if (of_property_read_u32(node,
+- "fsl,tclk-period", &etsects->tclk_period) ||
+- of_property_read_u32(node,
+- "fsl,tmr-prsc", &etsects->tmr_prsc) ||
+- of_property_read_u32(node,
+- "fsl,tmr-add", &etsects->tmr_add) ||
+- of_property_read_u32(node,
+- "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
+- of_property_read_u32(node,
+- "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
+- of_property_read_u32(node,
+- "fsl,max-adj", &etsects->caps.max_adj)) {
+- pr_err("device tree node missing required elements\n");
+- goto no_node;
+- }
+-
+- etsects->irq = platform_get_irq(dev, 0);
+-
+- if (etsects->irq < 0) {
+- pr_err("irq not in device tree\n");
+- goto no_node;
+- }
+- if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
+- pr_err("request_irq failed\n");
+- goto no_node;
+- }
+-
+- etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
+- if (!etsects->rsrc) {
+- pr_err("no resource\n");
+- goto no_resource;
+- }
+- if (request_resource(&iomem_resource, etsects->rsrc)) {
+- pr_err("resource busy\n");
+- goto no_resource;
+- }
+-
+- spin_lock_init(&etsects->lock);
+-
+- etsects->regs = ioremap(etsects->rsrc->start,
+- resource_size(etsects->rsrc));
+- if (!etsects->regs) {
+- pr_err("ioremap ptp registers failed\n");
+- goto no_ioremap;
+- }
+- getnstimeofday64(&now);
+- ptp_gianfar_settime(&etsects->caps, &now);
+-
+- tmr_ctrl =
+- (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+- (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+-
+- spin_lock_irqsave(&etsects->lock, flags);
+-
+- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
+- gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
+- gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
+- gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+- gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+- set_alarm(etsects);
+- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
+-
+- spin_unlock_irqrestore(&etsects->lock, flags);
+-
+- etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
+- if (IS_ERR(etsects->clock)) {
+- err = PTR_ERR(etsects->clock);
+- goto no_clock;
+- }
+- gfar_phc_index = ptp_clock_index(etsects->clock);
+-
+- platform_set_drvdata(dev, etsects);
+-
+- return 0;
+-
+-no_clock:
+- iounmap(etsects->regs);
+-no_ioremap:
+- release_resource(etsects->rsrc);
+-no_resource:
+- free_irq(etsects->irq, etsects);
+-no_node:
+- kfree(etsects);
+-no_memory:
+- return err;
+-}
+-
+-static int gianfar_ptp_remove(struct platform_device *dev)
+-{
+- struct etsects *etsects = platform_get_drvdata(dev);
+-
+- gfar_write(&etsects->regs->tmr_temask, 0);
+- gfar_write(&etsects->regs->tmr_ctrl, 0);
+-
+- gfar_phc_index = -1;
+- ptp_clock_unregister(etsects->clock);
+- iounmap(etsects->regs);
+- release_resource(etsects->rsrc);
+- free_irq(etsects->irq, etsects);
+- kfree(etsects);
+-
+- return 0;
+-}
+-
+-static const struct of_device_id match_table[] = {
+- { .compatible = "fsl,etsec-ptp" },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, match_table);
+-
+-static struct platform_driver gianfar_ptp_driver = {
+- .driver = {
+- .name = "gianfar_ptp",
+- .of_match_table = match_table,
+- },
+- .probe = gianfar_ptp_probe,
+- .remove = gianfar_ptp_remove,
+-};
+-
+-module_platform_driver(gianfar_ptp_driver);
+-
+-MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+-MODULE_DESCRIPTION("PTP clock using the eTSEC");
+-MODULE_LICENSE("GPL");
+--- a/drivers/ptp/Makefile
++++ b/drivers/ptp/Makefile
+@@ -9,3 +9,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_
+ obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
+ obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
+ obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
++obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -224,7 +224,7 @@ long ptp_ioctl(struct posix_clock *pc, u
+ }
+ pct = &sysoff->ts[0];
+ for (i = 0; i < sysoff->n_samples; i++) {
+- getnstimeofday64(&ts);
++ ktime_get_real_ts64(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+@@ -235,7 +235,7 @@ long ptp_ioctl(struct posix_clock *pc, u
+ pct->nsec = ts.tv_nsec;
+ pct++;
+ }
+- getnstimeofday64(&ts);
++ ktime_get_real_ts64(&ts);
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
+--- /dev/null
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -0,0 +1,589 @@
++/*
++ * PTP 1588 clock for Freescale QorIQ 1588 timer
++ *
++ * Copyright (C) 2010 OMICRON electronics GmbH
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/device.h>
++#include <linux/hrtimer.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/timex.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++
++#include <linux/fsl/ptp_qoriq.h>
++
++/*
++ * Register access functions
++ */
++
++/* Caller must hold qoriq_ptp->lock. */
++static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
++{
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++ u64 ns;
++ u32 lo, hi;
++
++	lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
++	hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
++ ns = ((u64) hi) << 32;
++ ns |= lo;
++ return ns;
++}
++
++/* Caller must hold qoriq_ptp->lock. */
++static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
++{
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++ u32 hi = ns >> 32;
++ u32 lo = ns & 0xffffffff;
++
++	qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
++	qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
++}
++
++/* Caller must hold qoriq_ptp->lock. */
++static void set_alarm(struct qoriq_ptp *qoriq_ptp)
++{
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++ u64 ns;
++ u32 lo, hi;
++
++ ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL;
++ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
++ ns -= qoriq_ptp->tclk_period;
++ hi = ns >> 32;
++ lo = ns & 0xffffffff;
++	qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
++	qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
++}
++
++/* Caller must hold qoriq_ptp->lock. */
++static void set_fipers(struct qoriq_ptp *qoriq_ptp)
++{
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++
++ set_alarm(qoriq_ptp);
++	qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
++	qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
++}
++
++/*
++ * Interrupt service routine
++ */
++
++static irqreturn_t isr(int irq, void *priv)
++{
++ struct qoriq_ptp *qoriq_ptp = priv;
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++ struct ptp_clock_event event;
++ u64 ns;
++ u32 ack = 0, lo, hi, mask, val;
++
++	val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
++
++ if (val & ETS1) {
++ ack |= ETS1;
++		hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
++		lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
++ event.type = PTP_CLOCK_EXTTS;
++ event.index = 0;
++ event.timestamp = ((u64) hi) << 32;
++ event.timestamp |= lo;
++ ptp_clock_event(qoriq_ptp->clock, &event);
++ }
++
++ if (val & ETS2) {
++ ack |= ETS2;
++		hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
++		lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
++ event.type = PTP_CLOCK_EXTTS;
++ event.index = 1;
++ event.timestamp = ((u64) hi) << 32;
++ event.timestamp |= lo;
++ ptp_clock_event(qoriq_ptp->clock, &event);
++ }
++
++ if (val & ALM2) {
++ ack |= ALM2;
++ if (qoriq_ptp->alarm_value) {
++ event.type = PTP_CLOCK_ALARM;
++ event.index = 0;
++ event.timestamp = qoriq_ptp->alarm_value;
++ ptp_clock_event(qoriq_ptp->clock, &event);
++ }
++ if (qoriq_ptp->alarm_interval) {
++ ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval;
++ hi = ns >> 32;
++ lo = ns & 0xffffffff;
++ spin_lock(&qoriq_ptp->lock);
++			qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
++			qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
++ spin_unlock(&qoriq_ptp->lock);
++ qoriq_ptp->alarm_value = ns;
++ } else {
++			qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
++			spin_lock(&qoriq_ptp->lock);
++			mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
++			mask &= ~ALM2EN;
++			qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
++ spin_unlock(&qoriq_ptp->lock);
++ qoriq_ptp->alarm_value = 0;
++ qoriq_ptp->alarm_interval = 0;
++ }
++ }
++
++ if (val & PP1) {
++ ack |= PP1;
++ event.type = PTP_CLOCK_PPS;
++ ptp_clock_event(qoriq_ptp->clock, &event);
++ }
++
++ if (ack) {
++		qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
++ return IRQ_HANDLED;
++ } else
++ return IRQ_NONE;
++}
++
++/*
++ * PTP clock operations
++ */
++
++static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
++{
++ u64 adj, diff;
++ u32 tmr_add;
++ int neg_adj = 0;
++ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++
++ if (scaled_ppm < 0) {
++ neg_adj = 1;
++ scaled_ppm = -scaled_ppm;
++ }
++ tmr_add = qoriq_ptp->tmr_add;
++ adj = tmr_add;
++
++ /* calculate diff as adj*(scaled_ppm/65536)/1000000
++ * and round() to the nearest integer
++ */
++ adj *= scaled_ppm;
++ diff = div_u64(adj, 8000000);
++ diff = (diff >> 13) + ((diff >> 12) & 1);
++
++ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
++
++	qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
++
++ return 0;
++}
++
++static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
++{
++ s64 now;
++ unsigned long flags;
++ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
++
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++
++ now = tmr_cnt_read(qoriq_ptp);
++ now += delta;
++ tmr_cnt_write(qoriq_ptp, now);
++ set_fipers(qoriq_ptp);
++
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++
++ return 0;
++}
++
++static int ptp_qoriq_gettime(struct ptp_clock_info *ptp,
++ struct timespec64 *ts)
++{
++ u64 ns;
++ unsigned long flags;
++ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
++
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++
++ ns = tmr_cnt_read(qoriq_ptp);
++
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++
++ *ts = ns_to_timespec64(ns);
++
++ return 0;
++}
++
++static int ptp_qoriq_settime(struct ptp_clock_info *ptp,
++ const struct timespec64 *ts)
++{
++ u64 ns;
++ unsigned long flags;
++ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
++
++ ns = timespec64_to_ns(ts);
++
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++
++ tmr_cnt_write(qoriq_ptp, ns);
++ set_fipers(qoriq_ptp);
++
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++
++ return 0;
++}
++
++static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
++ struct ptp_clock_request *rq, int on)
++{
++ struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++ unsigned long flags;
++ u32 bit, mask;
++
++ switch (rq->type) {
++ case PTP_CLK_REQ_EXTTS:
++ switch (rq->extts.index) {
++ case 0:
++ bit = ETS1EN;
++ break;
++ case 1:
++ bit = ETS2EN;
++ break;
++ default:
++ return -EINVAL;
++ }
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++		mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
++ if (on)
++ mask |= bit;
++ else
++ mask &= ~bit;
++		qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++ return 0;
++
++ case PTP_CLK_REQ_PPS:
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++		mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
++ if (on)
++ mask |= PP1EN;
++ else
++ mask &= ~PP1EN;
++		qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++ return 0;
++
++ default:
++ break;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++static const struct ptp_clock_info ptp_qoriq_caps = {
++ .owner = THIS_MODULE,
++ .name = "qoriq ptp clock",
++ .max_adj = 512000,
++ .n_alarm = 0,
++ .n_ext_ts = N_EXT_TS,
++ .n_per_out = 0,
++ .n_pins = 0,
++ .pps = 1,
++ .adjfine = ptp_qoriq_adjfine,
++ .adjtime = ptp_qoriq_adjtime,
++ .gettime64 = ptp_qoriq_gettime,
++ .settime64 = ptp_qoriq_settime,
++ .enable = ptp_qoriq_enable,
++};
++
++/**
++ * qoriq_ptp_nominal_freq - calculate nominal frequency according to
++ * reference clock frequency
++ *
++ * @clk_src: reference clock frequency
++ *
++ * The nominal frequency is the desired clock frequency.
++ * It should be less than the reference clock frequency.
++ * It should be a factor of 1000MHz.
++ *
++ * Return the nominal frequency
++ */
++static u32 qoriq_ptp_nominal_freq(u32 clk_src)
++{
++ u32 remainder = 0;
++
++ clk_src /= 1000000;
++ remainder = clk_src % 100;
++ if (remainder) {
++ clk_src -= remainder;
++ clk_src += 100;
++ }
++
++ do {
++ clk_src -= 100;
++
++ } while (1000 % clk_src);
++
++ return clk_src * 1000000;
++}
++
++/**
++ * qoriq_ptp_auto_config - calculate a set of default configurations
++ *
++ * @qoriq_ptp: pointer to qoriq_ptp
++ * @node: pointer to device_node
++ *
++ * If below dts properties are not provided, this function will be
++ * called to calculate a set of default configurations for them.
++ * "fsl,tclk-period"
++ * "fsl,tmr-prsc"
++ * "fsl,tmr-add"
++ * "fsl,tmr-fiper1"
++ * "fsl,tmr-fiper2"
++ * "fsl,max-adj"
++ *
++ * Return 0 if success
++ */
++static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp,
++ struct device_node *node)
++{
++ struct clk *clk;
++ u64 freq_comp;
++ u64 max_adj;
++ u32 nominal_freq;
++ u32 remainder = 0;
++ u32 clk_src = 0;
++
++ qoriq_ptp->cksel = DEFAULT_CKSEL;
++
++ clk = of_clk_get(node, 0);
++ if (!IS_ERR(clk)) {
++ clk_src = clk_get_rate(clk);
++ clk_put(clk);
++ }
++
++ if (clk_src <= 100000000UL) {
++ pr_err("error reference clock value, or lower than 100MHz\n");
++ return -EINVAL;
++ }
++
++ nominal_freq = qoriq_ptp_nominal_freq(clk_src);
++ if (!nominal_freq)
++ return -EINVAL;
++
++ qoriq_ptp->tclk_period = 1000000000UL / nominal_freq;
++ qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC;
++
++ /* Calculate initial frequency compensation value for TMR_ADD register.
++ * freq_comp = ceil(2^32 / freq_ratio)
++ * freq_ratio = reference_clock_freq / nominal_freq
++ */
++ freq_comp = ((u64)1 << 32) * nominal_freq;
++ freq_comp = div_u64_rem(freq_comp, clk_src, &remainder);
++ if (remainder)
++ freq_comp++;
++
++ qoriq_ptp->tmr_add = freq_comp;
++ qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period;
++ qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period;
++
++ /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
++ * freq_ratio = reference_clock_freq / nominal_freq
++ */
++ max_adj = 1000000000ULL * (clk_src - nominal_freq);
++ max_adj = div_u64(max_adj, nominal_freq) - 1;
++ qoriq_ptp->caps.max_adj = max_adj;
++
++ return 0;
++}
++
++static int qoriq_ptp_probe(struct platform_device *dev)
++{
++ struct device_node *node = dev->dev.of_node;
++ struct qoriq_ptp *qoriq_ptp;
++ struct qoriq_ptp_registers *regs;
++ struct timespec64 now;
++ int err = -ENOMEM;
++ u32 tmr_ctrl;
++ unsigned long flags;
++ void __iomem *base;
++
++ qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
++ if (!qoriq_ptp)
++ goto no_memory;
++
++ err = -EINVAL;
++
++ qoriq_ptp->caps = ptp_qoriq_caps;
++
++ if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel))
++ qoriq_ptp->cksel = DEFAULT_CKSEL;
++
++ if (of_property_read_u32(node,
++ "fsl,tclk-period", &qoriq_ptp->tclk_period) ||
++ of_property_read_u32(node,
++ "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) ||
++ of_property_read_u32(node,
++ "fsl,tmr-add", &qoriq_ptp->tmr_add) ||
++ of_property_read_u32(node,
++ "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) ||
++ of_property_read_u32(node,
++ "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) ||
++ of_property_read_u32(node,
++ "fsl,max-adj", &qoriq_ptp->caps.max_adj)) {
++ pr_warn("device tree node missing required elements, try automatic configuration\n");
++
++ if (qoriq_ptp_auto_config(qoriq_ptp, node))
++ goto no_config;
++ }
++
++ err = -ENODEV;
++
++ qoriq_ptp->irq = platform_get_irq(dev, 0);
++
++ if (qoriq_ptp->irq < 0) {
++ pr_err("irq not in device tree\n");
++ goto no_node;
++ }
++ if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
++ pr_err("request_irq failed\n");
++ goto no_node;
++ }
++
++ qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!qoriq_ptp->rsrc) {
++ pr_err("no resource\n");
++ goto no_resource;
++ }
++ if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) {
++ pr_err("resource busy\n");
++ goto no_resource;
++ }
++
++ spin_lock_init(&qoriq_ptp->lock);
++
++ base = ioremap(qoriq_ptp->rsrc->start,
++ resource_size(qoriq_ptp->rsrc));
++ if (!base) {
++ pr_err("ioremap ptp registers failed\n");
++ goto no_ioremap;
++ }
++
++ qoriq_ptp->base = base;
++
++ if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
++ qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
++ qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
++ qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
++ qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
++ } else {
++ qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
++ qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
++ qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
++ qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
++ }
++
++ ktime_get_real_ts64(&now);
++ ptp_qoriq_settime(&qoriq_ptp->caps, &now);
++
++ tmr_ctrl =
++ (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
++ (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT;
++
++ spin_lock_irqsave(&qoriq_ptp->lock, flags);
++
++ regs = &qoriq_ptp->regs;
++	qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
++	qoriq_write(&regs->ctrl_regs->tmr_add, qoriq_ptp->tmr_add);
++	qoriq_write(&regs->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc);
++	qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
++	qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
++	set_alarm(qoriq_ptp);
++	qoriq_write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD);
++
++ spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
++
++ qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev);
++ if (IS_ERR(qoriq_ptp->clock)) {
++ err = PTR_ERR(qoriq_ptp->clock);
++ goto no_clock;
++ }
++ qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock);
++
++ platform_set_drvdata(dev, qoriq_ptp);
++
++ return 0;
++
++no_clock:
++ iounmap(qoriq_ptp->base);
++no_ioremap:
++ release_resource(qoriq_ptp->rsrc);
++no_resource:
++ free_irq(qoriq_ptp->irq, qoriq_ptp);
++no_config:
++no_node:
++ kfree(qoriq_ptp);
++no_memory:
++ return err;
++}
++
++static int qoriq_ptp_remove(struct platform_device *dev)
++{
++ struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
++ struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
++
++	qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
++	qoriq_write(&regs->ctrl_regs->tmr_ctrl, 0);
++
++ ptp_clock_unregister(qoriq_ptp->clock);
++ iounmap(qoriq_ptp->base);
++ release_resource(qoriq_ptp->rsrc);
++ free_irq(qoriq_ptp->irq, qoriq_ptp);
++ kfree(qoriq_ptp);
++
++ return 0;
++}
++
++static const struct of_device_id match_table[] = {
++ { .compatible = "fsl,etsec-ptp" },
++ { .compatible = "fsl,fman-ptp-timer" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, match_table);
++
++static struct platform_driver qoriq_ptp_driver = {
++ .driver = {
++ .name = "ptp_qoriq",
++ .of_match_table = match_table,
++ },
++ .probe = qoriq_ptp_probe,
++ .remove = qoriq_ptp_remove,
++};
++
++module_platform_driver(qoriq_ptp_driver);
++
++MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
++MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/include/linux/fsl/ptp_qoriq.h
+@@ -0,0 +1,169 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2010 OMICRON electronics GmbH
++ * Copyright 2018 NXP
++ */
++#ifndef __PTP_QORIQ_H__
++#define __PTP_QORIQ_H__
++
++#include <linux/io.h>
++#include <linux/ptp_clock_kernel.h>
++
++/*
++ * qoriq ptp registers
++ */
++struct ctrl_regs {
++ u32 tmr_ctrl; /* Timer control register */
++ u32 tmr_tevent; /* Timestamp event register */
++ u32 tmr_temask; /* Timer event mask register */
++ u32 tmr_pevent; /* Timestamp event register */
++ u32 tmr_pemask; /* Timer event mask register */
++ u32 tmr_stat; /* Timestamp status register */
++ u32 tmr_cnt_h; /* Timer counter high register */
++ u32 tmr_cnt_l; /* Timer counter low register */
++ u32 tmr_add; /* Timer drift compensation addend register */
++ u32 tmr_acc; /* Timer accumulator register */
++ u32 tmr_prsc; /* Timer prescale */
++ u8 res1[4];
++ u32 tmroff_h; /* Timer offset high */
++ u32 tmroff_l; /* Timer offset low */
++};
++
++struct alarm_regs {
++ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
++ u32 tmr_alarm1_l; /* Timer alarm 1 high register */
++ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
++ u32 tmr_alarm2_l; /* Timer alarm 2 high register */
++};
++
++struct fiper_regs {
++ u32 tmr_fiper1; /* Timer fixed period interval */
++ u32 tmr_fiper2; /* Timer fixed period interval */
++ u32 tmr_fiper3; /* Timer fixed period interval */
++};
++
++struct etts_regs {
++ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
++ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
++ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
++ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
++};
++
++struct qoriq_ptp_registers {
++ struct ctrl_regs __iomem *ctrl_regs;
++ struct alarm_regs __iomem *alarm_regs;
++ struct fiper_regs __iomem *fiper_regs;
++ struct etts_regs __iomem *etts_regs;
++};
++
++/* Offset definitions for the four register groups */
++#define CTRL_REGS_OFFSET 0x0
++#define ALARM_REGS_OFFSET 0x40
++#define FIPER_REGS_OFFSET 0x80
++#define ETTS_REGS_OFFSET 0xa0
++
++#define FMAN_CTRL_REGS_OFFSET 0x80
++#define FMAN_ALARM_REGS_OFFSET 0xb8
++#define FMAN_FIPER_REGS_OFFSET 0xd0
++#define FMAN_ETTS_REGS_OFFSET 0xe0
++
++
++/* Bit definitions for the TMR_CTRL register */
++#define ALM1P (1<<31) /* Alarm1 output polarity */
++#define ALM2P (1<<30) /* Alarm2 output polarity */
++#define FIPERST (1<<28) /* FIPER start indication */
++#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
++#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
++#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
++#define TCLK_PERIOD_MASK (0x3ff)
++#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
++#define FRD (1<<14) /* FIPER Realignment Disable */
++#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
++#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
++#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
++#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
++#define COPH (1<<7) /* Generated clock output phase. */
++#define CIPH (1<<6) /* External oscillator input clock phase */
++#define TMSR (1<<5) /* Timer soft reset. */
++#define BYP (1<<3) /* Bypass drift compensated clock */
++#define TE (1<<2) /* 1588 timer enable. */
++#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
++#define CKSEL_MASK (0x3)
++
++/* Bit definitions for the TMR_TEVENT register */
++#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
++#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
++#define ALM2 (1<<17) /* Current time = alarm time register 2 */
++#define ALM1 (1<<16) /* Current time = alarm time register 1 */
++#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
++#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
++#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
++
++/* Bit definitions for the TMR_TEMASK register */
++#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
++#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
++#define ALM2EN (1<<17) /* Timer ALM2 event enable */
++#define ALM1EN (1<<16) /* Timer ALM1 event enable */
++#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
++#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
++
++/* Bit definitions for the TMR_PEVENT register */
++#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
++#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
++#define RXP (1<<0) /* PTP frame has been received */
++
++/* Bit definitions for the TMR_PEMASK register */
++#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
++#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
++#define RXPEN (1<<0) /* Receive PTP packet event enable */
++
++/* Bit definitions for the TMR_STAT register */
++#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
++#define STAT_VEC_MASK (0x3f)
++
++/* Bit definitions for the TMR_PRSC register */
++#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
++#define PRSC_OCK_MASK (0xffff)
++
++
++#define DRIVER "ptp_qoriq"
++#define N_EXT_TS 2
++
++#define DEFAULT_CKSEL 1
++#define DEFAULT_TMR_PRSC 2
++#define DEFAULT_FIPER1_PERIOD 1000000000
++#define DEFAULT_FIPER2_PERIOD 100000
++
++struct qoriq_ptp {
++ void __iomem *base;
++ struct qoriq_ptp_registers regs;
++ spinlock_t lock; /* protects regs */
++ struct ptp_clock *clock;
++ struct ptp_clock_info caps;
++ struct resource *rsrc;
++ int irq;
++ int phc_index;
++ u64 alarm_interval; /* for periodic alarm */
++ u64 alarm_value;
++ u32 tclk_period; /* nanoseconds */
++ u32 tmr_prsc;
++ u32 tmr_add;
++ u32 cksel;
++ u32 tmr_fiper1;
++ u32 tmr_fiper2;
++};
++
++static inline u32 qoriq_read(unsigned __iomem *addr)
++{
++ u32 val;
++
++ val = ioread32be(addr);
++ return val;
++}
++
++static inline void qoriq_write(unsigned __iomem *addr, u32 val)
++{
++ iowrite32be(val, addr);
++}
++
++#endif
--- /dev/null
+From 2ddaec76dbe9b6e911e2a1442248ab103909cce3 Mon Sep 17 00:00:00 2001
+From: Biwen Li <biwen.li@nxp.com>
+Date: Wed, 17 Apr 2019 18:59:06 +0800
+Subject: [PATCH] tmu: support layerscape
+
+This is an integrated patch of tmu for layerscape
+
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Eduardo Valentin <edubezval@gmail.com>
+Signed-off-by: Fabio Estevam <fabio.estevam@nxp.com>
+Signed-off-by: Yuantian Tang <andy.tang@nxp.com>
+---
+ drivers/thermal/qoriq_thermal.c | 102 ++++++++++++++------------------
+ 1 file changed, 46 insertions(+), 56 deletions(-)
+
+--- a/drivers/thermal/qoriq_thermal.c
++++ b/drivers/thermal/qoriq_thermal.c
+@@ -69,14 +69,21 @@ struct qoriq_tmu_regs {
+ u32 ttr3cr; /* Temperature Range 3 Control Register */
+ };
+
++struct qoriq_tmu_data;
++
+ /*
+ * Thermal zone data
+ */
++struct qoriq_sensor {
++ struct thermal_zone_device *tzd;
++ struct qoriq_tmu_data *qdata;
++ int id;
++};
++
+ struct qoriq_tmu_data {
+- struct thermal_zone_device *tz;
+ struct qoriq_tmu_regs __iomem *regs;
+- int sensor_id;
+ bool little_endian;
++ struct qoriq_sensor *sensor[SITES_MAX];
+ };
+
+ static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+@@ -97,48 +104,51 @@ static u32 tmu_read(struct qoriq_tmu_dat
+
+ static int tmu_get_temp(void *p, int *temp)
+ {
++ struct qoriq_sensor *qsensor = p;
++ struct qoriq_tmu_data *qdata = qsensor->qdata;
+ u32 val;
+- struct qoriq_tmu_data *data = p;
+
+- val = tmu_read(data, &data->regs->site[data->sensor_id].tritsr);
++ val = tmu_read(qdata, &qdata->regs->site[qsensor->id].tritsr);
+ *temp = (val & 0xff) * 1000;
+
+ return 0;
+ }
+
+-static int qoriq_tmu_get_sensor_id(void)
++static const struct thermal_zone_of_device_ops tmu_tz_ops = {
++ .get_temp = tmu_get_temp,
++};
++
++static int qoriq_tmu_register_tmu_zone(struct platform_device *pdev)
+ {
+- int ret, id;
+- struct of_phandle_args sensor_specs;
+- struct device_node *np, *sensor_np;
++ struct qoriq_tmu_data *qdata = platform_get_drvdata(pdev);
++ int id, sites = 0;
+
+- np = of_find_node_by_name(NULL, "thermal-zones");
+- if (!np)
+- return -ENODEV;
++ for (id = 0; id < SITES_MAX; id++) {
++ qdata->sensor[id] = devm_kzalloc(&pdev->dev,
++ sizeof(struct qoriq_sensor), GFP_KERNEL);
++ if (!qdata->sensor[id])
++ return -ENOMEM;
++
++ qdata->sensor[id]->id = id;
++ qdata->sensor[id]->qdata = qdata;
++
++ qdata->sensor[id]->tzd = devm_thermal_zone_of_sensor_register(
++ &pdev->dev, id, qdata->sensor[id], &tmu_tz_ops);
++ if (IS_ERR(qdata->sensor[id]->tzd)) {
++ if (PTR_ERR(qdata->sensor[id]->tzd) == -ENODEV)
++ continue;
++ else
++ return PTR_ERR(qdata->sensor[id]->tzd);
+
+- sensor_np = of_get_next_child(np, NULL);
+- ret = of_parse_phandle_with_args(sensor_np, "thermal-sensors",
+- "#thermal-sensor-cells",
+- 0, &sensor_specs);
+- if (ret) {
+- of_node_put(np);
+- of_node_put(sensor_np);
+- return ret;
+- }
+-
+- if (sensor_specs.args_count >= 1) {
+- id = sensor_specs.args[0];
+- WARN(sensor_specs.args_count > 1,
+- "%s: too many cells in sensor specifier %d\n",
+- sensor_specs.np->name, sensor_specs.args_count);
+- } else {
+- id = 0;
+- }
++ }
+
+- of_node_put(np);
+- of_node_put(sensor_np);
++ sites |= 0x1 << (15 - id);
++ }
++ /* Enable monitoring */
++ if (sites != 0)
++ tmu_write(qdata, sites | TMR_ME | TMR_ALPF, &qdata->regs->tmr);
+
+- return id;
++ return 0;
+ }
+
+ static int qoriq_tmu_calibration(struct platform_device *pdev)
+@@ -188,16 +198,11 @@ static void qoriq_tmu_init_device(struct
+ tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+ }
+
+-static const struct thermal_zone_of_device_ops tmu_tz_ops = {
+- .get_temp = tmu_get_temp,
+-};
+-
+ static int qoriq_tmu_probe(struct platform_device *pdev)
+ {
+ int ret;
+ struct qoriq_tmu_data *data;
+ struct device_node *np = pdev->dev.of_node;
+- u32 site = 0;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Device OF-Node is NULL");
+@@ -213,13 +218,6 @@ static int qoriq_tmu_probe(struct platfo
+
+ data->little_endian = of_property_read_bool(np, "little-endian");
+
+- data->sensor_id = qoriq_tmu_get_sensor_id();
+- if (data->sensor_id < 0) {
+- dev_err(&pdev->dev, "Failed to get sensor id\n");
+- ret = -ENODEV;
+- goto err_iomap;
+- }
+-
+ data->regs = of_iomap(np, 0);
+ if (!data->regs) {
+ dev_err(&pdev->dev, "Failed to get memory region\n");
+@@ -233,19 +231,13 @@ static int qoriq_tmu_probe(struct platfo
+ if (ret < 0)
+ goto err_tmu;
+
+- data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
+- data, &tmu_tz_ops);
+- if (IS_ERR(data->tz)) {
+- ret = PTR_ERR(data->tz);
+- dev_err(&pdev->dev,
+- "Failed to register thermal zone device %d\n", ret);
+- goto err_tmu;
++ ret = qoriq_tmu_register_tmu_zone(pdev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Failed to register sensors\n");
++ ret = -ENODEV;
++ goto err_iomap;
+ }
+
+- /* Enable monitoring */
+- site |= 0x1 << (15 - data->sensor_id);
+- tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
+-
+ return 0;
+
+ err_tmu:
+@@ -261,8 +253,6 @@ static int qoriq_tmu_remove(struct platf
+ {
+ struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+
+- thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
+-
+ /* Disable monitoring */
+ tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+
+++ /dev/null
-From 703b2ca94467a029942fa478a38f0a14c8109766 Mon Sep 17 00:00:00 2001
-From: Bharat Bhushan <bharat.bhushan@nxp.com>
-Date: Fri, 1 Mar 2019 13:33:58 +0800
-Subject: [PATCH] vfio/fsl-mc: Improve send mc-command and read response
-
-Actually there is no ordering need when reading response
-from mc-portal. Similarly when writing the mc-command,
-ordering needed before command is submitted.
-
-This patch removes un-necessary barriers, response is read
-relaxed and maintain ordering when submit command. This also
-fixes compilation issue with newer kernel.
-
-Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
----
- drivers/vfio/fsl-mc/vfio_fsl_mc.c | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
-
---- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
-+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
-@@ -331,9 +331,7 @@ static int vfio_fsl_mc_dprc_wait_for_res
- u64 header;
- struct mc_cmd_header *resp_hdr;
-
-- __iormb();
-- header = readq(ioaddr);
-- __iormb();
-+ header = cpu_to_le64(readq_relaxed(ioaddr));
-
- resp_hdr = (struct mc_cmd_header *)&header;
- status = (enum mc_cmd_status)resp_hdr->status;
-@@ -353,9 +351,12 @@ static int vfio_fsl_mc_send_command(void
- {
- int i;
-
-- /* Write at command header in the end */
-- for (i = 7; i >= 0; i--)
-- writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
-+ /* Write at command parameter into portal */
-+ for (i = 7; i >= 1; i--)
-+ writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
-+
-+ /* Write command header in the end */
-+ writeq(cmd_data[0], ioaddr);
-
- /* Wait for response before returning to user-space
- * This can be optimized in future to even prepare response