From 1a28100e68f4863ebc68625d5c6123ef0e5de8db Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Tue, 6 Feb 2018 12:39:05 +0800 Subject: [PATCH] layerscape: update patches-4.9 to LSDK1712 Patches changes - Updated patches-4.9 to NXP LSDK1712 linux-4.9. - Merged changes of patch 303 into integrated patch 201. - Split changes of patch 706 into dpaa part and dpaa2 part, and merged these changes into integrated patches 701 and 705. - Removed patch 819 since ehci-fsl driver could be compiled now. - Refreshed these patches. Signed-off-by: Yangbo Lu --- .../201-config-support-layerscape.patch | 226 +- .../202-core-linux-support-layerscape.patch | 26 +- .../301-arch-support-layerscape.patch | 49 +- .../302-dts-support-layercape.patch | 365 +- ...elect-ARCH_DMA_ADDR_T_64BIT-for-LPAE.patch | 23 - .../401-mtd-spi-nor-support-layerscape.patch | 8 +- .../402-mtd-support-layerscape.patch | 8 +- .../701-sdk_dpaa-support-layerscape.patch | 248 +- .../702-pci-support-layerscape.patch | 68 +- .../703-phy-support-layerscape.patch | 37 +- .../704-fsl-mc-layerscape-support.patch | 154 +- .../705-dpaa2-support-layerscape.patch | 991 +++- ...706-fsl-dpaa-use-4-9-ndo-get-stats64.patch | 112 - .../706-fsl_ppfe-support-layercape.patch | 121 +- .../801-ata-support-layerscape.patch | 8 +- .../802-clk-support-layerscape.patch | 38 +- .../803-cpufreq-support-layerscape.patch | 173 +- .../804-crypto-support-layerscape.patch | 4004 +++++++++++++---- .../805-dma-support-layerscape.patch | 248 +- .../806-flextimer-support-layerscape.patch | 8 +- .../807-gpu-support-layerscape.patch | 12 +- .../808-guts-support-layerscape.patch | 41 +- .../809-i2c-support-layerscape.patch | 249 +- .../810-iommu-support-layerscape.patch | 12 +- .../811-irqchip-support-layerscape.patch | 8 +- .../812-mmc-layerscape-support.patch | 52 +- .../813-qe-support-layerscape.patch | 8 +- .../814-rtc-support-layerscape.patch | 8 +- .../815-spi-support-layerscape.patch | 11 +- .../816-tty-serial-support-layerscape.patch | 8 +- .../817-usb-support-layerscape.patch | 467 +- .../818-vfio-support-layerscape.patch | 8 +- ...ig-remove-dependency-FSL_SOC-for-ehc.patch | 28 - 33 files changed, 5661 insertions(+), 2166 deletions(-) delete mode 100644 target/linux/layerscape/patches-4.9/303-arm-imx-select-ARCH_DMA_ADDR_T_64BIT-for-LPAE.patch delete mode 100644 target/linux/layerscape/patches-4.9/706-fsl-dpaa-use-4-9-ndo-get-stats64.patch delete mode 100644 target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch diff --git a/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch index ccfce1df747..218421265a2 100644 --- a/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch @@ -1,12 +1,12 @@ -From 7992b4384d94c5e1bad998ca3a9a5781caac8e62 Mon Sep 17 00:00:00 2001 +From e43dec70614b55ba1ce24dfcdf8f51e36d800af2 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 09:52:26 +0800 -Subject: [PATCH] config: support layerscape +Date: Wed, 17 Jan 2018 15:26:46 +0800 +Subject: [PATCH 01/30] config: support layerscape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -This is a integrated patch for layerscape config/makefile support. +This is an integrated patch for layerscape config/makefile support. 
Signed-off-by: Yuantian Tang Signed-off-by: Zhang Ying-22455 @@ -16,33 +16,42 @@ Signed-off-by: Zhao Qiang Signed-off-by: Horia Geantă Signed-off-by: Yangbo Lu --- - drivers/base/Kconfig | 1 + - drivers/crypto/Makefile | 2 +- - drivers/net/ethernet/freescale/Kconfig | 4 +- - drivers/net/ethernet/freescale/Makefile | 2 + - drivers/ptp/Kconfig | 29 ++++++ - drivers/rtc/Kconfig | 8 ++ - drivers/rtc/Makefile | 1 + - drivers/soc/Kconfig | 3 +- - drivers/soc/fsl/Kconfig | 22 +++++ - drivers/soc/fsl/Kconfig.arm | 16 ++++ - drivers/soc/fsl/Makefile | 4 + - drivers/soc/fsl/layerscape/Kconfig | 10 +++ - drivers/soc/fsl/layerscape/Makefile | 1 + - drivers/soc/fsl/rcpm.c | 154 ++++++++++++++++++++++++++++++++ - drivers/staging/Kconfig | 6 ++ - drivers/staging/Makefile | 3 + - drivers/staging/fsl-dpaa2/Kconfig | 41 +++++++++ - drivers/staging/fsl-dpaa2/Makefile | 9 ++ - 18 files changed, 312 insertions(+), 4 deletions(-) + arch/arm/mach-imx/Kconfig | 1 + + drivers/base/Kconfig | 1 + + drivers/crypto/Makefile | 2 +- + drivers/net/ethernet/freescale/Kconfig | 4 ++- + drivers/net/ethernet/freescale/Makefile | 2 ++ + drivers/ptp/Kconfig | 29 +++++++++++++++++++ + drivers/rtc/Kconfig | 8 ++++++ + drivers/rtc/Makefile | 1 + + drivers/soc/Kconfig | 3 +- + drivers/soc/fsl/Kconfig | 22 ++++++++++++++ + drivers/soc/fsl/Kconfig.arm | 16 +++++++++++ + drivers/soc/fsl/Makefile | 4 +++ + drivers/soc/fsl/layerscape/Kconfig | 10 +++++++ + drivers/soc/fsl/layerscape/Makefile | 1 + + drivers/staging/Kconfig | 6 ++++ + drivers/staging/Makefile | 3 ++ + drivers/staging/fsl-dpaa2/Kconfig | 51 +++++++++++++++++++++++++++++++++ + drivers/staging/fsl-dpaa2/Makefile | 9 ++++++ + 18 files changed, 169 insertions(+), 4 deletions(-) create mode 100644 drivers/soc/fsl/Kconfig create mode 100644 drivers/soc/fsl/Kconfig.arm create mode 100644 drivers/soc/fsl/layerscape/Kconfig create mode 100644 drivers/soc/fsl/layerscape/Makefile - create mode 100644 drivers/soc/fsl/rcpm.c create mode 100644 drivers/staging/fsl-dpaa2/Kconfig create mode 100644 drivers/staging/fsl-dpaa2/Makefile +--- a/arch/arm/mach-imx/Kconfig ++++ b/arch/arm/mach-imx/Kconfig +@@ -1,6 +1,7 @@ + menuconfig ARCH_MXC + bool "Freescale i.MX family" + depends on ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 || ARM_SINGLE_ARMV7M ++ select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE + select ARCH_SUPPORTS_BIG_ENDIAN + select CLKSRC_IMX_GPT + select GENERIC_IRQ_CHIP --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -240,6 +240,7 @@ config GENERIC_CPU_VULNERABILITIES @@ -239,163 +248,6 @@ Signed-off-by: Yangbo Lu +++ b/drivers/soc/fsl/layerscape/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o ---- /dev/null -+++ b/drivers/soc/fsl/rcpm.c -@@ -0,0 +1,154 @@ -+/* -+ * Run Control and Power Management (RCPM) driver -+ * -+ * Copyright 2016 NXP -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ */ -+#define pr_fmt(fmt) "RCPM: %s: " fmt, __func__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/* RCPM register offset */ -+#define RCPM_IPPDEXPCR0 0x140 -+ -+#define RCPM_WAKEUP_CELL_SIZE 2 -+ -+struct rcpm_config { -+ int ipp_num; -+ int ippdexpcr_offset; -+ u32 ippdexpcr[2]; -+ void *rcpm_reg_base; -+}; -+ -+static struct rcpm_config *rcpm; -+ -+static inline void rcpm_reg_write(u32 offset, u32 value) -+{ -+ iowrite32be(value, rcpm->rcpm_reg_base + offset); -+} -+ -+static inline u32 rcpm_reg_read(u32 offset) -+{ -+ return ioread32be(rcpm->rcpm_reg_base + offset); -+} -+ -+static void rcpm_wakeup_fixup(struct device *dev, void *data) -+{ -+ struct device_node *node = dev ? dev->of_node : NULL; -+ u32 value[RCPM_WAKEUP_CELL_SIZE]; -+ int ret, i; -+ -+ if (!dev || !node || !device_may_wakeup(dev)) -+ return; -+ -+ /* -+ * Get the values in the "rcpm-wakeup" property. -+ * Three values are: -+ * The first is a pointer to the RCPM node. -+ * The second is the value of the ippdexpcr0 register. -+ * The third is the value of the ippdexpcr1 register. -+ */ -+ ret = of_property_read_u32_array(node, "fsl,rcpm-wakeup", -+ value, RCPM_WAKEUP_CELL_SIZE); -+ if (ret) -+ return; -+ -+ pr_debug("wakeup source: the device %s\n", node->full_name); -+ -+ for (i = 0; i < rcpm->ipp_num; i++) -+ rcpm->ippdexpcr[i] |= value[i + 1]; -+} -+ -+static int rcpm_suspend_prepare(void) -+{ -+ int i; -+ -+ BUG_ON(!rcpm); -+ -+ for (i = 0; i < rcpm->ipp_num; i++) -+ rcpm->ippdexpcr[i] = 0; -+ -+ dpm_for_each_dev(NULL, rcpm_wakeup_fixup); -+ -+ for (i = 0; i < rcpm->ipp_num; i++) { -+ rcpm_reg_write(rcpm->ippdexpcr_offset + 4 * i, -+ rcpm->ippdexpcr[i]); -+ pr_debug("ippdexpcr%d = 0x%x\n", i, rcpm->ippdexpcr[i]); -+ } -+ -+ return 0; -+} -+ -+static int rcpm_suspend_notifier_call(struct notifier_block *bl, -+ unsigned long state, -+ void *unused) -+{ -+ switch (state) { -+ case PM_SUSPEND_PREPARE: -+ rcpm_suspend_prepare(); -+ break; -+ } -+ -+ return NOTIFY_DONE; -+} -+ -+static struct rcpm_config rcpm_default_config = { -+ .ipp_num = 1, -+ .ippdexpcr_offset = RCPM_IPPDEXPCR0, -+}; -+ -+static const struct of_device_id rcpm_matches[] = { -+ { -+ .compatible = "fsl,qoriq-rcpm-2.1", -+ .data = &rcpm_default_config, -+ }, -+ {} -+}; -+ -+static struct notifier_block rcpm_suspend_notifier = { -+ .notifier_call = rcpm_suspend_notifier_call, -+}; -+ -+static int __init layerscape_rcpm_init(void) -+{ -+ const struct of_device_id *match; -+ struct device_node *np; -+ -+ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match); -+ if (!np) { -+ pr_err("Can't find the RCPM node.\n"); -+ return -EINVAL; -+ } -+ -+ if (match->data) -+ rcpm = (struct rcpm_config *)match->data; -+ else -+ return -EINVAL; -+ -+ rcpm->rcpm_reg_base = of_iomap(np, 0); -+ of_node_put(np); -+ if (!rcpm->rcpm_reg_base) -+ return -ENOMEM; -+ -+ register_pm_notifier(&rcpm_suspend_notifier); -+ -+ pr_info("The RCPM driver initialized.\n"); -+ -+ return 0; -+} -+ -+subsys_initcall(layerscape_rcpm_init); --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -94,6 +94,8 @@ source "drivers/staging/fbtft/Kconfig" @@ -433,7 +285,7 @@ Signed-off-by: Yangbo Lu +obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -0,0 +1,41 @@ +@@ -0,0 +1,51 @@ +# +# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers +# @@ -470,6 +322,16 @@ Signed-off-by: Yangbo Lu + default n + ---help--- + Enable advanced statistics through debugfs interface. 
++ ++config FSL_DPAA2_ETH_DCB ++ bool "Data Center Bridging (DCB) Support" ++ default n ++ depends on DCB ++ ---help--- ++ Say Y here if you want to use Data Center Bridging (DCB) features ++ (PFC) in the driver. ++ ++ If unsure, say N. +endif + +source "drivers/staging/fsl-dpaa2/mac/Kconfig" diff --git a/target/linux/layerscape/patches-4.9/202-core-linux-support-layerscape.patch b/target/linux/layerscape/patches-4.9/202-core-linux-support-layerscape.patch index 51408253c02..158ce0cfc9c 100644 --- a/target/linux/layerscape/patches-4.9/202-core-linux-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/202-core-linux-support-layerscape.patch @@ -1,9 +1,9 @@ -From c37953457a7ebeb0d97ae8574b3d41274fcd9119 Mon Sep 17 00:00:00 2001 +From 67a2eceebe9dcd92a1a5f3e912340c8975c84434 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 1 Nov 2017 16:22:33 +0800 -Subject: [PATCH] core-linux: support layerscape +Date: Wed, 17 Jan 2018 14:50:41 +0800 +Subject: [PATCH 02/30] core-linux: support layerscape -This is a integrated patch for layerscape core-linux support. +This is an integrated patch for layerscape core-linux support. Signed-off-by: Madalin Bucur Signed-off-by: Zhao Qiang @@ -18,7 +18,7 @@ Signed-off-by: Arnd Bergmann Signed-off-by: Yangbo Lu --- drivers/base/devres.c | 66 ++++++++++++++++++++++++++++ - drivers/base/soc.c | 66 ++++++++++++++++++++++++++++ + drivers/base/soc.c | 70 +++++++++++++++++++++++++++++ include/linux/device.h | 19 ++++++++ include/linux/fsl/svr.h | 97 +++++++++++++++++++++++++++++++++++++++++ include/linux/fsl_devices.h | 3 ++ @@ -30,7 +30,7 @@ Signed-off-by: Yangbo Lu net/core/dev.c | 13 +++++- net/core/skbuff.c | 29 +++++++++++- net/sched/sch_generic.c | 7 +++ - 13 files changed, 309 insertions(+), 3 deletions(-) + 13 files changed, 313 insertions(+), 3 deletions(-) create mode 100644 include/linux/fsl/svr.h --- a/drivers/base/devres.c @@ -122,7 +122,7 @@ Signed-off-by: Yangbo Lu static DEFINE_IDA(soc_ida); -@@ -159,3 +160,68 @@ static int __init soc_bus_register(void) +@@ -159,3 +160,72 @@ static int __init soc_bus_register(void) return bus_register(&soc_bus_type); } core_initcall(soc_bus_register); @@ -133,19 +133,23 @@ Signed-off-by: Yangbo Lu + const struct soc_device_attribute *match = arg; + + if (match->machine && -+ !glob_match(match->machine, soc_dev->attr->machine)) ++ (!soc_dev->attr->machine || ++ !glob_match(match->machine, soc_dev->attr->machine))) + return 0; + + if (match->family && -+ !glob_match(match->family, soc_dev->attr->family)) ++ (!soc_dev->attr->family || ++ !glob_match(match->family, soc_dev->attr->family))) + return 0; + + if (match->revision && -+ !glob_match(match->revision, soc_dev->attr->revision)) ++ (!soc_dev->attr->revision || ++ !glob_match(match->revision, soc_dev->attr->revision))) + return 0; + + if (match->soc_id && -+ !glob_match(match->soc_id, soc_dev->attr->soc_id)) ++ (!soc_dev->attr->soc_id || ++ !glob_match(match->soc_id, soc_dev->attr->soc_id))) + return 0; + + return 1; diff --git a/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch index 135333e472c..0276ebe3399 100644 --- a/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch @@ -1,12 +1,12 @@ -From 739029f49bd9181b821298f9d27b29ce2d292967 Mon Sep 17 00:00:00 2001 +From 45e934873f9147f692dddbb61abc088f4c8059d7 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 
10:03:52 +0800 -Subject: [PATCH] arch: support layerscape +Date: Wed, 17 Jan 2018 14:51:29 +0800 +Subject: [PATCH 03/30] arch: support layerscape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -This is a integrated patch for layerscape arch support. +This is an integrated patch for layerscape arch support. Signed-off-by: Madalin Bucur Signed-off-by: Nipun Gupta @@ -29,13 +29,13 @@ Signed-off-by: Yangbo Lu arch/arm/mm/ioremap.c | 7 ++++ arch/arm/mm/mmu.c | 9 +++++ arch/arm64/include/asm/cache.h | 2 +- - arch/arm64/include/asm/io.h | 2 ++ + arch/arm64/include/asm/io.h | 30 +++++++++++++++++ arch/arm64/include/asm/pci.h | 4 +++ arch/arm64/include/asm/pgtable-prot.h | 1 + arch/arm64/include/asm/pgtable.h | 5 +++ arch/arm64/kernel/pci.c | 62 +++++++++++++++++++++++++++++++++++ - arch/arm64/mm/dma-mapping.c | 23 ++++++++++--- - 15 files changed, 209 insertions(+), 8 deletions(-) + arch/arm64/mm/dma-mapping.c | 6 ++++ + 15 files changed, 225 insertions(+), 3 deletions(-) --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -284,6 +284,41 @@ Signed-off-by: Yangbo Lu + __pgprot(PROT_NORMAL_NS)) #define iounmap __iounmap + /* +@@ -184,6 +186,34 @@ extern void __iomem *ioremap_cache(phys_ + #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) + #define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); }) + ++/* access ports */ ++#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) ++#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) ++ ++#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr)) ++#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr)) ++ ++#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr)) ++#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr)) ++ ++/* Clear and set bits in one shot. These macros can be used to clear and ++ * set multiple bits in a register using a single read-modify-write. These ++ * macros can also be used to set a multiple-bit bit pattern using a mask, ++ * by specifying the mask in the 'clear' parameter and the new bit pattern ++ * in the 'set' parameter. 
++ */ ++ ++#define clrsetbits_be32(addr, clear, set) \ ++ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_le32(addr, clear, set) \ ++ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_be16(addr, clear, set) \ ++ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_le16(addr, clear, set) \ ++ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr)) ++#define clrsetbits_8(addr, clear, set) \ ++ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr)) ++ + #include + /* --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h diff --git a/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch index 7dae7d6acaf..361f43c6b4f 100644 --- a/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch +++ b/target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch @@ -1,9 +1,9 @@ -From bfa4a794f91162cfeccfa4d59121cde9a84e32a3 Mon Sep 17 00:00:00 2001 +From 1806d342beb334c8cb0a438315ad5529262b2791 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 10:02:10 +0800 -Subject: [PATCH] dts: support layercape +Date: Wed, 17 Jan 2018 14:52:50 +0800 +Subject: [PATCH 04/30] dts: support layercape -This is a integrated patch for layerscape dts support. +This is an integrated patch for layerscape dts support. Signed-off-by: Amrita Kumari Signed-off-by: Alison Wang @@ -32,9 +32,9 @@ Signed-off-by: Yangbo Lu arch/arm/boot/dts/ecx-2000.dts | 2 +- arch/arm/boot/dts/imx6ul.dtsi | 4 +- arch/arm/boot/dts/keystone.dtsi | 4 +- - arch/arm/boot/dts/ls1021a-qds.dts | 13 + - arch/arm/boot/dts/ls1021a-twr.dts | 13 + - arch/arm/boot/dts/ls1021a.dtsi | 155 ++-- + arch/arm/boot/dts/ls1021a-qds.dts | 21 + + arch/arm/boot/dts/ls1021a-twr.dts | 25 + + arch/arm/boot/dts/ls1021a.dtsi | 197 +++-- arch/arm/boot/dts/mt6580.dtsi | 2 +- arch/arm/boot/dts/mt6589.dtsi | 2 +- arch/arm/boot/dts/mt8127.dtsi | 2 +- @@ -44,28 +44,29 @@ Signed-off-by: Yangbo Lu arch/arm/boot/dts/sun7i-a20.dtsi | 4 +- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +- arch/arm/boot/dts/sun9i-a80.dtsi | 2 +- - arch/arm64/boot/dts/freescale/Makefile | 16 + + arch/arm64/boot/dts/freescale/Makefile | 17 + + .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 123 +++ arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts | 177 ++++ - arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 198 +++++ - arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 134 +++ - arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 594 ++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 202 +++++ + arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 138 ++++ + arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 602 ++++++++++++++ arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 45 + .../boot/dts/freescale/fsl-ls1043a-qds-sdk.dts | 69 ++ arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts | 171 +++- .../boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts | 69 ++ .../boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++ arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 113 ++- - arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 302 ++++++- + arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 308 ++++++- arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi | 48 ++ - .../boot/dts/freescale/fsl-ls1046a-qds-sdk.dts | 109 +++ + .../boot/dts/freescale/fsl-ls1046a-qds-sdk.dts | 110 +++ arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts | 363 ++++++++ - .../boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts | 76 
++ + .../boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts | 83 ++ .../boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 +++ arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 218 +++++ - arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 793 ++++++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 800 ++++++++++++++++++ arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts | 173 ++++ arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 236 ++++++ - arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 818 ++++++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 825 ++++++++++++++++++ arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts | 191 ++--- arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts | 169 ++-- arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts | 9 +- @@ -76,9 +77,9 @@ Signed-off-by: Yangbo Lu arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 195 +++++ arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi | 198 +++++ arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 161 ++++ - arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 912 +++++++++++++++++++++ + arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 919 +++++++++++++++++++++ .../boot/dts/freescale/qoriq-bman1-portals.dtsi | 81 ++ - arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi | 66 ++ + arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi | 73 ++ .../boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi | 43 + .../boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi | 43 + .../boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi | 42 + @@ -93,7 +94,8 @@ Signed-off-by: Yangbo Lu arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi | 10 + arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +- arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +- - 66 files changed, 7988 insertions(+), 1021 deletions(-) + 67 files changed, 8231 insertions(+), 1022 deletions(-) + create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts @@ -221,6 +223,18 @@ Signed-off-by: Yangbo Lu &enet0 { tbi-handle = <&tbi0>; phy-handle = <&sgmii_phy1c>; +@@ -331,3 +344,11 @@ + &uart1 { + status = "okay"; + }; ++ ++&can0 { ++ status = "okay"; ++}; ++ ++&can1 { ++ status = "okay"; ++}; --- a/arch/arm/boot/dts/ls1021a-twr.dts +++ b/arch/arm/boot/dts/ls1021a-twr.dts @@ -142,6 +142,19 @@ @@ -243,6 +257,29 @@ Signed-off-by: Yangbo Lu &enet0 { tbi-handle = <&tbi1>; phy-handle = <&sgmii_phy2>; +@@ -228,6 +241,10 @@ + }; + }; + ++&esdhc { ++ status = "okay"; ++}; ++ + &sai1 { + status = "okay"; + }; +@@ -243,3 +260,11 @@ + &uart1 { + status = "okay"; + }; ++ ++&can0 { ++ status = "okay"; ++}; ++ ++&can1 { ++ status = "okay"; ++}; --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -74,17 +74,24 @@ @@ -303,7 +340,7 @@ Signed-off-by: Yangbo Lu reg = <0x0 0x1570e08 0x0 0x8>; msi-controller; interrupts = ; -@@ -137,11 +144,12 @@ +@@ -137,16 +144,17 @@ compatible = "fsl,ifc", "simple-bus"; reg = <0x0 0x1530000 0x0 0x10000>; interrupts = ; @@ -317,6 +354,12 @@ Signed-off-by: Yangbo Lu big-endian; }; + esdhc: esdhc@1560000 { +- compatible = "fsl,esdhc"; ++ compatible = "fsl,ls1021a-esdhc","fsl,esdhc"; + reg = <0x0 0x1560000 0x0 0x10000>; + interrupts = ; + clock-frequency = <0>; @@ -163,7 +171,7 @@ <0x0 0x20220520 0x0 0x4>; reg-names = "ahci", "sata-ecc"; @@ -536,9 +579,9 @@ Signed-off-by: Yangbo Lu + + qdma: qdma@8390000 { + compatible = 
"fsl,ls1021a-qdma"; -+ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */ -+ <0x0 0x8389000 0x0 0x1000>, /* Status regs */ -+ <0x0 0x838a000 0x0 0x2000>; /* Block regs */ ++ reg = <0x0 0x8398000 0x0 0x1000>, /* Controller regs */ ++ <0x0 0x8399000 0x0 0x1000>, /* Status regs */ ++ <0x0 0x839a000 0x0 0x2000>; /* Block regs */ + interrupts = , + ; + interrupt-names = "qdma-error", "qdma-queue"; @@ -609,6 +652,52 @@ Signed-off-by: Yangbo Lu #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, +@@ -674,5 +697,45 @@ + <0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; + }; ++ ++ can0: can@2a70000 { ++ compatible = "fsl,ls1021ar2-flexcan"; ++ reg = <0x0 0x2a70000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ clock-names = "ipg", "per"; ++ big-endian; ++ status = "disabled"; ++ }; ++ ++ can1: can@2a80000 { ++ compatible = "fsl,ls1021ar2-flexcan"; ++ reg = <0x0 0x2a80000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ clock-names = "ipg", "per"; ++ big-endian; ++ status = "disabled"; ++ }; ++ ++ can2: can@2a90000 { ++ compatible = "fsl,ls1021ar2-flexcan"; ++ reg = <0x0 0x2a90000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ clock-names = "ipg", "per"; ++ big-endian; ++ status = "disabled"; ++ }; ++ ++ can3: can@2aa0000 { ++ compatible = "fsl,ls1021ar2-flexcan"; ++ reg = <0x0 0x2aa0000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&clockgen 4 1>, <&clockgen 4 1>; ++ clock-names = "ipg", "per"; ++ big-endian; ++ status = "disabled"; ++ }; + }; + }; --- a/arch/arm/boot/dts/mt6580.dtsi +++ b/arch/arm/boot/dts/mt6580.dtsi @@ -91,7 +91,7 @@ @@ -713,10 +802,11 @@ Signed-off-by: Yangbo Lu interrupt-controller; --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile -@@ -1,8 +1,24 @@ +@@ -1,8 +1,25 @@ +dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb +dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb +dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-2g5rdb.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb +dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds-sdk.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb @@ -739,6 +829,132 @@ Signed-off-by: Yangbo Lu always := $(dtb-y) subdir-y := $(dts-dirs) --- /dev/null ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts +@@ -0,0 +1,123 @@ ++/* ++ * Device Tree file for NXP LS1012A 2G5RDB Board. ++ * ++ * Copyright 2017 NXP ++ * ++ * Bhaskar Upadhaya ++ * ++ * This file is dual-licensed: you can use it either under the terms ++ * of the GPLv2 or the X11 license, at your option. Note that this dual ++ * licensing only applies to this file, and not this project as a ++ * whole. ++ * ++ * a) This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * Or, alternatively, ++ * ++ * b) Permission is hereby granted, free of charge, to any person ++ * obtaining a copy of this software and associated documentation ++ * files (the "Software"), to deal in the Software without ++ * restriction, including without limitation the rights to use, ++ * copy, modify, merge, publish, distribute, sublicense, and/or ++ * sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following ++ * conditions: ++ * ++ * The above copyright notice and this permission notice shall be ++ * included in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ++ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++/dts-v1/; ++ ++#include "fsl-ls1012a.dtsi" ++ ++/ { ++ model = "LS1012A 2G5RDB Board"; ++ compatible = "fsl,ls1012a-rdb", "fsl,ls1012a"; ++ ++ aliases { ++ ethernet0 = &pfe_mac0; ++ ethernet1 = &pfe_mac1; ++ }; ++}; ++ ++&duart0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++}; ++ ++&qspi { ++ num-cs = <2>; ++ bus-num = <0>; ++ status = "okay"; ++ ++ qflash0: s25fs512s@0 { ++ compatible = "spansion,m25p80"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi-max-frequency = <20000000>; ++ m25p,fast-read; ++ reg = <0>; ++ }; ++}; ++ ++&sata { ++ status = "okay"; ++}; ++ ++&pfe { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethernet@0 { ++ compatible = "fsl,pfe-gemac-port"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0>; /* GEM_ID */ ++ fsl,gemac-bus-id = <0x0>; /* BUS_ID */ ++ fsl,gemac-phy-id = <0x1>; /* PHY_ID */ ++ fsl,mdio-mux-val = <0x0>; ++ phy-mode = "sgmii-2500"; ++ fsl,pfe-phy-if-flags = <0x0>; ++ ++ mdio@0 { ++ reg = <0x1>; /* enabled/disabled */ ++ }; ++ }; ++ ++ ethernet@1 { ++ compatible = "fsl,pfe-gemac-port"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x1>; /* GEM_ID */ ++ fsl,gemac-bus-id = < 0x0>; /* BUS_ID */ ++ fsl,gemac-phy-id = < 0x2>; /* PHY_ID */ ++ fsl,mdio-mux-val = <0x0>; ++ phy-mode = "sgmii-2500"; ++ fsl,pfe-phy-if-flags = <0x0>; ++ ++ mdio@0 { ++ reg = <0x0>; /* enabled/disabled */ ++ }; ++ }; ++}; +--- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts @@ -0,0 +1,177 @@ +/* @@ -920,7 +1136,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts -@@ -0,0 +1,198 @@ +@@ -0,0 +1,202 @@ +/* + * Device Tree file for Freescale LS1012A QDS Board. + * @@ -1021,6 +1237,10 @@ Signed-off-by: Yangbo Lu + }; +}; + ++&pcie { ++ status = "okay"; ++}; ++ +&duart0 { + status = "okay"; +}; @@ -1121,7 +1341,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts -@@ -0,0 +1,134 @@ +@@ -0,0 +1,138 @@ +/* + * Device Tree file for Freescale LS1012A RDB Board. 
+ * @@ -1179,6 +1399,10 @@ Signed-off-by: Yangbo Lu + }; +}; + ++&pcie { ++ status = "okay"; ++}; ++ +&duart0 { + status = "okay"; +}; @@ -1258,7 +1482,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi -@@ -0,0 +1,594 @@ +@@ -0,0 +1,602 @@ +/* + * Device Tree Include file for Freescale Layerscape-1012A family SoC. + * @@ -1641,7 +1865,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2180000 0x0 0x10000>; + interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 0>; ++ clocks = <&clockgen 4 3>; + status = "disabled"; + }; + @@ -1651,7 +1875,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2190000 0x0 0x10000>; + interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 0>; ++ clocks = <&clockgen 4 3>; + status = "disabled"; + }; + @@ -1794,7 +2018,7 @@ Signed-off-by: Yangbo Lu + interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>; + }; + -+ pcie@3400000 { ++ pcie: pcie@3400000 { + compatible = "fsl,ls1012a-pcie", "snps,dw-pcie"; + reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ + 0x40 0x00000000 0x0 0x00002000>; /* configuration space */ @@ -1816,6 +2040,7 @@ Signed-off-by: Yangbo Lu + <0000 0 0 2 &gic 0 111 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 3 &gic 0 112 IRQ_TYPE_LEVEL_HIGH>, + <0000 0 0 4 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>; ++ status = "disabled"; + }; + }; + @@ -1852,6 +2077,13 @@ Signed-off-by: Yangbo Lu + pfe_mac1: ethernet@1 { + }; + }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi @@ -3006,9 +3238,16 @@ Signed-off-by: Yangbo Lu #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0000 0 0 1 &gic 0 154 0x4>, -@@ -608,3 +869,6 @@ +@@ -607,4 +868,13 @@ + }; }; ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; }; + +#include "qoriq-qman1-portals.dtsi" @@ -3066,7 +3305,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts -@@ -0,0 +1,109 @@ +@@ -0,0 +1,110 @@ +/* + * Device Tree Include file for Freescale Layerscape-1046A family SoC. + * @@ -3137,6 +3376,7 @@ Signed-off-by: Yangbo Lu + ethernet@9 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet7>; ++ dma-coherent; + }; +}; + @@ -3544,7 +3784,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts -@@ -0,0 +1,76 @@ +@@ -0,0 +1,83 @@ +/* + * Device Tree Include file for Freescale Layerscape-1046A family SoC. + * @@ -3612,9 +3852,16 @@ Signed-off-by: Yangbo Lu +}; + +&fsldpaa { ++ ethernet@0 { ++ status = "disabled"; ++ }; ++ ethernet@1 { ++ status = "disabled"; ++ }; + ethernet@9 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet7>; ++ dma-coherent; + }; +}; + @@ -3957,7 +4204,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi -@@ -0,0 +1,793 @@ +@@ -0,0 +1,800 @@ +/* + * Device Tree Include file for Freescale Layerscape-1046A family SoC. + * @@ -4747,6 +4994,13 @@ Signed-off-by: Yangbo Lu + no-map; + }; + }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; +}; + +#include "qoriq-qman1-portals.dtsi" @@ -5168,7 +5422,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi -@@ -0,0 +1,818 @@ +@@ -0,0 +1,825 @@ +/* + * Device Tree Include file for NXP Layerscape-1088A family SoC. 
+ * @@ -5694,7 +5948,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2000000 0x0 0x10000>; + interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 7>; + status = "disabled"; + }; + @@ -5704,7 +5958,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2010000 0x0 0x10000>; + interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 7>; + status = "disabled"; + }; + @@ -5714,7 +5968,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2020000 0x0 0x10000>; + interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 7>; + status = "disabled"; + }; + @@ -5724,7 +5978,7 @@ Signed-off-by: Yangbo Lu + #size-cells = <0>; + reg = <0x0 0x2030000 0x0 0x10000>; + interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 7>; + status = "disabled"; + }; + @@ -5986,6 +6240,13 @@ Signed-off-by: Yangbo Lu + }; + }; + ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; ++ +}; --- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts @@ -8332,7 +8593,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi -@@ -0,0 +1,912 @@ +@@ -0,0 +1,919 @@ +/* + * Device Tree Include file for Freescale Layerscape-2080A family SoC. + * @@ -9024,7 +9285,7 @@ Signed-off-by: Yangbo Lu + reg = <0x0 0x2000000 0x0 0x10000>; + interrupts = <0 34 0x4>; /* Level high type */ + clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 1>; + }; + + i2c1: i2c@2010000 { @@ -9035,7 +9296,7 @@ Signed-off-by: Yangbo Lu + reg = <0x0 0x2010000 0x0 0x10000>; + interrupts = <0 34 0x4>; /* Level high type */ + clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 1>; + }; + + i2c2: i2c@2020000 { @@ -9046,7 +9307,7 @@ Signed-off-by: Yangbo Lu + reg = <0x0 0x2020000 0x0 0x10000>; + interrupts = <0 35 0x4>; /* Level high type */ + clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 1>; + }; + + i2c3: i2c@2030000 { @@ -9057,7 +9318,7 @@ Signed-off-by: Yangbo Lu + reg = <0x0 0x2030000 0x0 0x10000>; + interrupts = <0 35 0x4>; /* Level high type */ + clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; ++ clocks = <&clockgen 4 1>; + }; + + ifc: ifc@2240000 { @@ -9244,6 +9505,13 @@ Signed-off-by: Yangbo Lu + interrupts = <0 18 0x4>; + little-endian; + }; ++ ++ firmware { ++ optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ }; +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi @@ -9331,7 +9599,7 @@ Signed-off-by: Yangbo Lu +}; --- /dev/null +++ b/arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi -@@ -0,0 +1,66 @@ +@@ -0,0 +1,73 @@ +/* + * QorIQ FMan v3 10g port #1 device tree stub [ controller @ offset 0x400000 ] + * @@ -9371,30 +9639,37 @@ Signed-off-by: Yangbo Lu + ethernet@0 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet0>; ++ dma-coherent; + }; + ethernet@1 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet1>; ++ dma-coherent; + }; + ethernet@2 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet2>; ++ dma-coherent; + }; + ethernet@3 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet3>; ++ dma-coherent; + }; + ethernet@4 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet4>; ++ dma-coherent; + }; + ethernet@5 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = 
<&enet5>; ++ dma-coherent; + }; + ethernet@8 { + compatible = "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet6>; ++ dma-coherent; + }; +}; + diff --git a/target/linux/layerscape/patches-4.9/303-arm-imx-select-ARCH_DMA_ADDR_T_64BIT-for-LPAE.patch b/target/linux/layerscape/patches-4.9/303-arm-imx-select-ARCH_DMA_ADDR_T_64BIT-for-LPAE.patch deleted file mode 100644 index 097c4320f2a..00000000000 --- a/target/linux/layerscape/patches-4.9/303-arm-imx-select-ARCH_DMA_ADDR_T_64BIT-for-LPAE.patch +++ /dev/null @@ -1,23 +0,0 @@ -From c079739fa1101dcf7a1e40a195e019065e327d15 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Fri, 20 Oct 2017 16:45:17 +0800 -Subject: [PATCH] arm: imx: select ARCH_DMA_ADDR_T_64BIT for LPAE - -Selected ARCH_DMA_ADDR_T_64BIT for LPAE since -hardware could support it. - -Signed-off-by: Yangbo Lu ---- - arch/arm/mach-imx/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/arm/mach-imx/Kconfig -+++ b/arch/arm/mach-imx/Kconfig -@@ -1,6 +1,7 @@ - menuconfig ARCH_MXC - bool "Freescale i.MX family" - depends on ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 || ARM_SINGLE_ARMV7M -+ select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE - select ARCH_SUPPORTS_BIG_ENDIAN - select CLKSRC_IMX_GPT - select GENERIC_IRQ_CHIP diff --git a/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch index e4e7ff47ac5..6ffc2e1097f 100644 --- a/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch @@ -1,9 +1,9 @@ -From a3757157751a8a5302ee5e11faf828dc5db02018 Mon Sep 17 00:00:00 2001 +From 825d57369b196b64387348922b47adc5b651622c Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 10:53:50 +0800 -Subject: [PATCH] mtd: spi-nor: support layerscape +Date: Wed, 17 Jan 2018 14:55:47 +0800 +Subject: [PATCH 05/30] mtd: spi-nor: support layerscape -This is a integrated patch for layerscape qspi support. +This is an integrated patch for layerscape qspi support. Signed-off-by: Suresh Gupta Signed-off-by: Yunhui Cui diff --git a/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch index c4f15111cbd..14f503f8e53 100644 --- a/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/402-mtd-support-layerscape.patch @@ -1,9 +1,9 @@ -From c0e4767d3b26f21e5043fe2d15a24a1958de766e Mon Sep 17 00:00:00 2001 +From d9d0181f74146507026c31cccd52dda27ec3d966 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 10:17:28 +0800 -Subject: [PATCH] mtd: support layerscape +Date: Wed, 17 Jan 2018 14:57:31 +0800 +Subject: [PATCH 06/30] mtd: support layerscape -This is a integrated patch for layerscape ifc-nor-nand support. +This is an integrated patch for layerscape ifc-nor-nand support. 
Signed-off-by: Alison Wang Signed-off-by: Prabhakar Kushwaha diff --git a/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch index 4ebcdc73a09..c318c579b0e 100644 --- a/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch @@ -1,29 +1,30 @@ -From 3cd36deb674720ab34eabb9783648ed743e52121 Mon Sep 17 00:00:00 2001 +From 2f887ade916e7e1de2f8a84d3902aaa30af4b163 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 11:58:03 +0800 -Subject: [PATCH] sdk_dpaa: support layerscape +Date: Wed, 17 Jan 2018 14:59:15 +0800 +Subject: [PATCH 07/30] sdk_dpaa: support layerscape -This is a integrated patch for layerscape dpaa1-sdk support. +This is an integrated patch for layerscape dpaa1-sdk support. Signed-off-by: Camelia Groza Signed-off-by: Zhao Qiang Signed-off-by: Zhang Ying-22455 Signed-off-by: Madalin Bucur +Signed-off-by: Mathew McBride Signed-off-by: Yangbo Lu --- - drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 173 + + drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 196 + drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 + .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++ .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 + .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 + .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 + - drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1213 ++++ + drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1224 ++++ drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 687 ++ .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 + .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 + - .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 1992 +++++ - .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 237 + - .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1820 +++++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2013 ++++++ + .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 238 + + .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1802 +++++ .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 225 + .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 + .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1168 +++ @@ -241,14 +242,14 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 + drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 + drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 + - drivers/staging/fsl_qbman/fsl_usdpaa.c | 1983 +++++ + drivers/staging/fsl_qbman/fsl_usdpaa.c | 2007 ++++++ drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 + drivers/staging/fsl_qbman/qbman_driver.c | 88 + drivers/staging/fsl_qbman/qman_config.c | 1224 ++++ drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++ drivers/staging/fsl_qbman/qman_driver.c | 977 +++ drivers/staging/fsl_qbman/qman_high.c | 5669 +++++++++++++++ - drivers/staging/fsl_qbman/qman_low.h | 1427 ++++ + drivers/staging/fsl_qbman/qman_low.h | 1442 ++++ drivers/staging/fsl_qbman/qman_private.h | 398 + drivers/staging/fsl_qbman/qman_test.c | 57 + drivers/staging/fsl_qbman/qman_test.h | 45 + @@ -268,7 +269,7 @@ Signed-off-by: Yangbo Lu .../linux/fmd/integrations/integration_ioctls.h | 56 + include/uapi/linux/fmd/ioctls.h | 96 + include/uapi/linux/fmd/net_ioctls.h | 430 ++ - 257 files changed, 153159 insertions(+) + 257 files changed, 153236 insertions(+) create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig create mode 100644 
drivers/net/ethernet/freescale/sdk_dpaa/Makefile create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c @@ -529,7 +530,7 @@ Signed-off-by: Yangbo Lu --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_dpaa/Kconfig -@@ -0,0 +1,173 @@ +@@ -0,0 +1,196 @@ +menuconfig FSL_SDK_DPAA_ETH + tristate "DPAA Ethernet" + depends on (FSL_SOC || ARM64 || ARM) && FSL_SDK_BMAN && FSL_SDK_QMAN && FSL_SDK_FMAN && !FSL_DPAA_ETH @@ -552,6 +553,29 @@ Signed-off-by: Yangbo Lu + help + Enable QoS offloading support through the CEETM hardware block. + ++config FSL_DPAA_CEETM_CCS_THRESHOLD_1G ++ hex "CEETM egress congestion threshold on 1G ports" ++ depends on FSL_DPAA_CEETM ++ range 0x1000 0x10000000 ++ default "0x000a0000" ++ help ++ The size in bytes of the CEETM egress Class Congestion State threshold on 1G ports. ++ The threshold needs to be configured keeping in mind the following factors: ++ - A threshold too large will buffer frames for a long time in the TX queues, ++ when a small shaping rate is configured. This will cause buffer pool depletion ++ or out of memory errors. This in turn will cause frame loss on RX; ++ - A threshold too small will cause unnecessary frame loss by entering ++ congestion too often. ++ ++config FSL_DPAA_CEETM_CCS_THRESHOLD_10G ++ hex "CEETM egress congestion threshold on 10G ports" ++ depends on FSL_DPAA_CEETM ++ range 0x1000 0x20000000 ++ default "0x00640000" ++ help ++ The size in bytes of the CEETM egress Class Congestion State threshold on 10G ports. ++ See FSL_DPAA_CEETM_CCS_THRESHOLD_1G for details. ++ +config FSL_DPAA_OFFLINE_PORTS + bool "Offline Ports support" + depends on FSL_SDK_DPAA_ETH @@ -1707,7 +1731,7 @@ Signed-off-by: Yangbo Lu +#endif /* DPAA_DEBUGFS_H_ */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c -@@ -0,0 +1,1213 @@ +@@ -0,0 +1,1224 @@ +/* Copyright 2008-2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -2485,6 +2509,17 @@ Signed-off-by: Yangbo Lu + /* Advertise NETIF_F_HW_ACCEL_MQ to avoid Tx timeout warnings */ + net_dev->features |= NETIF_F_HW_ACCEL_MQ; + ++#ifndef CONFIG_PPC ++ /* Due to the A010022 FMan errata, we can not use contig frames larger ++ * than 4K, nor S/G frames. We need to stop advertising S/G and GSO ++ * support. ++ */ ++ if (unlikely(dpaa_errata_a010022)) { ++ net_dev->hw_features &= ~NETIF_F_SG; ++ net_dev->features &= ~NETIF_F_GSO; ++ } ++#endif ++ + return dpa_netdev_init(net_dev, mac_addr, tx_timeout); +} + @@ -2573,7 +2608,7 @@ Signed-off-by: Yangbo Lu + + for (i = 0; i < count; i++) { + int err; -+ err = dpa_bp_alloc(&dpa_bp[i]); ++ err = dpa_bp_alloc(&dpa_bp[i], net_dev->dev.parent); + if (err < 0) { + dpa_bp_free(priv); + priv->dpa_bp = NULL; @@ -3792,7 +3827,7 @@ Signed-off-by: Yangbo Lu + + for (i = 0; i < count; i++) { + int err; -+ err = dpa_bp_alloc(&dpa_bp[i]); ++ err = dpa_bp_alloc(&dpa_bp[i], net_dev->dev.parent); + if (err < 0) { + dpa_bp_free(priv); + priv->dpa_bp = NULL; @@ -3873,7 +3908,7 @@ Signed-off-by: Yangbo Lu +#endif /* __DPAA_ETH_BASE_H */ --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c -@@ -0,0 +1,1992 @@ +@@ -0,0 +1,2013 @@ +/* Copyright 2008-2016 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without @@ -3945,24 +3980,27 @@ Signed-off-by: Yangbo Lu +static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ -+ struct net_device *net_dev; -+ struct ceetm_class *cls; ++ struct dpa_percpu_priv_s *dpa_percpu_priv; + struct ceetm_class_stats *cstats = NULL; + const struct dpa_priv_s *dpa_priv; -+ struct dpa_percpu_priv_s *dpa_percpu_priv; -+ struct sk_buff *skb; + struct qm_fd fd = msg->ern.fd; ++ struct net_device *net_dev; ++ struct ceetm_fq *ceetm_fq; ++ struct ceetm_class *cls; ++ struct sk_buff *skb; + -+ net_dev = ((struct ceetm_fq *)fq)->net_dev; ++ ceetm_fq = container_of(fq, struct ceetm_fq, fq); ++ net_dev = ceetm_fq->net_dev; + dpa_priv = netdev_priv(net_dev); + dpa_percpu_priv = raw_cpu_ptr(dpa_priv->percpu_priv); + + /* Increment DPA counters */ + dpa_percpu_priv->stats.tx_dropped++; + dpa_percpu_priv->stats.tx_fifo_errors++; ++ count_ern(dpa_percpu_priv, msg); + + /* Increment CEETM counters */ -+ cls = ((struct ceetm_fq *)fq)->ceetm_cls; ++ cls = ceetm_fq->ceetm_cls; + switch (cls->type) { + case CEETM_PRIO: + cstats = this_cpu_ptr(cls->prio.cstats); @@ -3975,11 +4013,15 @@ Signed-off-by: Yangbo Lu + if (cstats) + cstats->ern_drop_count++; + ++ /* Release the buffers that were supposed to be recycled. */ + if (fd.bpid != 0xff) { + dpa_fd_release(net_dev, &fd); + return; + } + ++ /* Release the frames that were supposed to return on the ++ * confirmation path. ++ */ + skb = _dpa_cleanup_tx_fd(dpa_priv, &fd); + dev_kfree_skb_any(skb); +} @@ -4001,16 +4043,16 @@ Signed-off-by: Yangbo Lu + break; + } + ++ ceetm_fq->congested = congested; ++ + if (congested) { + dpa_priv->cgr_data.congestion_start_jiffies = jiffies; -+ netif_tx_stop_all_queues(dpa_priv->net_dev); + dpa_priv->cgr_data.cgr_congested_count++; + if (cstats) + cstats->congested_count++; + } else { + dpa_priv->cgr_data.congested_jiffies += + (jiffies - dpa_priv->cgr_data.congestion_start_jiffies); -+ netif_tx_wake_all_queues(dpa_priv->net_dev); + } +} + @@ -4024,6 +4066,7 @@ Signed-off-by: Yangbo Lu + + (*fq)->net_dev = dev; + (*fq)->ceetm_cls = cls; ++ (*fq)->congested = 0; + return 0; +} + @@ -4061,9 +4104,9 @@ Signed-off-by: Yangbo Lu + + /* Set the congestion state thresholds according to the link speed */ + if (dpa_priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_10G; ++ cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_10G; + else -+ cs_th = CONFIG_FSL_DPAA_CS_THRESHOLD_1G; ++ cs_th = CONFIG_FSL_DPAA_CEETM_CCS_THRESHOLD_1G; + + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_in, cs_th, 1); + qm_cgr_cs_thres_set64(&ccg_params.cs_thres_out, @@ -5784,17 +5827,22 @@ Signed-off-by: Yangbo Lu + +int __hot ceetm_tx(struct sk_buff *skb, struct net_device *net_dev) +{ -+ int ret; -+ bool act_drop = false; ++ const int queue_mapping = dpa_get_queue_mapping(skb); + struct Qdisc *sch = net_dev->qdisc; -+ struct ceetm_class *cl; -+ struct dpa_priv_s *priv_dpa; -+ struct qman_fq *egress_fq, *conf_fq; -+ struct ceetm_qdisc *priv = qdisc_priv(sch); -+ struct ceetm_qdisc_stats *qstats = this_cpu_ptr(priv->root.qstats); + struct ceetm_class_stats *cstats; -+ const int queue_mapping = dpa_get_queue_mapping(skb); -+ spinlock_t *root_lock = qdisc_lock(sch); ++ struct ceetm_qdisc_stats *qstats; ++ struct dpa_priv_s *priv_dpa; ++ struct ceetm_fq *ceetm_fq; ++ struct ceetm_qdisc *priv; ++ struct qman_fq *conf_fq; ++ struct ceetm_class *cl; ++ spinlock_t *root_lock; ++ bool act_drop = 
false; ++ int ret; ++ ++ root_lock = qdisc_lock(sch); ++ priv = qdisc_priv(sch); ++ qstats = this_cpu_ptr(priv->root.qstats); + + spin_lock(root_lock); + cl = ceetm_classify(skb, sch, &ret, &act_drop); @@ -5821,11 +5869,11 @@ Signed-off-by: Yangbo Lu + */ + switch (cl->type) { + case CEETM_PRIO: -+ egress_fq = &cl->prio.fq->fq; ++ ceetm_fq = cl->prio.fq; + cstats = this_cpu_ptr(cl->prio.cstats); + break; + case CEETM_WBFS: -+ egress_fq = &cl->wbfs.fq->fq; ++ ceetm_fq = cl->wbfs.fq; + cstats = this_cpu_ptr(cl->wbfs.cstats); + break; + default: @@ -5833,8 +5881,16 @@ Signed-off-by: Yangbo Lu + goto drop; + } + ++ /* If the FQ is congested, avoid enqueuing the frame and dropping it ++ * when it returns on the ERN path. Drop it here directly instead. ++ */ ++ if (unlikely(ceetm_fq->congested)) { ++ qstats->drops++; ++ goto drop; ++ } ++ + bstats_update(&cstats->bstats, skb); -+ return dpa_tx_extended(skb, net_dev, egress_fq, conf_fq); ++ return dpa_tx_extended(skb, net_dev, &ceetm_fq->fq, conf_fq); + +drop: + dev_kfree_skb_any(skb); @@ -5868,7 +5924,7 @@ Signed-off-by: Yangbo Lu +module_exit(ceetm_unregister); --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h -@@ -0,0 +1,237 @@ +@@ -0,0 +1,238 @@ +/* Copyright 2008-2016 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -5976,6 +6032,7 @@ Signed-off-by: Yangbo Lu + struct qman_fq fq; + struct net_device *net_dev; + struct ceetm_class *ceetm_cls; ++ int congested; /* Congestion status */ +}; + +struct root_q { @@ -6108,7 +6165,7 @@ Signed-off-by: Yangbo Lu +#endif --- /dev/null +++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -@@ -0,0 +1,1820 @@ +@@ -0,0 +1,1802 @@ +/* Copyright 2008-2013 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -6350,8 +6407,8 @@ Signed-off-by: Yangbo Lu + * Calculates the statistics for the given device by adding the statistics + * collected by each CPU. + */ -+void __cold -+dpa_get_stats64(struct net_device *net_dev, ++struct rtnl_link_stats64 __cold ++*dpa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); @@ -6369,6 +6426,7 @@ Signed-off-by: Yangbo Lu + for (j = 0; j < numstats; j++) + netstats[j] += cpustats[j]; + } ++ return stats; +} +EXPORT_SYMBOL(dpa_get_stats64); + @@ -6580,14 +6638,18 @@ Signed-off-by: Yangbo Lu +#ifdef CONFIG_FSL_DPAA_1588 + struct dpa_priv_s *priv = netdev_priv(dev); +#endif -+ int ret = 0; ++ int ret = -EINVAL; + -+ /* at least one timestamping feature must be enabled */ -+#ifdef CONFIG_FSL_DPAA_TS + if (!netif_running(dev)) -+#endif + return -EINVAL; + ++ if (cmd == SIOCGMIIREG) { ++ if (!dev->phydev) ++ ret = -EINVAL; ++ else ++ ret = phy_mii_ioctl(dev->phydev, rq, cmd); ++ } ++ +#ifdef CONFIG_FSL_DPAA_TS + if (cmd == SIOCSHWTSTAMP) + return dpa_ts_ioctl(dev, rq, cmd); @@ -6822,11 +6884,10 @@ Signed-off-by: Yangbo Lu +EXPORT_SYMBOL(dpa_set_buffers_layout); + +int __attribute__((nonnull)) -+dpa_bp_alloc(struct dpa_bp *dpa_bp) ++dpa_bp_alloc(struct dpa_bp *dpa_bp, struct device *dev) +{ + int err; + struct bman_pool_params bp_params; -+ struct platform_device *pdev; + + if (dpa_bp->size == 0 || dpa_bp->config_count == 0) { + pr_err("Buffer pool is not properly initialized! 
Missing size or initial number of buffers"); @@ -6859,44 +6920,25 @@ Signed-off-by: Yangbo Lu + + dpa_bp->bpid = (uint8_t)bman_get_params(dpa_bp->pool)->bpid; + -+ pdev = platform_device_register_simple("dpaa_eth_bpool", -+ dpa_bp->bpid, NULL, 0); -+ if (IS_ERR(pdev)) { -+ pr_err("platform_device_register_simple() failed\n"); -+ err = PTR_ERR(pdev); -+ goto pdev_register_failed; -+ } -+ { -+ struct dma_map_ops *ops = get_dma_ops(&pdev->dev); -+ ops->dma_supported = NULL; -+ } -+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); ++ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (err) { + pr_err("dma_coerce_mask_and_coherent() failed\n"); -+ goto pdev_mask_failed; ++ goto bman_free_pool; + } -+#ifdef CONFIG_FMAN_ARM -+ /* force coherency */ -+ pdev->dev.archdata.dma_coherent = true; -+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, true); -+#endif + -+ dpa_bp->dev = &pdev->dev; ++ dpa_bp->dev = dev; + + if (dpa_bp->seed_cb) { + err = dpa_bp->seed_cb(dpa_bp); + if (err) -+ goto pool_seed_failed; ++ goto bman_free_pool; + } + + dpa_bpid2pool_map(dpa_bp->bpid, dpa_bp); + + return 0; + -+pool_seed_failed: -+pdev_mask_failed: -+ platform_device_unregister(pdev); -+pdev_register_failed: ++bman_free_pool: + bman_free_pool(dpa_bp->pool); + + return err; @@ -6958,9 +7000,6 @@ Signed-off-by: Yangbo Lu + + dpa_bp_array[bp->bpid] = NULL; + bman_free_pool(bp->pool); -+ -+ if (bp->dev) -+ platform_device_unregister(to_platform_device(bp->dev)); +} + +void __cold __attribute__((nonnull)) @@ -8074,8 +8113,8 @@ Signed-off-by: Yangbo Lu +int __cold dpa_start(struct net_device *net_dev); +int __cold dpa_stop(struct net_device *net_dev); +void __cold dpa_timeout(struct net_device *net_dev); -+void __cold -+dpa_get_stats64(struct net_device *net_dev, ++struct rtnl_link_stats64 __cold ++*dpa_get_stats64(struct net_device *net_dev, + struct rtnl_link_stats64 *stats); +int dpa_change_mtu(struct net_device *net_dev, int new_mtu); +int dpa_ndo_init(struct net_device *net_dev); @@ -8098,7 +8137,7 @@ Signed-off-by: Yangbo Lu +void dpa_set_buffers_layout(struct mac_device *mac_dev, + struct dpa_buffer_layout_s *layout); +int __attribute__((nonnull)) -+dpa_bp_alloc(struct dpa_bp *dpa_bp); ++dpa_bp_alloc(struct dpa_bp *dpa_bp, struct device *dev); +void __cold __attribute__((nonnull)) +dpa_bp_free(struct dpa_priv_s *priv); +struct dpa_bp *dpa_bpid2pool(int bpid); @@ -11972,7 +12011,7 @@ Signed-off-by: Yangbo Lu + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", + [PHY_INTERFACE_MODE_RTBI] = "rtbi", + [PHY_INTERFACE_MODE_XGMII] = "xgmii", -+ [PHY_INTERFACE_MODE_SGMII_2500] = "sgmii-2500", ++ [PHY_INTERFACE_MODE_2500SGMII] = "sgmii-2500", +}; + +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) @@ -11999,7 +12038,7 @@ Signed-off-by: Yangbo Lu + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000, -+ [PHY_INTERFACE_MODE_SGMII_2500] = SPEED_2500, ++ [PHY_INTERFACE_MODE_2500SGMII] = SPEED_2500, +}; + +static struct mac_device * __cold @@ -129503,7 +129542,7 @@ Signed-off-by: Yangbo Lu +#endif --- /dev/null +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c -@@ -0,0 +1,1983 @@ +@@ -0,0 +1,2007 @@ +/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc. 
+ * Authors: Andy Fleming + * Timur Tabi @@ -129877,6 +129916,16 @@ Signed-off-by: Yangbo Lu + +#define DQRR_MAXFILL 15 + ++ ++/* Invalidate a portal */ ++void dbci_portal(void *addr) ++{ ++ int i; ++ ++ for (i = 0; i < 0x4000; i += 64) ++ dcbi(addr + i); ++} ++ +/* Reset a QMan portal to its default state */ +static int init_qm_portal(struct qm_portal_config *config, + struct qm_portal *portal) @@ -129890,6 +129939,13 @@ Signed-off-by: Yangbo Lu + /* Make sure interrupts are inhibited */ + qm_out(IIR, 1); + ++ /* ++ * Invalidate the entire CE portal are to ensure no stale ++ * cachelines are present. This should be done on all ++ * cores as the portal is mapped as M=0 (non-coherent). ++ */ ++ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1); ++ + /* Initialize the DQRR. This will stop any dequeue + commands that are in progress */ + if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb, @@ -129941,6 +129997,13 @@ Signed-off-by: Yangbo Lu + portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; + portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; + ++ /* ++ * Invalidate the entire CE portal are to ensure no stale ++ * cachelines are present. This should be done on all ++ * cores as the portal is mapped as M=0 (non-coherent). ++ */ ++ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1); ++ + if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) { + pr_err("Bman RCR initialisation failed\n"); + return 1; @@ -141348,7 +141411,7 @@ Signed-off-by: Yangbo Lu +} --- /dev/null +++ b/drivers/staging/fsl_qbman/qman_low.h -@@ -0,0 +1,1427 @@ +@@ -0,0 +1,1442 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -142446,11 +142509,26 @@ Signed-off-by: Yangbo Lu + +static inline int qm_mc_init(struct qm_portal *portal) +{ ++ u8 rr0, rr1; + register struct qm_mc *mc = &portal->mc; ++ + mc->cr = portal->addr.addr_ce + QM_CL_CR; + mc->rr = portal->addr.addr_ce + QM_CL_RR0; -+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & -+ QM_MCC_VERB_VBIT) ? 0 : 1; ++ ++ /* ++ * The expected valid bit polarity for the next CR command is 0 ++ * if RR1 contains a valid response, and is 1 if RR0 contains a ++ * valid response. If both RR contain all 0, this indicates either ++ * that no command has been executed since reset (in which case the ++ * expected valid bit polarity is 1) ++ */ ++ rr0 = __raw_readb(&mc->rr->verb); ++ rr1 = __raw_readb(&(mc->rr+1)->verb); ++ if ((rr0 == 0 && rr1 == 0) || rr0 != 0) ++ mc->rridx = 1; ++ else ++ mc->rridx = 0; ++ + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = qman_mc_idle; diff --git a/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch index 65243518ae8..4d68e2371dc 100644 --- a/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch @@ -1,9 +1,9 @@ -From 9e6e0a53b29190dbd86a39304b59c3028f5b36c2 Mon Sep 17 00:00:00 2001 +From 5fcb42fbd224e1103bacbae4785745842cfd6304 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 11:04:10 +0800 -Subject: [PATCH] pci: support layerscape +Date: Wed, 17 Jan 2018 15:00:43 +0800 +Subject: [PATCH 08/30] pci: support layerscape -This is a integrated patch for layerscape pcie support. +This is an integrated patch for layerscape pcie support. 
Signed-off-by: Po Liu Signed-off-by: Liu Gang @@ -20,12 +20,14 @@ Signed-off-by: Yangbo Lu drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++++++++++ drivers/pci/host/pci-layerscape-ep.c | 309 +++++++++++ drivers/pci/host/pci-layerscape-ep.h | 115 ++++ - drivers/pci/host/pci-layerscape.c | 38 +- + drivers/pci/host/pci-layerscape.c | 48 +- drivers/pci/host/pcie-designware.c | 6 + drivers/pci/host/pcie-designware.h | 1 + + drivers/pci/pci.c | 2 +- drivers/pci/pcie/portdrv_core.c | 181 +++---- + drivers/pci/quirks.c | 8 + include/linux/pci.h | 1 + - 10 files changed, 1520 insertions(+), 148 deletions(-) + 12 files changed, 1539 insertions(+), 149 deletions(-) create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c create mode 100644 drivers/pci/host/pci-layerscape-ep.c create mode 100644 drivers/pci/host/pci-layerscape-ep.h @@ -1606,8 +1608,12 @@ Signed-off-by: Yangbo Lu +#endif /* _PCIE_LAYERSCAPE_EP_H */ --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c -@@ -35,12 +35,14 @@ +@@ -33,14 +33,18 @@ + + /* PEX Internal Configuration Registers */ #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ ++#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ ++#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ -/* PEX LUT registers */ @@ -1623,7 +1629,7 @@ Signed-off-by: Yangbo Lu struct pcie_host_ops *ops; }; -@@ -86,6 +88,14 @@ static void ls_pcie_drop_msg_tlp(struct +@@ -86,6 +90,14 @@ static void ls_pcie_drop_msg_tlp(struct iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1); } @@ -1638,7 +1644,7 @@ Signed-off-by: Yangbo Lu static int ls1021_pcie_link_up(struct pcie_port *pp) { u32 state; -@@ -134,7 +144,7 @@ static int ls_pcie_link_up(struct pcie_p +@@ -134,7 +146,7 @@ static int ls_pcie_link_up(struct pcie_p struct ls_pcie *pcie = to_ls_pcie(pp); u32 state; @@ -1647,17 +1653,31 @@ Signed-off-by: Yangbo Lu pcie->drvdata->ltssm_shift) & LTSSM_STATE_MASK; -@@ -153,6 +163,9 @@ static void ls_pcie_host_init(struct pci +@@ -144,6 +156,12 @@ static int ls_pcie_link_up(struct pcie_p + return 1; + } + ++/* Forward error response of outbound non-posted requests */ ++static void ls_pcie_fix_error_response(struct ls_pcie *pcie) ++{ ++ iowrite32(PCIE_ABSERR_SETTING, pcie->pp.dbi_base + PCIE_ABSERR); ++} ++ + static void ls_pcie_host_init(struct pcie_port *pp) + { + struct ls_pcie *pcie = to_ls_pcie(pp); +@@ -153,6 +171,10 @@ static void ls_pcie_host_init(struct pci ls_pcie_clear_multifunction(pcie); ls_pcie_drop_msg_tlp(pcie); iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN); + + ls_pcie_disable_outbound_atus(pcie); ++ ls_pcie_fix_error_response(pcie); + dw_pcie_setup_rc(pp); } static int ls_pcie_msi_host_init(struct pcie_port *pp, -@@ -196,20 +209,39 @@ static struct ls_pcie_drvdata ls1021_drv +@@ -196,20 +218,40 @@ static struct ls_pcie_drvdata ls1021_drv static struct ls_pcie_drvdata ls1043_drvdata = { .lut_offset = 0x10000, .ltssm_shift = 24, @@ -1694,6 +1714,7 @@ Signed-off-by: Yangbo Lu { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, + { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, ++ { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, { }, }; @@ -1721,6 +1742,17 @@ Signed-off-by: Yangbo Lu +void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); #endif /* _PCIE_DESIGNWARE_H */ +--- 
a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -454,7 +454,7 @@ struct resource *pci_find_parent_resourc + pci_bus_for_each_resource(bus, r, i) { + if (!r) + continue; +- if (res->start && resource_contains(r, res)) { ++ if (resource_contains(r, res)) { + + /* + * If the window is prefetchable but the BAR is --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -44,52 +44,30 @@ static void release_pcie_device(struct d @@ -2026,6 +2058,20 @@ Signed-off-by: Yangbo Lu driver->remove(pciedev); put_device(dev); } +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4642,3 +4642,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); ++ ++/* Freescale PCIe doesn't support MSI in RC mode */ ++static void quirk_fsl_no_msi(struct pci_dev *pdev) ++{ ++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) ++ pdev->no_msi = 1; ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1823,6 +1823,7 @@ void pcibios_release_device(struct pci_d diff --git a/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch index 907fb336a95..197fd9c50fd 100644 --- a/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/703-phy-support-layerscape.patch @@ -1,9 +1,9 @@ -From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001 +From 8949ebc0c5b982eab7ca493dad7b86c30befa6ec Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 11:54:28 +0800 -Subject: [PATCH] phy: support layerscape +Date: Wed, 17 Jan 2018 15:01:30 +0800 +Subject: [PATCH 09/30] phy: support layerscape -This is a integrated patch for layerscape mdio-phy support. +This is an integrated patch for layerscape mdio-phy support. 
Signed-off-by: Bogdan Purcareata Signed-off-by: Zhang Ying-22455 @@ -18,11 +18,12 @@ Signed-off-by: Yangbo Lu drivers/net/phy/aquantia.c | 28 + drivers/net/phy/cortina.c | 118 ++++ drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++ + drivers/net/phy/marvell.c | 2 +- drivers/net/phy/phy.c | 23 +- drivers/net/phy/phy_device.c | 6 +- drivers/net/phy/swphy.c | 1 + - include/linux/phy.h | 4 + - 9 files changed, 1544 insertions(+), 7 deletions(-) + include/linux/phy.h | 6 + + 10 files changed, 1547 insertions(+), 8 deletions(-) create mode 100644 drivers/net/phy/cortina.c create mode 100644 drivers/net/phy/fsl_backplane.c @@ -1604,6 +1605,17 @@ Signed-off-by: Yangbo Lu +MODULE_DESCRIPTION("Freescale Backplane driver"); +MODULE_AUTHOR("Shaohui Xie "); +MODULE_LICENSE("GPL v2"); +--- a/drivers/net/phy/marvell.c ++++ b/drivers/net/phy/marvell.c +@@ -1610,7 +1610,7 @@ static struct phy_driver marvell_drivers + .flags = PHY_HAS_INTERRUPT, + .probe = marvell_probe, + .config_init = &m88e1145_config_init, +- .config_aneg = &marvell_config_aneg, ++ .config_aneg = &m88e1101_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phy @@ -1737,11 +1749,20 @@ Signed-off-by: Yangbo Lu PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, -+ PHY_INTERFACE_MODE_SGMII_2500, ++ PHY_INTERFACE_MODE_2500SGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; -@@ -791,6 +792,9 @@ int phy_stop_interrupts(struct phy_devic +@@ -126,6 +127,8 @@ static inline const char *phy_modes(phy_ + return "qsgmii"; + case PHY_INTERFACE_MODE_TRGMII: + return "trgmii"; ++ case PHY_INTERFACE_MODE_2500SGMII: ++ return "sgmii-2500"; + default: + return "unknown"; + } +@@ -791,6 +794,9 @@ int phy_stop_interrupts(struct phy_devic static inline int phy_read_status(struct phy_device *phydev) { diff --git a/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch index a35e5931054..976f801cdfd 100644 --- a/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch +++ b/target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch @@ -1,12 +1,12 @@ -From afb7254de9f03c3efaf4e306dcf5f88e1873fc6b Mon Sep 17 00:00:00 2001 +From 667f0792b6f6d000c10f21c29c397c84cbe77f4a Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:06:25 +0800 -Subject: [PATCH] fsl-mc: layerscape support +Date: Wed, 17 Jan 2018 15:11:45 +0800 +Subject: [PATCH 10/30] fsl-mc: layerscape support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -This is a integrated patch for layerscape mc-bus support. +This is an integrated patch for layerscape mc-bus support. 
Signed-off-by: Stuart Yoder Signed-off-by: Bharat Bhushan @@ -28,7 +28,6 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl-mc/bus/dpio/Makefile | 11 + .../{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} | 73 +- drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 296 ++++++ - drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt | 135 +++ drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 693 +++++++++++++ drivers/staging/fsl-mc/bus/dpio/dpio.c | 224 +++++ drivers/staging/fsl-mc/bus/dpio/dpio.h | 109 ++ @@ -48,9 +47,9 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 78 +- drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 318 +++--- drivers/staging/fsl-mc/bus/fsl-mc-iommu.c | 104 ++ - drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 3 +- + drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 2 +- drivers/staging/fsl-mc/bus/fsl-mc-private.h | 6 +- - .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 11 +- + .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 10 +- drivers/staging/fsl-mc/bus/mc-io.c | 4 +- drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 + drivers/staging/fsl-mc/bus/mc-restool.c | 405 ++++++++ @@ -68,14 +67,13 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl-mc/include/mc-cmd.h | 44 +- drivers/staging/fsl-mc/include/mc-sys.h | 3 +- drivers/staging/fsl-mc/include/mc.h | 17 +- - 49 files changed, 7384 insertions(+), 2612 deletions(-) + 48 files changed, 7247 insertions(+), 2612 deletions(-) create mode 100644 drivers/staging/fsl-mc/bus/dpbp-cmd.h create mode 100644 drivers/staging/fsl-mc/bus/dpcon-cmd.h create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile rename drivers/staging/fsl-mc/{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} (64%) create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-service.c create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.h @@ -1633,144 +1631,6 @@ Signed-off-by: Yangbo Lu +module_init(dpio_driver_init); +module_exit(dpio_driver_exit); --- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt -@@ -0,0 +1,135 @@ -+Copyright 2016 NXP -+ -+Introduction -+------------ -+ -+A DPAA2 DPIO (Data Path I/O) is a hardware object that provides -+interfaces to enqueue and dequeue frames to/from network interfaces -+and other accelerators. A DPIO also provides hardware buffer -+pool management for network interfaces. -+ -+This document provides an overview the Linux DPIO driver, its -+subcomponents, and its APIs. -+ -+See Documentation/dpaa2/overview.txt for a general overview of DPAA2 -+and the general DPAA2 driver architecture in Linux. 
-+ -+Driver Overview -+--------------- -+ -+The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and -+provides services that: -+ A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue -+ frames for their respective objects -+ B) allow drivers to register callbacks for data availability notifications -+ when data becomes available on a queue or channel -+ C) allow drivers to manage hardware buffer pools -+ -+The Linux DPIO driver consists of 3 primary components-- -+ DPIO object driver-- fsl-mc driver that manages the DPIO object -+ DPIO service-- provides APIs to other Linux drivers for services -+ QBman portal interface-- sends portal commands, gets responses -+ -+ fsl-mc other -+ bus drivers -+ | | -+ +---+----+ +------+-----+ -+ |DPIO obj| |DPIO service| -+ | driver |---| (DPIO) | -+ +--------+ +------+-----+ -+ | -+ +------+-----+ -+ | QBman | -+ | portal i/f | -+ +------------+ -+ | -+ hardware -+ -+The diagram below shows how the DPIO driver components fit with the other -+DPAA2 Linux driver components: -+ +------------+ -+ | OS Network | -+ | Stack | -+ +------------+ +------------+ -+ | Allocator |. . . . . . . | Ethernet | -+ |(DPMCP,DPBP)| | (DPNI) | -+ +-.----------+ +---+---+----+ -+ . . ^ | -+ . . | | dequeue> -+ +-------------+ . | | -+ | DPRC driver | . +--------+ +------------+ -+ | (DPRC) | . . |DPIO obj| |DPIO service| -+ +----------+--+ | driver |-| (DPIO) | -+ | +--------+ +------+-----+ -+ | +------|-----+ -+ | | QBman | -+ +----+--------------+ | portal i/f | -+ | MC-bus driver | +------------+ -+ | | | -+ | /soc/fsl-mc | | -+ +-------------------+ | -+ | -+ =========================================|=========|======================== -+ +-+--DPIO---|-----------+ -+ | | | -+ | QBman Portal | -+ +-----------------------+ -+ -+ ============================================================================ -+ -+ -+DPIO Object Driver (dpio-driver.c) -+---------------------------------- -+ -+ The dpio-driver component registers with the fsl-mc bus to handle objects of -+ type "dpio". The implementation of probe() handles basic initialization -+ of the DPIO including mapping of the DPIO regions (the QBman SW portal) -+ and initializing interrupts and registering irq handlers. The dpio-driver -+ registers the probed DPIO with dpio-service. -+ -+DPIO service (dpio-service.c, dpaa2-io.h) -+------------------------------------------ -+ -+ The dpio service component provides queuing, notification, and buffers -+ management services to DPAA2 drivers, such as the Ethernet driver. A system -+ will typically allocate 1 DPIO object per CPU to allow queuing operations -+ to happen simultaneously across all CPUs. 
-+ -+ Notification handling -+ dpaa2_io_service_register() -+ dpaa2_io_service_deregister() -+ dpaa2_io_service_rearm() -+ -+ Queuing -+ dpaa2_io_service_pull_fq() -+ dpaa2_io_service_pull_channel() -+ dpaa2_io_service_enqueue_fq() -+ dpaa2_io_service_enqueue_qd() -+ dpaa2_io_store_create() -+ dpaa2_io_store_destroy() -+ dpaa2_io_store_next() -+ -+ Buffer pool management -+ dpaa2_io_service_release() -+ dpaa2_io_service_acquire() -+ -+QBman portal interface (qbman-portal.c) -+--------------------------------------- -+ -+ The qbman-portal component provides APIs to do the low level hardware -+ bit twiddling for operations such as: -+ -initializing Qman software portals -+ -building and sending portal commands -+ -portal interrupt configuration and processing -+ -+ The qbman-portal APIs are not public to other drivers, and are -+ only used by dpio-service. -+ -+Other (dpaa2-fd.h, dpaa2-global.h) -+---------------------------------- -+ -+ Frame descriptor and scatter-gather definitions and the APIs used to -+ manipulate them are defined in dpaa2-fd.h. -+ -+ Dequeue result struct and parsing APIs are defined in dpaa2-global.h. ---- /dev/null +++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c @@ -0,0 +1,693 @@ +/* diff --git a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch index 51abc032545..c0f5819be91 100644 --- a/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/705-dpaa2-support-layerscape.patch @@ -1,15 +1,16 @@ -From 3a302437605308079db398b67000a77a4fe92da8 Mon Sep 17 00:00:00 2001 +From 72b1e89ab8edb5e883e812d07d0751fe2b140548 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:07:58 +0800 -Subject: [PATCH] dpaa2: support layerscape +Date: Wed, 17 Jan 2018 15:12:58 +0800 +Subject: [PATCH 11/30] dpaa2: support layerscape -This is a integrated patch for layerscape dpaa2 support. +This is an integrated patch for layerscape dpaa2 support. 
Signed-off-by: Bogdan Purcareata Signed-off-by: Ioana Radulescu Signed-off-by: Razvan Stefanescu Signed-off-by: costi Signed-off-by: Catalin Horghidan +Signed-off-by: Mathew McBride Signed-off-by: Yangbo Lu --- drivers/soc/fsl/ls2-console/Kconfig | 4 + @@ -17,41 +18,41 @@ Signed-off-by: Yangbo Lu drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++ drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 + drivers/staging/fsl-dpaa2/ethernet/README | 186 ++ - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 350 +++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 352 ++ .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 ++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3155 ++++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 460 +++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 856 ++++++ - drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 ++ - drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 600 ++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1770 +++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.h | 989 ++++++ + .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 + + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3516 ++++++++++++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 499 +++ + drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 864 +++++ + drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 + + drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 658 ++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1903 +++++++++++ + drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1053 ++++++ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++ drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 + drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 + - drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 ++++++ - drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++++ - drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 ++++++++ - drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 ++++++++++++ + drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 +++++ + drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++ + drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 +++++++ + drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 +++++++++++ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 + drivers/staging/fsl-dpaa2/evb/Makefile | 10 + drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++ - drivers/staging/fsl-dpaa2/evb/evb.c | 1350 +++++++++ + drivers/staging/fsl-dpaa2/evb/evb.c | 1350 ++++++++ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 + drivers/staging/fsl-dpaa2/mac/Makefile | 10 + - drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 ++ + drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 + drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++ - drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 +++ - drivers/staging/fsl-dpaa2/mac/mac.c | 666 +++++ + drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++ + drivers/staging/fsl-dpaa2/mac/mac.c | 669 ++++ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 + drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 + drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++ - drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 ++ + drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 + drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++ - 39 files changed, 22696 insertions(+) + 39 files changed, 23364 insertions(+) create mode 100644 drivers/soc/fsl/ls2-console/Kconfig create mode 100644 drivers/soc/fsl/ls2-console/Makefile create mode 100644 
drivers/soc/fsl/ls2-console/ls2-console.c @@ -595,7 +596,7 @@ Signed-off-by: Yangbo Lu +non-standard driver stats can be consulted through ethtool -S option. --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -@@ -0,0 +1,350 @@ +@@ -0,0 +1,352 @@ + +/* Copyright 2015 Freescale Semiconductor Inc. + * @@ -708,9 +709,9 @@ Signed-off-by: Yangbo Lu + int i, err; + + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", -+ "VFQID", "CPU", "Type", "Frames", "Pending frames", -+ "Congestion"); ++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", ++ "VFQID", "CPU", "Traffic Class", "Type", "Frames", ++ "Pending frames", "Congestion"); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; @@ -718,9 +719,10 @@ Signed-off-by: Yangbo Lu + if (err) + fcnt = 0; + -+ seq_printf(file, "%5d%16d%16s%16llu%16u%16llu\n", ++ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n", + fq->fqid, + fq->target_cpu, ++ fq->tc, + fq_type_to_str(fq), + fq->stats.frames, + fcnt, @@ -756,19 +758,20 @@ Signed-off-by: Yangbo Lu + int i; + + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", ++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", + "CHID", "CPU", "Deq busy", "Frames", "CDANs", -+ "Avg frm/CDAN"); ++ "Avg frm/CDAN", "Buf count"); + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; -+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", ++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", + ch->ch_id, + ch->nctx.desired_cpu, + ch->stats.dequeue_portal_busy, + ch->stats.frames, + ch->stats.cdan, -+ ch->stats.frames / ch->stats.cdan); ++ ch->stats.frames / ch->stats.cdan, ++ ch->buf_count); + } + + return 0; @@ -1198,7 +1201,7 @@ Signed-off-by: Yangbo Lu +#include --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -0,0 +1,3155 @@ +@@ -0,0 +1,3516 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without @@ -1338,6 +1341,8 @@ Signed-off-by: Yangbo Lu + u16 fd_offset = dpaa2_fd_get_offset(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + ++ ch->buf_count--; ++ + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) + return NULL; @@ -1345,8 +1350,6 @@ Signed-off-by: Yangbo Lu + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + -+ ch->buf_count--; -+ + return skb; +} + @@ -1384,7 +1387,7 @@ Signed-off-by: Yangbo Lu + /* We build the skb around the first data buffer */ + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) -+ return NULL; ++ goto err_build; + + sg_offset = dpaa2_sg_get_offset(sge); + skb_reserve(skb, sg_offset); @@ -1415,6 +1418,32 @@ Signed-off-by: Yangbo Lu + ch->buf_count -= i + 2; + + return skb; ++ ++err_build: ++ /* We still need to subtract the buffers used by this FD from our ++ * software counter ++ */ ++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) ++ if (dpaa2_sg_is_final(&sgt[i])) ++ break; ++ ch->buf_count -= i + 2; ++ ++ return NULL; ++} ++ ++static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ void *vaddr; ++ int i; ++ ++ for (i = 0; i < count; i++) { ++ /* Same logic as on regular Rx path */ ++ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]); ++ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, ++ DMA_FROM_DEVICE); ++ put_page(virt_to_head_page(vaddr)); ++ } +} + +/* Main Rx frame processing routine */ @@ -1722,7 +1751,7 @@ Signed-off-by: Yangbo Lu + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_len(fd, skb->len); + -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1; ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA; + + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, sgt_buf); @@ -1779,7 +1808,7 @@ Signed-off-by: Yangbo Lu + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA | FD_CTRL_PTV1; ++ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA; + + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, buffer_start); @@ -1798,7 +1827,7 @@ Signed-off-by: Yangbo Lu + */ +static void free_tx_fd(const struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, -+ u32 *status) ++ u32 *status, bool in_napi) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t fd_addr; @@ -1877,7 +1906,7 @@ Signed-off-by: Yangbo Lu + kfree(skbh); + + /* Move on with skb release */ -+ dev_kfree_skb(skb); ++ napi_consume_skb(skb, in_napi); +} + +static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) @@ -1961,7 +1990,7 @@ Signed-off-by: Yangbo Lu + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* Clean up everything, including freeing the skb */ -+ free_tx_fd(priv, &fd, NULL); ++ free_tx_fd(priv, &fd, NULL, false); + } else { + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); @@ -2014,7 +2043,7 @@ Signed-off-by: Yangbo Lu + fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK); + } + -+ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL); ++ free_tx_fd(priv, fd, check_fas_errors ? 
&status : NULL, true); + + /* if there are no errors, we're done */ + if (likely(!errors)) @@ -2084,7 +2113,7 @@ Signed-off-by: Yangbo Lu + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + void *buf; + dma_addr_t addr; -+ int i; ++ int i, err; + + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Allocate buffer visible to WRIOP + skb shared info + @@ -2111,22 +2140,25 @@ Signed-off-by: Yangbo Lu + } + +release_bufs: -+ /* In case the portal is busy, retry until successful. -+ * The buffer release function would only fail if the QBMan portal -+ * was busy, which implies portal contention (i.e. more CPUs than -+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, -+ * there is little we can realistically do, short of giving up - -+ * in which case we'd risk depleting the buffer pool and never again -+ * receiving the Rx interrupt which would kick-start the refill logic. -+ * So just keep retrying, at the risk of being moved to ksoftirqd. -+ */ -+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) ++ /* In case the portal is busy, retry until successful */ ++ while ((err = dpaa2_io_service_release(NULL, bpid, ++ buf_array, i)) == -EBUSY) + cpu_relax(); ++ ++ /* If release command failed, clean up and bail out; not much ++ * else we can do about it ++ */ ++ if (unlikely(err)) { ++ free_bufs(priv, buf_array, i); ++ return 0; ++ } ++ + return i; + +err_map: + put_page(virt_to_head_page(buf)); +err_alloc: ++ /* If we managed to allocate at least some buffers, release them */ + if (i) + goto release_bufs; + @@ -2169,10 +2201,8 @@ Signed-off-by: Yangbo Lu + */ +static void drain_bufs(struct dpaa2_eth_priv *priv, int count) +{ -+ struct device *dev = priv->net_dev->dev.parent; + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; -+ void *vaddr; -+ int ret, i; ++ int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, priv->bpid, @@ -2181,15 +2211,7 @@ Signed-off-by: Yangbo Lu + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); + return; + } -+ for (i = 0; i < ret; i++) { -+ /* Same logic as on regular Rx path */ -+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, -+ buf_array[i]); -+ dma_unmap_single(dev, buf_array[i], -+ DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ put_page(virt_to_head_page(vaddr)); -+ } ++ free_bufs(priv, buf_array, ret); + } while (ret); +} + @@ -2497,7 +2519,7 @@ Signed-off-by: Yangbo Lu +/** Fill in counters maintained by the GPP driver. These may be different from + * the hardware counters obtained by ethtool. + */ -+static void dpaa2_eth_get_stats(struct net_device *net_dev, ++static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@ -2513,6 +2535,7 @@ Signed-off-by: Yangbo Lu + for (j = 0; j < num; j++) + netstats[j] += cpustats[j]; + } ++ return stats; +} + +static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) @@ -3039,7 +3062,7 @@ Signed-off-by: Yangbo Lu + +static void setup_fqs(struct dpaa2_eth_priv *priv) +{ -+ int i; ++ int i, j; + + /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the + * beginning of the queue array. 
@@ -3052,11 +3075,13 @@ Signed-off-by: Yangbo Lu + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + -+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -+ priv->fq[priv->num_fqs++].flowid = (u16)i; -+ } ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) ++ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) { ++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; ++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; ++ priv->fq[priv->num_fqs].tc = (u8)i; ++ priv->fq[priv->num_fqs++].flowid = (u16)j; ++ } + +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE + /* We have exactly one Rx error queue per DPNI */ @@ -3299,9 +3324,6 @@ Signed-off-by: Yangbo Lu + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", + priv->tx_data_offset); + -+ /* Accommodate software annotation space (SWA) */ -+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; -+ + /* Enable congestion notifications for Tx queues */ + err = setup_tx_congestion(priv); + if (err) @@ -3357,39 +3379,111 @@ Signed-off-by: Yangbo Lu + kfree(priv->cscn_unaligned); +} + -+int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, -+ bool enable) ++static int set_queue_taildrop(struct dpaa2_eth_priv *priv, ++ struct dpni_taildrop *td) +{ + struct device *dev = priv->net_dev->dev.parent; -+ struct dpni_taildrop td; -+ int err = 0, i; ++ int err, i; + -+ td.enable = enable; -+ td.threshold = DPAA2_ETH_TAILDROP_THRESH; -+ -+ if (enable) { -+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD; -+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD; -+ } else { -+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / -+ priv->num_channels; -+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; -+ } + + for (i = 0; i < priv->num_fqs; i++) { + if (priv->fq[i].type != DPAA2_RX_FQ) + continue; + + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, -+ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0, -+ priv->fq[i].flowid, &td); ++ DPNI_CP_QUEUE, DPNI_QUEUE_RX, ++ priv->fq[i].tc, priv->fq[i].flowid, ++ td); + if (err) { + dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); -+ break; ++ return err; + } + } + -+ return err; ++ return 0; ++} ++ ++static int set_group_taildrop(struct dpaa2_eth_priv *priv, ++ struct dpni_taildrop *td) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpni_taildrop disable_td, *tc_td; ++ int i, err; ++ ++ memset(&disable_td, 0, sizeof(struct dpni_taildrop)); ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { ++ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i)) ++ /* Do not set taildrop thresholds for PFC-enabled ++ * traffic classes. We will enable congestion ++ * notifications for them. ++ */ ++ tc_td = &disable_td; ++ else ++ tc_td = td; ++ ++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, ++ DPNI_CP_GROUP, DPNI_QUEUE_RX, ++ i, 0, tc_td); ++ if (err) { ++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err); ++ return err; ++ } ++ } ++ return 0; ++} ++ ++/* Enable/disable Rx FQ taildrop ++ * ++ * Rx FQ taildrop is mutually exclusive with flow control and it only gets ++ * disabled when FC is active. Depending on FC status, we need to compute ++ * the maximum number of buffers in the pool differently, so use the ++ * opportunity to update max number of buffers as well. 
++ */ ++int set_rx_taildrop(struct dpaa2_eth_priv *priv) ++{ ++ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv); ++ struct dpni_taildrop td_queue, td_group; ++ int err = 0; ++ ++ switch (cfg) { ++ case DPAA2_ETH_TD_NONE: ++ memset(&td_queue, 0, sizeof(struct dpni_taildrop)); ++ memset(&td_group, 0, sizeof(struct dpni_taildrop)); ++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / ++ priv->num_channels; ++ break; ++ case DPAA2_ETH_TD_QUEUE: ++ memset(&td_group, 0, sizeof(struct dpni_taildrop)); ++ td_queue.enable = 1; ++ td_queue.units = DPNI_CONGESTION_UNIT_BYTES; ++ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH / ++ dpaa2_eth_tc_count(priv); ++ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD; ++ break; ++ case DPAA2_ETH_TD_GROUP: ++ memset(&td_queue, 0, sizeof(struct dpni_taildrop)); ++ td_group.enable = 1; ++ td_group.units = DPNI_CONGESTION_UNIT_FRAMES; ++ td_group.threshold = NAPI_POLL_WEIGHT * ++ dpaa2_eth_queue_count(priv); ++ priv->num_bufs = NAPI_POLL_WEIGHT * ++ dpaa2_eth_tc_count(priv); ++ break; ++ default: ++ break; ++ } ++ ++ err = set_queue_taildrop(priv, &td_queue); ++ if (err) ++ return err; ++ ++ err = set_group_taildrop(priv, &td_group); ++ if (err) ++ return err; ++ ++ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD; ++ ++ return 0; +} + +static int setup_rx_flow(struct dpaa2_eth_priv *priv, @@ -3402,7 +3496,7 @@ Signed-off-by: Yangbo Lu + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, -+ DPNI_QUEUE_RX, 0, fq->flowid, &q, &qid); ++ DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid); + if (err) { + dev_err(dev, "dpni_get_queue() failed (%d)\n", err); + return err; @@ -3415,7 +3509,7 @@ Signed-off-by: Yangbo Lu + q.destination.priority = 1; + q.user_context = (u64)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, -+ DPNI_QUEUE_RX, 0, fq->flowid, q_opt, &q); ++ DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q); + if (err) { + dev_err(dev, "dpni_set_queue() failed (%d)\n", err); + return err; @@ -3612,7 +3706,13 @@ Signed-off-by: Yangbo Lu + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + } + -+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { ++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i, ++ &dist_cfg); ++ if (err) ++ break; ++ } ++ + dma_unmap_single(dev, dist_cfg.key_cfg_iova, + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); + if (err) @@ -3639,6 +3739,7 @@ Signed-off-by: Yangbo Lu + pools_params.num_dpbp = 1; + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].backup_pool = 0; ++ pools_params.pools[0].priority_mask = 0xff; + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { @@ -4124,6 +4225,264 @@ Signed-off-by: Yangbo Lu + device_remove_file(dev, &dpaa2_eth_attrs[i]); +} + ++#ifdef CONFIG_FSL_DPAA2_ETH_DCB ++static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev, ++ struct ieee_pfc *pfc) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpni_congestion_notification_cfg notification_cfg; ++ struct dpni_link_state state; ++ int err, i; ++ ++ pfc->pfc_cap = dpaa2_eth_tc_count(priv); ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ return err; ++ } ++ ++ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE)) ++ return 0; ++ ++ priv->pfc.pfc_en = 0; ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { ++ err = 
dpni_get_congestion_notification(priv->mc_io, 0, ++ priv->mc_token, ++ DPNI_QUEUE_RX, ++ i, ¬ification_cfg); ++ if (err) { ++ netdev_err(net_dev, "Error %d getting congestion notif", ++ err); ++ return err; ++ } ++ ++ if (notification_cfg.threshold_entry) ++ priv->pfc.pfc_en |= 1 << i; ++ } ++ ++ pfc->pfc_en = priv->pfc.pfc_en; ++ pfc->mbc = priv->pfc.mbc; ++ pfc->delay = priv->pfc.delay; ++ ++ return 0; ++} ++ ++/* Configure ingress classification based on VLAN PCP */ ++static int set_vlan_qos(struct dpaa2_eth_priv *priv) ++{ ++ struct device *dev = priv->net_dev->dev.parent; ++ struct dpkg_profile_cfg kg_cfg = {0}; ++ struct dpni_qos_tbl_cfg qos_cfg = {0}; ++ struct dpni_rule_cfg key_params; ++ u8 *params_iova; ++ __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK); ++ int err = 0, i, j = 0; ++ ++ if (priv->vlan_clsf_set) ++ return 0; ++ ++ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); ++ if (!params_iova) ++ return -ENOMEM; ++ ++ kg_cfg.num_extracts = 1; ++ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; ++ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; ++ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; ++ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; ++ ++ err = dpni_prepare_key_cfg(&kg_cfg, params_iova); ++ if (err) { ++ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err); ++ goto out_free; ++ } ++ ++ /* Set QoS table */ ++ qos_cfg.default_tc = 0; ++ qos_cfg.discard_on_miss = 0; ++ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { ++ dev_err(dev, "%s: DMA mapping failed\n", __func__); ++ err = -ENOMEM; ++ goto out_free; ++ } ++ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); ++ dma_unmap_single(dev, qos_cfg.key_cfg_iova, ++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ ++ if (err) { ++ dev_err(dev, "dpni_set_qos_table failed: %d\n", err); ++ goto out_free; ++ } ++ ++ key_params.key_size = sizeof(key); ++ ++ if (dpaa2_eth_fs_mask_enabled(priv)) { ++ key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask), ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, key_params.mask_iova)) { ++ dev_err(dev, "DMA mapping failed %s\n", __func__); ++ err = -ENOMEM; ++ goto out_free; ++ } ++ } else { ++ key_params.mask_iova = 0; ++ } ++ ++ key_params.key_iova = dma_map_single(dev, &key, sizeof(key), ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, key_params.key_iova)) { ++ dev_err(dev, "%s: DMA mapping failed\n", __func__); ++ err = -ENOMEM; ++ goto out_unmap_mask; ++ } ++ ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { ++ key = cpu_to_be16(i << VLAN_PRIO_SHIFT); ++ dma_sync_single_for_device(dev, key_params.key_iova, ++ sizeof(key), DMA_TO_DEVICE); ++ ++ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, ++ &key_params, i, j++); ++ if (err) { ++ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err); ++ goto out_unmap; ++ } ++ } ++ ++ priv->vlan_clsf_set = true; ++ ++out_unmap: ++ dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE); ++out_unmap_mask: ++ if (key_params.mask_iova) ++ dma_unmap_single(dev, key_params.mask_iova, sizeof(mask), ++ DMA_TO_DEVICE); ++out_free: ++ kfree(params_iova); ++ return err; ++} ++ ++static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev, ++ struct ieee_pfc *pfc) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ struct dpni_congestion_notification_cfg notification_cfg = {0}; ++ struct dpni_link_state state = {0}; ++ struct 
dpni_link_cfg cfg = {0}; ++ int err = 0, i; ++ ++ if (priv->pfc.pfc_en == pfc->pfc_en) ++ /* Same enabled mask, nothing to be done */ ++ return 0; ++ ++ err = set_vlan_qos(priv); ++ if (err) ++ return err; ++ ++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d getting link state", err); ++ return err; ++ } ++ ++ cfg.rate = state.rate; ++ cfg.options = state.options; ++ if (pfc->pfc_en) ++ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE; ++ else ++ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE; ++ ++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); ++ if (err) { ++ netdev_err(net_dev, "ERROR %d setting link cfg", err); ++ return err; ++ } ++ ++ memcpy(&priv->pfc, pfc, sizeof(priv->pfc)); ++ ++ err = set_rx_taildrop(priv); ++ if (err) ++ return err; ++ ++ /* configure congestion notifications */ ++ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL; ++ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; ++ notification_cfg.message_iova = 0ULL; ++ notification_cfg.message_ctx = 0ULL; ++ ++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { ++ if (dpaa2_eth_is_pfc_enabled(priv, i)) { ++ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT; ++ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2; ++ } else { ++ notification_cfg.threshold_entry = 0; ++ notification_cfg.threshold_exit = 0; ++ } ++ ++ err = dpni_set_congestion_notification(priv->mc_io, 0, ++ priv->mc_token, ++ DPNI_QUEUE_RX, ++ i, ¬ification_cfg); ++ if (err) { ++ netdev_err(net_dev, "Error %d setting congestion notif", ++ err); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ return priv->dcbx_mode; ++} ++ ++static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ priv->dcbx_mode = mode; ++ return 0; ++} ++ ++static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) ++{ ++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); ++ ++ switch (capid) { ++ case DCB_CAP_ATTR_PFC: ++ *cap = true; ++ break; ++ case DCB_CAP_ATTR_PFC_TCS: ++ *cap = 1 << dpaa2_eth_tc_count(priv); ++ break; ++ case DCB_CAP_ATTR_DCBX: ++ *cap = priv->dcbx_mode; ++ break; ++ default: ++ *cap = false; ++ break; ++ } ++ ++ return 0; ++} ++ ++const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = { ++ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc, ++ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc, ++ .getdcbx = dpaa2_eth_dcbnl_getdcbx, ++ .setdcbx = dpaa2_eth_dcbnl_setdcbx, ++ .getcap = dpaa2_eth_dcbnl_getcap, ++}; ++#endif ++ +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) +{ + struct device *dev; @@ -4152,7 +4511,8 @@ Signed-off-by: Yangbo Lu + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &priv->mc_io); + if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); ++ dev_dbg(dev, "MC portal allocation failed\n"); ++ err = -EPROBE_DEFER; + goto err_portal_alloc; + } + @@ -4178,10 +4538,6 @@ Signed-off-by: Yangbo Lu + if (err) + goto err_bind; + -+ /* Add a NAPI context for each channel */ -+ add_ch_napi(priv); -+ enable_ch_napi(priv); -+ + /* Percpu statistics */ + priv->percpu_stats = alloc_percpu(*priv->percpu_stats); + if (!priv->percpu_stats) { @@ -4224,6 +4580,14 @@ Signed-off-by: Yangbo Lu + goto err_alloc_rings; + + net_dev->ethtool_ops = &dpaa2_ethtool_ops; ++#ifdef CONFIG_FSL_DPAA2_ETH_DCB ++ net_dev->dcbnl_ops = 
&dpaa2_eth_dcbnl_ops; ++ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; ++#endif ++ ++ /* Add a NAPI context for each channel */ ++ add_ch_napi(priv); ++ enable_ch_napi(priv); + + err = setup_irqs(dpni_dev); + if (err) { @@ -4287,6 +4651,9 @@ Signed-off-by: Yangbo Lu +#endif + dpaa2_eth_sysfs_remove(&net_dev->dev); + ++ disable_ch_napi(priv); ++ del_ch_napi(priv); ++ + unregister_netdev(net_dev); + dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + @@ -4298,9 +4665,6 @@ Signed-off-by: Yangbo Lu + free_rings(priv); + free_percpu(priv->percpu_stats); + free_percpu(priv->percpu_extras); -+ -+ disable_ch_napi(priv); -+ del_ch_napi(priv); + free_dpbp(priv); + free_dpio(priv); + free_dpni(priv); @@ -4356,7 +4720,7 @@ Signed-off-by: Yangbo Lu +module_exit(dpaa2_eth_driver_exit); --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -0,0 +1,460 @@ +@@ -0,0 +1,499 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -4392,6 +4756,7 @@ Signed-off-by: Yangbo Lu +#define __DPAA2_ETH_H + +#include ++#include +#include +#include +#include "../../fsl-mc/include/dpaa2-io.h" @@ -4455,7 +4820,7 @@ Signed-off-by: Yangbo Lu +#define DPAA2_ETH_RX_BUF_ALIGN 64 +#define DPAA2_ETH_RX_BUF_ALIGN_V1 256 +#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ -+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) ++ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD) + +/* rx_extra_head prevents reallocations in L3 processing. */ +#define DPAA2_ETH_SKB_SIZE \ @@ -4473,17 +4838,19 @@ Signed-off-by: Yangbo Lu +/* PTP nominal frequency 1GHz */ +#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 + -+/* Leave enough extra space in the headroom to make sure the skb is -+ * not realloc'd in forwarding scenarios. -+ */ -+#define DPAA2_ETH_RX_HEAD_ROOM 192 -+ +/* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. The hardware + * options are either 0 or 64, so we choose the latter. 
+ */ +#define DPAA2_ETH_SWA_SIZE 64 + ++/* Extra headroom space requested to hardware, in order to make sure there's ++ * no realloc'ing in forwarding scenarios ++ */ ++#define DPAA2_ETH_RX_HEAD_ROOM \ ++ (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \ ++ DPAA2_ETH_TX_BUF_ALIGN) ++ +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ +struct dpaa2_eth_swa { + struct sk_buff *skb; @@ -4660,16 +5027,17 @@ Signed-off-by: Yangbo Lu + __u64 pull_err; +}; + ++#define DPAA2_ETH_MAX_DPCONS NR_CPUS ++#define DPAA2_ETH_MAX_TCS 8 ++ +/* Maximum number of queues associated with a DPNI */ -+#define DPAA2_ETH_MAX_RX_QUEUES 16 -+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS ++#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS) ++#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS +#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ + DPAA2_ETH_MAX_TX_QUEUES + \ + DPAA2_ETH_MAX_RX_ERR_QUEUES) + -+#define DPAA2_ETH_MAX_DPCONS NR_CPUS -+ +enum dpaa2_eth_fq_type { + DPAA2_RX_FQ = 0, + DPAA2_TX_CONF_FQ, @@ -4682,6 +5050,7 @@ Signed-off-by: Yangbo Lu + u32 fqid; + u32 tx_qdbin; + u16 flowid; ++ u8 tc; + int target_cpu; + struct dpaa2_eth_channel *channel; + enum dpaa2_eth_fq_type type; @@ -4788,6 +5157,10 @@ Signed-off-by: Yangbo Lu + struct dpaa2_eth_cls_rule *cls_rule; + + struct dpni_tx_shaping_cfg shaping_cfg; ++ ++ u8 dcbx_mode; ++ struct ieee_pfc pfc; ++ bool vlan_clsf_set; +}; + +#define dpaa2_eth_hash_enabled(priv) \ @@ -4813,13 +5186,43 @@ Signed-off-by: Yangbo Lu + return priv->dpni_attrs.num_queues; +} + ++static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv) ++{ ++ return priv->dpni_attrs.num_tcs; ++} ++ ++static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv, ++ int traffic_class) ++{ ++ return priv->pfc.pfc_en & (1 << traffic_class); ++} ++ ++enum dpaa2_eth_td_cfg { ++ DPAA2_ETH_TD_NONE, ++ DPAA2_ETH_TD_QUEUE, ++ DPAA2_ETH_TD_GROUP ++}; ++ ++static inline enum dpaa2_eth_td_cfg ++dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv) ++{ ++ bool pfc_enabled = !!(priv->pfc.pfc_en); ++ ++ if (pfc_enabled) ++ return DPAA2_ETH_TD_GROUP; ++ else if (priv->tx_pause_frames) ++ return DPAA2_ETH_TD_NONE; ++ else ++ return DPAA2_ETH_TD_QUEUE; ++} ++ +void check_cls_support(struct dpaa2_eth_priv *priv); + -+int setup_fqs_taildrop(struct dpaa2_eth_priv *priv, bool enable); ++int set_rx_taildrop(struct dpaa2_eth_priv *priv); +#endif /* __DPAA2_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -0,0 +1,856 @@ +@@ -0,0 +1,864 @@ +/* Copyright 2014-2015 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without @@ -5052,7 +5455,8 @@ Signed-off-by: Yangbo Lu + if (current_tx_pause == pause->tx_pause) + goto out; + -+ err = setup_fqs_taildrop(priv, !pause->tx_pause); ++ priv->tx_pause_frames = pause->tx_pause; ++ err = set_rx_taildrop(priv); + if (err) + netdev_dbg(net_dev, "ERROR %d configuring taildrop", err); + @@ -5498,7 +5902,7 @@ Signed-off-by: Yangbo Lu + struct dpni_rule_cfg rule_cfg; + struct dpni_fs_action_cfg fs_act = { 0 }; + void *dma_mem; -+ int err = 0; ++ int err = 0, tc; + + if (!dpaa2_eth_fs_enabled(priv)) { + netdev_err(net_dev, "dev does not support steering!\n"); @@ -5541,12 +5945,19 @@ Signed-off-by: Yangbo Lu + else + fs_act.flow_id = fs->ring_cookie; + -+ if (add) -+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, -+ 0, fs->location, &rule_cfg, &fs_act); -+ else -+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, -+ 0, &rule_cfg); ++ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) { ++ if (add) ++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, ++ tc, fs->location, &rule_cfg, ++ &fs_act); ++ else ++ err = dpni_remove_fs_entry(priv->mc_io, 0, ++ priv->mc_token, tc, ++ &rule_cfg); ++ ++ if (err) ++ break; ++ } + + dma_unmap_single(dev, rule_cfg.key_iova, + rule_cfg.key_size * 2, DMA_TO_DEVICE); @@ -5857,7 +6268,7 @@ Signed-off-by: Yangbo Lu +#endif /* __FSL_DPKG_H_ */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -@@ -0,0 +1,600 @@ +@@ -0,0 +1,658 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + * @@ -5897,9 +6308,11 @@ Signed-off-by: Yangbo Lu +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 0 +#define DPNI_CMD_BASE_VERSION 1 ++#define DPNI_CMD_2ND_VERSION 2 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) ++#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) + +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) @@ -5922,7 +6335,7 @@ Signed-off-by: Yangbo Lu +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + -+#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) ++#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) @@ -5945,6 +6358,8 @@ Signed-off-by: Yangbo Lu + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) + ++#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240) ++#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241) +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) @@ -5985,13 +6400,14 @@ Signed-off-by: Yangbo Lu + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpni_cmd_set_pools { -+ /* cmd word 0 */ + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; -+ /* cmd word 0..4 */ -+ __le32 dpbp_id[DPNI_MAX_DPBP]; -+ /* cmd word 4..6 */ ++ struct { ++ __le16 dpbp_id; ++ u8 priority_mask; ++ u8 pad; ++ } pool[DPNI_MAX_DPBP]; + __le16 buffer_size[DPNI_MAX_DPBP]; +}; + @@ -6370,6 +6786,36 @@ Signed-off-by: Yangbo Lu + __le64 user_context; +}; + ++#define DPNI_DISCARD_ON_MISS_SHIFT 0 ++#define DPNI_DISCARD_ON_MISS_SIZE 1 ++ ++struct dpni_cmd_set_qos_table { ++ u32 pad; ++ u8 default_tc; ++ /* only the LSB */ ++ u8 discard_on_miss; ++ u16 pad1[21]; ++ u64 key_cfg_iova; ++}; ++ ++struct dpni_cmd_add_qos_entry { ++ u16 pad; ++ u8 tc_id; ++ u8 key_size; ++ u16 
index; ++ u16 pad2; ++ u64 key_iova; ++ u64 mask_iova; ++}; ++ ++struct dpni_cmd_remove_qos_entry { ++ u8 pad1[3]; ++ u8 key_size; ++ u32 pad2; ++ u64 key_iova; ++ u64 mask_iova; ++}; ++ +struct dpni_cmd_add_fs_entry { + /* cmd word 0 */ + u16 options; @@ -6457,10 +6903,33 @@ Signed-off-by: Yangbo Lu + u32 threshold_exit; +}; + ++struct dpni_cmd_get_congestion_notification { ++ /* cmd word 0 */ ++ u8 qtype; ++ u8 tc; ++}; ++ ++struct dpni_rsp_get_congestion_notification { ++ /* cmd word 0 */ ++ u64 pad; ++ /* cmd word 1 */ ++ u32 dest_id; ++ u16 notification_mode; ++ u8 dest_priority; ++ /* from LSB: dest_type: 4 units:2 */ ++ u8 type_units; ++ /* cmd word 2 */ ++ u64 message_iova; ++ /* cmd word 3 */ ++ u64 message_ctx; ++ /* cmd word 4 */ ++ u32 threshold_entry; ++ u32 threshold_exit; ++}; +#endif /* _FSL_DPNI_CMD_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -@@ -0,0 +1,1770 @@ +@@ -0,0 +1,1903 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + * @@ -6661,7 +7130,10 @@ Signed-off-by: Yangbo Lu + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPNI_MAX_DPBP; i++) { -+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); ++ cmd_params->pool[i].dpbp_id = ++ cpu_to_le16(cfg->pools[i].dpbp_id); ++ cmd_params->pool[i].priority_mask = ++ cfg->pools[i].priority_mask; + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= @@ -7837,6 +8309,82 @@ Signed-off-by: Yangbo Lu + return mc_send_command(mc_io, &cmd); +} + ++/* ++ * dpni_set_qos_table() - Set QoS mapping table ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: QoS table configuration ++ * ++ * This function and all QoS-related functions require that ++ *'max_tcs > 1' was set at DPNI creation. ++ * ++ * warning: Before calling this function, call dpkg_prepare_key_cfg() to ++ * prepare the key_cfg_iova parameter ++ * ++ * Return: '0' on Success; Error code otherwise. ++ */ ++int dpni_set_qos_table(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_qos_tbl_cfg *cfg) ++{ ++ struct dpni_cmd_set_qos_table *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params; ++ cmd_params->default_tc = cfg->default_tc; ++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); ++ dpni_set_field(cmd_params->discard_on_miss, ++ ENABLE, ++ cfg->discard_on_miss); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ ++/** ++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @cfg: QoS rule to add ++ * @tc_id: Traffic class selection (0-7) ++ * @index: Location in the QoS table where to insert the entry. ++ * Only relevant if MASKING is enabled for QoS classification on ++ * this DPNI, it is ignored for exact match. ++ * ++ * Return: '0' on Success; Error code otherwise. 
++ */ ++int dpni_add_qos_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_rule_cfg *cfg, ++ u8 tc_id, ++ u16 index) ++{ ++ struct dpni_cmd_add_qos_entry *cmd_params; ++ struct mc_command cmd = { 0 }; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params; ++ cmd_params->tc_id = tc_id; ++ cmd_params->key_size = cfg->key_size; ++ cmd_params->index = cpu_to_le16(index); ++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova); ++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); ++ ++ /* send command to mc*/ ++ return mc_send_command(mc_io, &cmd); ++} ++ +/** + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class + * (to select a flow ID) @@ -7961,6 +8509,60 @@ Signed-off-by: Yangbo Lu +} + +/** ++ * dpni_get_congestion_notification() - Get traffic class congestion ++ * notification configuration ++ * @mc_io: Pointer to MC portal's I/O object ++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' ++ * @token: Token of DPNI object ++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported ++ * @tc_id: Traffic class selection (0-7) ++ * @cfg: congestion notification configuration ++ * ++ * Return: '0' on Success; error code otherwise. ++ */ ++int dpni_get_congestion_notification( ++ struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc_id, ++ struct dpni_congestion_notification_cfg *cfg) ++{ ++ struct dpni_rsp_get_congestion_notification *rsp_params; ++ struct dpni_cmd_get_congestion_notification *cmd_params; ++ struct mc_command cmd = { 0 }; ++ int err; ++ ++ /* prepare command */ ++ cmd.header = mc_encode_cmd_header( ++ DPNI_CMDID_GET_CONGESTION_NOTIFICATION, ++ cmd_flags, ++ token); ++ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; ++ cmd_params->qtype = qtype; ++ cmd_params->tc = tc_id; ++ ++ /* send command to mc*/ ++ err = mc_send_command(mc_io, &cmd); ++ if (err) ++ return err; ++ ++ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; ++ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); ++ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); ++ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); ++ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); ++ cfg->message_iova = le64_to_cpu(rsp_params->message_iova); ++ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); ++ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); ++ cfg->dest_cfg.priority = rsp_params->dest_priority; ++ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, ++ DEST_TYPE); ++ ++ return 0; ++} ++ ++/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' @@ -8233,7 +8835,7 @@ Signed-off-by: Yangbo Lu +} --- /dev/null +++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -@@ -0,0 +1,989 @@ +@@ -0,0 +1,1053 @@ +/* Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + * @@ -8288,6 +8890,14 @@ Signed-off-by: Yangbo Lu + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 ++/** ++ * Maximum number of senders ++ */ ++#define DPNI_MAX_SENDERS 8 ++/** ++ * Maximum distribution size ++ */ ++#define DPNI_MAX_DIST_SIZE 8 + +/** + * All traffic classes considered; see dpni_set_queue() @@ -8359,13 +8969,15 @@ Signed-off-by: Yangbo Lu + /** + * struct pools - Buffer pools parameters + * @dpbp_id: DPBP object ID ++ * @priority_mask: priorities served by DPBP + * @buffer_size: Buffer size + * @backup_pool: Backup pool + */ + struct { -+ int dpbp_id; ++ u16 dpbp_id; ++ u8 priority_mask; + u16 buffer_size; -+ int backup_pool; ++ u8 backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + @@ -8745,6 +9357,10 @@ Signed-off-by: Yangbo Lu + * Enable a-symmetric pause frames + */ +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL ++/** ++ * Enable priority flow control pause frames ++ */ ++#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL + +/** + * struct - Structure representing DPNI link configuration @@ -8894,6 +9510,26 @@ Signed-off-by: Yangbo Lu + u8 *key_cfg_buf); + +/** ++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration ++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with ++ * key extractions to be used as the QoS criteria by calling ++ * dpkg_prepare_key_cfg() ++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); ++ * '0' to use the 'default_tc' in such cases ++ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 ++ */ ++struct dpni_qos_tbl_cfg { ++ u64 key_cfg_iova; ++ int discard_on_miss; ++ u8 default_tc; ++}; ++ ++int dpni_set_qos_table(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_qos_tbl_cfg *cfg); ++ ++/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, @@ -9086,6 +9722,12 @@ Signed-off-by: Yangbo Lu + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) + */ +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 ++/** ++ * This congestion will trigger flow control or priority flow control. ++ * This will have effect only if flow control is enabled with ++ * dpni_set_link_cfg(). ++ */ ++#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 + +/** + * struct dpni_congestion_notification_cfg - congestion notification @@ -9119,6 +9761,14 @@ Signed-off-by: Yangbo Lu + u8 tc_id, + const struct dpni_congestion_notification_cfg *cfg); + ++int dpni_get_congestion_notification( ++ struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ enum dpni_queue_type qtype, ++ u8 tc_id, ++ struct dpni_congestion_notification_cfg *cfg); ++ +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. @@ -9165,6 +9815,22 @@ Signed-off-by: Yangbo Lu + u8 key_size; +}; + ++int dpni_add_qos_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_rule_cfg *cfg, ++ u8 tc_id, ++ u16 index); ++ ++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token, ++ const struct dpni_rule_cfg *cfg); ++ ++int dpni_clear_qos_table(struct fsl_mc_io *mc_io, ++ u32 cmd_flags, ++ u16 token); ++ +/** + * Discard matching traffic. If set, this takes precedence over any other + * configuration and matching traffic is always discarded. 
@@ -15718,7 +16384,7 @@ Signed-off-by: Yangbo Lu + return 0; +} + -+void ethsw_port_get_stats(struct net_device *netdev, ++struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *storage) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); @@ -15778,7 +16444,7 @@ Signed-off-by: Yangbo Lu + if (err) + goto error; + -+ return; ++ return storage; + +error: + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); @@ -19125,7 +19791,7 @@ Signed-off-by: Yangbo Lu + return 0; +} + -+void evb_port_get_stats(struct net_device *netdev, ++struct rtnl_link_stats64 *evb_port_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *storage) +{ + struct evb_port_priv *port_priv = netdev_priv(netdev); @@ -19202,7 +19868,7 @@ Signed-off-by: Yangbo Lu + if (unlikely(err)) + goto error; + -+ return; ++ return storage; + +error: + netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err); @@ -20892,7 +21558,7 @@ Signed-off-by: Yangbo Lu +#endif /* __FSL_DPMAC_H */ --- /dev/null +++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -0,0 +1,666 @@ +@@ -0,0 +1,669 @@ +/* Copyright 2015 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -21014,15 +21680,6 @@ Signed-off-by: Yangbo Lu + dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); +} + -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, -+ struct net_device *dev) -+{ -+ /* we don't support I/O for now, drop the frame */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+} -+ +static int dpaa2_mac_open(struct net_device *netdev) +{ + /* start PHY state machine */ @@ -21047,6 +21704,15 @@ Signed-off-by: Yangbo Lu + return 0; +} + ++#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS ++static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, ++ struct net_device *dev) ++{ ++ /* we don't support I/O for now, drop the frame */ ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++} ++ +static int dpaa2_mac_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ @@ -21207,9 +21873,9 @@ Signed-off-by: Yangbo Lu +} + +static const struct net_device_ops dpaa2_mac_ndo_ops = { -+ .ndo_start_xmit = &dpaa2_mac_drop_frame, + .ndo_open = &dpaa2_mac_open, + .ndo_stop = &dpaa2_mac_stop, ++ .ndo_start_xmit = &dpaa2_mac_drop_frame, + .ndo_get_stats64 = &dpaa2_mac_get_stats, +}; + @@ -21437,10 +22103,9 @@ Signed-off-by: Yangbo Lu + } +#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ + -+ /* probe the PHY as a fixed-link if the link type declared in DPC -+ * explicitly mandates this ++ /* probe the PHY as a fixed-link if there's a phy-handle defined ++ * in the device tree + */ -+ + phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); + if (!phy_node) { + goto probe_fixed_link; @@ -21492,12 +22157,8 @@ Signed-off-by: Yangbo Lu + dev_info(dev, "Registered fixed PHY.\n"); + } + -+ /* start PHY state machine */ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS + dpaa2_mac_open(netdev); -+#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ phy_start(netdev->phydev); -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ ++ + return 0; + +err_defer: @@ -21521,6 +22182,15 @@ Signed-off-by: Yangbo Lu +{ + struct device *dev = &mc_dev->dev; + struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); ++ struct net_device *netdev = priv->netdev; ++ ++ dpaa2_mac_stop(netdev); ++ ++ if (phy_is_pseudo_fixed_link(netdev->phydev)) ++ fixed_phy_unregister(netdev->phydev); ++ else ++ phy_disconnect(netdev->phydev); ++ netdev->phydev = NULL; + +#ifdef 
CONFIG_FSL_DPAA2_MAC_NETDEVS + unregister_netdev(priv->netdev); @@ -21531,7 +22201,6 @@ Signed-off-by: Yangbo Lu + free_netdev(priv->netdev); + + dev_set_drvdata(dev, NULL); -+ kfree(priv); + + return 0; +} diff --git a/target/linux/layerscape/patches-4.9/706-fsl-dpaa-use-4-9-ndo-get-stats64.patch b/target/linux/layerscape/patches-4.9/706-fsl-dpaa-use-4-9-ndo-get-stats64.patch deleted file mode 100644 index 4fa29b29098..00000000000 --- a/target/linux/layerscape/patches-4.9/706-fsl-dpaa-use-4-9-ndo-get-stats64.patch +++ /dev/null @@ -1,112 +0,0 @@ -From: Mathew McBride -Date: Tue, 24 Oct 2017 11:30:00 +1100 -Subject: [PATCH] dpaa: backport use of 4.9 ndo_get_stats64 - -This patch changes the declarations of ndo_get_stats64 handlers -to the previous struct rtnl_link_stats64 * return type instead of -the mainline void return. - -Suggested-by: Adrien Gallouët -Signed-off-by: Mathew McBride - ---- - drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 5 +++-- - drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 4 ++-- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3 ++- - drivers/staging/fsl-dpaa2/ethsw/switch.c | 4 ++-- - drivers/staging/fsl-dpaa2/evb/evb.c | 4 ++-- - 5 files changed, 11 insertions(+), 9 deletions(-) - ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -1296,7 +1296,7 @@ static int dpaa2_eth_set_addr(struct net - /** Fill in counters maintained by the GPP driver. These may be different from - * the hardware counters obtained by ethtool. - */ --static void dpaa2_eth_get_stats(struct net_device *net_dev, -+static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -@@ -1312,6 +1312,7 @@ static void dpaa2_eth_get_stats(struct n - for (j = 0; j < num; j++) - netstats[j] += cpustats[j]; - } -+ return stats; - } - - static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) ---- a/drivers/staging/fsl-dpaa2/ethsw/switch.c -+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c -@@ -1094,7 +1094,7 @@ static int ethsw_port_fdb_del(struct ndm - return 0; - } - --void ethsw_port_get_stats(struct net_device *netdev, -+struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *storage) - { - struct ethsw_port_priv *port_priv = netdev_priv(netdev); -@@ -1154,7 +1154,7 @@ void ethsw_port_get_stats(struct net_dev - if (err) - goto error; - -- return; -+ return storage; - - error: - netdev_err(netdev, "dpsw_if_get_counter err %d\n", err); ---- a/drivers/staging/fsl-dpaa2/evb/evb.c -+++ b/drivers/staging/fsl-dpaa2/evb/evb.c -@@ -765,7 +765,7 @@ static int evb_dellink(struct net_device - return 0; - } - --void evb_port_get_stats(struct net_device *netdev, -+struct rtnl_link_stats64 *evb_port_get_stats(struct net_device *netdev, - struct rtnl_link_stats64 *storage) - { - struct evb_port_priv *port_priv = netdev_priv(netdev); -@@ -842,7 +842,7 @@ void evb_port_get_stats(struct net_devic - if (unlikely(err)) - goto error; - -- return; -+ return storage; - - error: - netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err); ---- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.c -@@ -239,8 +239,8 @@ EXPORT_SYMBOL(dpa_timeout); - * Calculates the statistics for the given device by adding the statistics - * collected by each CPU. 
- */ --void __cold --dpa_get_stats64(struct net_device *net_dev, -+struct rtnl_link_stats64 __cold -+*dpa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *stats) - { - struct dpa_priv_s *priv = netdev_priv(net_dev); -@@ -258,6 +258,7 @@ dpa_get_stats64(struct net_device *net_d - for (j = 0; j < numstats; j++) - netstats[j] += cpustats[j]; - } -+ return stats; - } - EXPORT_SYMBOL(dpa_get_stats64); - ---- a/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h -+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_common.h -@@ -140,8 +140,8 @@ int dpa_netdev_init(struct net_device *n - int __cold dpa_start(struct net_device *net_dev); - int __cold dpa_stop(struct net_device *net_dev); - void __cold dpa_timeout(struct net_device *net_dev); --void __cold --dpa_get_stats64(struct net_device *net_dev, -+struct rtnl_link_stats64 __cold -+*dpa_get_stats64(struct net_device *net_dev, - struct rtnl_link_stats64 *stats); - int dpa_change_mtu(struct net_device *net_dev, int new_mtu); - int dpa_ndo_init(struct net_device *net_dev); diff --git a/target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch b/target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch index 4104272eff4..068f0219d27 100644 --- a/target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch +++ b/target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch @@ -1,9 +1,9 @@ -From 8b7935a883d42187716fe486c83352f24d01ddcd Mon Sep 17 00:00:00 2001 +From 8089957ac5ac5f4f8436b1052dda7840f3bff3ea Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Thu, 19 Oct 2017 12:48:19 +0800 -Subject: [PATCH] fsl_ppfe: support layercape +Date: Wed, 17 Jan 2018 15:14:12 +0800 +Subject: [PATCH 12/30] fsl_ppfe: support layercape -This is a integrated patch for layerscape pfe support. +This is an integrated patch for layerscape pfe support. Calvin Johnson Signed-off-by: Yangbo Lu @@ -25,15 +25,15 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 + drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 + drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 + - drivers/staging/fsl_ppfe/pfe_eth.c | 2434 ++++++++++++++++++++ + drivers/staging/fsl_ppfe/pfe_eth.c | 2474 ++++++++++++++++++++ drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++ drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++ drivers/staging/fsl_ppfe/pfe_firmware.h | 32 + drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++++ drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++++ drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++ - drivers/staging/fsl_ppfe/pfe_hif_lib.c | 601 +++++ - drivers/staging/fsl_ppfe/pfe_hif_lib.h | 239 ++ + drivers/staging/fsl_ppfe/pfe_hif_lib.c | 637 +++++ + drivers/staging/fsl_ppfe/pfe_hif_lib.h | 240 ++ drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++ drivers/staging/fsl_ppfe/pfe_hw.h | 27 + drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 394 ++++ @@ -42,7 +42,7 @@ Signed-off-by: Yangbo Lu drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 + drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 +++++++ drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 + - 34 files changed, 10366 insertions(+) + 34 files changed, 10443 insertions(+) create mode 100644 drivers/staging/fsl_ppfe/Kconfig create mode 100644 drivers/staging/fsl_ppfe/Makefile create mode 100644 drivers/staging/fsl_ppfe/TODO @@ -2159,7 +2159,7 @@ Signed-off-by: Yangbo Lu +#endif /* _PFE_DEBUGFS_H_ */ --- /dev/null +++ b/drivers/staging/fsl_ppfe/pfe_eth.c -@@ -0,0 +1,2434 @@ +@@ -0,0 +1,2474 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017 NXP @@ -2455,10 +2455,10 @@ Signed-off-by: Yangbo Lu + /* Initialize the default values */ + + /* -+ * By default, packets without conntrack will use this default high ++ * By default, packets without conntrack will use this default low + * priority queue + */ -+ priv->default_priority = 15; ++ priv->default_priority = 0; + + /* Create our sysfs files */ + err = device_create_file(&ndev->dev, &dev_attr_default_priority); @@ -2739,7 +2739,9 @@ Signed-off-by: Yangbo Lu + if (!phydev) + return -ENODEV; + -+ return phy_ethtool_ksettings_get(phydev, cmd); ++ phy_ethtool_ksettings_get(phydev, cmd); ++ ++ return 0; +} + +/* @@ -3083,7 +3085,8 @@ Signed-off-by: Yangbo Lu + struct ls1012a_mdio_platform_data *minfo) +{ + struct mii_bus *bus; -+ int rc; ++ int rc, ii; ++ struct phy_device *phydev; + + netif_info(priv, drv, priv->ndev, "%s\n", __func__); + pr_info("%s\n", __func__); @@ -3122,6 +3125,31 @@ Signed-off-by: Yangbo Lu + } + + priv->mii_bus = bus; ++ ++ /* For clause 45 we need to call get_phy_device() with it's ++ * 3rd argument as true and then register the phy device ++ * via phy_device_register() ++ */ ++ ++ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) { ++ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) { ++ phydev = get_phy_device(priv->mii_bus, ++ priv->einfo->phy_id + ii, true); ++ if (!phydev || IS_ERR(phydev)) { ++ rc = -EIO; ++ netdev_err(priv->ndev, "fail to get device\n"); ++ goto err1; ++ } ++ rc = phy_device_register(phydev); ++ if (rc) { ++ phy_device_free(phydev); ++ netdev_err(priv->ndev, ++ "phy_device_register() failed\n"); ++ goto err1; ++ } ++ } ++ } ++ + pfe_eth_mdio_reset(bus); + + return 0; @@ -3307,8 +3335,9 @@ Signed-off-by: Yangbo Lu + struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0]; + int sgmii_2500 = 0; + struct mii_bus *bus = priv->mii_bus; ++ u16 value = 0; + -+ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500) ++ if (priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII) + sgmii_2500 = 1; + + netif_info(priv, drv, ndev, "%s\n", __func__); @@ -3324,14 +3353,16 @@ Signed-off-by: Yangbo Lu + pfe_eth_mdio_write(bus, 0, 0x4, 0x4001); + pfe_eth_mdio_write(bus, 0, 0x12, 0xa120); + pfe_eth_mdio_write(bus, 0, 0x13, 0x7); ++ /* Autonegotiation need to be disabled for 2.5G SGMII mode*/ ++ value = 0x0140; ++ pfe_eth_mdio_write(bus, 0, 0x0, value); + } else { + pfe_eth_mdio_write(bus, 0, 0x14, 0xb); + pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1); + pfe_eth_mdio_write(bus, 0, 0x12, 0x400); + pfe_eth_mdio_write(bus, 0, 0x13, 0x0); ++ pfe_eth_mdio_write(bus, 0, 0x0, 0x1140); + } -+ -+ pfe_eth_mdio_write(bus, 0, 0x0, 0x1140); +} + +/* @@ -3357,7 +3388,7 @@ Signed-off-by: Yangbo Lu + netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id); + interface = priv->einfo->mii_config; + if ((interface == PHY_INTERFACE_MODE_SGMII) || -+ (interface == PHY_INTERFACE_MODE_SGMII_2500)) { ++ (interface == PHY_INTERFACE_MODE_2500SGMII)) { + /*Configure SGMII PCS */ + if (pfe->scfg) { + /*Config MDIO from serdes */ @@ -3725,10 +3756,17 @@ Signed-off-by: Yangbo Lu + unsigned int n_segs) +{ + ktime_t kt; ++ int tried = 0; + ++try_again: + if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) || -+ (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) || ++ (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) || + (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) { ++ if (!tried) { ++ __hif_lib_update_credit(&priv->client, queuenum); ++ tried = 1; ++ goto try_again; ++ } +#ifdef PFE_ETH_TX_STATS + if (__hif_tx_avail(&pfe->hif) < n_desc) { + 
priv->stop_queue_hif[queuenum]++; @@ -3851,8 +3889,10 @@ Signed-off-by: Yangbo Lu + + netif_info(priv, tx_done, priv->ndev, "%s\n", __func__); + -+ for (ii = 0; ii < emac_txq_cnt; ii++) ++ for (ii = 0; ii < emac_txq_cnt; ii++) { + pfe_eth_flush_txQ(priv, ii, 0, 0); ++ __hif_lib_update_credit(&priv->client, ii); ++ } +} + +void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int @@ -7943,7 +7983,7 @@ Signed-off-by: Yangbo Lu +#endif /* _PFE_HIF_H_ */ --- /dev/null +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c -@@ -0,0 +1,601 @@ +@@ -0,0 +1,637 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP @@ -7980,7 +8020,10 @@ Signed-off-by: Yangbo Lu + +unsigned int lro_mode; +unsigned int page_mode; -+unsigned int tx_qos; ++unsigned int tx_qos = 1; ++module_param(tx_qos, uint, 0444); ++MODULE_PARM_DESC(tx_qos, "0: disable ,\n" ++ "1: enable (default), guarantee no packet drop at TMU level\n"); +unsigned int pfe_pkt_size; +unsigned int pfe_pkt_headroom; +unsigned int emac_txq_cnt; @@ -8511,6 +8554,39 @@ Signed-off-by: Yangbo Lu + } +} + ++/* __hif_lib_update_credit ++ * ++ * @param[in] client hif client context ++ * @param[in] queue queue number in match with TMU ++ */ ++void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue) ++{ ++ unsigned int tmu_tx_packets, tmp; ++ ++ if (tx_qos) { ++ tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID + ++ client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4)); ++ ++ /* tx_packets counter overflowed */ ++ if (tmu_tx_packets > ++ pfe->tmu_credit.tx_packets[client->id][queue]) { ++ tmp = UINT_MAX - tmu_tx_packets + ++ pfe->tmu_credit.tx_packets[client->id][queue]; ++ ++ pfe->tmu_credit.tx_credit[client->id][queue] = ++ pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp; ++ } else { ++ /* TMU tx <= pfe_eth tx, normal case or both OF since ++ * last time ++ */ ++ pfe->tmu_credit.tx_credit[client->id][queue] = ++ pfe->tmu_credit.tx_credit_max[client->id][queue] - ++ (pfe->tmu_credit.tx_packets[client->id][queue] - ++ tmu_tx_packets); ++ } ++ } ++} ++ +int pfe_hif_lib_init(struct pfe *pfe) +{ + int rc; @@ -8547,7 +8623,7 @@ Signed-off-by: Yangbo Lu +} --- /dev/null +++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h -@@ -0,0 +1,239 @@ +@@ -0,0 +1,240 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP @@ -8735,6 +8811,7 @@ Signed-off-by: Yangbo Lu +void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int + *ofst, unsigned int *rx_ctrl, + unsigned int *desc_ctrl, void **priv_data); ++void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue); +void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id); +void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int + enable); diff --git a/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch index 0c1cd1bf84d..e9d34d36cc7 100644 --- a/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/801-ata-support-layerscape.patch @@ -1,9 +1,9 @@ -From 505eb62bdb7a4cc25b13491dd5c68d0741c5d6da Mon Sep 17 00:00:00 2001 +From 4c3979602db05bca439bfc98db88dc14a8663db0 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:21:13 +0800 -Subject: [PATCH] ata: support layerscape +Date: Wed, 17 Jan 2018 15:14:57 +0800 +Subject: [PATCH 13/30] ata: support layerscape -This is a integrated patch for layerscape sata support. 
+This is an integrated patch for layerscape sata support. Signed-off-by: Tang Yuantian Signed-off-by: Yangbo Lu diff --git a/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch index 2f7d6f84781..0d05dc78abd 100644 --- a/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/802-clk-support-layerscape.patch @@ -1,17 +1,17 @@ -From bd3df6d053a28d5aa630524c9087c21def30e764 Mon Sep 17 00:00:00 2001 +From 82a391a067491f4c46b75d0dfe2bf9e5a11aca8e Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:09:35 +0800 -Subject: [PATCH] clk: support layerscape +Date: Wed, 17 Jan 2018 15:15:44 +0800 +Subject: [PATCH 14/30] clk: support layerscape -This is a integrated patch for layerscape clock support. +This is an integrated patch for layerscape clock support. Signed-off-by: Yuantian Tang Signed-off-by: Mingkai Hu Signed-off-by: Scott Wood Signed-off-by: Yangbo Lu --- - drivers/clk/clk-qoriq.c | 170 ++++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 156 insertions(+), 14 deletions(-) + drivers/clk/clk-qoriq.c | 179 ++++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 164 insertions(+), 15 deletions(-) --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -23,6 +23,15 @@ Signed-off-by: Yangbo Lu #include #include #include +@@ -40,7 +41,7 @@ struct clockgen_pll_div { + }; + + struct clockgen_pll { +- struct clockgen_pll_div div[4]; ++ struct clockgen_pll_div div[8]; + }; + + #define CLKSEL_VALID 1 @@ -87,7 +88,7 @@ struct clockgen { struct device_node *node; void __iomem *regs; @@ -244,11 +253,18 @@ Signed-off-by: Yangbo Lu if (cg->info.flags & CG_VER3) { switch (idx) { case PLATFORM_PLL: -@@ -1000,12 +1125,13 @@ static void __init create_one_pll(struct +@@ -1000,12 +1125,20 @@ static void __init create_one_pll(struct for (i = 0; i < ARRAY_SIZE(pll->div); i++) { struct clk *clk; + int ret; ++ ++ /* ++ * For platform PLL, there are 8 divider clocks. ++ * For core PLL, there are 4 divider clocks at most. 
++ */ ++ if (idx != 0 && i >= 4) ++ break; snprintf(pll->div[i].name, sizeof(pll->div[i].name), "cg-pll%d-div%d", idx, i + 1); @@ -259,7 +275,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(clk)) { pr_err("%s: %s: register failed %ld\n", __func__, pll->div[i].name, PTR_ERR(clk)); -@@ -1013,6 +1139,11 @@ static void __init create_one_pll(struct +@@ -1013,6 +1146,11 @@ static void __init create_one_pll(struct } pll->div[i].clk = clk; @@ -271,7 +287,7 @@ Signed-off-by: Yangbo Lu } } -@@ -1142,6 +1273,13 @@ static struct clk *clockgen_clk_get(stru +@@ -1142,6 +1280,13 @@ static struct clk *clockgen_clk_get(stru goto bad_args; clk = pll->div[idx].clk; break; @@ -285,7 +301,7 @@ Signed-off-by: Yangbo Lu default: goto bad_args; } -@@ -1253,6 +1391,7 @@ static void __init clockgen_init(struct +@@ -1253,6 +1398,7 @@ static void __init clockgen_init(struct clockgen.info.flags |= CG_CMUX_GE_PLAT; clockgen.sysclk = create_sysclk("cg-sysclk"); @@ -293,7 +309,7 @@ Signed-off-by: Yangbo Lu create_plls(&clockgen); create_muxes(&clockgen); -@@ -1273,8 +1412,11 @@ err: +@@ -1273,8 +1419,11 @@ err: CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); diff --git a/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch index 2bc0f24f720..2da45b6cf0d 100644 --- a/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/803-cpufreq-support-layerscape.patch @@ -1,9 +1,9 @@ -From a9ebdf9fa18fd317a4e97f46e8c5263898094864 Mon Sep 17 00:00:00 2001 +From b018e44a68dc2f4df819ae194e39e07313841dad Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:20:10 +0800 -Subject: [PATCH] cpufreq: support layerscape +Date: Wed, 17 Jan 2018 15:27:58 +0800 +Subject: [PATCH 15/30] cpufreq: support layerscape -This is a integrated patch for layerscape pm support. +This is an integrated patch for layerscape pm support. Signed-off-by: Tang Yuantian Signed-off-by: Yangbo Lu @@ -11,7 +11,9 @@ Signed-off-by: Yangbo Lu drivers/cpufreq/Kconfig | 2 +- drivers/cpufreq/qoriq-cpufreq.c | 176 +++++++++++++++------------------------- drivers/firmware/psci.c | 12 ++- - 3 files changed, 77 insertions(+), 113 deletions(-) + drivers/soc/fsl/rcpm.c | 158 ++++++++++++++++++++++++++++++++++++ + 4 files changed, 235 insertions(+), 113 deletions(-) + create mode 100644 drivers/soc/fsl/rcpm.c --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -359,3 +361,164 @@ Signed-off-by: Yangbo Lu } /* +--- /dev/null ++++ b/drivers/soc/fsl/rcpm.c +@@ -0,0 +1,158 @@ ++/* ++ * Run Control and Power Management (RCPM) driver ++ * ++ * Copyright 2016 NXP ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#define pr_fmt(fmt) "RCPM: %s: " fmt, __func__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/* RCPM register offset */ ++#define RCPM_IPPDEXPCR0 0x140 ++ ++#define RCPM_WAKEUP_CELL_SIZE 2 ++ ++struct rcpm_config { ++ int ipp_num; ++ int ippdexpcr_offset; ++ u32 ippdexpcr[2]; ++ void *rcpm_reg_base; ++}; ++ ++static struct rcpm_config *rcpm; ++ ++static inline void rcpm_reg_write(u32 offset, u32 value) ++{ ++ iowrite32be(value, rcpm->rcpm_reg_base + offset); ++} ++ ++static inline u32 rcpm_reg_read(u32 offset) ++{ ++ return ioread32be(rcpm->rcpm_reg_base + offset); ++} ++ ++static void rcpm_wakeup_fixup(struct device *dev, void *data) ++{ ++ struct device_node *node = dev ? dev->of_node : NULL; ++ u32 value[RCPM_WAKEUP_CELL_SIZE]; ++ int ret, i; ++ ++ if (!dev || !node || !device_may_wakeup(dev)) ++ return; ++ ++ /* ++ * Get the values in the "rcpm-wakeup" property. ++ * Three values are: ++ * The first is a pointer to the RCPM node. ++ * The second is the value of the ippdexpcr0 register. ++ * The third is the value of the ippdexpcr1 register. ++ */ ++ ret = of_property_read_u32_array(node, "fsl,rcpm-wakeup", ++ value, RCPM_WAKEUP_CELL_SIZE); ++ if (ret) ++ return; ++ ++ pr_debug("wakeup source: the device %s\n", node->full_name); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) ++ rcpm->ippdexpcr[i] |= value[i + 1]; ++} ++ ++static int rcpm_suspend_prepare(void) ++{ ++ int i; ++ u32 val; ++ ++ BUG_ON(!rcpm); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) ++ rcpm->ippdexpcr[i] = 0; ++ ++ dpm_for_each_dev(NULL, rcpm_wakeup_fixup); ++ ++ for (i = 0; i < rcpm->ipp_num; i++) { ++ if (rcpm->ippdexpcr[i]) { ++ val = rcpm_reg_read(rcpm->ippdexpcr_offset + 4 * i); ++ rcpm_reg_write(rcpm->ippdexpcr_offset + 4 * i, ++ val | rcpm->ippdexpcr[i]); ++ pr_debug("ippdexpcr%d = 0x%x\n", i, rcpm->ippdexpcr[i]); ++ } ++ } ++ ++ return 0; ++} ++ ++static int rcpm_suspend_notifier_call(struct notifier_block *bl, ++ unsigned long state, ++ void *unused) ++{ ++ switch (state) { ++ case PM_SUSPEND_PREPARE: ++ rcpm_suspend_prepare(); ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++ ++static struct rcpm_config rcpm_default_config = { ++ .ipp_num = 1, ++ .ippdexpcr_offset = RCPM_IPPDEXPCR0, ++}; ++ ++static const struct of_device_id rcpm_matches[] = { ++ { ++ .compatible = "fsl,qoriq-rcpm-2.1", ++ .data = &rcpm_default_config, ++ }, ++ {} ++}; ++ ++static struct notifier_block rcpm_suspend_notifier = { ++ .notifier_call = rcpm_suspend_notifier_call, ++}; ++ ++static int __init layerscape_rcpm_init(void) ++{ ++ const struct of_device_id *match; ++ struct device_node *np; ++ ++ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match); ++ if (!np) { ++ pr_err("Can't find the RCPM node.\n"); ++ return -EINVAL; ++ } ++ ++ if (match->data) ++ rcpm = (struct rcpm_config *)match->data; ++ else ++ return -EINVAL; ++ ++ rcpm->rcpm_reg_base = of_iomap(np, 0); ++ of_node_put(np); ++ if (!rcpm->rcpm_reg_base) ++ return -ENOMEM; ++ ++ register_pm_notifier(&rcpm_suspend_notifier); ++ ++ pr_info("The RCPM driver initialized.\n"); ++ ++ return 0; ++} ++ ++subsys_initcall(layerscape_rcpm_init); diff --git a/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch index 6deb5f97565..c456eb862cd 100644 --- a/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch @@ -1,12 +1,12 @@ -From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon 
Sep 17 00:00:00 2001 +From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 27 Sep 2017 15:02:01 +0800 -Subject: [PATCH] crypto: support layerscape +Date: Wed, 17 Jan 2018 15:29:23 +0800 +Subject: [PATCH 16/30] crypto: support layerscape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -This is a integrated patch for layerscape sec support. +This is an integrated patch for layerscape sec support. Signed-off-by: Radu Alexe Signed-off-by: Fabio Estevam @@ -36,39 +36,41 @@ Signed-off-by: Yangbo Lu crypto/crypto_user.c | 19 + crypto/scompress.c | 356 ++ crypto/tcrypt.c | 17 +- - crypto/testmgr.c | 1701 ++++---- - crypto/testmgr.h | 1125 +++--- + crypto/testmgr.c | 1708 +++--- + crypto/testmgr.h | 1125 ++-- crypto/tls.c | 607 +++ - drivers/crypto/caam/Kconfig | 72 +- - drivers/crypto/caam/Makefile | 15 +- - drivers/crypto/caam/caamalg.c | 2125 +++------- - drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++ + drivers/crypto/caam/Kconfig | 77 +- + drivers/crypto/caam/Makefile | 16 +- + drivers/crypto/caam/caamalg.c | 2171 ++------ + drivers/crypto/caam/caamalg_desc.c | 1961 +++++++ drivers/crypto/caam/caamalg_desc.h | 127 + - drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++ - drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++ - drivers/crypto/caam/caamalg_qi2.h | 265 ++ - drivers/crypto/caam/caamhash.c | 521 +-- - drivers/crypto/caam/caampkc.c | 471 ++- + drivers/crypto/caam/caamalg_qi.c | 2929 ++++++++++ + drivers/crypto/caam/caamalg_qi2.c | 5920 +++++++++++++++++++++ + drivers/crypto/caam/caamalg_qi2.h | 281 + + drivers/crypto/caam/caamhash.c | 550 +- + drivers/crypto/caam/caamhash_desc.c | 108 + + drivers/crypto/caam/caamhash_desc.h | 49 + + drivers/crypto/caam/caampkc.c | 471 +- drivers/crypto/caam/caampkc.h | 58 + drivers/crypto/caam/caamrng.c | 16 +- drivers/crypto/caam/compat.h | 1 + - drivers/crypto/caam/ctrl.c | 356 +- + drivers/crypto/caam/ctrl.c | 358 +- drivers/crypto/caam/ctrl.h | 2 + - drivers/crypto/caam/desc.h | 55 +- - drivers/crypto/caam/desc_constr.h | 139 +- - drivers/crypto/caam/dpseci.c | 859 ++++ + drivers/crypto/caam/desc.h | 84 +- + drivers/crypto/caam/desc_constr.h | 180 +- + drivers/crypto/caam/dpseci.c | 859 +++ drivers/crypto/caam/dpseci.h | 395 ++ - drivers/crypto/caam/dpseci_cmd.h | 261 ++ + drivers/crypto/caam/dpseci_cmd.h | 261 + drivers/crypto/caam/error.c | 127 +- drivers/crypto/caam/error.h | 10 +- drivers/crypto/caam/intern.h | 31 +- - drivers/crypto/caam/jr.c | 97 +- + drivers/crypto/caam/jr.c | 72 +- drivers/crypto/caam/jr.h | 2 + drivers/crypto/caam/key_gen.c | 32 +- drivers/crypto/caam/key_gen.h | 36 +- drivers/crypto/caam/pdb.h | 62 + drivers/crypto/caam/pkc_desc.c | 36 + - drivers/crypto/caam/qi.c | 797 ++++ + drivers/crypto/caam/qi.c | 797 +++ drivers/crypto/caam/qi.h | 204 + drivers/crypto/caam/regs.h | 63 +- drivers/crypto/caam/sg_sw_qm.h | 126 + @@ -77,14 +79,14 @@ Signed-off-by: Yangbo Lu drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +- drivers/staging/wilc1000/linux_wlan.c | 2 +- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +- - include/crypto/acompress.h | 269 ++ + include/crypto/acompress.h | 269 + include/crypto/internal/acompress.h | 81 + include/crypto/internal/scompress.h | 136 + include/linux/crypto.h | 3 + include/uapi/linux/cryptouser.h | 5 + scripts/spelling.txt | 3 + sound/soc/amd/acp-pcm-dma.c | 2 +- - 55 files changed, 17310 insertions(+), 3955 deletions(-) + 57 files changed, 19177 insertions(+), 3988 deletions(-) create mode 
100644 crypto/acompress.c create mode 100644 crypto/scompress.c create mode 100644 crypto/tls.c @@ -93,6 +95,8 @@ Signed-off-by: Yangbo Lu create mode 100644 drivers/crypto/caam/caamalg_qi.c create mode 100644 drivers/crypto/caam/caamalg_qi2.c create mode 100644 drivers/crypto/caam/caamalg_qi2.h + create mode 100644 drivers/crypto/caam/caamhash_desc.c + create mode 100644 drivers/crypto/caam/caamhash_desc.h create mode 100644 drivers/crypto/caam/dpseci.c create mode 100644 drivers/crypto/caam/dpseci.h create mode 100644 drivers/crypto/caam/dpseci_cmd.h @@ -1257,7 +1261,31 @@ Signed-off-by: Yangbo Lu const bool diff_dst, const int align_offset) { const char *algo = -@@ -1330,7 +1571,8 @@ out_nobuf: +@@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto + const char *e, *d; + struct tcrypt_result result; + void *data; +- char iv[MAX_IVLEN]; ++ char *iv; + char *xbuf[XBUFSIZE]; + char *xoutbuf[XBUFSIZE]; + int ret = -ENOMEM; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + ++ iv = kmalloc(MAX_IVLEN, GFP_KERNEL); ++ if (!iv) ++ return ret; ++ + if (testmgr_alloc_buf(xbuf)) + goto out_nobuf; + +@@ -1325,12 +1570,14 @@ out: + testmgr_free_buf(xoutbuf); + out_nooutbuf: + testmgr_free_buf(xbuf); ++ kfree(iv); + out_nobuf: + return ret; } static int test_skcipher(struct crypto_skcipher *tfm, int enc, @@ -1267,7 +1295,7 @@ Signed-off-by: Yangbo Lu { unsigned int alignmask; int ret; -@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s +@@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s return 0; } @@ -1280,7 +1308,7 @@ Signed-off-by: Yangbo Lu { const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); unsigned int i; -@@ -1442,7 +1686,154 @@ out: +@@ -1442,7 +1691,154 @@ out: return ret; } @@ -1436,7 +1464,7 @@ Signed-off-by: Yangbo Lu unsigned int tcount) { const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm)); -@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al +@@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al struct crypto_aead *tfm; int err = 0; @@ -1445,7 +1473,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { printk(KERN_ERR "alg: aead: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); -@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct +@@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct struct crypto_cipher *tfm; int err = 0; @@ -1454,7 +1482,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { printk(KERN_ERR "alg: cipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); -@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc +@@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc struct crypto_skcipher *tfm; int err = 0; @@ -1463,7 +1491,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { printk(KERN_ERR "alg: skcipher: Failed to load transform for " "%s: %ld\n", driver, PTR_ERR(tfm)); -@@ -1593,22 +1984,38 @@ out: +@@ -1593,22 +1989,38 @@ out: static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -1514,7 +1542,7 @@ Signed-off-by: Yangbo Lu return err; } -@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al +@@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al struct crypto_ahash *tfm; int err; @@ -1523,7 +1551,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { printk(KERN_ERR "alg: hash: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); -@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct +@@ -1646,7 +2058,7 @@ static int 
alg_test_crc32c(const struct if (err) goto out; @@ -1532,7 +1560,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); -@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a +@@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a struct crypto_rng *rng; int err; @@ -1541,7 +1569,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(rng)) { printk(KERN_ERR "alg: cprng: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(rng)); -@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a +@@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a } @@ -1550,7 +1578,7 @@ Signed-off-by: Yangbo Lu const char *driver, u32 type, u32 mask) { int ret = -EAGAIN; -@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te +@@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te if (!buf) return -ENOMEM; @@ -1559,7 +1587,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(drng)) { printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for " "%s\n", driver); -@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al +@@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al int err = 0; int pr = 0; int i = 0; @@ -1568,7 +1596,7 @@ Signed-off-by: Yangbo Lu unsigned int tcount = desc->suite.drbg.count; if (0 == memcmp(driver, "drbg_pr_", 8)) -@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al +@@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al } @@ -1577,7 +1605,7 @@ Signed-off-by: Yangbo Lu const char *alg) { struct kpp_request *req; -@@ -1888,7 +2295,7 @@ free_req: +@@ -1888,7 +2300,7 @@ free_req: } static int test_kpp(struct crypto_kpp *tfm, const char *alg, @@ -1586,7 +1614,7 @@ Signed-off-by: Yangbo Lu { int ret, i; -@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg +@@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg struct crypto_kpp *tfm; int err = 0; @@ -1595,7 +1623,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { pr_err("alg: kpp: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); -@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg +@@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg } static int test_akcipher_one(struct crypto_akcipher *tfm, @@ -1604,7 +1632,7 @@ Signed-off-by: Yangbo Lu { char *xbuf[XBUFSIZE]; struct akcipher_request *req; -@@ -2044,7 +2451,8 @@ free_xbuf: +@@ -2044,7 +2456,8 @@ free_xbuf: } static int test_akcipher(struct crypto_akcipher *tfm, const char *alg, @@ -1614,7 +1642,7 @@ Signed-off-by: Yangbo Lu { const char *algo = crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm)); -@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc +@@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc struct crypto_akcipher *tfm; int err = 0; @@ -1623,7 +1651,7 @@ Signed-off-by: Yangbo Lu if (IS_ERR(tfm)) { pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n", driver, PTR_ERR(tfm)); -@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al +@@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al return 0; } @@ -1741,7 +1769,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te +@@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1755,7 +1783,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te +@@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { 
.aead = { @@ -1769,7 +1797,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te +@@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1783,7 +1811,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te +@@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1804,7 +1832,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te +@@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1818,7 +1846,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te +@@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1832,7 +1860,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te +@@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1846,7 +1874,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te +@@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1860,7 +1888,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te +@@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1874,7 +1902,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te +@@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1888,7 +1916,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te +@@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1902,7 +1930,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te +@@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1916,7 +1944,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te +@@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -1930,7 +1958,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te +@@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -1944,7 +1972,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te +@@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -1961,7 +1989,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te +@@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -1978,7 +2006,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te +@@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -1995,7 +2023,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2474,14 +2700,8 @@ static const struct 
alg_test_desc alg_te +@@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2012,7 +2040,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te +@@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2029,7 +2057,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te +@@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2046,7 +2074,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te +@@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2063,7 +2091,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te +@@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -2080,7 +2108,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te +@@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2097,7 +2125,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te +@@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2139,7 +2167,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te +@@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2156,7 +2184,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te +@@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .test = alg_test_hash, .suite = { @@ -2179,7 +2207,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "compress_null", -@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te +@@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te .alg = "crc32", .test = alg_test_hash, .suite = { @@ -2279,7 +2307,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te +@@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2296,7 +2324,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te +@@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2313,7 +2341,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te +@@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2330,7 +2358,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te +@@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2347,7 +2375,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te +@@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2382,7 +2410,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2818,14 +2892,8 @@ static 
const struct alg_test_desc alg_te +@@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2399,7 +2427,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te +@@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2416,7 +2444,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te +@@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2433,7 +2461,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te +@@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .comp = { @@ -2450,7 +2478,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te +@@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te .test = alg_test_kpp, .fips_allowed = 1, .suite = { @@ -2462,7 +2490,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "digest_null", -@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te +@@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2496,7 +2524,7 @@ Signed-off-by: Yangbo Lu } }, { /* -@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te +@@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2509,7 +2537,7 @@ Signed-off-by: Yangbo Lu } }, { /* covered by drbg_nopr_hmac_sha256 test */ -@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te +@@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2521,7 +2549,7 @@ Signed-off-by: Yangbo Lu } }, { /* covered by drbg_nopr_sha256 test */ -@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te +@@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2533,7 +2561,7 @@ Signed-off-by: Yangbo Lu } }, { /* covered by drbg_pr_ctr_aes128 test */ -@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te +@@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2545,7 +2573,7 @@ Signed-off-by: Yangbo Lu } }, { /* covered by drbg_pr_hmac_sha256 test */ -@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te +@@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te .test = alg_test_drbg, .fips_allowed = 1, .suite = { @@ -2557,7 +2585,7 @@ Signed-off-by: Yangbo Lu } }, { /* covered by drbg_pr_sha256 test */ -@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te +@@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .test = alg_test_null, }, { @@ -2583,7 +2611,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te +@@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2600,7 +2628,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te +@@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2617,7 +2645,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3088,14 +3088,8 @@ static const struct 
alg_test_desc alg_te +@@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2634,7 +2662,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te +@@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2651,7 +2679,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te +@@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2668,7 +2696,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te +@@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2685,7 +2713,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te +@@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2702,7 +2730,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te +@@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -2719,7 +2747,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te +@@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2736,7 +2764,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te +@@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2753,7 +2781,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te +@@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2770,7 +2798,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te +@@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2787,7 +2815,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te +@@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2804,7 +2832,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te +@@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2821,7 +2849,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te +@@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2838,7 +2866,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te +@@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -2855,7 +2883,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te +@@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te .test = alg_test_kpp, .fips_allowed = 1, .suite = { @@ -2867,7 +2895,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "gcm(aes)", -@@ -3328,14 +3241,8 @@ static const struct 
alg_test_desc alg_te +@@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -2884,7 +2912,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te +@@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -3035,7 +3063,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "jitterentropy_rng", -@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te +@@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -3052,7 +3080,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te +@@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3069,7 +3097,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te +@@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3086,7 +3114,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te +@@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3103,7 +3131,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te +@@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3120,7 +3148,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te +@@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3137,7 +3165,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te +@@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .comp = { @@ -3154,7 +3182,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te +@@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .comp = { @@ -3171,7 +3199,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te +@@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .comp = { @@ -3219,7 +3247,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "ofb(aes)", -@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te +@@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -3236,7 +3264,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te +@@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3264,7 +3292,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "rfc3686(ctr(aes))", -@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te +@@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -3281,7 +3309,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te +@@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -3298,7 +3326,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3722,14 +3497,8 @@ static const struct 
alg_test_desc alg_te +@@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .aead = { @@ -3315,7 +3343,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te +@@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -3332,7 +3360,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te +@@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -3349,7 +3377,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te +@@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te .test = alg_test_aead, .suite = { .aead = { @@ -3429,7 +3457,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te +@@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te .test = alg_test_hash, .fips_allowed = 1, .suite = { @@ -3617,7 +3645,7 @@ Signed-off-by: Yangbo Lu } }, { .alg = "xts(aes)", -@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te +@@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te .fips_allowed = 1, .suite = { .cipher = { @@ -3634,7 +3662,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te +@@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3651,7 +3679,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te +@@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3668,7 +3696,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te +@@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -3685,7 +3713,7 @@ Signed-off-by: Yangbo Lu } } }, { -@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te +@@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te .test = alg_test_skcipher, .suite = { .cipher = { @@ -6987,7 +7015,7 @@ Signed-off-by: Yangbo Lu default y select CRYPTO_RNG select HW_RANDOM -@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API +@@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API To compile this as a module, choose M here: the module will be called caamrng. @@ -7011,6 +7039,7 @@ Signed-off-by: Yangbo Lu + select CRYPTO_BLKCIPHER + select CRYPTO_AUTHENC + select CRYPTO_AEAD ++ select CRYPTO_HASH + ---help--- + CAAM driver for QorIQ Data Path Acceleration Architecture 2. 
+ It handles DPSECI DPAA2 objects that sit on the Management Complex @@ -7023,9 +7052,13 @@ Signed-off-by: Yangbo Lu + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ + CRYPTO_DEV_FSL_DPAA2_CAAM) ++ ++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC ++ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \ ++ CRYPTO_DEV_FSL_DPAA2_CAAM) --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile -@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG +@@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG ccflags-y := -DDEBUG endif @@ -7038,6 +7071,7 @@ Signed-off-by: Yangbo Lu +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o ++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o @@ -7166,7 +7200,7 @@ Signed-off-by: Yangbo Lu bool rfc3686; bool geniv; }; -@@ -163,302 +96,67 @@ struct caam_aead_alg { +@@ -163,302 +96,70 @@ struct caam_aead_alg { bool registered; }; @@ -7308,6 +7342,7 @@ Signed-off-by: Yangbo Lu struct device *jrdev = ctx->jrdev; - bool keys_fit_inline = false; - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - + ctx->adata.keylen_pad; @@ -7394,7 +7429,8 @@ Signed-off-by: Yangbo Lu - DUMP_PREFIX_ADDRESS, 16, 4, desc, - desc_bytes(desc), 1); -#endif -+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); ++ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, ++ ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), DMA_TO_DEVICE); @@ -7490,18 +7526,20 @@ Signed-off-by: Yangbo Lu - desc_bytes(desc), 1); -#endif + desc = ctx->sh_desc_dec; -+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); ++ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, ++ ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), DMA_TO_DEVICE); return 0; } -@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt +@@ -470,11 +171,12 @@ static int aead_set_sh_desc(struct crypt unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; - bool keys_fit_inline; - u32 geniv, moveiv; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 ctx1_iv_off = 0; - u32 *desc; - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == @@ -7512,7 +7550,7 @@ Signed-off-by: Yangbo Lu OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; -@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt +@@ -482,7 +184,7 @@ static int aead_set_sh_desc(struct crypt return 0; /* NULL encryption / decryption */ @@ -7521,7 +7559,7 @@ Signed-off-by: Yangbo Lu return aead_null_set_sh_desc(aead); /* -@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt +@@ -497,8 +199,14 @@ static int aead_set_sh_desc(struct crypt * RFC3686 specific: * CONTEXT1[255:128] = {NONCE, IV, COUNTER} */ @@ -7537,7 +7575,7 @@ Signed-off-by: Yangbo Lu if (alg->caam.geniv) goto skip_enc; -@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt +@@ -507,146 +215,64 @@ static int aead_set_sh_desc(struct crypt * Job Descriptor and Shared Descriptors 
* must all fit into the 64-word Descriptor h/w Buffer */ @@ -7557,33 +7595,31 @@ Signed-off-by: Yangbo Lu - /* Class 2 operation */ - append_operation(desc, ctx->class2_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); +- +- /* Read and write assoclen bytes */ +- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); +- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + if (desc_inline_query(DESC_AEAD_ENC_LEN + + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, + ARRAY_SIZE(data_len)) < 0) + return -EINVAL; -- /* Read and write assoclen bytes */ -- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); -- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); +- /* Skip assoc data */ +- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + if (inl_mask & 1) + ctx->adata.key_virt = ctx->key; + else + ctx->adata.key_dma = ctx->key_dma; -- /* Skip assoc data */ -- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); +- /* read assoc before reading payload */ +- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | +- FIFOLDST_VLF); + if (inl_mask & 2) + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; + else + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; -- /* read assoc before reading payload */ -- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | -- FIFOLDST_VLF); -+ ctx->adata.key_inline = !!(inl_mask & 1); -+ ctx->cdata.key_inline = !!(inl_mask & 2); - - /* Load Counter into CONTEXT1 reg */ - if (is_rfc3686) - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | @@ -7603,7 +7639,9 @@ Signed-off-by: Yangbo Lu - /* Write ICV */ - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | - LDST_SRCDST_BYTE_CONTEXT); -- ++ ctx->adata.key_inline = !!(inl_mask & 1); ++ ctx->cdata.key_inline = !!(inl_mask & 2); + - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); @@ -7620,7 +7658,7 @@ Signed-off-by: Yangbo Lu + desc = ctx->sh_desc_enc; + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, -+ false); ++ false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), DMA_TO_DEVICE); @@ -7720,13 +7758,13 @@ Signed-off-by: Yangbo Lu + desc = ctx->sh_desc_dec; + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, alg->caam.geniv, is_rfc3686, -+ nonce, ctx1_iv_off, false); ++ nonce, ctx1_iv_off, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, + desc_bytes(desc), DMA_TO_DEVICE); if (!alg->caam.geniv) goto skip_givenc; -@@ -655,107 +277,32 @@ skip_enc: +@@ -655,107 +281,32 @@ skip_enc: * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ @@ -7850,13 +7888,13 @@ Signed-off-by: Yangbo Lu + desc = ctx->sh_desc_enc; + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, + ctx->authsize, is_rfc3686, nonce, -+ ctx1_iv_off, false); ++ ctx1_iv_off, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, + desc_bytes(desc), DMA_TO_DEVICE); skip_givenc: return 0; -@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto +@@ -776,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -7873,7 +7911,7 @@ Signed-off-by: Yangbo Lu return 0; /* -@@ -789,175 +336,35 @@ static int 
gcm_set_sh_desc(struct crypto +@@ -789,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ @@ -8069,7 +8107,7 @@ Signed-off-by: Yangbo Lu return 0; } -@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr +@@ -976,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -8085,7 +8123,7 @@ Signed-off-by: Yangbo Lu return 0; /* -@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr +@@ -988,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ @@ -8256,7 +8294,7 @@ Signed-off-by: Yangbo Lu return 0; } -@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr +@@ -1149,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -8273,7 +8311,7 @@ Signed-off-by: Yangbo Lu return 0; /* -@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr +@@ -1162,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ @@ -8447,7 +8485,7 @@ Signed-off-by: Yangbo Lu return 0; } -@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr +@@ -1322,74 +509,67 @@ static int rfc4543_setauthsize(struct cr return 0; } @@ -8466,8 +8504,10 @@ Signed-off-by: Yangbo Lu - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); struct crypto_authenc_keys keys; -@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea + int ret = 0; + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; @@ -8490,6 +8530,27 @@ Signed-off-by: Yangbo Lu #endif - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); ++ /* ++ * If DKP is supported, use it in the shared descriptor to generate ++ * the split key. 
++ */ ++ if (ctrlpriv->era >= 6) { ++ ctx->adata.keylen = keys.authkeylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) ++ goto badkey; ++ ++ memcpy(ctx->key, keys.authkey, keys.authkeylen); ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, ++ keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ++ ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ goto skip_split_key; ++ } ++ + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); @@ -8515,7 +8576,7 @@ Signed-off-by: Yangbo Lu - ctx->split_key_pad_len + keys.enckeylen, 1); + ctx->adata.keylen_pad + keys.enckeylen, 1); #endif -- + - ctx->enckeylen = keys.enckeylen; - - ret = aead_set_sh_desc(aead); @@ -8525,12 +8586,13 @@ Signed-off-by: Yangbo Lu - } - - return ret; ++skip_split_key: + ctx->cdata.keylen = keys.enckeylen; + return aead_set_sh_desc(aead); badkey: crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; -@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead +@@ -1400,7 +580,6 @@ static int gcm_setkey(struct crypto_aead { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -8538,7 +8600,7 @@ Signed-off-by: Yangbo Lu #ifdef DEBUG print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", -@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead +@@ -1408,21 +587,10 @@ static int gcm_setkey(struct crypto_aead #endif memcpy(ctx->key, key, keylen); @@ -8549,21 +8611,21 @@ Signed-off-by: Yangbo Lu - return -ENOMEM; - } - ctx->enckeylen = keylen; -- ++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ++ ctx->cdata.keylen = keylen; + - ret = gcm_set_sh_desc(aead); - if (ret) { - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, - DMA_TO_DEVICE); - } -+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); -+ ctx->cdata.keylen = keylen; - +- - return ret; + return gcm_set_sh_desc(aead); } static int rfc4106_setkey(struct crypto_aead *aead, -@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_ +@@ -1430,7 +598,6 @@ static int rfc4106_setkey(struct crypto_ { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -8571,7 +8633,7 @@ Signed-off-by: Yangbo Lu if (keylen < 4) return -EINVAL; -@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_ +@@ -1446,22 +613,10 @@ static int rfc4106_setkey(struct crypto_ * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. */ @@ -8598,7 +8660,7 @@ Signed-off-by: Yangbo Lu } static int rfc4543_setkey(struct crypto_aead *aead, -@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_ +@@ -1469,7 +624,6 @@ static int rfc4543_setkey(struct crypto_ { struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -8606,7 +8668,7 @@ Signed-off-by: Yangbo Lu if (keylen < 4) return -EINVAL; -@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_ +@@ -1485,43 +639,28 @@ static int rfc4543_setkey(struct crypto_ * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ @@ -8657,7 +8719,7 @@ Signed-off-by: Yangbo Lu #ifdef DEBUG print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp +@@ -1544,215 +683,33 @@ static int ablkcipher_setkey(struct cryp keylen -= CTR_RFC3686_NONCE_SIZE; } @@ -8890,7 +8952,7 @@ Signed-off-by: Yangbo Lu } static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, -@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct +@@ -1760,8 +717,7 @@ static int xts_ablkcipher_setkey(struct { struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; @@ -8900,7 +8962,7 @@ Signed-off-by: Yangbo Lu if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { crypto_ablkcipher_set_flags(ablkcipher, -@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct +@@ -1771,126 +727,38 @@ static int xts_ablkcipher_setkey(struct } memcpy(ctx->key, key, keylen); @@ -9040,7 +9102,7 @@ Signed-off-by: Yangbo Lu int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; -@@ -1899,12 +739,12 @@ struct aead_edesc { +@@ -1899,12 +767,12 @@ struct aead_edesc { /* * ablkcipher_edesc - s/w-extended ablkcipher descriptor @@ -9056,7 +9118,7 @@ Signed-off-by: Yangbo Lu * @hw_desc: the h/w job descriptor followed by any referenced link tables */ struct ablkcipher_edesc { -@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de +@@ -1924,10 +792,11 @@ static void caam_unmap(struct device *de int sec4_sg_bytes) { if (dst != src) { @@ -9071,7 +9133,7 @@ Signed-off-by: Yangbo Lu } if (iv_dma) -@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru +@@ -2021,8 +890,7 @@ static void ablkcipher_encrypt_done(stru dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -9081,7 +9143,7 @@ Signed-off-by: Yangbo Lu if (err) caam_jr_strstatus(jrdev, err); -@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru +@@ -2031,10 +899,10 @@ static void ablkcipher_encrypt_done(stru print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 
100 : ivsize, 1); @@ -9095,7 +9157,7 @@ Signed-off-by: Yangbo Lu ablkcipher_unmap(jrdev, edesc, req); -@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru +@@ -2062,8 +930,7 @@ static void ablkcipher_decrypt_done(stru dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -9105,7 +9167,7 @@ Signed-off-by: Yangbo Lu if (err) caam_jr_strstatus(jrdev, err); -@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru +@@ -2071,10 +938,10 @@ static void ablkcipher_decrypt_done(stru print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); @@ -9119,7 +9181,7 @@ Signed-off-by: Yangbo Lu ablkcipher_unmap(jrdev, edesc, req); -@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re +@@ -2114,7 +981,7 @@ static void init_aead_job(struct aead_re init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (all_contig) { @@ -9128,7 +9190,7 @@ Signed-off-by: Yangbo Lu in_options = 0; } else { src_dma = edesc->sec4_sg_dma; -@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re +@@ -2129,7 +996,7 @@ static void init_aead_job(struct aead_re out_options = in_options; if (unlikely(req->src != req->dst)) { @@ -9137,7 +9199,25 @@ Signed-off-by: Yangbo Lu dst_dma = sg_dma_address(req->dst); } else { dst_dma = edesc->sec4_sg_dma + -@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req +@@ -2147,9 +1014,6 @@ static void init_aead_job(struct aead_re + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen - authsize, + out_options); +- +- /* REG3 = assoclen */ +- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); + } + + static void init_gcm_job(struct aead_request *req, +@@ -2164,6 +1028,7 @@ static void init_gcm_job(struct aead_req + unsigned int last; + + init_aead_job(req, edesc, all_contig, encrypt); ++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); + + /* BUG This should not be specific to generic GCM. */ + last = 0; +@@ -2175,7 +1040,7 @@ static void init_gcm_job(struct aead_req FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); /* Append Salt */ if (!generic_gcm) @@ -9146,16 +9226,33 @@ Signed-off-by: Yangbo Lu /* Append IV */ append_data(desc, req->iv, ivsize); /* End of blank commands */ -@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead +@@ -2190,7 +1055,8 @@ static void init_authenc_job(struct aead struct caam_aead_alg, aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; u32 *desc = edesc->hw_desc; -@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_ +@@ -2213,6 +1079,15 @@ static void init_authenc_job(struct aead + + init_aead_job(req, edesc, all_contig, encrypt); + ++ /* ++ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports ++ * having DPOVRD as destination. 
++ */ ++ if (ctrlpriv->era < 3) ++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); ++ else ++ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); ++ + if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) + append_load_as_imm(desc, req->iv, ivsize, + LDST_CLASS_1_CCB | +@@ -2236,16 +1111,15 @@ static void init_ablkcipher_job(u32 *sh_ int len, sec4_sg_index = 0; #ifdef DEBUG @@ -9177,7 +9274,7 @@ Signed-off-by: Yangbo Lu len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); -@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_ +@@ -2261,7 +1135,7 @@ static void init_ablkcipher_job(u32 *sh_ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); if (likely(req->src == req->dst)) { @@ -9186,7 +9283,7 @@ Signed-off-by: Yangbo Lu dst_dma = sg_dma_address(req->src); } else { dst_dma = edesc->sec4_sg_dma + -@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_ +@@ -2269,7 +1143,7 @@ static void init_ablkcipher_job(u32 *sh_ out_options = LDST_SGF; } } else { @@ -9195,7 +9292,7 @@ Signed-off-by: Yangbo Lu dst_dma = sg_dma_address(req->dst); } else { dst_dma = edesc->sec4_sg_dma + -@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 +@@ -2296,20 +1170,18 @@ static void init_ablkcipher_giv_job(u32 int len, sec4_sg_index = 0; #ifdef DEBUG @@ -9220,7 +9317,7 @@ Signed-off-by: Yangbo Lu src_dma = sg_dma_address(req->src); in_options = 0; } else { -@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all +@@ -2340,87 +1212,100 @@ static struct aead_edesc *aead_edesc_all struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; @@ -9376,7 +9473,7 @@ Signed-off-by: Yangbo Lu edesc->sec4_sg + sec4_sg_index, 0); } -@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ +@@ -2573,13 +1458,9 @@ static int aead_decrypt(struct aead_requ u32 *desc; int ret = 0; @@ -9393,7 +9490,7 @@ Signed-off-by: Yangbo Lu /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, -@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph +@@ -2619,51 +1500,80 @@ static struct ablkcipher_edesc *ablkciph struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; @@ -9497,7 +9594,7 @@ Signed-off-by: Yangbo Lu return ERR_PTR(-ENOMEM); } -@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph +@@ -2673,23 +1583,24 @@ static struct ablkcipher_edesc *ablkciph edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; @@ -9529,7 +9626,7 @@ Signed-off-by: Yangbo Lu return ERR_PTR(-ENOMEM); } -@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph +@@ -2701,7 +1612,7 @@ static struct ablkcipher_edesc *ablkciph sec4_sg_bytes, 1); #endif @@ -9538,7 +9635,7 @@ Signed-off-by: Yangbo Lu return edesc; } -@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph +@@ -2792,30 +1703,54 @@ static struct ablkcipher_edesc *ablkciph struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; @@ -9555,10 +9652,10 @@ Signed-off-by: Yangbo Lu + bool out_contig; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - int sec4_sg_index; -- -- src_nents = sg_count(req->src, req->nbytes); + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; +- src_nents = sg_count(req->src, req->nbytes); +- - if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->nbytes); + src_nents = sg_nents_for_len(req->src, req->nbytes); @@ -9609,7 +9706,7 @@ Signed-off-by: Yangbo Lu } /* -@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph +@@ -2825,21 +1760,29 @@ static struct ablkcipher_edesc *ablkciph iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, iv_dma)) { dev_err(jrdev, "unable to map IV\n"); @@ -9645,7 +9742,7 @@ Signed-off-by: Yangbo Lu return ERR_PTR(-ENOMEM); } -@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph +@@ -2849,24 +1792,24 @@ static struct ablkcipher_edesc *ablkciph edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; @@ -9680,7 +9777,7 @@ Signed-off-by: Yangbo Lu return ERR_PTR(-ENOMEM); } edesc->iv_dma = iv_dma; -@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph +@@ -2878,7 +1821,7 @@ static struct ablkcipher_edesc *ablkciph sec4_sg_bytes, 1); #endif @@ -9689,7 +9786,7 @@ Signed-off-by: Yangbo Lu return edesc; } -@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct +@@ -2889,7 +1832,7 @@ static int ablkcipher_givencrypt(struct struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; @@ -9698,7 +9795,7 @@ Signed-off-by: Yangbo Lu u32 *desc; int ret = 0; -@@ -2933,7 +1840,6 @@ struct caam_alg_template { +@@ -2933,7 +1876,6 @@ struct caam_alg_template { } template_u; u32 class1_alg_type; u32 class2_alg_type; @@ -9706,7 +9803,7 @@ Signed-off-by: Yangbo Lu }; static struct caam_alg_template driver_algs[] = { -@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads +@@ -3118,7 +2060,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9714,7 +9811,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads +@@ -3140,7 +2081,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9722,7 +9819,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads +@@ -3162,7 +2102,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = 
OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9730,7 +9827,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads +@@ -3184,7 +2123,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9738,7 +9835,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads +@@ -3206,7 +2144,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9746,7 +9843,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads +@@ -3228,7 +2165,6 @@ static struct caam_aead_alg driver_aeads .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9754,7 +9851,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads +@@ -3250,7 +2186,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9762,7 +9859,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads +@@ -3273,7 +2208,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9770,7 +9867,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads +@@ -3296,7 +2230,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9778,7 +9875,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads +@@ -3319,7 +2252,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9786,7 +9883,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads +@@ -3342,7 +2274,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9794,7 +9891,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads +@@ -3365,7 +2296,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9802,7 +9899,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads +@@ -3388,7 +2318,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9810,7 +9907,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads +@@ -3411,7 +2340,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9818,7 +9915,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads +@@ -3434,7 +2362,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 
.class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9826,7 +9923,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads +@@ -3457,7 +2384,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9834,7 +9931,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads +@@ -3480,7 +2406,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9842,7 +9939,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads +@@ -3503,7 +2428,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9850,7 +9947,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads +@@ -3526,7 +2450,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9858,7 +9955,7 @@ Signed-off-by: Yangbo Lu } }, { -@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads +@@ -3549,7 +2472,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9866,7 +9963,7 @@ Signed-off-by: Yangbo Lu .geniv = true, } }, -@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads +@@ -3573,7 +2495,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9874,7 +9971,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads +@@ -3597,7 +2518,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9882,7 +9979,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads +@@ -3621,7 +2541,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9890,7 +9987,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads +@@ -3645,7 +2564,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9898,7 +9995,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads +@@ -3669,7 +2587,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9906,7 +10003,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads +@@ -3693,7 +2610,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9914,7 +10011,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, 
-@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads +@@ -3717,7 +2633,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9922,7 +10019,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads +@@ -3741,7 +2656,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9930,7 +10027,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads +@@ -3765,7 +2679,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9938,7 +10035,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads +@@ -3789,7 +2702,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9946,7 +10043,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads +@@ -3812,7 +2724,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9954,7 +10051,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads +@@ -3835,7 +2746,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9962,7 +10059,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads +@@ -3858,7 +2768,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9970,7 +10067,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads +@@ -3881,7 +2790,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9978,7 +10075,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads +@@ -3904,7 +2812,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9986,7 +10083,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads +@@ -3927,7 +2834,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -9994,7 +10091,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads +@@ -3950,7 +2856,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10002,7 +10099,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads +@@ -3973,7 +2878,6 @@ static struct caam_aead_alg driver_aeads 
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10010,7 +10107,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads +@@ -3996,7 +2900,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10018,7 +10115,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads +@@ -4019,7 +2922,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10026,7 +10123,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads +@@ -4042,7 +2944,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10034,7 +10131,7 @@ Signed-off-by: Yangbo Lu }, }, { -@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads +@@ -4065,7 +2966,6 @@ static struct caam_aead_alg driver_aeads .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10042,7 +10139,7 @@ Signed-off-by: Yangbo Lu .geniv = true, }, }, -@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads +@@ -4090,7 +2990,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10050,7 +10147,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads +@@ -4115,7 +3014,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10058,7 +10155,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads +@@ -4141,7 +3039,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10066,7 +10163,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads +@@ -4166,7 +3063,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10074,7 +10171,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads +@@ -4192,7 +3088,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10082,7 +10179,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads +@@ -4217,7 +3112,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10090,7 +10187,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads +@@ -4243,7 +3137,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10098,7 +10195,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4268,7 +3125,6 @@ static struct caam_aead_alg 
driver_aeads +@@ -4268,7 +3161,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10106,7 +10203,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads +@@ -4294,7 +3186,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10114,7 +10211,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads +@@ -4319,7 +3210,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10122,7 +10219,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads +@@ -4345,7 +3235,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10130,7 +10227,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, }, }, -@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads +@@ -4370,7 +3259,6 @@ static struct caam_aead_alg driver_aeads OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, @@ -10138,7 +10235,7 @@ Signed-off-by: Yangbo Lu .rfc3686 = true, .geniv = true, }, -@@ -4385,16 +3237,34 @@ struct caam_crypto_alg { +@@ -4385,16 +3273,34 @@ struct caam_crypto_alg { static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) { @@ -10176,7 +10273,7 @@ Signed-off-by: Yangbo Lu return 0; } -@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_ +@@ -4421,25 +3327,9 @@ static int caam_aead_init(struct crypto_ static void caam_exit_common(struct caam_ctx *ctx) { @@ -10205,7 +10302,7 @@ Signed-off-by: Yangbo Lu caam_jr_free(ctx->jrdev); } -@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_ +@@ -4515,7 +3405,6 @@ static struct caam_crypto_alg *caam_alg_ t_alg->caam.class1_alg_type = template->class1_alg_type; t_alg->caam.class2_alg_type = template->class2_alg_type; @@ -10215,7 +10312,7 @@ Signed-off-by: Yangbo Lu } --- /dev/null +++ b/drivers/crypto/caam/caamalg_desc.c -@@ -0,0 +1,1913 @@ +@@ -0,0 +1,1961 @@ +/* + * Shared descriptors for aead, ablkcipher algorithms + * @@ -10263,16 +10360,16 @@ Signed-off-by: Yangbo Lu + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor + * (non-protocol) with no (null) encryption. + * @desc: pointer to buffer used for descriptor construction -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, -+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values - one of ++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed ++ * with OP_ALG_AAI_HMAC_PRECOMP. + * @icvsize: integrity check value (ICV) size (truncated or full) -+ * -+ * Note: Requires an MDHA split key. 
++ * @era: SEC Era + */ +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, -+ unsigned int icvsize) ++ unsigned int icvsize, int era) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; + @@ -10281,13 +10378,18 @@ Signed-off-by: Yangbo Lu + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); -+ if (adata->key_inline) -+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, -+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | -+ KEY_ENC); -+ else -+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ if (era < 6) { ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, ++ adata->keylen_pad, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ } else { ++ append_proto_dkp(desc, adata); ++ } + set_jump_tgt_here(desc, key_jump_cmd); + + /* assoclen + cryptlen = seqinlen */ @@ -10339,16 +10441,16 @@ Signed-off-by: Yangbo Lu + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor + * (non-protocol) with no (null) decryption. + * @desc: pointer to buffer used for descriptor construction -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, -+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values - one of ++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed ++ * with OP_ALG_AAI_HMAC_PRECOMP. + * @icvsize: integrity check value (ICV) size (truncated or full) -+ * -+ * Note: Requires an MDHA split key. 
++ * @era: SEC Era + */ +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, -+ unsigned int icvsize) ++ unsigned int icvsize, int era) +{ + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; + @@ -10357,13 +10459,18 @@ Signed-off-by: Yangbo Lu + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); -+ if (adata->key_inline) -+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, -+ adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); -+ else -+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ if (era < 6) { ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, ++ adata->keylen_pad, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ } else { ++ append_proto_dkp(desc, adata); ++ } + set_jump_tgt_here(desc, key_jump_cmd); + + /* Class 2 operation */ @@ -10422,7 +10529,7 @@ Signed-off-by: Yangbo Lu +static void init_sh_desc_key_aead(u32 * const desc, + struct alginfo * const cdata, + struct alginfo * const adata, -+ const bool is_rfc3686, u32 *nonce) ++ const bool is_rfc3686, u32 *nonce, int era) +{ + u32 *key_jump_cmd; + unsigned int enckeylen = cdata->keylen; @@ -10442,13 +10549,18 @@ Signed-off-by: Yangbo Lu + if (is_rfc3686) + enckeylen -= CTR_RFC3686_NONCE_SIZE; + -+ if (adata->key_inline) -+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, -+ adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); -+ else -+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ if (era < 6) { ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, ++ adata->keylen_pad, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ } else { ++ append_proto_dkp(desc, adata); ++ } + + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, enckeylen, @@ -10479,26 +10591,27 @@ Signed-off-by: Yangbo Lu + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, -+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values - one of ++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed ++ * with OP_ALG_AAI_HMAC_PRECOMP. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi -+ * -+ * Note: Requires an MDHA split key. 
++ * @era: SEC Era + */ +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, -+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) ++ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, ++ int era) +{ + /* Note: Context registers are saved. */ -+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | @@ -10524,8 +10637,13 @@ Signed-off-by: Yangbo Lu + } + + /* Read and write assoclen bytes */ -+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); -+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (is_qi || era < 3) { ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ } else { ++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); ++ } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); @@ -10568,27 +10686,27 @@ Signed-off-by: Yangbo Lu + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, -+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values - one of ++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed ++ * with OP_ALG_AAI_HMAC_PRECOMP. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi -+ * -+ * Note: Requires an MDHA split key. ++ * @era: SEC Era + */ +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool geniv, + const bool is_rfc3686, u32 *nonce, -+ const u32 ctx1_iv_off, const bool is_qi) ++ const u32 ctx1_iv_off, const bool is_qi, int era) +{ + /* Note: Context registers are saved. 
*/ -+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + /* Class 2 operation */ + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | @@ -10615,11 +10733,23 @@ Signed-off-by: Yangbo Lu + } + + /* Read and write assoclen bytes */ -+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); -+ if (geniv) -+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); -+ else -+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (is_qi || era < 3) { ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (geniv) ++ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ++ ivsize); ++ else ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, ++ CAAM_CMD_SZ); ++ } else { ++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); ++ if (geniv) ++ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, ++ ivsize); ++ else ++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, ++ CAAM_CMD_SZ); ++ } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); @@ -10674,29 +10804,29 @@ Signed-off-by: Yangbo Lu + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, -+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values - one of ++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed ++ * with OP_ALG_AAI_HMAC_PRECOMP. + * @ivsize: initialization vector size + * @icvsize: integrity check value (ICV) size (truncated or full) + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template + * @nonce: pointer to rfc3686 nonce + * @ctx1_iv_off: IV offset in CONTEXT1 register + * @is_qi: true when called from caam/qi -+ * -+ * Note: Requires an MDHA split key. ++ * @era: SEC Era + */ +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, -+ const bool is_qi) ++ const bool is_qi, int era) +{ + u32 geniv, moveiv; + + /* Note: Context registers are saved. 
*/ -+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); ++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); + + if (is_qi) { + u32 *wait_load_cmd; @@ -10746,8 +10876,13 @@ Signed-off-by: Yangbo Lu + OP_ALG_ENCRYPT); + + /* Read and write assoclen bytes */ -+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); -+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ if (is_qi || era < 3) { ++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); ++ } else { ++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); ++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); ++ } + + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); @@ -10806,19 +10941,20 @@ Signed-off-by: Yangbo Lu + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed + * with OP_ALG_AAI_CBC -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with -+ * OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1 ++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP. + * @assoclen: associated data length + * @ivsize: initialization vector size + * @authsize: authentication data size + * @blocksize: block cipher size ++ * @era: SEC Era + */ +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int assoclen, + unsigned int ivsize, unsigned int authsize, -+ unsigned int blocksize) ++ unsigned int blocksize, int era) +{ + u32 *key_jump_cmd, *zero_payload_jump_cmd; + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx; @@ -10846,13 +10982,18 @@ Signed-off-by: Yangbo Lu + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + -+ if (adata->key_inline) -+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, -+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | -+ KEY_ENC); -+ else -+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ if (era < 6) { ++ if (adata->key_inline) ++ append_key_as_imm(desc, adata->key_virt, ++ adata->keylen_pad, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | ++ KEY_ENC); ++ else ++ append_key(desc, adata->key_dma, adata->keylen, ++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ } else { ++ append_proto_dkp(desc, adata); ++ } + + if (cdata->key_inline) + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, @@ -10959,19 +11100,20 @@ Signed-off-by: Yangbo Lu + * @cdata: pointer to block cipher transform definitions + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed + * with OP_ALG_AAI_CBC -+ * @adata: pointer to authentication transform definitions. Note that since a -+ * split key is to be used, the size of the split key itself is -+ * specified. Valid algorithm values OP_ALG_ALGSEL_ SHA1 ANDed with -+ * OP_ALG_AAI_HMAC_PRECOMP. ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1 ++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP. 
+ * @assoclen: associated data length + * @ivsize: initialization vector size + * @authsize: authentication data size + * @blocksize: block cipher size ++ * @era: SEC Era + */ +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int assoclen, + unsigned int ivsize, unsigned int authsize, -+ unsigned int blocksize) ++ unsigned int blocksize, int era) +{ + u32 stidx, jumpback; + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd; @@ -10989,8 +11131,11 @@ Signed-off-by: Yangbo Lu + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + -+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ if (era < 6) ++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ else ++ append_proto_dkp(desc, adata); + + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); @@ -11836,7 +11981,7 @@ Signed-off-by: Yangbo Lu + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { -+ u8 *nonce = cdata->key_virt + cdata->keylen; ++ const u8 *nonce = cdata->key_virt + cdata->keylen; + + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | @@ -11901,7 +12046,7 @@ Signed-off-by: Yangbo Lu + + /* Load nonce into CONTEXT1 reg */ + if (is_rfc3686) { -+ u8 *nonce = cdata->key_virt + cdata->keylen; ++ const u8 *nonce = cdata->key_virt + cdata->keylen; + + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | @@ -11970,7 +12115,7 @@ Signed-off-by: Yangbo Lu + + /* Load Nonce into CONTEXT1 reg */ + if (is_rfc3686) { -+ u8 *nonce = cdata->key_virt + cdata->keylen; ++ const u8 *nonce = cdata->key_virt + cdata->keylen; + + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | @@ -12185,38 +12330,38 @@ Signed-off-by: Yangbo Lu + 15 * CAAM_CMD_SZ) + +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, -+ unsigned int icvsize); ++ unsigned int icvsize, int era); + +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, -+ unsigned int icvsize); ++ unsigned int icvsize, int era); + +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, -+ const bool is_qi); ++ const bool is_qi, int era); + +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool geniv, + const bool is_rfc3686, u32 *nonce, -+ const u32 ctx1_iv_off, const bool is_qi); ++ const u32 ctx1_iv_off, const bool is_qi, int era); + +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int ivsize, + unsigned int icvsize, const bool is_rfc3686, + u32 *nonce, const u32 ctx1_iv_off, -+ const bool is_qi); ++ const bool is_qi, int era); + +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int assoclen, + unsigned int ivsize, unsigned int authsize, -+ unsigned int blocksize); ++ unsigned int blocksize, int era); + +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata, + struct alginfo *adata, unsigned int assoclen, + unsigned int ivsize, unsigned int authsize, -+ unsigned int blocksize); ++ unsigned int blocksize, int era); + +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, + unsigned int ivsize, 
unsigned int icvsize, @@ -12261,7 +12406,7 @@ Signed-off-by: Yangbo Lu +#endif /* _CAAMALG_DESC_H_ */ --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi.c -@@ -0,0 +1,2877 @@ +@@ -0,0 +1,2929 @@ +/* + * Freescale FSL CAAM support for crypto API over QI backend. + * Based on caamalg.c @@ -12338,6 +12483,7 @@ Signed-off-by: Yangbo Lu + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = alg->caam.rfc3686; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; @@ -12388,7 +12534,7 @@ Signed-off-by: Yangbo Lu + + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, -+ ctx1_iv_off, true); ++ ctx1_iv_off, true, ctrlpriv->era); + +skip_enc: + /* aead_decrypt shared descriptor */ @@ -12413,7 +12559,8 @@ Signed-off-by: Yangbo Lu + + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, alg->caam.geniv, -+ is_rfc3686, nonce, ctx1_iv_off, true); ++ is_rfc3686, nonce, ctx1_iv_off, true, ++ ctrlpriv->era); + + if (!alg->caam.geniv) + goto skip_givenc; @@ -12440,7 +12587,7 @@ Signed-off-by: Yangbo Lu + + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, -+ ctx1_iv_off, true); ++ ctx1_iv_off, true, ctrlpriv->era); + +skip_givenc: + return 0; @@ -12461,6 +12608,7 @@ Signed-off-by: Yangbo Lu +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + struct crypto_authenc_keys keys; + int ret = 0; + @@ -12475,6 +12623,27 @@ Signed-off-by: Yangbo Lu + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + ++ /* ++ * If DKP is supported, use it in the shared descriptor to generate ++ * the split key. 
++ */ ++ if (ctrlpriv->era >= 6) { ++ ctx->adata.keylen = keys.authkeylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) ++ goto badkey; ++ ++ memcpy(ctx->key, keys.authkey, keys.authkeylen); ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, ++ keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ++ ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ goto skip_split_key; ++ } ++ + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); @@ -12491,6 +12660,7 @@ Signed-off-by: Yangbo Lu + ctx->adata.keylen_pad + keys.enckeylen, 1); +#endif + ++skip_split_key: + ctx->cdata.keylen = keys.enckeylen; + + ret = aead_set_sh_desc(aead); @@ -12530,6 +12700,7 @@ Signed-off-by: Yangbo Lu + unsigned int assoclen = 13; /* always 13 bytes for TLS */ + unsigned int data_len[2]; + u32 inl_mask; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); + + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; @@ -12560,17 +12731,20 @@ Signed-off-by: Yangbo Lu + ctx->cdata.key_inline = !!(inl_mask & 2); + + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, -+ assoclen, ivsize, ctx->authsize, blocksize); ++ assoclen, ivsize, ctx->authsize, blocksize, ++ ctrlpriv->era); + + /* + * TLS 1.0 decrypt shared descriptor + * Keys do not fit inline, regardless of algorithms used + */ ++ ctx->adata.key_inline = false; + ctx->adata.key_dma = ctx->key_dma; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, -+ assoclen, ivsize, ctx->authsize, blocksize); ++ assoclen, ivsize, ctx->authsize, blocksize, ++ ctrlpriv->era); + + return 0; +} @@ -12590,6 +12764,7 @@ Signed-off-by: Yangbo Lu +{ + struct caam_ctx *ctx = crypto_aead_ctx(tls); + struct device *jrdev = ctx->jrdev; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + struct crypto_authenc_keys keys; + int ret = 0; + @@ -12604,6 +12779,27 @@ Signed-off-by: Yangbo Lu + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + ++ /* ++ * If DKP is supported, use it in the shared descriptor to generate ++ * the split key. ++ */ ++ if (ctrlpriv->era >= 6) { ++ ctx->adata.keylen = keys.authkeylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ ++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) ++ goto badkey; ++ ++ memcpy(ctx->key, keys.authkey, keys.authkeylen); ++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, ++ keys.enckeylen); ++ dma_sync_single_for_device(jrdev, ctx->key_dma, ++ ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_TO_DEVICE); ++ goto skip_split_key; ++ } ++ + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); @@ -12623,6 +12819,7 @@ Signed-off-by: Yangbo Lu + ctx->adata.keylen_pad + keys.enckeylen, 1); +#endif + ++skip_split_key: + ctx->cdata.keylen = keys.enckeylen; + + ret = tls_set_sh_desc(tls); @@ -15141,7 +15338,7 @@ Signed-off-by: Yangbo Lu +MODULE_AUTHOR("Freescale Semiconductor"); --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi2.c -@@ -0,0 +1,4428 @@ +@@ -0,0 +1,5920 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor Inc. 
+ * Copyright 2017 NXP @@ -15186,6 +15383,7 @@ Signed-off-by: Yangbo Lu +#include "sg_sw_qm2.h" +#include "key_gen.h" +#include "caamalg_desc.h" ++#include "caamhash_desc.h" +#include "../../../drivers/staging/fsl-mc/include/mc.h" +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" @@ -15232,6 +15430,7 @@ Signed-off-by: Yangbo Lu + * caam_ctx - per-session context + * @flc: Flow Contexts array + * @key: virtual address of the key(s): [authentication key], encryption key ++ * @flc_dma: I/O virtual addresses of the Flow Contexts + * @key_dma: I/O virtual address of the key + * @dev: dpseci device + * @adata: authentication algorithm details @@ -15241,6 +15440,7 @@ Signed-off-by: Yangbo Lu +struct caam_ctx { + struct caam_flc flc[NUM_OP]; + u8 key[CAAM_MAX_KEY_SIZE]; ++ dma_addr_t flc_dma[NUM_OP]; + dma_addr_t key_dma; + struct device *dev; + struct alginfo adata; @@ -15298,6 +15498,8 @@ Signed-off-by: Yangbo Lu + case CRYPTO_ALG_TYPE_AEAD: + return aead_request_ctx(container_of(areq, struct aead_request, + base)); ++ case CRYPTO_ALG_TYPE_AHASH: ++ return ahash_request_ctx(ahash_request_cast(areq)); + default: + return ERR_PTR(-EINVAL); + } @@ -15333,6 +15535,7 @@ Signed-off-by: Yangbo Lu + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + struct device *dev = ctx->dev; ++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); + struct caam_flc *flc; + u32 *desc; + u32 ctx1_iv_off = 0; @@ -15394,19 +15597,17 @@ Signed-off-by: Yangbo Lu + if (alg->caam.geniv) + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, -+ nonce, ctx1_iv_off, true); ++ nonce, ctx1_iv_off, true, ++ priv->sec_attr.era); + else + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, is_rfc3686, nonce, -+ ctx1_iv_off, true); ++ ctx1_iv_off, true, priv->sec_attr.era); + + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* aead_decrypt shared descriptor */ + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + @@ -15430,18 +15631,14 @@ Signed-off-by: Yangbo Lu + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, + ivsize, ctx->authsize, alg->caam.geniv, -+ is_rfc3686, nonce, ctx1_iv_off, true); -+ ++ is_rfc3686, nonce, ctx1_iv_off, true, ++ priv->sec_attr.era); + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -15477,137 +15674,12 @@ Signed-off-by: Yangbo Lu + complete(&res->completion); +} + -+static int gen_split_key_sh(struct device *dev, u8 *key_out, -+ struct alginfo * const adata, const u8 *key_in, -+ u32 keylen) -+{ -+ struct caam_request *req_ctx; -+ u32 *desc; -+ struct split_key_sh_result result; -+ dma_addr_t dma_addr_in, dma_addr_out; -+ struct caam_flc *flc; -+ struct dpaa2_fl_entry 
*in_fle, *out_fle; -+ int ret = -ENOMEM; -+ -+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); -+ if (!req_ctx) -+ return -ENOMEM; -+ -+ in_fle = &req_ctx->fd_flt[1]; -+ out_fle = &req_ctx->fd_flt[0]; -+ -+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); -+ if (!flc) -+ goto err_flc; -+ -+ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, dma_addr_in)) { -+ dev_err(dev, "unable to map key input memory\n"); -+ goto err_dma_addr_in; -+ } -+ -+ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, dma_addr_out)) { -+ dev_err(dev, "unable to map key output memory\n"); -+ goto err_dma_addr_out; -+ } -+ -+ desc = flc->sh_desc; -+ -+ init_sh_desc(desc, 0); -+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); -+ -+ /* Sets MDHA up into an HMAC-INIT */ -+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | -+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT | -+ OP_ALG_AS_INIT); -+ -+ /* -+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion -+ * into both pads inside MDHA -+ */ -+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | -+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); -+ -+ /* -+ * FIFO_STORE with the explicit split-key content store -+ * (0x26 output type) -+ */ -+ append_fifo_store(desc, dma_addr_out, adata->keylen, -+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); -+ -+ flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ goto err_flc_dma; -+ } -+ -+ dpaa2_fl_set_final(in_fle, true); -+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); -+ dpaa2_fl_set_addr(in_fle, dma_addr_in); -+ dpaa2_fl_set_len(in_fle, keylen); -+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); -+ dpaa2_fl_set_addr(out_fle, dma_addr_out); -+ dpaa2_fl_set_len(out_fle, adata->keylen_pad); -+ -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); -+ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); -+#endif -+ -+ result.err = 0; -+ init_completion(&result.completion); -+ result.dev = dev; -+ -+ req_ctx->flc = flc; -+ req_ctx->cbk = split_key_sh_done; -+ req_ctx->ctx = &result; -+ -+ ret = dpaa2_caam_enqueue(dev, req_ctx); -+ if (ret == -EINPROGRESS) { -+ /* in progress */ -+ wait_for_completion(&result.completion); -+ ret = result.err; -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, key_out, -+ adata->keylen_pad, 1); -+#endif -+ } -+ -+ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc), -+ DMA_TO_DEVICE); -+err_flc_dma: -+ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE); -+err_dma_addr_out: -+ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE); -+err_dma_addr_in: -+ kfree(flc); -+err_flc: -+ kfree(req_ctx); -+ return ret; -+} -+ -+static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, -+ u32 authkeylen) -+{ -+ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in, -+ authkeylen); -+} -+ +static int aead_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; + struct 
crypto_authenc_keys keys; -+ int ret; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; @@ -15620,34 +15692,17 @@ Signed-off-by: Yangbo Lu + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + -+ ctx->adata.keylen = split_key_len(ctx->adata.algtype & -+ OP_ALG_ALGSEL_MASK); -+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype & -+ OP_ALG_ALGSEL_MASK); -+ -+#ifdef DEBUG -+ dev_err(dev, "split keylen %d split keylen padded %d\n", -+ ctx->adata.keylen, ctx->adata.keylen_pad); -+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1); -+#endif ++ ctx->adata.keylen = keys.authkeylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + -+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); -+ if (ret) -+ goto badkey; -+ -+ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); -+ -+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad + -+ keys.enckeylen, DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_BIDIRECTIONAL); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, @@ -15656,12 +15711,7 @@ Signed-off-by: Yangbo Lu + + ctx->cdata.keylen = keys.enckeylen; + -+ ret = aead_set_sh_desc(aead); -+ if (ret) -+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad + -+ keys.enckeylen, DMA_TO_DEVICE); -+ -+ return ret; ++ return aead_set_sh_desc(aead); +badkey: + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; @@ -16045,6 +16095,7 @@ Signed-off-by: Yangbo Lu + unsigned int ivsize = crypto_aead_ivsize(tls); + unsigned int blocksize = crypto_aead_blocksize(tls); + struct device *dev = ctx->dev; ++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); + struct caam_flc *flc; + u32 *desc; + unsigned int assoclen = 13; /* always 13 bytes for TLS */ @@ -16081,39 +16132,30 @@ Signed-off-by: Yangbo Lu + + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata, -+ assoclen, ivsize, ctx->authsize, blocksize); -+ ++ assoclen, ivsize, ctx->authsize, blocksize, ++ priv->sec_attr.era); + flc->flc[1] = desc_len(desc); -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* + * TLS 1.0 decrypt shared descriptor + * Keys do not fit inline, regardless of algorithms used + */ ++ ctx->adata.key_inline = false; + ctx->adata.key_dma = ctx->key_dma; + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; + + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize, -+ ctx->authsize, blocksize); -+ ++ ctx->authsize, blocksize, priv->sec_attr.era); + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), 
DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -16124,7 +16166,6 @@ Signed-off-by: Yangbo Lu + struct caam_ctx *ctx = crypto_aead_ctx(tls); + struct device *dev = ctx->dev; + struct crypto_authenc_keys keys; -+ int ret; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; @@ -16137,35 +16178,17 @@ Signed-off-by: Yangbo Lu + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + -+ ctx->adata.keylen = split_key_len(ctx->adata.algtype & -+ OP_ALG_ALGSEL_MASK); -+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype & -+ OP_ALG_ALGSEL_MASK); -+ -+#ifdef DEBUG -+ dev_err(dev, "split keylen %d split keylen padded %d\n", -+ ctx->adata.keylen, ctx->adata.keylen_pad); -+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, -+ keys.authkeylen + keys.enckeylen, 1); -+#endif ++ ctx->adata.keylen = keys.authkeylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); + + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) + goto badkey; + -+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen); -+ if (ret) -+ goto badkey; -+ -+ /* postpend encryption key to auth split key */ ++ memcpy(ctx->key, keys.authkey, keys.authkeylen); + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); -+ -+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad + -+ keys.enckeylen, DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + ++ keys.enckeylen, DMA_BIDIRECTIONAL); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, @@ -16174,12 +16197,7 @@ Signed-off-by: Yangbo Lu + + ctx->cdata.keylen = keys.enckeylen; + -+ ret = tls_set_sh_desc(tls); -+ if (ret) -+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad + -+ keys.enckeylen, DMA_TO_DEVICE); -+ -+ return ret; ++ return tls_set_sh_desc(tls); +badkey: + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; @@ -16224,14 +16242,10 @@ Signed-off-by: Yangbo Lu + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* + * Job Descriptor and Shared Descriptors @@ -16248,14 +16262,10 @@ Signed-off-by: Yangbo Lu + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + 
desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -16275,7 +16285,6 @@ Signed-off-by: Yangbo Lu +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; -+ int ret; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", @@ -16283,19 +16292,11 @@ Signed-off-by: Yangbo Lu +#endif + + memcpy(ctx->key, key, keylen); -+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ++ DMA_BIDIRECTIONAL); + ctx->cdata.keylen = keylen; + -+ ret = gcm_set_sh_desc(aead); -+ if (ret) -+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, -+ DMA_TO_DEVICE); -+ -+ return ret; ++ return gcm_set_sh_desc(aead); +} + +static int rfc4106_set_sh_desc(struct crypto_aead *aead) @@ -16329,14 +16330,10 @@ Signed-off-by: Yangbo Lu + desc = flc->sh_desc; + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* + * Job Descriptor and Shared Descriptors @@ -16353,14 +16350,10 @@ Signed-off-by: Yangbo Lu + desc = flc->sh_desc; + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -16381,7 +16374,6 @@ Signed-off-by: Yangbo Lu +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; -+ int ret; + + if (keylen < 4) + return -EINVAL; @@ -16397,19 +16389,10 @@ Signed-off-by: Yangbo Lu + * in the nonce. Update the AES key length. 
+ */ + ctx->cdata.keylen = keylen - 4; -+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } -+ -+ ret = rfc4106_set_sh_desc(aead); -+ if (ret) -+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, -+ DMA_TO_DEVICE); ++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_BIDIRECTIONAL); + -+ return ret; ++ return rfc4106_set_sh_desc(aead); +} + +static int rfc4543_set_sh_desc(struct crypto_aead *aead) @@ -16443,14 +16426,10 @@ Signed-off-by: Yangbo Lu + desc = flc->sh_desc; + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* + * Job Descriptor and Shared Descriptors @@ -16467,14 +16446,10 @@ Signed-off-by: Yangbo Lu + desc = flc->sh_desc; + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, + true); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -16495,7 +16470,6 @@ Signed-off-by: Yangbo Lu +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *dev = ctx->dev; -+ int ret; + + if (keylen < 4) + return -EINVAL; @@ -16511,19 +16485,10 @@ Signed-off-by: Yangbo Lu + * in the nonce. Update the AES key length. 
+ */ + ctx->cdata.keylen = keylen - 4; -+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } -+ -+ ret = rfc4543_set_sh_desc(aead); -+ if (ret) -+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen, -+ DMA_TO_DEVICE); ++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, ++ DMA_BIDIRECTIONAL); + -+ return ret; ++ return rfc4543_set_sh_desc(aead); +} + +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, @@ -16541,7 +16506,6 @@ Signed-off-by: Yangbo Lu + OP_ALG_AAI_CTR_MOD128); + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); + -+ memcpy(ctx->key, key, keylen); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); @@ -16564,59 +16528,39 @@ Signed-off-by: Yangbo Lu + keylen -= CTR_RFC3686_NONCE_SIZE; + } + -+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } + ctx->cdata.keylen = keylen; -+ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* ablkcipher_encrypt shared descriptor */ + flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* ablkcipher_decrypt shared descriptor */ + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, + is_rfc3686, ctx1_iv_off); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* ablkcipher_givencrypt shared descriptor */ + flc = &ctx->flc[GIVENCRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, + ivsize, is_rfc3686, ctx1_iv_off); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -16636,42 +16580,27 @@ Signed-off-by: Yangbo Lu + return -EINVAL; + } + -+ memcpy(ctx->key, key, keylen); -+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, ctx->key_dma)) { -+ dev_err(dev, "unable to map key i/o memory\n"); -+ return -ENOMEM; -+ } + ctx->cdata.keylen = keylen; -+ ctx->cdata.key_virt = ctx->key; ++ ctx->cdata.key_virt = key; + ctx->cdata.key_inline = true; + + /* xts_ablkcipher_encrypt shared descriptor */ 
+ flc = &ctx->flc[ENCRYPT]; + desc = flc->sh_desc; + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + /* xts_ablkcipher_decrypt shared descriptor */ + flc = &ctx->flc[DECRYPT]; + desc = flc->sh_desc; -+ + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); -+ + flc->flc[1] = desc_len(desc); /* SDL */ -+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) + -+ desc_bytes(desc), DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, flc->flc_dma)) { -+ dev_err(dev, "unable to map shared descriptor\n"); -+ return -ENOMEM; -+ } ++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], ++ sizeof(flc->flc) + desc_bytes(desc), ++ DMA_BIDIRECTIONAL); + + return 0; +} @@ -17084,6 +17013,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; + caam_req->op_type = ENCRYPT; + caam_req->cbk = aead_encrypt_done; + caam_req->ctx = &req->base; @@ -17112,6 +17042,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[DECRYPT]; + caam_req->op_type = DECRYPT; + caam_req->cbk = aead_decrypt_done; + caam_req->ctx = &req->base; @@ -17197,6 +17128,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; + caam_req->op_type = ENCRYPT; + caam_req->cbk = tls_encrypt_done; + caam_req->ctx = &req->base; @@ -17225,6 +17157,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[DECRYPT]; + caam_req->op_type = DECRYPT; + caam_req->cbk = tls_decrypt_done; + caam_req->ctx = &req->base; @@ -17311,6 +17244,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[ENCRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; + caam_req->op_type = ENCRYPT; + caam_req->cbk = ablkcipher_done; + caam_req->ctx = &req->base; @@ -17340,6 +17274,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[GIVENCRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT]; + caam_req->op_type = GIVENCRYPT; + caam_req->cbk = ablkcipher_done; + caam_req->ctx = &req->base; @@ -17368,6 +17303,7 @@ Signed-off-by: Yangbo Lu + return PTR_ERR(edesc); + + caam_req->flc = &ctx->flc[DECRYPT]; ++ caam_req->flc_dma = ctx->flc_dma[DECRYPT]; + caam_req->op_type = DECRYPT; + caam_req->cbk = ablkcipher_done; + caam_req->ctx = &req->base; @@ -17394,6 +17330,8 @@ Signed-off-by: Yangbo Lu + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg), + crypto_alg); + struct caam_ctx *ctx = crypto_tfm_ctx(tfm); ++ dma_addr_t dma_addr; ++ int i; + + /* copy descriptor header template value */ + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | @@ -17403,6 +17341,19 @@ Signed-off-by: Yangbo Lu + + ctx->dev = caam_alg->caam.dev; + ++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, ++ offsetof(struct caam_ctx, flc_dma), ++ DMA_BIDIRECTIONAL, ++ DMA_ATTR_SKIP_CPU_SYNC); ++ if (dma_mapping_error(ctx->dev, dma_addr)) { ++ dev_err(ctx->dev, "unable to map key, shared descriptors\n"); ++ return -ENOMEM; ++ } ++ ++ 
for (i = 0; i < NUM_OP; i++) ++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); ++ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); ++ + return 0; +} + @@ -17423,21 +17374,9 @@ Signed-off-by: Yangbo Lu + +static void caam_exit_common(struct caam_ctx *ctx) +{ -+ int i; -+ -+ for (i = 0; i < NUM_OP; i++) { -+ if (!ctx->flc[i].flc_dma) -+ continue; -+ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma, -+ sizeof(ctx->flc[i].flc) + -+ desc_bytes(ctx->flc[i].sh_desc), -+ DMA_TO_DEVICE); -+ } -+ -+ if (ctx->key_dma) -+ dma_unmap_single(ctx->dev, ctx->key_dma, -+ ctx->cdata.keylen + ctx->adata.keylen_pad, -+ DMA_TO_DEVICE); ++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], ++ offsetof(struct caam_ctx, flc_dma), ++ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void caam_cra_exit(struct crypto_tfm *tfm) @@ -18806,127 +18745,1811 @@ Signed-off-by: Yangbo Lu + alg->exit = caam_cra_exit_aead; +} + -+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) -+{ -+ struct dpaa2_caam_priv_per_cpu *ppriv; -+ -+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx); -+ napi_schedule_irqoff(&ppriv->napi); -+} -+ -+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) -+{ -+ struct device *dev = priv->dev; -+ struct dpaa2_io_notification_ctx *nctx; -+ struct dpaa2_caam_priv_per_cpu *ppriv; -+ int err, i = 0, cpu; ++/* max hash key is max split key size */ ++#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) + -+ for_each_online_cpu(cpu) { -+ ppriv = per_cpu_ptr(priv->ppriv, cpu); -+ ppriv->priv = priv; -+ nctx = &ppriv->nctx; -+ nctx->is_cdan = 0; -+ nctx->id = ppriv->rsp_fqid; -+ nctx->desired_cpu = cpu; -+ nctx->cb = dpaa2_caam_fqdan_cb; ++#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE ++#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE + -+ /* Register notification callbacks */ -+ err = dpaa2_io_service_register(NULL, nctx); -+ if (unlikely(err)) { -+ dev_err(dev, "notification register failed\n"); -+ nctx->cb = NULL; -+ goto err; -+ } ++#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ ++ CAAM_MAX_HASH_KEY_SIZE) ++#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) + -+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, -+ dev); -+ if (unlikely(!ppriv->store)) { -+ dev_err(dev, "dpaa2_io_store_create() failed\n"); -+ goto err; -+ } ++/* caam context sizes for hashes: running digest + 8 */ ++#define HASH_MSG_LEN 8 ++#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) + -+ if (++i == priv->num_pairs) -+ break; -+ } ++enum hash_optype { ++ UPDATE = 0, ++ UPDATE_FIRST, ++ FINALIZE, ++ DIGEST, ++ HASH_NUM_OP ++}; + -+ return 0; ++/** ++ * caam_hash_ctx - ahash per-session context ++ * @flc: Flow Contexts array ++ * @flc_dma: I/O virtual addresses of the Flow Contexts ++ * @key: virtual address of the authentication key ++ * @dev: dpseci device ++ * @ctx_len: size of Context Register ++ * @adata: hashing algorithm details ++ */ ++struct caam_hash_ctx { ++ struct caam_flc flc[HASH_NUM_OP]; ++ dma_addr_t flc_dma[HASH_NUM_OP]; ++ u8 key[CAAM_MAX_HASH_KEY_SIZE]; ++ struct device *dev; ++ int ctx_len; ++ struct alginfo adata; ++}; + -+err: -+ for_each_online_cpu(cpu) { -+ ppriv = per_cpu_ptr(priv->ppriv, cpu); -+ if (!ppriv->nctx.cb) -+ break; -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); -+ } ++/* ahash state */ ++struct caam_hash_state { ++ struct caam_request caam_req; ++ dma_addr_t buf_dma; ++ dma_addr_t ctx_dma; ++ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; ++ int 
buflen_0; ++ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; ++ int buflen_1; ++ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; ++ int (*update)(struct ahash_request *req); ++ int (*final)(struct ahash_request *req); ++ int (*finup)(struct ahash_request *req); ++ int current_buf; ++}; + -+ for_each_online_cpu(cpu) { -+ ppriv = per_cpu_ptr(priv->ppriv, cpu); -+ if (!ppriv->store) -+ break; -+ dpaa2_io_store_destroy(ppriv->store); -+ } ++struct caam_export_state { ++ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; ++ u8 caam_ctx[MAX_CTX_LEN]; ++ int buflen; ++ int (*update)(struct ahash_request *req); ++ int (*final)(struct ahash_request *req); ++ int (*finup)(struct ahash_request *req); ++}; + -+ return err; ++static inline void switch_buf(struct caam_hash_state *state) ++{ ++ state->current_buf ^= 1; +} + -+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) ++static inline u8 *current_buf(struct caam_hash_state *state) +{ -+ struct dpaa2_caam_priv_per_cpu *ppriv; -+ int i = 0, cpu; -+ -+ for_each_online_cpu(cpu) { -+ ppriv = per_cpu_ptr(priv->ppriv, cpu); -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); -+ dpaa2_io_store_destroy(ppriv->store); ++ return state->current_buf ? state->buf_1 : state->buf_0; ++} + -+ if (++i == priv->num_pairs) -+ return; -+ } ++static inline u8 *alt_buf(struct caam_hash_state *state) ++{ ++ return state->current_buf ? state->buf_0 : state->buf_1; +} + -+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) ++static inline int *current_buflen(struct caam_hash_state *state) +{ -+ struct dpseci_rx_queue_cfg rx_queue_cfg; -+ struct device *dev = priv->dev; -+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); -+ struct dpaa2_caam_priv_per_cpu *ppriv; -+ int err = 0, i = 0, cpu; ++ return state->current_buf ? &state->buflen_1 : &state->buflen_0; ++} + -+ /* Configure Rx queues */ -+ for_each_online_cpu(cpu) { -+ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++static inline int *alt_buflen(struct caam_hash_state *state) ++{ ++ return state->current_buf ? &state->buflen_0 : &state->buflen_1; ++} + -+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | -+ DPSECI_QUEUE_OPT_USER_CTX; -+ rx_queue_cfg.order_preservation_en = 0; -+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; -+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; -+ /* -+ * Rx priority (WQ) doesn't really matter, since we use -+ * pull mode, i.e. 
volatile dequeues from specific FQs -+ */ -+ rx_queue_cfg.dest_cfg.priority = 0; -+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64; ++/* Map current buffer in state (if length > 0) and put it in link table */ ++static inline int buf_map_to_qm_sg(struct device *dev, ++ struct dpaa2_sg_entry *qm_sg, ++ struct caam_hash_state *state) ++{ ++ int buflen = *current_buflen(state); + -+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, -+ &rx_queue_cfg); -+ if (err) { -+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n", -+ err); -+ return err; -+ } ++ if (!buflen) ++ return 0; + -+ if (++i == priv->num_pairs) -+ break; ++ state->buf_dma = dma_map_single(dev, current_buf(state), buflen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, state->buf_dma)) { ++ dev_err(dev, "unable to map buf\n"); ++ state->buf_dma = 0; ++ return -ENOMEM; + } + -+ return err; ++ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); ++ ++ return 0; +} + -+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) ++/* Map state->caam_ctx, and add it to link table */ ++static inline int ctx_map_to_qm_sg(struct device *dev, ++ struct caam_hash_state *state, int ctx_len, ++ struct dpaa2_sg_entry *qm_sg, u32 flag) +{ -+ struct device *dev = priv->dev; -+ ++ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); ++ if (dma_mapping_error(dev, state->ctx_dma)) { ++ dev_err(dev, "unable to map ctx\n"); ++ state->ctx_dma = 0; ++ return -ENOMEM; ++ } ++ ++ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); ++ ++ return 0; ++} ++ ++static int ahash_set_sh_desc(struct crypto_ahash *ahash) ++{ ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ int digestsize = crypto_ahash_digestsize(ahash); ++ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); ++ struct caam_flc *flc; ++ u32 *desc; ++ ++ ctx->adata.key_virt = ctx->key; ++ ctx->adata.key_inline = true; ++ ++ /* ahash_update shared descriptor */ ++ flc = &ctx->flc[UPDATE]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ++ ctx->ctx_len, true, priv->sec_attr.era); ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], ++ desc_bytes(desc), DMA_BIDIRECTIONAL); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ahash update shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ /* ahash_update_first shared descriptor */ ++ flc = &ctx->flc[UPDATE_FIRST]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ++ ctx->ctx_len, false, priv->sec_attr.era); ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], ++ desc_bytes(desc), DMA_BIDIRECTIONAL); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ahash update first shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ /* ahash_final shared descriptor */ ++ flc = &ctx->flc[FINALIZE]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ++ ctx->ctx_len, true, priv->sec_attr.era); ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], ++ desc_bytes(desc), DMA_BIDIRECTIONAL); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ahash final shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ /* ahash_digest shared descriptor */ ++ flc = 
&ctx->flc[DIGEST]; ++ desc = flc->sh_desc; ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ++ ctx->ctx_len, false, priv->sec_attr.era); ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], ++ desc_bytes(desc), DMA_BIDIRECTIONAL); ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "ahash digest shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ return 0; ++} ++ ++/* Digest hash size if it is too large */ ++static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ++ u32 *keylen, u8 *key_out, u32 digestsize) ++{ ++ struct caam_request *req_ctx; ++ u32 *desc; ++ struct split_key_sh_result result; ++ dma_addr_t src_dma, dst_dma; ++ struct caam_flc *flc; ++ dma_addr_t flc_dma; ++ int ret = -ENOMEM; ++ struct dpaa2_fl_entry *in_fle, *out_fle; ++ ++ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); ++ if (!req_ctx) ++ return -ENOMEM; ++ ++ in_fle = &req_ctx->fd_flt[1]; ++ out_fle = &req_ctx->fd_flt[0]; ++ ++ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); ++ if (!flc) ++ goto err_flc; ++ ++ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, src_dma)) { ++ dev_err(ctx->dev, "unable to map key input memory\n"); ++ goto err_src_dma; ++ } ++ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, dst_dma)) { ++ dev_err(ctx->dev, "unable to map key output memory\n"); ++ goto err_dst_dma; ++ } ++ ++ desc = flc->sh_desc; ++ ++ init_sh_desc(desc, 0); ++ ++ /* descriptor to perform unkeyed hash on key_in */ ++ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | ++ OP_ALG_AS_INITFINAL); ++ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | ++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); ++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++ flc->flc[1] = desc_len(desc); /* SDL */ ++ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + ++ desc_bytes(desc), DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, flc_dma)) { ++ dev_err(ctx->dev, "unable to map shared descriptor\n"); ++ goto err_flc_dma; ++ } ++ ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, src_dma); ++ dpaa2_fl_set_len(in_fle, *keylen); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); ++ print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ++#endif ++ ++ result.err = 0; ++ init_completion(&result.completion); ++ result.dev = ctx->dev; ++ ++ req_ctx->flc = flc; ++ req_ctx->flc_dma = flc_dma; ++ req_ctx->cbk = split_key_sh_done; ++ req_ctx->ctx = &result; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret == -EINPROGRESS) { ++ /* in progress */ ++ wait_for_completion(&result.completion); ++ ret = result.err; ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, ++ "digested key@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize, ++ 1); ++#endif ++ } ++ ++ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), ++ DMA_TO_DEVICE); ++err_flc_dma: ++ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE); ++err_dst_dma: 
++ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE); ++err_src_dma: ++ kfree(flc); ++err_flc: ++ kfree(req_ctx); ++ ++ *keylen = digestsize; ++ ++ return ret; ++} ++ ++static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, ++ unsigned int keylen) ++{ ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); ++ unsigned int digestsize = crypto_ahash_digestsize(ahash); ++ int ret; ++ u8 *hashed_key = NULL; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); ++#endif ++ ++ if (keylen > blocksize) { ++ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key), ++ GFP_KERNEL | GFP_DMA); ++ if (!hashed_key) ++ return -ENOMEM; ++ ret = hash_digest_key(ctx, key, &keylen, hashed_key, ++ digestsize); ++ if (ret) ++ goto bad_free_key; ++ key = hashed_key; ++ } ++ ++ ctx->adata.keylen = keylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); ++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) ++ goto bad_free_key; ++ ++ memcpy(ctx->key, key, keylen); ++ ++ kfree(hashed_key); ++ return ahash_set_sh_desc(ahash); ++bad_free_key: ++ kfree(hashed_key); ++ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ return -EINVAL; ++} ++ ++static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, ++ struct ahash_request *req, int dst_len) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ if (edesc->src_nents) ++ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); ++ if (edesc->dst_dma) ++ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); ++ ++ if (edesc->qm_sg_bytes) ++ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, ++ DMA_TO_DEVICE); ++ ++ if (state->buf_dma) { ++ dma_unmap_single(dev, state->buf_dma, *current_buflen(state), ++ DMA_TO_DEVICE); ++ state->buf_dma = 0; ++ } ++} ++ ++static inline void ahash_unmap_ctx(struct device *dev, ++ struct ahash_edesc *edesc, ++ struct ahash_request *req, int dst_len, ++ u32 flag) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ if (state->ctx_dma) { ++ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); ++ state->ctx_dma = 0; ++ } ++ ahash_unmap(dev, edesc, req, dst_len); ++} ++ ++static void ahash_done(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct ahash_request *req = ahash_request_cast(areq); ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct ahash_edesc *edesc = state->caam_req.edesc; ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ int digestsize = crypto_ahash_digestsize(ahash); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ ahash_unmap(ctx->dev, edesc, req, digestsize); ++ qi_cache_free(edesc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ++ ctx->ctx_len, 1); ++ if (req->result) ++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->result, ++ digestsize, 1); ++#endif ++ ++ req->base.complete(&req->base, ecode); ++} ++ ++static void ahash_done_bi(void *cbk_ctx, u32 
status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct ahash_request *req = ahash_request_cast(areq); ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct ahash_edesc *edesc = state->caam_req.edesc; ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ int ecode = 0; ++#ifdef DEBUG ++ int digestsize = crypto_ahash_digestsize(ahash); ++ ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ++ switch_buf(state); ++ qi_cache_free(edesc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ++ ctx->ctx_len, 1); ++ if (req->result) ++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->result, ++ digestsize, 1); ++#endif ++ ++ req->base.complete(&req->base, ecode); ++} ++ ++static void ahash_done_ctx_src(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct ahash_request *req = ahash_request_cast(areq); ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct ahash_edesc *edesc = state->caam_req.edesc; ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ int digestsize = crypto_ahash_digestsize(ahash); ++ int ecode = 0; ++ ++#ifdef DEBUG ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ++ ctx->ctx_len, 1); ++ if (req->result) ++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->result, ++ digestsize, 1); ++#endif ++ ++ req->base.complete(&req->base, ecode); ++} ++ ++static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) ++{ ++ struct crypto_async_request *areq = cbk_ctx; ++ struct ahash_request *req = ahash_request_cast(areq); ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct ahash_edesc *edesc = state->caam_req.edesc; ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ int ecode = 0; ++#ifdef DEBUG ++ int digestsize = crypto_ahash_digestsize(ahash); ++ ++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); ++#endif ++ ++ if (unlikely(status)) { ++ caam_qi2_strstatus(ctx->dev, status); ++ ecode = -EIO; ++ } ++ ++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ++ switch_buf(state); ++ qi_cache_free(edesc); ++ ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ++ ctx->ctx_len, 1); ++ if (req->result) ++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, req->result, ++ digestsize, 1); ++#endif ++ ++ req->base.complete(&req->base, ecode); ++} ++ ++static int ahash_update_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = 
ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int *buflen = current_buflen(state); ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state), last_buflen; ++ int in_len = *buflen + req->nbytes, to_hash; ++ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index; ++ struct ahash_edesc *edesc; ++ int ret = 0; ++ ++ last_buflen = *next_buflen; ++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); ++ to_hash = in_len - *next_buflen; ++ ++ if (to_hash) { ++ struct dpaa2_sg_entry *sg_table; ++ ++ src_nents = sg_nents_for_len(req->src, ++ req->nbytes - (*next_buflen)); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to DMA map source\n"); ++ return -ENOMEM; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ edesc->src_nents = src_nents; ++ qm_sg_src_index = 1 + (*buflen ? 1 : 0); ++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * ++ sizeof(*sg_table); ++ sg_table = &edesc->sgt[0]; ++ ++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, ++ DMA_BIDIRECTIONAL); ++ if (ret) ++ goto unmap_ctx; ++ ++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); ++ if (ret) ++ goto unmap_ctx; ++ ++ if (mapped_nents) { ++ sg_to_qm_sg_last(req->src, mapped_nents, ++ sg_table + qm_sg_src_index, 0); ++ if (*next_buflen) ++ scatterwalk_map_and_copy(next_buf, req->src, ++ to_hash - *buflen, ++ *next_buflen, 0); ++ } else { ++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, ++ true); ++ } ++ ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, ++ qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, state->ctx_dma); ++ dpaa2_fl_set_len(out_fle, ctx->ctx_len); ++ ++ req_ctx->flc = &ctx->flc[UPDATE]; ++ req_ctx->flc_dma = ctx->flc_dma[UPDATE]; ++ req_ctx->cbk = ahash_done_bi; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && ++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ goto unmap_ctx; ++ } else if (*next_buflen) { ++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ++ req->nbytes, 0); ++ *buflen = *next_buflen; ++ *next_buflen = last_buflen; ++ } ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); ++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, ++ 
*next_buflen, 1); ++#endif ++ ++ return ret; ++unmap_ctx: ++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_final_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); ++ int qm_sg_bytes, qm_sg_src_index; ++ int digestsize = crypto_ahash_digestsize(ahash); ++ struct ahash_edesc *edesc; ++ struct dpaa2_sg_entry *sg_table; ++ int ret; ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) ++ return -ENOMEM; ++ ++ qm_sg_src_index = 1 + (buflen ? 1 : 0); ++ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table); ++ sg_table = &edesc->sgt[0]; ++ ++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, ++ DMA_TO_DEVICE); ++ if (ret) ++ goto unmap_ctx; ++ ++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); ++ if (ret) ++ goto unmap_ctx; ++ ++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); ++ ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { ++ dev_err(ctx->dev, "unable to map dst\n"); ++ edesc->dst_dma = 0; ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++ req_ctx->flc = &ctx->flc[FINALIZE]; ++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; ++ req_ctx->cbk = ahash_done_ctx_src; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret == -EINPROGRESS || ++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ return ret; ++ ++unmap_ctx: ++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_finup_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); ++ int qm_sg_bytes, qm_sg_src_index; ++ int src_nents, mapped_nents; ++ int digestsize = crypto_ahash_digestsize(ahash); ++ struct ahash_edesc *edesc; ++ struct dpaa2_sg_entry *sg_table; ++ int ret; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to DMA map source\n"); ++ return -ENOMEM; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ edesc->src_nents = src_nents; ++ qm_sg_src_index = 1 + (buflen ? 1 : 0); ++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); ++ sg_table = &edesc->sgt[0]; ++ ++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, ++ DMA_TO_DEVICE); ++ if (ret) ++ goto unmap_ctx; ++ ++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); ++ if (ret) ++ goto unmap_ctx; ++ ++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); ++ ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { ++ dev_err(ctx->dev, "unable to map dst\n"); ++ edesc->dst_dma = 0; ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++ req_ctx->flc = &ctx->flc[FINALIZE]; ++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; ++ req_ctx->cbk = ahash_done_ctx_src; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret == -EINPROGRESS || ++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ return ret; ++ ++unmap_ctx: ++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_digest(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ int digestsize = crypto_ahash_digestsize(ahash); ++ int src_nents, mapped_nents; ++ struct ahash_edesc *edesc; ++ int ret = -ENOMEM; ++ ++ state->buf_dma = 0; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to map source for DMA\n"); ++ return ret; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); ++ return ret; ++ } ++ ++ edesc->src_nents = src_nents; ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ ++ if (mapped_nents > 1) { ++ int qm_sg_bytes; ++ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; ++ ++ qm_sg_bytes = mapped_nents * sizeof(*sg_table); ++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, ++ qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ goto unmap; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ } else { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); ++ } ++ ++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { ++ dev_err(ctx->dev, "unable to map dst\n"); ++ edesc->dst_dma = 0; ++ goto unmap; ++ } ++ ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_len(in_fle, req->nbytes); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++ req_ctx->flc = &ctx->flc[DIGEST]; ++ req_ctx->flc_dma = ctx->flc_dma[DIGEST]; ++ req_ctx->cbk = ahash_done; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret == -EINPROGRESS || ++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ return ret; ++ ++unmap: ++ ahash_unmap(ctx->dev, edesc, req, digestsize); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_final_no_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int buflen = *current_buflen(state); ++ int digestsize = crypto_ahash_digestsize(ahash); ++ struct ahash_edesc *edesc; ++ int ret = -ENOMEM; ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) ++ return ret; ++ ++ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, state->buf_dma)) { ++ dev_err(ctx->dev, "unable to map src\n"); ++ goto unmap; ++ } ++ ++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { ++ dev_err(ctx->dev, "unable to map dst\n"); ++ edesc->dst_dma = 0; ++ goto unmap; ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, state->buf_dma); ++ dpaa2_fl_set_len(in_fle, buflen); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++ req_ctx->flc = &ctx->flc[DIGEST]; ++ req_ctx->flc_dma = ctx->flc_dma[DIGEST]; ++ req_ctx->cbk = ahash_done; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret == -EINPROGRESS || ++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ return ret; ++ ++unmap: ++ ahash_unmap(ctx->dev, edesc, req, digestsize); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_update_no_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *buf = current_buf(state); ++ int *buflen = current_buflen(state); ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state); ++ int in_len = *buflen + req->nbytes, to_hash; ++ int qm_sg_bytes, src_nents, mapped_nents; ++ struct ahash_edesc *edesc; ++ int ret = 0; ++ ++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); ++ to_hash = in_len - *next_buflen; ++ ++ if (to_hash) { ++ struct dpaa2_sg_entry *sg_table; ++ ++ src_nents = sg_nents_for_len(req->src, ++ req->nbytes - *next_buflen); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to DMA map source\n"); ++ return -ENOMEM; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ edesc->src_nents = src_nents; ++ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table); ++ sg_table = &edesc->sgt[0]; ++ ++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); ++ if (ret) ++ goto unmap_ctx; ++ ++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); ++ ++ if (*next_buflen) ++ scatterwalk_map_and_copy(next_buf, req->src, ++ to_hash - *buflen, ++ *next_buflen, 0); ++ ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, ++ qm_sg_bytes, DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, ++ ctx->ctx_len, DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) { ++ dev_err(ctx->dev, "unable to map ctx\n"); ++ state->ctx_dma = 0; ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, to_hash); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, state->ctx_dma); ++ dpaa2_fl_set_len(out_fle, ctx->ctx_len); ++ ++ req_ctx->flc = &ctx->flc[UPDATE_FIRST]; ++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; ++ req_ctx->cbk = ahash_done_ctx_dst; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && ++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ goto unmap_ctx; ++ ++ state->update = ahash_update_ctx; ++ state->finup = ahash_finup_ctx; ++ state->final = ahash_final_ctx; ++ } else if (*next_buflen) { ++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ++ req->nbytes, 0); ++ *buflen = *next_buflen; ++ *next_buflen = 0; ++ } ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); ++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, ++ *next_buflen, 1); ++#endif ++ ++ return ret; ++unmap_ctx: ++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_finup_no_ctx(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash 
= crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ++ GFP_KERNEL : GFP_ATOMIC; ++ int buflen = *current_buflen(state); ++ int qm_sg_bytes, src_nents, mapped_nents; ++ int digestsize = crypto_ahash_digestsize(ahash); ++ struct ahash_edesc *edesc; ++ struct dpaa2_sg_entry *sg_table; ++ int ret; ++ ++ src_nents = sg_nents_for_len(req->src, req->nbytes); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to DMA map source\n"); ++ return -ENOMEM; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ edesc->src_nents = src_nents; ++ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table); ++ sg_table = &edesc->sgt[0]; ++ ++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); ++ if (ret) ++ goto unmap; ++ ++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); ++ ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ ++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, ++ DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { ++ dev_err(ctx->dev, "unable to map dst\n"); ++ edesc->dst_dma = 0; ++ ret = -ENOMEM; ++ goto unmap; ++ } ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ dpaa2_fl_set_len(in_fle, buflen + req->nbytes); ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma); ++ dpaa2_fl_set_len(out_fle, digestsize); ++ ++ req_ctx->flc = &ctx->flc[DIGEST]; ++ req_ctx->flc_dma = ctx->flc_dma[DIGEST]; ++ req_ctx->cbk = ahash_done; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ goto unmap; ++ ++ return ret; ++unmap: ++ ahash_unmap(ctx->dev, edesc, req, digestsize); ++ qi_cache_free(edesc); ++ return -ENOMEM; ++} ++ ++static int ahash_update_first(struct ahash_request *req) ++{ ++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_request *req_ctx = &state->caam_req; ++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; ++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; ++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
++ GFP_KERNEL : GFP_ATOMIC; ++ u8 *next_buf = alt_buf(state); ++ int *next_buflen = alt_buflen(state); ++ int to_hash; ++ int src_nents, mapped_nents; ++ struct ahash_edesc *edesc; ++ int ret = 0; ++ ++ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - ++ 1); ++ to_hash = req->nbytes - *next_buflen; ++ ++ if (to_hash) { ++ struct dpaa2_sg_entry *sg_table; ++ ++ src_nents = sg_nents_for_len(req->src, ++ req->nbytes - (*next_buflen)); ++ if (src_nents < 0) { ++ dev_err(ctx->dev, "Invalid number of src SG.\n"); ++ return src_nents; ++ } ++ ++ if (src_nents) { ++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ if (!mapped_nents) { ++ dev_err(ctx->dev, "unable to map source for DMA\n"); ++ return -ENOMEM; ++ } ++ } else { ++ mapped_nents = 0; ++ } ++ ++ /* allocate space for base edesc and link tables */ ++ edesc = qi_cache_zalloc(GFP_DMA | flags); ++ if (!edesc) { ++ dma_unmap_sg(ctx->dev, req->src, src_nents, ++ DMA_TO_DEVICE); ++ return -ENOMEM; ++ } ++ ++ edesc->src_nents = src_nents; ++ sg_table = &edesc->sgt[0]; ++ ++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); ++ dpaa2_fl_set_final(in_fle, true); ++ dpaa2_fl_set_len(in_fle, to_hash); ++ ++ if (mapped_nents > 1) { ++ int qm_sg_bytes; ++ ++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); ++ qm_sg_bytes = mapped_nents * sizeof(*sg_table); ++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, ++ qm_sg_bytes, ++ DMA_TO_DEVICE); ++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { ++ dev_err(ctx->dev, "unable to map S/G table\n"); ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ edesc->qm_sg_bytes = qm_sg_bytes; ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); ++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); ++ } else { ++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); ++ } ++ ++ if (*next_buflen) ++ scatterwalk_map_and_copy(next_buf, req->src, to_hash, ++ *next_buflen, 0); ++ ++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, ++ ctx->ctx_len, DMA_FROM_DEVICE); ++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) { ++ dev_err(ctx->dev, "unable to map ctx\n"); ++ state->ctx_dma = 0; ++ ret = -ENOMEM; ++ goto unmap_ctx; ++ } ++ ++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single); ++ dpaa2_fl_set_addr(out_fle, state->ctx_dma); ++ dpaa2_fl_set_len(out_fle, ctx->ctx_len); ++ ++ req_ctx->flc = &ctx->flc[UPDATE_FIRST]; ++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; ++ req_ctx->cbk = ahash_done_ctx_dst; ++ req_ctx->ctx = &req->base; ++ req_ctx->edesc = edesc; ++ ++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); ++ if (ret != -EINPROGRESS && ++ !(ret == -EBUSY && req->base.flags & ++ CRYPTO_TFM_REQ_MAY_BACKLOG)) ++ goto unmap_ctx; ++ ++ state->update = ahash_update_ctx; ++ state->finup = ahash_finup_ctx; ++ state->final = ahash_final_ctx; ++ } else if (*next_buflen) { ++ state->update = ahash_update_no_ctx; ++ state->finup = ahash_finup_no_ctx; ++ state->final = ahash_final_no_ctx; ++ scatterwalk_map_and_copy(next_buf, req->src, 0, ++ req->nbytes, 0); ++ switch_buf(state); ++ } ++#ifdef DEBUG ++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1); ++#endif ++ ++ return ret; ++unmap_ctx: ++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); ++ qi_cache_free(edesc); ++ return ret; ++} ++ ++static int ahash_finup_first(struct ahash_request *req) ++{ ++ return ahash_digest(req); ++} ++ ++static int ahash_init(struct ahash_request *req) ++{ ++ 
struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ state->update = ahash_update_first; ++ state->finup = ahash_finup_first; ++ state->final = ahash_final_no_ctx; ++ ++ state->ctx_dma = 0; ++ state->current_buf = 0; ++ state->buf_dma = 0; ++ state->buflen_0 = 0; ++ state->buflen_1 = 0; ++ ++ return 0; ++} ++ ++static int ahash_update(struct ahash_request *req) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ return state->update(req); ++} ++ ++static int ahash_finup(struct ahash_request *req) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ return state->finup(req); ++} ++ ++static int ahash_final(struct ahash_request *req) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ ++ return state->final(req); ++} ++ ++static int ahash_export(struct ahash_request *req, void *out) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ struct caam_export_state *export = out; ++ int len; ++ u8 *buf; ++ ++ if (state->current_buf) { ++ buf = state->buf_1; ++ len = state->buflen_1; ++ } else { ++ buf = state->buf_0; ++ len = state->buflen_0; ++ } ++ ++ memcpy(export->buf, buf, len); ++ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); ++ export->buflen = len; ++ export->update = state->update; ++ export->final = state->final; ++ export->finup = state->finup; ++ ++ return 0; ++} ++ ++static int ahash_import(struct ahash_request *req, const void *in) ++{ ++ struct caam_hash_state *state = ahash_request_ctx(req); ++ const struct caam_export_state *export = in; ++ ++ memset(state, 0, sizeof(*state)); ++ memcpy(state->buf_0, export->buf, export->buflen); ++ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); ++ state->buflen_0 = export->buflen; ++ state->update = export->update; ++ state->final = export->final; ++ state->finup = export->finup; ++ ++ return 0; ++} ++ ++struct caam_hash_template { ++ char name[CRYPTO_MAX_ALG_NAME]; ++ char driver_name[CRYPTO_MAX_ALG_NAME]; ++ char hmac_name[CRYPTO_MAX_ALG_NAME]; ++ char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; ++ unsigned int blocksize; ++ struct ahash_alg template_ahash; ++ u32 alg_type; ++}; ++ ++/* ahash descriptors */ ++static struct caam_hash_template driver_hash[] = { ++ { ++ .name = "sha1", ++ .driver_name = "sha1-caam-qi2", ++ .hmac_name = "hmac(sha1)", ++ .hmac_driver_name = "hmac-sha1-caam-qi2", ++ .blocksize = SHA1_BLOCK_SIZE, ++ .template_ahash = { ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = SHA1_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_SHA1, ++ }, { ++ .name = "sha224", ++ .driver_name = "sha224-caam-qi2", ++ .hmac_name = "hmac(sha224)", ++ .hmac_driver_name = "hmac-sha224-caam-qi2", ++ .blocksize = SHA224_BLOCK_SIZE, ++ .template_ahash = { ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = SHA224_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_SHA224, ++ }, { ++ .name = "sha256", ++ .driver_name = "sha256-caam-qi2", ++ .hmac_name = "hmac(sha256)", ++ .hmac_driver_name = "hmac-sha256-caam-qi2", ++ .blocksize = SHA256_BLOCK_SIZE, ++ .template_ahash = 
{ ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = SHA256_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_SHA256, ++ }, { ++ .name = "sha384", ++ .driver_name = "sha384-caam-qi2", ++ .hmac_name = "hmac(sha384)", ++ .hmac_driver_name = "hmac-sha384-caam-qi2", ++ .blocksize = SHA384_BLOCK_SIZE, ++ .template_ahash = { ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = SHA384_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_SHA384, ++ }, { ++ .name = "sha512", ++ .driver_name = "sha512-caam-qi2", ++ .hmac_name = "hmac(sha512)", ++ .hmac_driver_name = "hmac-sha512-caam-qi2", ++ .blocksize = SHA512_BLOCK_SIZE, ++ .template_ahash = { ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = SHA512_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_SHA512, ++ }, { ++ .name = "md5", ++ .driver_name = "md5-caam-qi2", ++ .hmac_name = "hmac(md5)", ++ .hmac_driver_name = "hmac-md5-caam-qi2", ++ .blocksize = MD5_BLOCK_WORDS * 4, ++ .template_ahash = { ++ .init = ahash_init, ++ .update = ahash_update, ++ .final = ahash_final, ++ .finup = ahash_finup, ++ .digest = ahash_digest, ++ .export = ahash_export, ++ .import = ahash_import, ++ .setkey = ahash_setkey, ++ .halg = { ++ .digestsize = MD5_DIGEST_SIZE, ++ .statesize = sizeof(struct caam_export_state), ++ }, ++ }, ++ .alg_type = OP_ALG_ALGSEL_MD5, ++ } ++}; ++ ++struct caam_hash_alg { ++ struct list_head entry; ++ struct device *dev; ++ int alg_type; ++ struct ahash_alg ahash_alg; ++}; ++ ++static int caam_hash_cra_init(struct crypto_tfm *tfm) ++{ ++ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); ++ struct crypto_alg *base = tfm->__crt_alg; ++ struct hash_alg_common *halg = ++ container_of(base, struct hash_alg_common, base); ++ struct ahash_alg *alg = ++ container_of(halg, struct ahash_alg, halg); ++ struct caam_hash_alg *caam_hash = ++ container_of(alg, struct caam_hash_alg, ahash_alg); ++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); ++ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ ++ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, ++ HASH_MSG_LEN + SHA1_DIGEST_SIZE, ++ HASH_MSG_LEN + 32, ++ HASH_MSG_LEN + SHA256_DIGEST_SIZE, ++ HASH_MSG_LEN + 64, ++ HASH_MSG_LEN + SHA512_DIGEST_SIZE }; ++ dma_addr_t dma_addr; ++ int i; ++ ++ ctx->dev = caam_hash->dev; ++ ++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), ++ DMA_BIDIRECTIONAL, ++ DMA_ATTR_SKIP_CPU_SYNC); ++ if (dma_mapping_error(ctx->dev, dma_addr)) { ++ dev_err(ctx->dev, "unable to map shared descriptors\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < HASH_NUM_OP; i++) ++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); ++ ++ /* copy descriptor header template value */ ++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ++ ++ ctx->ctx_len = runninglen[(ctx->adata.algtype & ++ OP_ALG_ALGSEL_SUBMASK) >> 
++ OP_ALG_ALGSEL_SHIFT]; ++ ++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), ++ sizeof(struct caam_hash_state)); ++ ++ return ahash_set_sh_desc(ahash); ++} ++ ++static void caam_hash_cra_exit(struct crypto_tfm *tfm) ++{ ++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); ++ ++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), ++ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); ++} ++ ++static struct caam_hash_alg *caam_hash_alloc(struct device *dev, ++ struct caam_hash_template *template, bool keyed) ++{ ++ struct caam_hash_alg *t_alg; ++ struct ahash_alg *halg; ++ struct crypto_alg *alg; ++ ++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); ++ if (!t_alg) ++ return ERR_PTR(-ENOMEM); ++ ++ t_alg->ahash_alg = template->template_ahash; ++ halg = &t_alg->ahash_alg; ++ alg = &halg->halg.base; ++ ++ if (keyed) { ++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->hmac_name); ++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->hmac_driver_name); ++ } else { ++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->name); ++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", ++ template->driver_name); ++ } ++ alg->cra_module = THIS_MODULE; ++ alg->cra_init = caam_hash_cra_init; ++ alg->cra_exit = caam_hash_cra_exit; ++ alg->cra_ctxsize = sizeof(struct caam_hash_ctx); ++ alg->cra_priority = CAAM_CRA_PRIORITY; ++ alg->cra_blocksize = template->blocksize; ++ alg->cra_alignmask = 0; ++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; ++ alg->cra_type = &crypto_ahash_type; ++ ++ t_alg->alg_type = template->alg_type; ++ t_alg->dev = dev; ++ ++ return t_alg; ++} ++ ++static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) ++{ ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ ++ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx); ++ napi_schedule_irqoff(&ppriv->napi); ++} ++ ++static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ struct dpaa2_io_notification_ctx *nctx; ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err, i = 0, cpu; ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ ppriv->priv = priv; ++ nctx = &ppriv->nctx; ++ nctx->is_cdan = 0; ++ nctx->id = ppriv->rsp_fqid; ++ nctx->desired_cpu = cpu; ++ nctx->cb = dpaa2_caam_fqdan_cb; ++ ++ /* Register notification callbacks */ ++ err = dpaa2_io_service_register(NULL, nctx); ++ if (unlikely(err)) { ++ dev_err(dev, "notification register failed\n"); ++ nctx->cb = NULL; ++ goto err; ++ } ++ ++ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, ++ dev); ++ if (unlikely(!ppriv->store)) { ++ dev_err(dev, "dpaa2_io_store_create() failed\n"); ++ goto err; ++ } ++ ++ if (++i == priv->num_pairs) ++ break; ++ } ++ ++ return 0; ++ ++err: ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ if (!ppriv->nctx.cb) ++ break; ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ } ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ if (!ppriv->store) ++ break; ++ dpaa2_io_store_destroy(ppriv->store); ++ } ++ ++ return err; ++} ++ ++static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) ++{ ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int i = 0, cpu; ++ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_store_destroy(ppriv->store); ++ ++ if (++i == priv->num_pairs) ++ return; ++ } ++} ++ ++static int 
dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) ++{ ++ struct dpseci_rx_queue_cfg rx_queue_cfg; ++ struct device *dev = priv->dev; ++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); ++ struct dpaa2_caam_priv_per_cpu *ppriv; ++ int err = 0, i = 0, cpu; ++ ++ /* Configure Rx queues */ ++ for_each_online_cpu(cpu) { ++ ppriv = per_cpu_ptr(priv->ppriv, cpu); ++ ++ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | ++ DPSECI_QUEUE_OPT_USER_CTX; ++ rx_queue_cfg.order_preservation_en = 0; ++ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; ++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; ++ /* ++ * Rx priority (WQ) doesn't really matter, since we use ++ * pull mode, i.e. volatile dequeues from specific FQs ++ */ ++ rx_queue_cfg.dest_cfg.priority = 0; ++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64; ++ ++ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, ++ &rx_queue_cfg); ++ if (err) { ++ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n", ++ err); ++ return err; ++ } ++ ++ if (++i == priv->num_pairs) ++ break; ++ } ++ ++ return err; ++} ++ ++static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) ++{ ++ struct device *dev = priv->dev; ++ + if (!priv->cscn_mem) + return; + @@ -19266,6 +20889,7 @@ Signed-off-by: Yangbo Lu +} + +static struct list_head alg_list; ++static struct list_head hash_list; + +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) +{ @@ -19429,6 +21053,61 @@ Signed-off-by: Yangbo Lu + if (registered) + dev_info(dev, "algorithms registered in /proc/crypto\n"); + ++ /* register hash algorithms the device supports */ ++ INIT_LIST_HEAD(&hash_list); ++ ++ /* ++ * Skip registration of any hashing algorithms if MD block ++ * is not present. ++ */ ++ if (!priv->sec_attr.md_acc_num) ++ return 0; ++ ++ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { ++ struct caam_hash_alg *t_alg; ++ struct caam_hash_template *alg = driver_hash + i; ++ ++ /* register hmac version */ ++ t_alg = caam_hash_alloc(dev, alg, true); ++ if (IS_ERR(t_alg)) { ++ err = PTR_ERR(t_alg); ++ dev_warn(dev, "%s hash alg allocation failed: %d\n", ++ alg->driver_name, err); ++ continue; ++ } ++ ++ err = crypto_register_ahash(&t_alg->ahash_alg); ++ if (err) { ++ dev_warn(dev, "%s alg registration failed: %d\n", ++ t_alg->ahash_alg.halg.base.cra_driver_name, ++ err); ++ kfree(t_alg); ++ } else { ++ list_add_tail(&t_alg->entry, &hash_list); ++ } ++ ++ /* register unkeyed version */ ++ t_alg = caam_hash_alloc(dev, alg, false); ++ if (IS_ERR(t_alg)) { ++ err = PTR_ERR(t_alg); ++ dev_warn(dev, "%s alg allocation failed: %d\n", ++ alg->driver_name, err); ++ continue; ++ } ++ ++ err = crypto_register_ahash(&t_alg->ahash_alg); ++ if (err) { ++ dev_warn(dev, "%s alg registration failed: %d\n", ++ t_alg->ahash_alg.halg.base.cra_driver_name, ++ err); ++ kfree(t_alg); ++ } else { ++ list_add_tail(&t_alg->entry, &hash_list); ++ } ++ } ++ if (!list_empty(&hash_list)) ++ dev_info(dev, "hash algorithms registered in /proc/crypto\n"); ++ + return err; + +err_bind: @@ -19473,6 +21152,16 @@ Signed-off-by: Yangbo Lu + } + } + ++ if (hash_list.next) { ++ struct caam_hash_alg *t_hash_alg, *p; ++ ++ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) { ++ crypto_unregister_ahash(&t_hash_alg->ahash_alg); ++ list_del(&t_hash_alg->entry); ++ kfree(t_hash_alg); ++ } ++ } ++ + dpaa2_dpseci_disable(priv); + dpaa2_dpseci_dpio_free(priv); + dpaa2_dpseci_free(priv); @@ -19503,7 +21192,7 @@ Signed-off-by: Yangbo Lu + } + } + -+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma); ++ 
dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma); + + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), + DMA_BIDIRECTIONAL); @@ -19516,7 +21205,7 @@ Signed-off-by: Yangbo Lu + dpaa2_fd_set_format(&fd, dpaa2_fd_list); + dpaa2_fd_set_addr(&fd, req->fd_flt_dma); + dpaa2_fd_set_len(&fd, req->fd_flt[1].len); -+ dpaa2_fd_set_flc(&fd, req->flc->flc_dma); ++ dpaa2_fd_set_flc(&fd, req->flc_dma); + + /* + * There is no guarantee that preemption is disabled here, @@ -19572,7 +21261,7 @@ Signed-off-by: Yangbo Lu +module_fsl_mc_driver(dpaa2_caam_driver); --- /dev/null +++ b/drivers/crypto/caam/caamalg_qi2.h -@@ -0,0 +1,265 @@ +@@ -0,0 +1,281 @@ +/* + * Copyright 2015-2016 Freescale Semiconductor Inc. + * Copyright 2017 NXP @@ -19776,15 +21465,31 @@ Signed-off-by: Yangbo Lu + * @qm_sg_dma: I/O virtual address of h/w link table + * @sgt: the h/w link table + */ -+struct ablkcipher_edesc { ++struct ablkcipher_edesc { ++ int src_nents; ++ int dst_nents; ++ dma_addr_t iv_dma; ++ int qm_sg_bytes; ++ dma_addr_t qm_sg_dma; ++#define CAAM_QI_MAX_ABLKCIPHER_SG \ ++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ ++ sizeof(struct dpaa2_sg_entry)) ++ struct dpaa2_sg_entry sgt[0]; ++}; ++ ++/* ++ * ahash_edesc - s/w-extended ahash descriptor ++ * @dst_dma: I/O virtual address of req->result ++ * @qm_sg_dma: I/O virtual address of h/w link table ++ * @src_nents: number of segments in input scatterlist ++ * @qm_sg_bytes: length of dma mapped qm_sg space ++ * @sgt: pointer to h/w link table ++ */ ++struct ahash_edesc { ++ dma_addr_t dst_dma; ++ dma_addr_t qm_sg_dma; + int src_nents; -+ int dst_nents; -+ dma_addr_t iv_dma; + int qm_sg_bytes; -+ dma_addr_t qm_sg_dma; -+#define CAAM_QI_MAX_ABLKCIPHER_SG \ -+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ -+ sizeof(struct dpaa2_sg_entry)) + struct dpaa2_sg_entry sgt[0]; +}; + @@ -19792,12 +21497,10 @@ Signed-off-by: Yangbo Lu + * caam_flc - Flow Context (FLC) + * @flc: Flow Context options + * @sh_desc: Shared Descriptor -+ * @flc_dma: DMA address of the Flow Context + */ +struct caam_flc { + u32 flc[16]; + u32 sh_desc[MAX_SDLEN]; -+ dma_addr_t flc_dma; +} ____cacheline_aligned; + +enum optype { @@ -19815,6 +21518,7 @@ Signed-off-by: Yangbo Lu + * fd_flt[1] - FLE pointing to input buffer + * @fd_flt_dma: DMA address for the frame list table + * @flc: Flow Context ++ * @flc_dma: I/O virtual address of Flow Context + * @op_type: operation type + * @cbk: Callback function to invoke when job is completed + * @ctx: arbit context attached with request by the application @@ -19824,6 +21528,7 @@ Signed-off-by: Yangbo Lu + struct dpaa2_fl_entry fd_flt[2]; + dma_addr_t fd_flt_dma; + struct caam_flc *flc; ++ dma_addr_t flc_dma; + enum optype op_type; + void (*cbk)(void *ctx, u32 err); + void *ctx; @@ -19840,16 +21545,30 @@ Signed-off-by: Yangbo Lu +#endif /* _CAAMALG_QI2_H_ */ --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c -@@ -72,7 +72,7 @@ +@@ -62,6 +62,7 @@ + #include "error.h" + #include "sg_sw_sec4.h" + #include "key_gen.h" ++#include "caamhash_desc.h" + + #define CAAM_CRA_PRIORITY 3000 + +@@ -71,14 +72,6 @@ + #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE - /* length of descriptors text */ +-/* length of descriptors text */ -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) -+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) - #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) - #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * 
CAAM_CMD_SZ) - #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) -@@ -103,20 +103,14 @@ struct caam_hash_ctx { +-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) +-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) +-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +- + #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ + CAAM_MAX_HASH_KEY_SIZE) + #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) +@@ -103,20 +96,14 @@ struct caam_hash_ctx { u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; @@ -19871,7 +21590,7 @@ Signed-off-by: Yangbo Lu }; /* ahash state */ -@@ -143,6 +137,31 @@ struct caam_export_state { +@@ -143,6 +130,31 @@ struct caam_export_state { int (*finup)(struct ahash_request *req); }; @@ -19903,7 +21622,7 @@ Signed-off-by: Yangbo Lu /* Common job descriptor seq in/out ptr routines */ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ -@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr +@@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr return dst_dma; } @@ -19956,7 +21675,12 @@ Signed-off-by: Yangbo Lu } /* Map state->caam_ctx, and add it to link table */ -@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 +-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, ++static inline int ctx_map_to_sec4_sg(struct device *jrdev, + struct caam_hash_state *state, int ctx_len, + struct sec4_sg_entry *sec4_sg, u32 flag) + { +@@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32 return 0; } @@ -19989,39 +21713,25 @@ Signed-off-by: Yangbo Lu - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); -} - - /* +-/* - * For ahash read data from seqin following state->caam_ctx, - * and write resulting class2 context to seqout, which may be state->caam_ctx - * or req->result -+ * For ahash update, final and finup (import_ctx = true) -+ * import context, read and write to seqout -+ * For ahash firsts and digest (import_ctx = false) -+ * read and write to seqout - */ +- */ -static inline void ahash_append_load_str(u32 *desc, int digestsize) -+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, -+ struct caam_hash_ctx *ctx, bool import_ctx) - { +-{ - /* Calculate remaining bytes to read */ - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); -+ u32 op = ctx->adata.algtype; -+ u32 *skip_key_load; - +- - /* Read remaining bytes */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | - FIFOLD_TYPE_MSG | KEY_VLF); -+ init_sh_desc(desc, HDR_SHARE_SERIAL); - +- - /* Store class2 context bytes */ - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | - LDST_SRCDST_BYTE_CONTEXT); -} -+ /* Append key if it has been set; ahash update excluded */ -+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) { -+ /* Skip key loading if already shared */ -+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | -+ JUMP_COND_SHRD); - +- -/* - * For ahash update, final and finup, import context, read and write to seqout - */ @@ -20034,60 +21744,44 @@ Signed-off-by: Yangbo Lu - /* Import context from software */ - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | - LDST_CLASS_2_CCB | ctx->ctx_len); -+ 
append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, -+ ctx->adata.keylen, CLASS_2 | -+ KEY_DEST_MDHA_SPLIT | KEY_ENC); - +- - /* Class 2 operation */ - append_operation(desc, op | state | OP_ALG_ENCRYPT); -+ set_jump_tgt_here(desc, skip_key_load); - +- - /* - * Load from buf and/or src and write to req->result or state->context - */ - ahash_append_load_str(desc, digestsize); -} -+ op |= OP_ALG_AAI_HMAC_PRECOMP; -+ } - +- -/* For ahash firsts and digest, read and write to seqout */ -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, - int digestsize, struct caam_hash_ctx *ctx) -{ - init_sh_desc_key_ahash(desc, ctx); -+ /* If needed, import context from software */ -+ if (import_ctx) -+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB | -+ LDST_SRCDST_BYTE_CONTEXT); - - /* Class 2 operation */ - append_operation(desc, op | state | OP_ALG_ENCRYPT); - - /* - * Load from buf and/or src and write to req->result or state->context -+ * Calculate remaining bytes to read - */ +- +- /* Class 2 operation */ +- append_operation(desc, op | state | OP_ALG_ENCRYPT); +- +- /* +- * Load from buf and/or src and write to req->result or state->context +- */ - ahash_append_load_str(desc, digestsize); -+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); -+ /* Read remaining bytes */ -+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | -+ FIFOLD_TYPE_MSG | KEY_VLF); -+ /* Store class2 context bytes */ -+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | -+ LDST_SRCDST_BYTE_CONTEXT); - } - +-} +- static int ahash_set_sh_desc(struct crypto_ahash *ahash) -@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp + { struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; - u32 have_key = 0; ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; - if (ctx->split_key_len) - have_key = OP_ALG_AAI_HMAC_PRECOMP; -- ++ ctx->adata.key_virt = ctx->key; + /* ahash_update shared descriptor */ desc = ctx->sh_desc_update; - @@ -20110,13 +21804,14 @@ Signed-off-by: Yangbo Lu - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ++ ctx->ctx_len, true, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), DMA_TO_DEVICE); #ifdef DEBUG print_hex_dump(KERN_ERR, "ahash update shdesc@"__stringify(__LINE__)": ", -@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp +@@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp /* ahash_update_first shared descriptor */ desc = ctx->sh_desc_update_first; @@ -20131,13 +21826,14 @@ Signed-off-by: Yangbo Lu - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ++ ctx->ctx_len, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), DMA_TO_DEVICE); #ifdef DEBUG print_hex_dump(KERN_ERR, "ahash update first shdesc@"__stringify(__LINE__)": ", -@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp +@@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp /* ahash_final shared descriptor */ desc = ctx->sh_desc_fin; @@ -20151,7 +21847,8 @@ Signed-off-by: Yangbo Lu - dev_err(jrdev, 
"unable to map shared descriptor\n"); - return -ENOMEM; - } -+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ++ ctx->ctx_len, true, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), DMA_TO_DEVICE); #ifdef DEBUG @@ -20191,13 +21888,14 @@ Signed-off-by: Yangbo Lu - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); ++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ++ ctx->ctx_len, false, ctrlpriv->era); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), DMA_TO_DEVICE); #ifdef DEBUG print_hex_dump(KERN_ERR, "ahash digest shdesc@"__stringify(__LINE__)": ", -@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp +@@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp return 0; } @@ -20212,7 +21910,7 @@ Signed-off-by: Yangbo Lu /* Digest hash size if it is too large */ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, u32 *keylen, u8 *key_out, u32 digestsize) -@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h +@@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h } /* Job descriptor to perform unkeyed hash on key_in */ @@ -20221,7 +21919,7 @@ Signed-off-by: Yangbo Lu OP_ALG_AS_INITFINAL); append_seq_in_ptr(desc, src_dma, *keylen, 0); append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | -@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h +@@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { @@ -20231,8 +21929,11 @@ Signed-off-by: Yangbo Lu - struct device *jrdev = ctx->jrdev; int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int digestsize = crypto_ahash_digestsize(ahash); ++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); int ret; -@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah + u8 *hashed_key = NULL; + +@@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah key = hashed_key; } @@ -20247,12 +21948,21 @@ Signed-off-by: Yangbo Lu - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -#endif -- ++ /* ++ * If DKP is supported, use it in the shared descriptor to generate ++ * the split key. 
++ */ ++ if (ctrlpriv->era >= 6) { ++ ctx->adata.key_inline = true; ++ ctx->adata.keylen = keylen; ++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & ++ OP_ALG_ALGSEL_MASK); + - ret = gen_split_hash_key(ctx, key, keylen); -+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, -+ CAAM_MAX_HASH_KEY_SIZE); - if (ret) - goto bad_free_key; +- if (ret) +- goto bad_free_key; ++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) ++ goto bad_free_key; - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, - DMA_TO_DEVICE); @@ -20260,13 +21970,18 @@ Signed-off-by: Yangbo Lu - dev_err(jrdev, "unable to map key i/o memory\n"); - ret = -ENOMEM; - goto error_free_key; -- } - #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ++ memcpy(ctx->key, key, keylen); ++ } else { ++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, ++ keylen, CAAM_MAX_HASH_KEY_SIZE); ++ if (ret) ++ goto bad_free_key; + } +-#ifdef DEBUG +- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", +- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, - ctx->split_key_pad_len, 1); -+ ctx->adata.keylen_pad, 1); - #endif +-#endif - ret = ahash_set_sh_desc(ahash); - if (ret) { @@ -20280,7 +21995,7 @@ Signed-off-by: Yangbo Lu bad_free_key: kfree(hashed_key); crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); -@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de +@@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de struct ahash_edesc *edesc, struct ahash_request *req, int dst_len) { @@ -20289,7 +22004,7 @@ Signed-off-by: Yangbo Lu if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); if (edesc->dst_dma) -@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de +@@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, DMA_TO_DEVICE); @@ -20302,7 +22017,7 @@ Signed-off-by: Yangbo Lu } static inline void ahash_unmap_ctx(struct device *dev, -@@ -643,8 +529,7 @@ static void ahash_done(struct device *jr +@@ -643,8 +490,7 @@ static void ahash_done(struct device *jr dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -20312,7 +22027,7 @@ Signed-off-by: Yangbo Lu if (err) caam_jr_strstatus(jrdev, err); -@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device +@@ -671,19 +517,19 @@ static void ahash_done_bi(struct device struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); @@ -20335,7 +22050,7 @@ Signed-off-by: Yangbo Lu kfree(edesc); #ifdef DEBUG -@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de +@@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -20345,7 +22060,7 @@ Signed-off-by: Yangbo Lu if (err) caam_jr_strstatus(jrdev, err); -@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de +@@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); @@ -20368,7 +22083,7 @@ Signed-off-by: Yangbo Lu kfree(edesc); #ifdef DEBUG -@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash +@@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = 
ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20388,7 +22103,13 @@ Signed-off-by: Yangbo Lu int in_len = *buflen + req->nbytes, to_hash; u32 *desc; int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; -@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash +@@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash + edesc->src_nents = src_nents; + edesc->sec4_sg_bytes = sec4_sg_bytes; + +- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -20402,7 +22123,7 @@ Signed-off-by: Yangbo Lu if (mapped_nents) { sg_to_sec4_sg_last(req->src, mapped_nents, -@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash +@@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash to_hash - *buflen, *next_buflen, 0); } else { @@ -20417,7 +22138,7 @@ Signed-off-by: Yangbo Lu desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, -@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_ +@@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20433,7 +22154,15 @@ Signed-off-by: Yangbo Lu u32 *desc; int sec4_sg_bytes, sec4_sg_src_index; int digestsize = crypto_ahash_digestsize(ahash); -@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_ +@@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_ + desc = edesc->hw_desc; + + edesc->sec4_sg_bytes = sec4_sg_bytes; +- edesc->src_nents = 0; + +- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); if (ret) goto unmap_ctx; @@ -20450,7 +22179,7 @@ Signed-off-by: Yangbo Lu edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); -@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_ +@@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20466,7 +22195,7 @@ Signed-off-by: Yangbo Lu u32 *desc; int sec4_sg_src_index; int src_nents, mapped_nents; -@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_ +@@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_ /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, @@ -20475,7 +22204,13 @@ Signed-off-by: Yangbo Lu flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); -@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_ +@@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_ + + edesc->src_nents = src_nents; + +- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, + edesc->sec4_sg, DMA_TO_DEVICE); if (ret) goto unmap_ctx; @@ -20488,7 +22223,7 @@ Signed-off-by: Yangbo Lu ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, sec4_sg_src_index, ctx->ctx_len + buflen, -@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req +@@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); @@ -20509,7 +22244,7 @@ Signed-off-by: Yangbo Lu src_nents = 
sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); -@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha +@@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20524,7 +22259,15 @@ Signed-off-by: Yangbo Lu u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; -@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah +@@ -1246,7 +1083,6 @@ static int ahash_final_no_ctx(struct aha + dev_err(jrdev, "unable to map dst\n"); + goto unmap; + } +- edesc->src_nents = 0; + + #ifdef DEBUG + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", +@@ -1276,13 +1112,12 @@ static int ahash_update_no_ctx(struct ah struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20544,9 +22287,11 @@ Signed-off-by: Yangbo Lu int in_len = *buflen + req->nbytes, to_hash; int sec4_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; -@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah +@@ -1329,10 +1164,11 @@ static int ahash_update_no_ctx(struct ah + + edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->dst_dma = 0; +- edesc->dst_dma = 0; - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, - buf, *buflen); @@ -20557,7 +22302,7 @@ Signed-off-by: Yangbo Lu sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0); -@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah +@@ -1342,8 +1178,6 @@ static int ahash_update_no_ctx(struct ah *next_buflen, 0); } @@ -20566,7 +22311,7 @@ Signed-off-by: Yangbo Lu desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, -@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha +@@ -1403,12 +1237,9 @@ static int ahash_finup_no_ctx(struct aha struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20582,7 +22327,7 @@ Signed-off-by: Yangbo Lu u32 *desc; int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); -@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha +@@ -1450,9 +1281,9 @@ static int ahash_finup_no_ctx(struct aha edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; @@ -20595,7 +22340,7 @@ Signed-off-by: Yangbo Lu ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, req->nbytes); -@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha +@@ -1496,11 +1327,10 @@ static int ahash_update_first(struct aha struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); struct device *jrdev = ctx->jrdev; @@ -20611,7 +22356,15 @@ Signed-off-by: Yangbo Lu int to_hash; u32 *desc; int src_nents, mapped_nents; -@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha +@@ -1545,7 +1375,6 @@ static int ahash_update_first(struct aha + } + + edesc->src_nents = src_nents; +- edesc->dst_dma = 0; + + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, + to_hash); +@@ -1582,6 +1411,7 @@ static int ahash_update_first(struct aha state->final = ahash_final_no_ctx; scatterwalk_map_and_copy(next_buf, req->src, 0, req->nbytes, 0); @@ -20619,7 +22372,7 @@ Signed-off-by: Yangbo Lu } 
#ifdef DEBUG print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", -@@ -1688,7 +1561,6 @@ struct caam_hash_template { +@@ -1688,7 +1518,6 @@ struct caam_hash_template { unsigned int blocksize; struct ahash_alg template_ahash; u32 alg_type; @@ -20627,7 +22380,7 @@ Signed-off-by: Yangbo Lu }; /* ahash descriptors */ -@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_ +@@ -1714,7 +1543,6 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_SHA1, @@ -20635,7 +22388,7 @@ Signed-off-by: Yangbo Lu }, { .name = "sha224", .driver_name = "sha224-caam", -@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_ +@@ -1736,7 +1564,6 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_SHA224, @@ -20643,7 +22396,7 @@ Signed-off-by: Yangbo Lu }, { .name = "sha256", .driver_name = "sha256-caam", -@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_ +@@ -1758,7 +1585,6 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_SHA256, @@ -20651,7 +22404,7 @@ Signed-off-by: Yangbo Lu }, { .name = "sha384", .driver_name = "sha384-caam", -@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_ +@@ -1780,7 +1606,6 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_SHA384, @@ -20659,7 +22412,7 @@ Signed-off-by: Yangbo Lu }, { .name = "sha512", .driver_name = "sha512-caam", -@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_ +@@ -1802,7 +1627,6 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_SHA512, @@ -20667,7 +22420,7 @@ Signed-off-by: Yangbo Lu }, { .name = "md5", .driver_name = "md5-caam", -@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_ +@@ -1824,14 +1648,12 @@ static struct caam_hash_template driver_ }, }, .alg_type = OP_ALG_ALGSEL_MD5, @@ -20682,7 +22435,7 @@ Signed-off-by: Yangbo Lu struct ahash_alg ahash_alg; }; -@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry +@@ -1853,6 +1675,7 @@ static int caam_hash_cra_init(struct cry HASH_MSG_LEN + SHA256_DIGEST_SIZE, HASH_MSG_LEN + 64, HASH_MSG_LEN + SHA512_DIGEST_SIZE }; @@ -20690,7 +22443,7 @@ Signed-off-by: Yangbo Lu /* * Get a Job ring from Job Ring driver to ensure in-order -@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry +@@ -1863,11 +1686,31 @@ static int caam_hash_cra_init(struct cry pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->jrdev); } @@ -20725,7 +22478,7 @@ Signed-off-by: Yangbo Lu OP_ALG_ALGSEL_SHIFT]; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), -@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr +@@ -1879,30 +1722,10 @@ static void caam_hash_cra_exit(struct cr { struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); @@ -20760,7 +22513,7 @@ Signed-off-by: Yangbo Lu caam_jr_free(ctx->jrdev); } -@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat +@@ -1961,7 +1784,6 @@ caam_hash_alloc(struct caam_hash_templat alg->cra_type = &crypto_ahash_type; t_alg->alg_type = template->alg_type; @@ -20768,6 +22521,169 @@ Signed-off-by: Yangbo Lu return t_alg; } +--- /dev/null ++++ b/drivers/crypto/caam/caamhash_desc.c +@@ -0,0 +1,108 @@ ++/* ++ * Shared descriptors for ahash algorithms ++ * ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the 
following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "compat.h" ++#include "desc_constr.h" ++#include "caamhash_desc.h" ++ ++/** ++ * cnstr_shdsc_ahash - ahash shared descriptor ++ * @desc: pointer to buffer used for descriptor construction ++ * @adata: pointer to authentication transform definitions. ++ * A split key is required for SEC Era < 6; the size of the split key ++ * is specified in this case. ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, ++ * SHA256, SHA384, SHA512}. 
++ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} ++ * @digestsize: algorithm's digest size ++ * @ctx_len: size of Context Register ++ * @import_ctx: true if previous Context Register needs to be restored ++ * must be true for ahash update and final ++ * must be false for for ahash first and digest ++ * @era: SEC Era ++ */ ++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, ++ int digestsize, int ctx_len, bool import_ctx, int era) ++{ ++ u32 op = adata->algtype; ++ ++ init_sh_desc(desc, HDR_SHARE_SERIAL); ++ ++ /* Append key if it has been set; ahash update excluded */ ++ if (state != OP_ALG_AS_UPDATE && adata->keylen) { ++ u32 *skip_key_load; ++ ++ /* Skip key loading if already shared */ ++ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ++ JUMP_COND_SHRD); ++ ++ if (era < 6) ++ append_key_as_imm(desc, adata->key_virt, ++ adata->keylen_pad, ++ adata->keylen, CLASS_2 | ++ KEY_DEST_MDHA_SPLIT | KEY_ENC); ++ else ++ append_proto_dkp(desc, adata); ++ ++ set_jump_tgt_here(desc, skip_key_load); ++ ++ op |= OP_ALG_AAI_HMAC_PRECOMP; ++ } ++ ++ /* If needed, import context from software */ ++ if (import_ctx) ++ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++ ++ /* Class 2 operation */ ++ append_operation(desc, op | state | OP_ALG_ENCRYPT); ++ ++ /* ++ * Load from buf and/or src and write to req->result or state->context ++ * Calculate remaining bytes to read ++ */ ++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ++ /* Read remaining bytes */ ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | ++ FIFOLD_TYPE_MSG | KEY_VLF); ++ /* Store class2 context bytes */ ++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | ++ LDST_SRCDST_BYTE_CONTEXT); ++} ++EXPORT_SYMBOL(cnstr_shdsc_ahash); ++ ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); ++MODULE_AUTHOR("NXP Semiconductors"); +--- /dev/null ++++ b/drivers/crypto/caam/caamhash_desc.h +@@ -0,0 +1,49 @@ ++/* ++ * Shared descriptors for ahash algorithms ++ * ++ * Copyright 2017 NXP ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * * Neither the names of the above-listed copyright holders nor the ++ * names of any contributors may be used to endorse or promote products ++ * derived from this software without specific prior written permission. ++ * ++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef _CAAMHASH_DESC_H_ ++#define _CAAMHASH_DESC_H_ ++ ++/* length of descriptors text */ ++#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) ++#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) ++#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) ++#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) ++#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) ++ ++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, ++ int digestsize, int ctx_len, bool import_ctx, int era); ++ ++#endif /* _CAAMHASH_DESC_H_ */ --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -18,6 +18,10 @@ @@ -21773,7 +23689,7 @@ Signed-off-by: Yangbo Lu /* * Read the Compile Time paramters and SCFGR to determine -@@ -590,64 +597,67 @@ static int caam_probe(struct platform_de +@@ -590,64 +597,69 @@ static int caam_probe(struct platform_de JRSTART_JR1_START | JRSTART_JR2_START | JRSTART_JR3_START); @@ -21789,7 +23705,15 @@ Signed-off-by: Yangbo Lu - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); - else - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); -- ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); ++ } else { ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); ++ } ++ if (ret) { ++ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); ++ goto iounmap_ctrl; ++ } + - /* - * Detect and enable JobRs - * First, find out how many ring spec'ed, allocate references @@ -21800,14 +23724,7 @@ Signed-off-by: Yangbo Lu - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) - rspec++; -+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)); -+ } else { -+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); -+ } -+ if (ret) { -+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); -+ goto iounmap_ctrl; -+ } ++ ctrlpriv->era = caam_get_era(); - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec, - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL); @@ -21880,7 +23797,7 @@ Signed-off-by: Yangbo Lu } /* If no QI and no rings specified, quit and go home */ -@@ -662,8 +672,10 @@ static int caam_probe(struct platform_de +@@ -662,8 +674,10 @@ static int caam_probe(struct platform_de /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation @@ -21892,12 +23809,14 @@ Signed-off-by: Yangbo Lu ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* -@@ -731,77 +743,46 @@ static int caam_probe(struct platform_de +@@ -730,78 +744,47 @@ static int caam_probe(struct platform_de + /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, - caam_get_era()); +- caam_get_era()); - dev_info(dev, "job rings = %d, qi = %d\n", - ctrlpriv->total_jobrs, ctrlpriv->qi_present); ++ ctrlpriv->era); + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", + ctrlpriv->total_jobrs, ctrlpriv->qi_present, + caam_dpaa2 ? 
"yes" : "no"); @@ -22004,7 +23923,7 @@ Signed-off-by: Yangbo Lu ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); ctrlpriv->ctl_kek = debugfs_create_blob("kek", S_IRUSR | -@@ -809,7 +790,7 @@ static int caam_probe(struct platform_de +@@ -809,7 +792,7 @@ static int caam_probe(struct platform_de ctrlpriv->ctl, &ctrlpriv->ctl_kek_wrap); @@ -22013,7 +23932,7 @@ Signed-off-by: Yangbo Lu ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", S_IRUSR | -@@ -817,7 +798,7 @@ static int caam_probe(struct platform_de +@@ -817,7 +800,7 @@ static int caam_probe(struct platform_de ctrlpriv->ctl, &ctrlpriv->ctl_tkek_wrap); @@ -22022,7 +23941,7 @@ Signed-off-by: Yangbo Lu ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", S_IRUSR | -@@ -828,13 +809,17 @@ static int caam_probe(struct platform_de +@@ -828,13 +811,17 @@ static int caam_probe(struct platform_de return 0; caam_remove: @@ -22041,7 +23960,7 @@ Signed-off-by: Yangbo Lu disable_caam_aclk: clk_disable_unprepare(ctrlpriv->caam_aclk); disable_caam_mem: -@@ -844,17 +829,6 @@ disable_caam_ipg: +@@ -844,17 +831,6 @@ disable_caam_ipg: return ret; } @@ -22148,7 +24067,49 @@ Signed-off-by: Yangbo Lu #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT) /* -@@ -1107,8 +1104,8 @@ struct sec4_sg_entry { +@@ -449,6 +446,18 @@ struct sec4_sg_entry { + #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) + #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) + #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) ++#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) + + /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ + #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) +@@ -1098,6 +1107,22 @@ struct sec4_sg_entry { + /* MacSec protinfos */ + #define OP_PCL_MACSEC 0x0001 + ++/* Derived Key Protocol (DKP) Protinfo */ ++#define OP_PCL_DKP_SRC_SHIFT 14 ++#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) ++#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) ++#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) ++#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) ++#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) ++#define OP_PCL_DKP_DST_SHIFT 12 ++#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) ++#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) ++#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) ++#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) ++#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) ++#define OP_PCL_DKP_KEY_SHIFT 0 ++#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) ++ + /* PKI unidirectional protocol protinfo bits */ + #define OP_PCL_PKPROT_TEST 0x0008 + #define OP_PCL_PKPROT_DECRYPT 0x0004 +@@ -1107,8 +1132,8 @@ struct sec4_sg_entry { /* For non-protocol/alg-only op commands */ #define OP_ALG_TYPE_SHIFT 24 #define OP_ALG_TYPE_MASK (0x7 << 
OP_ALG_TYPE_SHIFT) @@ -22159,7 +24120,7 @@ Signed-off-by: Yangbo Lu #define OP_ALG_ALGSEL_SHIFT 16 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT) -@@ -1249,7 +1246,7 @@ struct sec4_sg_entry { +@@ -1249,7 +1274,7 @@ struct sec4_sg_entry { #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f /* PKHA mode copy-memory functions */ @@ -22168,7 +24129,7 @@ Signed-off-by: Yangbo Lu #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT) #define OP_ALG_PKMODE_DST_REG_SHIFT 10 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT) -@@ -1445,10 +1442,11 @@ struct sec4_sg_entry { +@@ -1445,10 +1470,11 @@ struct sec4_sg_entry { #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) @@ -22181,7 +24142,15 @@ Signed-off-by: Yangbo Lu /* Destination selectors */ #define MATH_DEST_SHIFT 8 -@@ -1629,4 +1627,31 @@ struct sec4_sg_entry { +@@ -1457,6 +1483,7 @@ struct sec4_sg_entry { + #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) + #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) + #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) ++#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) + #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) + #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) + #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT) +@@ -1629,4 +1656,31 @@ struct sec4_sg_entry { /* Frame Descriptor Command for Replacement Job Descriptor */ #define FD_CMD_REPLACE_JOB_DESC 0x20000000 @@ -22312,7 +24281,7 @@ Signed-off-by: Yangbo Lu } -static inline void append_data(u32 *desc, void *data, int len) -+static inline void append_data(u32 * const desc, void *data, int len) ++static inline void append_data(u32 * const desc, const void *data, int len) { u32 *offset = desc_end(desc); @@ -22365,7 +24334,7 @@ Signed-off-by: Yangbo Lu } -static inline void append_cmd_data(u32 *desc, void *data, int len, -+static inline void append_cmd_data(u32 * const desc, void *data, int len, ++static inline void append_cmd_data(u32 * const desc, const void *data, int len, u32 command) { append_cmd(desc, command | IMMEDIATE | len); @@ -22452,7 +24421,7 @@ Signed-off-by: Yangbo Lu #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ -+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ ++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ unsigned int len, u32 options) \ { \ PRINT_POS; \ @@ -22479,7 +24448,7 @@ Signed-off-by: Yangbo Lu */ #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \ -+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ ++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ unsigned int data_len, \ unsigned int len, u32 options) \ { \ @@ -22492,7 +24461,7 @@ Signed-off-by: Yangbo Lu u32 options) \ { \ PRINT_POS; \ -@@ -426,3 +434,66 @@ do { \ +@@ -426,3 +434,107 @@ do { \ APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data) #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \ APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data) @@ -22514,7 +24483,7 @@ Signed-off-by: Yangbo Lu + unsigned int keylen_pad; + union { + dma_addr_t key_dma; -+ void *key_virt; ++ const void *key_virt; + }; + bool key_inline; +}; @@ -22558,6 +24527,47 @@ Signed-off-by: Yangbo Lu + return (rem_bytes >= 0) ? 
0 : -1; +} + ++/** ++ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key ++ * @desc: pointer to buffer used for descriptor construction ++ * @adata: pointer to authentication transform definitions. ++ * keylen should be the length of initial key, while keylen_pad ++ * the length of the derived (split) key. ++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, ++ * SHA256, SHA384, SHA512}. ++ */ ++static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) ++{ ++ u32 protid; ++ ++ /* ++ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} ++ * to OP_PCLID_DKP_{MD5, SHA*} ++ */ ++ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | ++ (0x20 << OP_ALG_ALGSEL_SHIFT); ++ ++ if (adata->key_inline) { ++ int words; ++ ++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | ++ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | ++ adata->keylen); ++ append_data(desc, adata->key_virt, adata->keylen); ++ ++ /* Reserve space in descriptor buffer for the derived key */ ++ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - ++ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; ++ if (words) ++ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); ++ } else { ++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | ++ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | ++ adata->keylen); ++ append_ptr(desc, adata->key_dma); ++ } ++} ++ +#endif /* DESC_CONSTR_H */ --- /dev/null +++ b/drivers/crypto/caam/dpseci.c @@ -24295,7 +26305,15 @@ Signed-off-by: Yangbo Lu /* Physical-presence section */ struct caam_ctrl __iomem *ctrl; /* controller region */ -@@ -103,11 +102,6 @@ struct caam_drv_private { +@@ -84,6 +83,7 @@ struct caam_drv_private { + u8 qi_present; /* Nonzero if QI present in device */ + int secvio_irq; /* Security violation interrupt number */ + int virt_en; /* Virtualization enabled in CAAM */ ++ int era; /* CAAM Era (internal HW revision) */ + + #define RNG4_MAX_HANDLES 2 + /* RNG4 block */ +@@ -103,11 +103,6 @@ struct caam_drv_private { #ifdef CONFIG_DEBUG_FS struct dentry *dfs_root; struct dentry *ctl; /* controller dir */ @@ -24307,7 +26325,7 @@ Signed-off-by: Yangbo Lu struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; #endif -@@ -115,4 +109,22 @@ struct caam_drv_private { +@@ -115,4 +110,22 @@ struct caam_drv_private { void caam_jr_algapi_init(struct device *dev); void caam_jr_algapi_remove(struct device *dev); @@ -24829,7 +26847,7 @@ Signed-off-by: Yangbo Lu + + fd.cmd = 0; + fd.format = qm_fd_compound; -+ fd.cong_weight = req->fd_sgt[1].length; ++ fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length); + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(qidev, fd.addr)) { diff --git a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch index 94f0a3444e2..0eeeb9d77fd 100644 --- a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch @@ -1,25 +1,25 @@ -From 854c1f0e9574e9b25a55b439608c71e013b34a56 Mon Sep 17 00:00:00 2001 +From 515d590e3d5313110faa4f2c86f7784d9b070fa9 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:12:20 +0800 -Subject: [PATCH] dma: support layerscape +Date: Wed, 17 Jan 2018 15:30:59 +0800 +Subject: [PATCH 17/30] dma: support layerscape -This is a integrated patch for layerscape dma support. 
+This is an integrated patch for layerscape dma support. Signed-off-by: jiaheng.fan Signed-off-by: Yangbo Lu --- drivers/dma/Kconfig | 31 + drivers/dma/Makefile | 3 + - drivers/dma/caam_dma.c | 563 +++++++++++++++ + drivers/dma/caam_dma.c | 563 ++++++++++++++ drivers/dma/dpaa2-qdma/Kconfig | 8 + drivers/dma/dpaa2-qdma/Makefile | 8 + - drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++ + drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 ++++++++++++++++++++++++ drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++ - drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++ - drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++ + drivers/dma/dpaa2-qdma/dpdmai.c | 454 +++++++++++ + drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++++++ drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++ - drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++ - 11 files changed, 4259 insertions(+) + drivers/dma/fsl-qdma.c | 1243 +++++++++++++++++++++++++++++++ + 11 files changed, 4301 insertions(+) create mode 100644 drivers/dma/caam_dma.c create mode 100644 drivers/dma/dpaa2-qdma/Kconfig create mode 100644 drivers/dma/dpaa2-qdma/Makefile @@ -3146,7 +3146,7 @@ Signed-off-by: Yangbo Lu +#endif /* _FSL_DPDMAI_CMD_H */ --- /dev/null +++ b/drivers/dma/fsl-qdma.c -@@ -0,0 +1,1201 @@ +@@ -0,0 +1,1243 @@ +/* + * drivers/dma/fsl-qdma.c + * @@ -3268,67 +3268,111 @@ Signed-off-by: Yangbo Lu + +u64 pre_addr, pre_queue; + ++/* qDMA Command Descriptor Fotmats */ ++ ++/* Compound Command Descriptor Fotmat */ +struct fsl_qdma_ccdf { -+ u8 status; -+ u32 rev1:22; -+ u32 ser:1; -+ u32 rev2:1; -+ u32 rev3:20; -+ u32 offset:9; -+ u32 format:3; ++ __le32 status; /* ser, status */ ++ __le32 cfg; /* format, offset */ + union { + struct { -+ u32 addr_lo; /* low 32-bits of 40-bit address */ -+ u32 addr_hi:8; /* high 8-bits of 40-bit address */ -+ u32 rev4:16; -+ u32 queue:3; -+ u32 rev5:3; -+ u32 dd:2; /* dynamic debug */ -+ }; -+ struct { -+ u64 addr:40; -+ /* More efficient address accessor */ -+ u64 __notaddress:24; -+ }; ++ __le32 addr_lo; /* low 32-bits of 40-bit address */ ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u8 __reserved1[2]; ++ u8 cfg8b_w1; /* dd, queue*/ ++ } __packed; ++ __le64 data; + }; +} __packed; + ++#define QDMA_CCDF_STATUS 20 ++#define QDMA_CCDF_OFFSET 20 ++#define QDMA_CCDF_MASK GENMASK(28, 20) ++#define QDMA_CCDF_FOTMAT BIT(29) ++#define QDMA_CCDF_SER BIT(30) ++ ++static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf) ++{ ++ return le64_to_cpu(ccdf->data) & 0xffffffffffLLU; ++} ++static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf) ++{ ++ return ccdf->cfg8b_w1 & 0xff; ++} ++static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr) ++{ ++ ccdf->addr_hi = upper_32_bits(addr); ++ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr)); ++} ++static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf) ++{ ++ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET; ++} ++static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset) ++{ ++ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset); ++} ++static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf) ++{ ++ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS; ++} ++static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status) ++{ ++ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status); ++} ++/* qDMA Compound S/G Format */ +struct fsl_qdma_csgf { -+ u32 offset:13; -+ u32 rev1:19; -+ u32 length:30; -+ 
u32 f:1; -+ u32 e:1; ++ __le32 offset; /* offset */ ++ __le32 cfg; /* E bit, F bit, length */ + union { + struct { -+ u32 addr_lo; /* low 32-bits of 40-bit address */ -+ u32 addr_hi:8; /* high 8-bits of 40-bit address */ -+ u32 rev2:24; -+ }; -+ struct { -+ u64 addr:40; -+ /* More efficient address accessor */ -+ u64 __notaddress:24; ++ __le32 addr_lo; /* low 32-bits of 40-bit address */ ++ u8 addr_hi; /* high 8-bits of 40-bit address */ ++ u8 __reserved1[3]; + }; ++ __le64 data; + }; +} __packed; + ++#define QDMA_SG_FIN BIT(30) ++#define QDMA_SG_EXT BIT(31) ++#define QDMA_SG_LEN_MASK GENMASK(29, 0) ++static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg) ++{ ++ return be64_to_cpu(sg->data) & 0xffffffffffLLU; ++} ++static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr) ++{ ++ sg->addr_hi = upper_32_bits(addr); ++ sg->addr_lo = cpu_to_le32(lower_32_bits(addr)); ++} ++static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len) ++{ ++ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK); ++} ++static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len) ++{ ++ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK)); ++} ++static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len) ++{ ++ csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK)); ++} ++ ++/* qDMA Source Descriptor Format */ +struct fsl_qdma_sdf { -+ u32 rev3:32; -+ u32 ssd:12; /* souce stride distance */ -+ u32 sss:12; /* souce stride size */ -+ u32 rev4:8; -+ u32 rev5:32; -+ u32 cmd; ++ __le32 rev3; ++ __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */ ++ __le32 rev5; ++ __le32 cmd; +} __packed; + ++/*qDMA Destination Descriptor Format*/ +struct fsl_qdma_ddf { -+ u32 rev1:32; -+ u32 dsd:12; /* Destination stride distance */ -+ u32 dss:12; /* Destination stride size */ -+ u32 rev2:8; -+ u32 rev3:32; -+ u32 cmd; ++ __le32 rev1; ++ __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */ ++ __le32 rev3; ++ __le32 cmd; +} __packed; + +struct fsl_qdma_chan { @@ -3453,24 +3497,27 @@ Signed-off-by: Yangbo Lu + + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE); + /* Head Command Descriptor(Frame Descriptor) */ -+ ccdf->addr = fsl_comp->bus_addr + 16; -+ ccdf->format = 1; /* Compound S/G format */ ++ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16); ++ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf)); ++ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf)); + /* Status notification is enqueued to status queue. */ -+ ccdf->ser = 1; + /* Compound Command Descriptor(Frame List Table) */ -+ csgf_desc->addr = fsl_comp->bus_addr + 64; ++ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64); + /* It must be 32 as Compound S/G Descriptor */ -+ csgf_desc->length = 32; -+ csgf_src->addr = src; -+ csgf_src->length = len; -+ csgf_dest->addr = dst; -+ csgf_dest->length = len; ++ qdma_csgf_set_len(csgf_desc, 32); ++ qdma_csgf_addr_set64(csgf_src, src); ++ qdma_csgf_set_len(csgf_src, len); ++ qdma_csgf_addr_set64(csgf_dest, dst); ++ qdma_csgf_set_len(csgf_dest, len); + /* This entry is the last entry. 
*/ -+ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY; ++ qdma_csgf_set_f(csgf_dest, len); + /* Descriptor Buffer */ -+ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; -+ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; -+ ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET; ++ sdf->cmd = cpu_to_le32( ++ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET); ++ ddf->cmd = cpu_to_le32( ++ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET); ++ ddf->cmd |= cpu_to_le32( ++ FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); +} + +static void fsl_qdma_comp_fill_sg( @@ -3494,49 +3541,48 @@ Signed-off-by: Yangbo Lu + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3; + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4; + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5; -+ + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE); + /* Head Command Descriptor(Frame Descriptor) */ -+ ccdf->addr = fsl_comp->bus_addr + 16; -+ ccdf->format = 1; /* Compound S/G format */ ++ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16); ++ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf)); + /* Status notification is enqueued to status queue. */ -+ ccdf->ser = 1; ++ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf)); + + /* Compound Command Descriptor(Frame List Table) */ -+ csgf_desc->addr = fsl_comp->bus_addr + 64; ++ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64); + /* It must be 32 as Compound S/G Descriptor */ -+ csgf_desc->length = 32; ++ qdma_csgf_set_len(csgf_desc, 32); + + sg_block = fsl_comp->sg_block; -+ csgf_src->addr = sg_block->bus_addr; ++ qdma_csgf_addr_set64(csgf_src, sg_block->bus_addr); + /* This entry link to the s/g entry. */ -+ csgf_src->e = FSL_QDMA_E_SG_TABLE; ++ qdma_csgf_set_e(csgf_src, 32); + + temp = sg_block + fsl_comp->sg_block_src; -+ csgf_dest->addr = temp->bus_addr; ++ qdma_csgf_addr_set64(csgf_dest, temp->bus_addr); + /* This entry is the last entry. */ -+ csgf_dest->f = FSL_QDMA_F_LAST_ENTRY; ++ qdma_csgf_set_f(csgf_dest, 32); + /* This entry link to the s/g entry. 
*/ -+ csgf_dest->e = FSL_QDMA_E_SG_TABLE; ++ qdma_csgf_set_e(csgf_dest, 32); + + for_each_sg(src_sg, sg, src_nents, i) { + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); -+ csgf_sg->addr = sg_dma_address(sg); -+ csgf_sg->length = sg_dma_len(sg); ++ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg)); ++ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg)); + total_src_len += sg_dma_len(sg); + + if (i == src_nents - 1) -+ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY; ++ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg)); + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) == + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) { + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1; + temp = sg_block + + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; -+ csgf_sg->addr = temp->bus_addr; -+ csgf_sg->e = FSL_QDMA_E_SG_TABLE; ++ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr); ++ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg)); + } + } + @@ -3545,20 +3591,20 @@ Signed-off-by: Yangbo Lu + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1); -+ csgf_sg->addr = sg_dma_address(sg); -+ csgf_sg->length = sg_dma_len(sg); ++ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg)); ++ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg)); + total_dst_len += sg_dma_len(sg); + + if (i == dst_nents - 1) -+ csgf_sg->f = FSL_QDMA_F_LAST_ENTRY; ++ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg)); + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) == + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) { + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr + + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1; + temp = sg_block + + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1; -+ csgf_sg->addr = temp->bus_addr; -+ csgf_sg->e = FSL_QDMA_E_SG_TABLE; ++ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr); ++ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg)); + } + } + @@ -3566,12 +3612,10 @@ Signed-off-by: Yangbo Lu + dev_err(&fsl_comp->qchan->vchan.chan.dev->device, + "The data length for src and dst isn't match.\n"); + -+ csgf_src->length = total_src_len; -+ csgf_dest->length = total_dst_len; ++ qdma_csgf_set_len(csgf_src, total_src_len); ++ qdma_csgf_set_len(csgf_dest, total_dst_len); + + /* Descriptor Buffer */ -+ sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; -+ ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET; +} + +/* @@ -3843,13 +3887,12 @@ Signed-off-by: Yangbo Lu + if (reg & FSL_QDMA_BSQSR_QE) + return 0; + status_addr = fsl_status->virt_head; -+ if (status_addr->queue == pre_queue && -+ status_addr->addr == pre_addr) ++ if (qdma_ccdf_get_queue(status_addr) == pre_queue && ++ qdma_ccdf_addr_get64(status_addr) == pre_addr) + duplicate = 1; -+ -+ i = status_addr->queue; -+ pre_queue = status_addr->queue; -+ pre_addr = status_addr->addr; ++ i = qdma_ccdf_get_queue(status_addr); ++ pre_queue = qdma_ccdf_get_queue(status_addr); ++ pre_addr = qdma_ccdf_addr_get64(status_addr); + temp_queue = fsl_queue + i; + spin_lock(&temp_queue->queue_lock); + if (list_empty(&temp_queue->comp_used)) { @@ -3865,8 +3908,7 @@ Signed-off-by: Yangbo Lu + list); + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + + 2; -+ if (fsl_comp->bus_addr + 16 != -+ (dma_addr_t)status_addr->addr) { ++ if (fsl_comp->bus_addr + 16 != pre_addr) { + if (duplicate) + duplicate_handle = 1; + else { @@ -3879,7 +3921,7 @@ Signed-off-by: Yangbo Lu + if (duplicate_handle) { + reg = qdma_readl(fsl_qdma, block + 
FSL_QDMA_BSQMR); + reg |= FSL_QDMA_BSQMR_DI; -+ status_addr->addr = 0x0; ++ qdma_ccdf_addr_set64(status_addr, 0x0); + fsl_status->virt_head++; + if (fsl_status->virt_head == fsl_status->cq + + fsl_status->n_cq) @@ -3892,7 +3934,7 @@ Signed-off-by: Yangbo Lu + + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); + reg |= FSL_QDMA_BSQMR_DI; -+ status_addr->addr = 0x0; ++ qdma_ccdf_addr_set64(status_addr, 0x0); + fsl_status->virt_head++; + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) + fsl_status->virt_head = fsl_status->cq; diff --git a/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch index 7ead7b6eca4..9826d822e93 100644 --- a/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch @@ -1,9 +1,9 @@ -From 76cd2ef6b69b67c09480a3248f7b910897f0bb2f Mon Sep 17 00:00:00 2001 +From b92e223750a07b28f175eae97d5ce3978df41be8 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:13:12 +0800 -Subject: [PATCH] flextimer: support layerscape +Date: Wed, 17 Jan 2018 15:32:05 +0800 +Subject: [PATCH 18/30] flextimer: support layerscape -This is a integrated patch for layerscape flextimer support. +This is an integrated patch for layerscape flextimer support. Signed-off-by: Wang Dongsheng Signed-off-by: Meng Yi diff --git a/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch index cd99f949254..5aea9ea738d 100644 --- a/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/807-gpu-support-layerscape.patch @@ -1,15 +1,15 @@ -From 4278a546526094dd57bfa3cf7ae2bf34092246db Mon Sep 17 00:00:00 2001 +From 177f92a14d8177124f37db0fafc11182e2dcdd62 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:10:01 +0800 -Subject: [PATCH] gpu: support layerscape +Date: Wed, 17 Jan 2018 15:33:05 +0800 +Subject: [PATCH 19/30] gpu: support layerscape -This is a integrated patch for layerscape dcu support. +This is an integrated patch for layerscape dcu support. Signed-off-by: Alison Wang Signed-off-by: Yangbo Lu --- - drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 18 ++++++++++++++++-- - 1 file changed, 16 insertions(+), 2 deletions(-) + drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c diff --git a/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch index ffda8a6cf82..0999832a162 100644 --- a/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/808-guts-support-layerscape.patch @@ -1,51 +1,20 @@ -From d51e307e4ecf51832c9e3bc30acb5dbd559d5f4d Mon Sep 17 00:00:00 2001 +From 45b0e1589b25ea3106a8c8d18bf653fde95baa9f Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:19:34 +0800 -Subject: [PATCH] guts: support layerscape +Date: Wed, 17 Jan 2018 15:34:22 +0800 +Subject: [PATCH 20/30] guts: support layerscape -This is a integrated patch for layerscape guts support. +This is an integrated patch for layerscape guts support. 
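
[Editor's aside on the fsl-qdma rework earlier in this series: the new qdma_ccdf/qdma_csgf helpers replace C bitfields with explicit little-endian fields plus accessors that pack a 40-bit bus address into addr_lo/addr_hi and read it back through a 64-bit overlay. The following is a standalone sketch of that packing only — plain C, little-endian host assumed, struct and function names are illustrative, not the driver's.]

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct fsl_qdma_ccdf: a 64-bit view overlays
 * the 32-bit low word and 8-bit high byte of a 40-bit address. */
struct ccdf_like {
	uint32_t status;
	uint32_t cfg;
	union {
		struct {
			uint32_t addr_lo;   /* low 32 bits of the 40-bit address */
			uint8_t  addr_hi;   /* high 8 bits of the 40-bit address */
			uint8_t  reserved[2];
			uint8_t  cfg8b_w1;  /* dd / queue byte */
		};
		uint64_t data;              /* __le64 in the real descriptor */
	};
};

static void set_addr40(struct ccdf_like *d, uint64_t addr)
{
	d->addr_lo = (uint32_t)addr;        /* lower_32_bits() in the driver */
	d->addr_hi = (uint8_t)(addr >> 32); /* upper_32_bits(), truncated to 8 bits */
}

static uint64_t get_addr40(const struct ccdf_like *d)
{
	return d->data & 0xffffffffffULL;   /* keep only bits 0..39 */
}

int main(void)
{
	struct ccdf_like d = { 0 };

	set_addr40(&d, 0x12345678abULL);
	assert(get_addr40(&d) == 0x12345678abULL);
	printf("40-bit address round-trips: 0x%llx\n",
	       (unsigned long long)get_addr40(&d));
	return 0;
}
```
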
Signed-off-by: Roy Pledge Signed-off-by: Geert Uytterhoeven Signed-off-by: Amrita Kumari Signed-off-by: Yangbo Lu --- - drivers/base/soc.c | 12 ++- drivers/soc/fsl/guts.c | 238 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fsl/guts.h | 125 +++++++++++++++---------- - 3 files changed, 323 insertions(+), 52 deletions(-) + 2 files changed, 315 insertions(+), 48 deletions(-) create mode 100644 drivers/soc/fsl/guts.c ---- a/drivers/base/soc.c -+++ b/drivers/base/soc.c -@@ -167,19 +167,23 @@ static int soc_device_match_one(struct d - const struct soc_device_attribute *match = arg; - - if (match->machine && -- !glob_match(match->machine, soc_dev->attr->machine)) -+ (!soc_dev->attr->machine || -+ !glob_match(match->machine, soc_dev->attr->machine))) - return 0; - - if (match->family && -- !glob_match(match->family, soc_dev->attr->family)) -+ (!soc_dev->attr->family || -+ !glob_match(match->family, soc_dev->attr->family))) - return 0; - - if (match->revision && -- !glob_match(match->revision, soc_dev->attr->revision)) -+ (!soc_dev->attr->revision || -+ !glob_match(match->revision, soc_dev->attr->revision))) - return 0; - - if (match->soc_id && -- !glob_match(match->soc_id, soc_dev->attr->soc_id)) -+ (!soc_dev->attr->soc_id || -+ !glob_match(match->soc_id, soc_dev->attr->soc_id))) - return 0; - - return 1; --- /dev/null +++ b/drivers/soc/fsl/guts.c @@ -0,0 +1,238 @@ diff --git a/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch index edb61b5c714..0b5f5837fb0 100644 --- a/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/809-i2c-support-layerscape.patch @@ -1,21 +1,180 @@ -From 3c5032fe34f1af50e9e5fe58d40bf93c1717302f Mon Sep 17 00:00:00 2001 +From 659aa30c59fb188b533a7edcb9bd38ac007a2739 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:19:53 +0800 -Subject: [PATCH] i2c: support layerscape +Date: Wed, 17 Jan 2018 15:35:11 +0800 +Subject: [PATCH 21/30] i2c: support layerscape -This is a integrated patch for layerscape i2c support. +This is an integrated patch for layerscape i2c support. 
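
[Note on the i2c-imx change that follows: when SDA is stuck low, the added recovery path drives SCL as a GPIO for nine clock periods so the stuck slave can finish its byte and release the bus, then leaves the line high. A compilable sketch of just that pulse train — gpio_set()/delay_us() are stand-ins for the driver's gpio_set_value()/udelay().]

```c
#include <stdio.h>

/* Stub "GPIO" and delay so the sketch compiles and runs anywhere. */
static void gpio_set(int level)      { printf("SCL=%d\n", level); }
static void delay_us(unsigned int n) { (void)n; }

/* Nine pulses per the I2C specification, finishing with SCL high. */
static void recover_stuck_bus(void)
{
	int i;

	for (i = 0; i < 9; i++) {
		gpio_set(1);
		delay_us(10);
		gpio_set(0);
		delay_us(10);
	}
	gpio_set(1);	/* always leave the clock line released/high */
}

int main(void)
{
	recover_stuck_bus();
	return 0;
}
```
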
Signed-off-by: Zhang Ying-22455 Signed-off-by: Priyanka Jain Signed-off-by: Yangbo Lu --- - drivers/i2c/busses/i2c-imx.c | 10 ++++++++- - drivers/i2c/muxes/i2c-mux-pca954x.c | 43 +++++++++++++++++++++++++++++++++++++ - 2 files changed, 52 insertions(+), 1 deletion(-) + drivers/i2c/busses/i2c-imx.c | 195 +++++++++++++++++++++++++++++++++++- + drivers/i2c/muxes/i2c-mux-pca954x.c | 43 ++++++++ + 2 files changed, 237 insertions(+), 1 deletion(-) --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c -@@ -889,6 +889,14 @@ static int i2c_imx_xfer(struct i2c_adapt +@@ -53,6 +53,11 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include + + /* This will be the driver name the kernel reports */ + #define DRIVER_NAME "imx-i2c" +@@ -117,6 +122,54 @@ + + #define I2C_PM_TIMEOUT 10 /* ms */ + ++enum pinmux_endian_type { ++ BIG_ENDIAN, ++ LITTLE_ENDIAN, ++}; ++ ++struct pinmux_cfg { ++ enum pinmux_endian_type endian; /* endian of RCWPMUXCR0 */ ++ u32 pmuxcr_offset; ++ u32 pmuxcr_set_bit; /* pin mux of RCWPMUXCR0 */ ++}; ++ ++static struct pinmux_cfg ls1012a_pinmux_cfg = { ++ .endian = BIG_ENDIAN, ++ .pmuxcr_offset = 0x430, ++ .pmuxcr_set_bit = 0x10, ++}; ++ ++static struct pinmux_cfg ls1043a_pinmux_cfg = { ++ .endian = BIG_ENDIAN, ++ .pmuxcr_offset = 0x40C, ++ .pmuxcr_set_bit = 0x10, ++}; ++ ++static struct pinmux_cfg ls1046a_pinmux_cfg = { ++ .endian = BIG_ENDIAN, ++ .pmuxcr_offset = 0x40C, ++ .pmuxcr_set_bit = 0x80000000, ++}; ++ ++static const struct of_device_id pinmux_of_match[] = { ++ { .compatible = "fsl,ls1012a-vf610-i2c", .data = &ls1012a_pinmux_cfg}, ++ { .compatible = "fsl,ls1043a-vf610-i2c", .data = &ls1043a_pinmux_cfg}, ++ { .compatible = "fsl,ls1046a-vf610-i2c", .data = &ls1046a_pinmux_cfg}, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, pinmux_of_match); ++ ++/* The SCFG, Supplemental Configuration Unit, provides SoC specific ++ * configuration and status registers for the device. There is a ++ * SDHC IO VSEL control register on SCFG for some platforms. It's ++ * used to support SDHC IO voltage switching. ++ */ ++static const struct of_device_id scfg_device_ids[] = { ++ { .compatible = "fsl,ls1012a-scfg", }, ++ { .compatible = "fsl,ls1043a-scfg", }, ++ { .compatible = "fsl,ls1046a-scfg", }, ++ {} ++}; + /* + * sorted list of clock divider, register value pairs + * taken from table 26-5, p.26-9, Freescale i.MX +@@ -210,6 +263,12 @@ struct imx_i2c_struct { + struct pinctrl_state *pinctrl_pins_gpio; + + struct imx_i2c_dma *dma; ++ int layerscape_bus_recover; ++ int gpio; ++ int need_set_pmuxcr; ++ int pmuxcr_set; ++ int pmuxcr_endian; ++ void __iomem *pmuxcr_addr; + }; + + static const struct imx_i2c_hwdata imx1_i2c_hwdata = { +@@ -879,6 +938,78 @@ static int i2c_imx_read(struct imx_i2c_s + return 0; + } + ++/* ++ * Based on the I2C specification, if the data line (SDA) is ++ * stuck low, the master should send nine * clock pulses. ++ * The I2C slave device that held the bus low should release it ++ * sometime within * those nine clocks. Due to this erratum, ++ * the I2C controller cannot generate nine clock pulses. 
++ */ ++static int i2c_imx_recovery_for_layerscape(struct imx_i2c_struct *i2c_imx) ++{ ++ u32 pmuxcr = 0; ++ int ret; ++ unsigned int i, temp; ++ ++ /* configure IICx_SCL/GPIO pin as a GPIO */ ++ if (i2c_imx->need_set_pmuxcr == 1) { ++ pmuxcr = ioread32be(i2c_imx->pmuxcr_addr); ++ if (i2c_imx->pmuxcr_endian == BIG_ENDIAN) ++ iowrite32be(i2c_imx->pmuxcr_set|pmuxcr, ++ i2c_imx->pmuxcr_addr); ++ else ++ iowrite32(i2c_imx->pmuxcr_set|pmuxcr, ++ i2c_imx->pmuxcr_addr); ++ } ++ ++ ret = gpio_request(i2c_imx->gpio, i2c_imx->adapter.name); ++ if (ret) { ++ dev_err(&i2c_imx->adapter.dev, ++ "can't get gpio: %d\n", ret); ++ return ret; ++ } ++ ++ /* Configure GPIO pin as an output and open drain. */ ++ gpio_direction_output(i2c_imx->gpio, 1); ++ udelay(10); ++ ++ /* Write data to generate 9 pulses */ ++ for (i = 0; i < 9; i++) { ++ gpio_set_value(i2c_imx->gpio, 1); ++ udelay(10); ++ gpio_set_value(i2c_imx->gpio, 0); ++ udelay(10); ++ } ++ /* ensure that the last level sent is always high */ ++ gpio_set_value(i2c_imx->gpio, 1); ++ ++ /* ++ * Set I2Cx_IBCR = 0h00 to generate a STOP and then ++ * set I2Cx_IBCR = 0h80 to reset ++ */ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); ++ temp &= ~(I2CR_MSTA | I2CR_MTX); ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); ++ ++ /* Restore the saved value of the register SCFG_RCWPMUXCR0 */ ++ if (i2c_imx->need_set_pmuxcr == 1) { ++ if (i2c_imx->pmuxcr_endian == BIG_ENDIAN) ++ iowrite32be(pmuxcr, i2c_imx->pmuxcr_addr); ++ else ++ iowrite32(pmuxcr, i2c_imx->pmuxcr_addr); ++ } ++ /* ++ * Set I2C_IBSR[IBAL] to clear the IBAL bit if- ++ * I2C_IBSR[IBAL] = 1 ++ */ ++ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ++ if (temp & I2SR_IAL) { ++ temp &= ~I2SR_IAL; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); ++ } ++ return 0; ++} ++ + static int i2c_imx_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) + { +@@ -889,6 +1020,19 @@ static int i2c_imx_xfer(struct i2c_adapt dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); @@ -24,13 +183,81 @@ Signed-off-by: Yangbo Lu + * before switching to master mode and attempting a Start cycle + */ + result = i2c_imx_bus_busy(i2c_imx, 0); -+ if (result) -+ goto out; ++ if (result) { ++ /* timeout */ ++ if ((result == -ETIMEDOUT) && (i2c_imx->layerscape_bus_recover == 1)) ++ i2c_imx_recovery_for_layerscape(i2c_imx); ++ else ++ goto out; ++ } + result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent); if (result < 0) goto out; -@@ -1100,7 +1108,7 @@ static int i2c_imx_probe(struct platform +@@ -1031,6 +1175,50 @@ static int i2c_imx_init_recovery_info(st + return 0; + } + ++/* ++ * switch SCL and SDA to their GPIO function and do some bitbanging ++ * for bus recovery. ++ * There are platforms such as Layerscape that don't support pinctrl, so add ++ * workaround for layerscape, it has no effect for other platforms. 
++ */ ++static int i2c_imx_init_recovery_for_layerscape( ++ struct imx_i2c_struct *i2c_imx, ++ struct platform_device *pdev) ++{ ++ const struct of_device_id *of_id; ++ struct device_node *np = pdev->dev.of_node; ++ struct pinmux_cfg *pinmux_cfg; ++ struct device_node *scfg_node; ++ void __iomem *scfg_base = NULL; ++ ++ i2c_imx->gpio = of_get_named_gpio(np, "fsl-scl-gpio", 0); ++ if (!gpio_is_valid(i2c_imx->gpio)) { ++ dev_info(&pdev->dev, "fsl-scl-gpio not found\n"); ++ return 0; ++ } ++ pinmux_cfg = devm_kzalloc(&pdev->dev, sizeof(*pinmux_cfg), GFP_KERNEL); ++ if (!pinmux_cfg) ++ return -ENOMEM; ++ ++ i2c_imx->need_set_pmuxcr = 0; ++ of_id = of_match_node(pinmux_of_match, np); ++ if (of_id) { ++ pinmux_cfg = (struct pinmux_cfg *)of_id->data; ++ i2c_imx->pmuxcr_endian = pinmux_cfg->endian; ++ i2c_imx->pmuxcr_set = pinmux_cfg->pmuxcr_set_bit; ++ scfg_node = of_find_matching_node(NULL, scfg_device_ids); ++ if (scfg_node) { ++ scfg_base = of_iomap(scfg_node, 0); ++ if (scfg_base) { ++ i2c_imx->pmuxcr_addr = scfg_base + pinmux_cfg->pmuxcr_offset; ++ i2c_imx->need_set_pmuxcr = 1; ++ } ++ } ++ } ++ i2c_imx->layerscape_bus_recover = 1; ++ return 0; ++} ++ + static u32 i2c_imx_func(struct i2c_adapter *adapter) + { + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL +@@ -1086,6 +1274,11 @@ static int i2c_imx_probe(struct platform + i2c_imx->adapter.dev.of_node = pdev->dev.of_node; + i2c_imx->base = base; + ++ /* Init optional bus recovery for layerscape */ ++ ret = i2c_imx_init_recovery_for_layerscape(i2c_imx, pdev); ++ if (ret) ++ return ret; ++ + /* Get I2C clock */ + i2c_imx->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(i2c_imx->clk)) { +@@ -1100,7 +1293,7 @@ static int i2c_imx_probe(struct platform } /* Request IRQ */ diff --git a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch index 71ef5d87aff..bbf60211f7f 100644 --- a/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch @@ -1,9 +1,9 @@ -From 152f316e7829f6aeb3a36009e7e5ec0f1d97d770 Mon Sep 17 00:00:00 2001 +From 0a6c701f92e1aa368c44632fa0985e92703354ed Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 27 Sep 2017 10:33:26 +0800 -Subject: [PATCH] iommu: support layerscape +Date: Wed, 17 Jan 2018 15:35:48 +0800 +Subject: [PATCH 22/30] iommu: support layerscape -This is a integrated patch for layerscape smmu support. +This is an integrated patch for layerscape smmu support. 
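
[Returning to the i2c-imx recovery path above: before bit-banging the pulses it saves SCFG_RCWPMUXCR0, ORs in the per-SoC pmuxcr_set_bit so the SCL pad is routed to GPIO, and writes the saved value back afterwards. A toy model of that save/set/restore sequence — a plain variable stands in for the ioremap()ed register, which the driver accesses with ioread32be()/iowrite32be() because SCFG is big-endian on these SoCs.]

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the mapped SCFG_RCWPMUXCR0 cell. */
static uint32_t rcwpmuxcr0 = 0x00000001;	/* arbitrary prior contents */

int main(void)
{
	uint32_t set_bit = 0x10;	/* e.g. the LS1043A pmuxcr_set_bit */
	uint32_t saved = rcwpmuxcr0;

	rcwpmuxcr0 = saved | set_bit;	/* SCL pad -> GPIO for recovery */
	/* ...the nine SCL pulses are generated here... */
	rcwpmuxcr0 = saved;		/* hand the pad back to the I2C block */

	printf("RCWPMUXCR0 restored to 0x%08x\n", rcwpmuxcr0);
	return 0;
}
```
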
Signed-off-by: Eric Auger Signed-off-by: Robin Murphy @@ -12,7 +12,7 @@ Signed-off-by: Sunil Goutham Signed-off-by: Yangbo Lu --- drivers/iommu/amd_iommu.c | 56 ++++++---- - drivers/iommu/arm-smmu-v3.c | 117 ++++++++++++++------- + drivers/iommu/arm-smmu-v3.c | 111 ++++++++++++++------ drivers/iommu/arm-smmu.c | 100 +++++++++++++++--- drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++------- drivers/iommu/intel-iommu.c | 92 ++++++++++++---- @@ -21,7 +21,7 @@ Signed-off-by: Yangbo Lu drivers/iommu/mtk_iommu_v1.c | 2 + include/linux/dma-iommu.h | 11 ++ include/linux/iommu.h | 55 +++++++--- - 10 files changed, 739 insertions(+), 157 deletions(-) + 10 files changed, 739 insertions(+), 151 deletions(-) --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c diff --git a/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch index ab16306983c..ec1af3d92db 100644 --- a/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/811-irqchip-support-layerscape.patch @@ -1,9 +1,9 @@ -From 1d596855b596db88f10b12a1be6fd19e249be170 Mon Sep 17 00:00:00 2001 +From 5a5ff01c790d49c0f6fd247f68f2fd9a2128ea91 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:13:29 +0800 -Subject: [PATCH] irqchip: support layerscape +Date: Wed, 17 Jan 2018 15:36:28 +0800 +Subject: [PATCH 23/30] irqchip: support layerscape -This is a integrated patch for layerscape gic support. +This is an integrated patch for layerscape gic support. Signed-off-by: Eric Auger Signed-off-by: Zhao Qiang diff --git a/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch index 6cad565ad23..1d3899d28bc 100644 --- a/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch +++ b/target/linux/layerscape/patches-4.9/812-mmc-layerscape-support.patch @@ -1,9 +1,9 @@ -From b31046c51c72232363711f0c623df08bf28c37e4 Mon Sep 17 00:00:00 2001 +From 4215d5757595e7ec7ca146c2b901beb177f415d8 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:21:30 +0800 -Subject: [PATCH] mmc: layerscape support +Date: Wed, 17 Jan 2018 15:37:13 +0800 +Subject: [PATCH 24/30] mmc: layerscape support -This is a integrated patch for layerscape mmc support. +This is an integrated patch for layerscape mmc support. 
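
[For the sdhci-of-esdhc clock hunks below: the controller reaches a requested rate through a power-of-two prescaler plus a small linear divisor, as in the visible `while (host->max_clk / pre_div / 16 > clock && pre_div < 256)` loop. The sketch below is a rough standalone model of that search; the bounds mirror the driver, but start values and the SDR/DDR and per-SoC workarounds are simplified.]

```c
#include <stdio.h>

static void esdhc_pick_dividers(unsigned int base_clk, unsigned int target,
				unsigned int *pre_div, unsigned int *div)
{
	*pre_div = 1;
	*div = 1;

	/* Prescaler: powers of two up to 256, leaving room for div <= 16. */
	while (base_clk / *pre_div / 16 > target && *pre_div < 256)
		*pre_div *= 2;
	/* Linear divisor: 1..16, first value that does not overshoot. */
	while (base_clk / *pre_div / *div > target && *div < 16)
		(*div)++;
}

int main(void)
{
	unsigned int pre_div, div;

	esdhc_pick_dividers(400000000, 50000000, &pre_div, &div);
	printf("pre_div=%u div=%u -> %u Hz\n",
	       pre_div, div, 400000000 / pre_div / div);
	return 0;
}
```
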
Adrian Hunter Jaehoon Chung @@ -12,10 +12,10 @@ Signed-off-by: Yangbo Lu --- drivers/mmc/host/Kconfig | 1 + drivers/mmc/host/sdhci-esdhc.h | 52 +++++--- - drivers/mmc/host/sdhci-of-esdhc.c | 251 ++++++++++++++++++++++++++++++++++++-- + drivers/mmc/host/sdhci-of-esdhc.c | 265 ++++++++++++++++++++++++++++++++++++-- drivers/mmc/host/sdhci.c | 45 ++++--- drivers/mmc/host/sdhci.h | 3 + - 5 files changed, 306 insertions(+), 46 deletions(-) + 5 files changed, 320 insertions(+), 46 deletions(-) --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -253,9 +253,16 @@ Signed-off-by: Yangbo Lu /* Workaround to reduce the clock frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { if (clock > 20000000) -@@ -441,8 +503,8 @@ static void esdhc_of_set_clock(struct sd +@@ -440,9 +502,15 @@ static void esdhc_of_set_clock(struct sd + clock -= 5000000; } ++ /* Workaround to reduce the clock frequency for ls1021a esdhc */ ++ if (of_find_compatible_node(NULL, NULL, "fsl,ls1021a-esdhc")) { ++ if (clock == 50000000) ++ clock = 46500000; ++ } ++ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | ESDHC_CLOCK_MASK); @@ -264,7 +271,7 @@ Signed-off-by: Yangbo Lu sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); while (host->max_clk / pre_div / 16 > clock && pre_div < 256) -@@ -462,7 +524,20 @@ static void esdhc_of_set_clock(struct sd +@@ -462,7 +530,20 @@ static void esdhc_of_set_clock(struct sd | (div << ESDHC_DIVIDER_SHIFT) | (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); @@ -286,7 +293,7 @@ Signed-off-by: Yangbo Lu } static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) -@@ -487,6 +562,33 @@ static void esdhc_pltfm_set_bus_width(st +@@ -487,12 +568,136 @@ static void esdhc_pltfm_set_bus_width(st sdhci_writel(host, ctrl, ESDHC_PROCTL); } @@ -319,11 +326,20 @@ Signed-off-by: Yangbo Lu + static void esdhc_reset(struct sdhci_host *host, u8 mask) { ++ u32 val; ++ sdhci_reset(host, mask); -@@ -495,6 +597,95 @@ static void esdhc_reset(struct sdhci_hos - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - } + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ ++ if (mask & SDHCI_RESET_ALL) { ++ val = sdhci_readl(host, ESDHC_TBCTL); ++ val &= ~ESDHC_TB_EN; ++ sdhci_writel(host, val, ESDHC_TBCTL); ++ } ++} ++ +/* The SCFG, Supplemental Configuration Unit, provides SoC specific + * configuration and status registers for the device. There is a + * SDHC IO VSEL control register on SCFG for some platforms. 
It's @@ -411,12 +427,10 @@ Signed-off-by: Yangbo Lu + esdhc_clock_enable(host, true); + + return sdhci_execute_tuning(mmc, opcode); -+} -+ + } + #ifdef CONFIG_PM_SLEEP - static u32 esdhc_proctl; - static int esdhc_of_suspend(struct device *dev) -@@ -575,10 +766,19 @@ static const struct sdhci_pltfm_data sdh +@@ -575,10 +780,19 @@ static const struct sdhci_pltfm_data sdh .ops = &sdhci_esdhc_le_ops, }; @@ -436,7 +450,7 @@ Signed-off-by: Yangbo Lu u16 host_ver; pltfm_host = sdhci_priv(host); -@@ -588,6 +788,36 @@ static void esdhc_init(struct platform_d +@@ -588,6 +802,36 @@ static void esdhc_init(struct platform_d esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; @@ -473,7 +487,7 @@ Signed-off-by: Yangbo Lu } static int sdhci_esdhc_probe(struct platform_device *pdev) -@@ -610,6 +840,11 @@ static int sdhci_esdhc_probe(struct plat +@@ -610,6 +854,11 @@ static int sdhci_esdhc_probe(struct plat if (IS_ERR(host)) return PTR_ERR(host); diff --git a/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch index 3675f3350de..2efbba304ae 100644 --- a/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/813-qe-support-layerscape.patch @@ -1,9 +1,9 @@ -From adb377019768396f339010ebb9e80fa8384992f7 Mon Sep 17 00:00:00 2001 +From 2ab544f7e943c63c300933d34815e78451cc0c26 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:20:30 +0800 -Subject: [PATCH] qe: support layerscape +Date: Wed, 17 Jan 2018 15:37:56 +0800 +Subject: [PATCH 25/30] qe: support layerscape -This is a integrated patch for layerscape qe support. +This is an integrated patch for layerscape qe support. Signed-off-by: Zhao Qiang Signed-off-by: Yangbo Lu diff --git a/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch index 0c726fbb7b0..9510a524d8e 100644 --- a/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/814-rtc-support-layerscape.patch @@ -1,9 +1,9 @@ -From 7e7944c484954ff7b5d53047194e59bfffd1540a Mon Sep 17 00:00:00 2001 +From bda12381598c3df43f4e60362a8cd4af58b7f5b0 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:20:55 +0800 -Subject: [PATCH] rtc: support layerscape +Date: Wed, 17 Jan 2018 15:38:54 +0800 +Subject: [PATCH 26/30] rtc: support layerscape -This is a integrated patch for layerscape rtc support. +This is an integrated patch for layerscape rtc support. Signed-off-by: Zhang Ying-22455 Signed-off-by: Yangbo Lu diff --git a/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch index 761680913b8..22904eec9d5 100644 --- a/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch @@ -1,9 +1,9 @@ -From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001 +From 027b679f248f15dea36c6cd6782d6643e2151057 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Mon, 25 Sep 2017 12:12:41 +0800 -Subject: [PATCH] spi: support layerscape +Date: Wed, 17 Jan 2018 15:39:43 +0800 +Subject: [PATCH 27/30] spi: support layerscape -This is a integrated patch for layerscape dspi support. +This is an integrated patch for layerscape dspi support. 
Signed-off-by: Christophe JAILLET Signed-off-by: Sanchayan Maity @@ -11,9 +11,8 @@ Signed-off-by: Geert Uytterhoeven Signed-off-by: Sanchayan Maity Signed-off-by: Yangbo Lu --- - drivers/spi/Kconfig | 1 + drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++- - 2 files changed, 305 insertions(+), 5 deletions(-) + 1 file changed, 304 insertions(+), 5 deletions(-) --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c diff --git a/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch index a14df9d70ec..f7119e494ce 100644 --- a/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/816-tty-serial-support-layerscape.patch @@ -1,9 +1,9 @@ -From 469daac0faff06209bc1d1390571b860d153a82b Mon Sep 17 00:00:00 2001 +From c35aec61e5bb0faafb2847a0d750ebd7345a4b0f Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 27 Sep 2017 10:33:47 +0800 -Subject: [PATCH] tty: serial: support layerscape +Date: Wed, 17 Jan 2018 15:40:24 +0800 +Subject: [PATCH 28/30] tty: serial: support layerscape -This is a integrated patch for layerscape uart support. +This is an integrated patch for layerscape uart support. Signed-off-by: Nikita Yushchenko Signed-off-by: Yuan Yao diff --git a/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch index 2e7885a694f..e199a4f4982 100644 --- a/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch @@ -1,9 +1,9 @@ -From b14460ee524a34d3b94b44032b52155c4cd708e5 Mon Sep 17 00:00:00 2001 +From a2a97f0d2c07a772899ca09967547bea6c9124c5 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 27 Sep 2017 10:34:07 +0800 -Subject: [PATCH] usb: support layerscape +Date: Wed, 17 Jan 2018 15:46:03 +0800 +Subject: [PATCH 29/30] usb: support layerscape -This is a integrated patch for layerscape usb support. +This is an integrated patch for layerscape usb support. 
Signed-off-by: yinbo.zhu Signed-off-by: Ramneek Mehresh @@ -15,29 +15,68 @@ Signed-off-by: Suresh Gupta Signed-off-by: Zhao Chenhui Signed-off-by: Yangbo Lu --- - drivers/net/usb/r8152.c | 4 + + drivers/net/usb/cdc_ether.c | 8 + + drivers/net/usb/r8152.c | 6 + drivers/usb/common/common.c | 50 ++++++ drivers/usb/core/hub.c | 8 + - drivers/usb/dwc3/core.c | 235 ++++++++++++++++++++++++++- - drivers/usb/dwc3/core.h | 46 +++++- - drivers/usb/dwc3/host.c | 15 +- + drivers/usb/dwc3/core.c | 243 ++++++++++++++++++++++++++++- + drivers/usb/dwc3/core.h | 51 ++++++- + drivers/usb/dwc3/ep0.c | 4 +- + drivers/usb/dwc3/gadget.c | 7 + + drivers/usb/dwc3/host.c | 24 ++- drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++--- drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +- drivers/usb/host/Kconfig | 2 +- - drivers/usb/host/ehci-fsl.c | 289 +++++++++++++++++++++++++++++++--- + drivers/usb/host/ehci-fsl.c | 279 +++++++++++++++++++++++++++++++--- drivers/usb/host/ehci-fsl.h | 3 + - drivers/usb/host/ehci-hub.c | 2 + - drivers/usb/host/ehci.h | 5 + + drivers/usb/host/ehci-hub.c | 4 + + drivers/usb/host/ehci.h | 9 ++ drivers/usb/host/fsl-mph-dr-of.c | 12 ++ + drivers/usb/host/xhci-plat.c | 10 ++ + drivers/usb/host/xhci-ring.c | 29 +++- + drivers/usb/host/xhci.c | 38 ++++- + drivers/usb/host/xhci.h | 5 +- drivers/usb/phy/phy-fsl-usb.c | 59 +++++-- drivers/usb/phy/phy-fsl-usb.h | 8 + include/linux/usb.h | 1 + include/linux/usb/of.h | 2 + - 18 files changed, 730 insertions(+), 73 deletions(-) + 25 files changed, 836 insertions(+), 88 deletions(-) +--- a/drivers/net/usb/cdc_ether.c ++++ b/drivers/net/usb/cdc_ether.c +@@ -532,6 +532,7 @@ static const struct driver_info wwan_inf + #define LENOVO_VENDOR_ID 0x17ef + #define NVIDIA_VENDOR_ID 0x0955 + #define HP_VENDOR_ID 0x03f0 ++#define TPLINK_VENDOR_ID 0x2357 + + static const struct usb_device_id products[] = { + /* BLACKLIST !! +@@ -732,6 +733,13 @@ static const struct usb_device_id produc + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, + }, ++ ++ /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ ++{ ++ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), ++ .driver_info = 0, ++}, + + /* WHITELIST!!! 
+ * --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c -@@ -1816,6 +1816,10 @@ static int rx_bottom(struct r8152 *tp, i +@@ -520,6 +520,7 @@ enum rtl8152_flags { + #define VENDOR_ID_SAMSUNG 0x04e8 + #define VENDOR_ID_LENOVO 0x17ef + #define VENDOR_ID_NVIDIA 0x0955 ++#define VENDOR_ID_TPLINK 0x2357 + + #define MCU_TYPE_PLA 0x0100 + #define MCU_TYPE_USB 0x0000 +@@ -1816,6 +1817,10 @@ static int rx_bottom(struct r8152 *tp, i unsigned int pkt_len; struct sk_buff *skb; @@ -48,6 +87,14 @@ Signed-off-by: Yangbo Lu pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) break; +@@ -4507,6 +4512,7 @@ static struct usb_device_id rtl8152_tabl + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, + {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, ++ {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, + {} + }; + --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -105,6 +105,56 @@ static const char *const usb_dr_modes[] @@ -280,7 +327,7 @@ Signed-off-by: Yangbo Lu /* Adjust Frame Length */ dwc3_frame_length_adjustment(dwc); -@@ -919,11 +1034,109 @@ static void dwc3_core_exit_mode(struct d +@@ -919,11 +1034,117 @@ static void dwc3_core_exit_mode(struct d } } @@ -325,6 +372,12 @@ Signed-off-by: Yangbo Lu + &hird_threshold); + dwc->usb3_lpm_capable = device_property_read_bool(dev, + "snps,usb3_lpm_capable"); ++ dwc->quirk_reverse_in_out = device_property_read_bool(dev, ++ "snps,quirk_reverse_in_out"); ++ dwc->quirk_stop_transfer_in_block = device_property_read_bool(dev, ++ "snps,quirk_stop_transfer_in_block"); ++ dwc->quirk_stop_ep_in_u1 = device_property_read_bool(dev, ++ "snps,quirk_stop_ep_in_u1"); + + dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); + @@ -365,6 +418,8 @@ Signed-off-by: Yangbo Lu + + dwc->tx_de_emphasis_quirk = device_property_read_bool(dev, + "snps,tx_de_emphasis_quirk"); ++ dwc->disable_devinit_u1u2_quirk = device_property_read_bool(dev, ++ "snps,disable_devinit_u1u2"); + device_property_read_u8(dev, "snps,tx_de_emphasis", + &tx_de_emphasis); + device_property_read_string(dev, "snps,hsphy_interface", @@ -390,7 +445,7 @@ Signed-off-by: Yangbo Lu struct resource *res; struct dwc3 *dwc; u8 lpm_nyet_threshold; -@@ -955,6 +1168,11 @@ static int dwc3_probe(struct platform_de +@@ -955,6 +1176,11 @@ static int dwc3_probe(struct platform_de dwc->xhci_resources[0].flags = res->flags; dwc->xhci_resources[0].name = res->name; @@ -402,7 +457,7 @@ Signed-off-by: Yangbo Lu res->start += DWC3_GLOBALS_REGS_START; /* -@@ -997,6 +1215,12 @@ static int dwc3_probe(struct platform_de +@@ -997,6 +1223,12 @@ static int dwc3_probe(struct platform_de dwc->usb3_lpm_capable = device_property_read_bool(dev, "snps,usb3_lpm_capable"); @@ -415,7 +470,7 @@ Signed-off-by: Yangbo Lu dwc->disable_scramble_quirk = device_property_read_bool(dev, "snps,disable_scramble_quirk"); dwc->u2exit_lfps_quirk = device_property_read_bool(dev, -@@ -1041,6 +1265,8 @@ static int dwc3_probe(struct platform_de +@@ -1041,6 +1273,8 @@ static int dwc3_probe(struct platform_de dwc->hird_threshold = hird_threshold | (dwc->is_utmi_l1_suspend << 4); @@ -424,7 +479,7 @@ Signed-off-by: Yangbo Lu platform_set_drvdata(pdev, dwc); dwc3_cache_hwparams(dwc); -@@ -1064,6 +1290,11 @@ static int dwc3_probe(struct platform_de +@@ -1064,6 +1298,11 @@ static int dwc3_probe(struct platform_de if (ret < 0) goto err1; @@ -506,7 +561,15 @@ Signed-off-by: Yangbo Lu * @irq_gadget: peripheral controller's IRQ number * @nr_scratch: number of scratch 
buffers * @u1u2: only used on revisions <1.83a for workaround -@@ -847,6 +878,7 @@ struct dwc3 { +@@ -829,6 +860,7 @@ struct dwc3_scratchpad_array { + * 1 - -3.5dB de-emphasis + * 2 - No de-emphasis + * 3 - Reserved ++ * @disable_devinit_u1u2_quirk: disable device-initiated U1/U2 request. + */ + struct dwc3 { + struct usb_ctrlrequest *ctrl_req; +@@ -847,6 +879,7 @@ struct dwc3 { spinlock_t lock; struct device *dev; @@ -514,7 +577,7 @@ Signed-off-by: Yangbo Lu struct platform_device *xhci; struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; -@@ -872,6 +904,12 @@ struct dwc3 { +@@ -872,6 +905,12 @@ struct dwc3 { enum usb_phy_interface hsphy_mode; u32 fladj; @@ -527,7 +590,7 @@ Signed-off-by: Yangbo Lu u32 irq_gadget; u32 nr_scratch; u32 u1u2; -@@ -948,9 +986,12 @@ struct dwc3 { +@@ -948,9 +987,12 @@ struct dwc3 { unsigned ep0_bounced:1; unsigned ep0_expect_in:1; unsigned has_hibernation:1; @@ -540,7 +603,7 @@ Signed-off-by: Yangbo Lu unsigned pending_events:1; unsigned pullups_connected:1; unsigned setup_packet_pending:1; -@@ -971,9 +1012,12 @@ struct dwc3 { +@@ -971,9 +1013,16 @@ struct dwc3 { unsigned dis_rxdet_inp3_quirk:1; unsigned dis_u2_freeclk_exists_quirk:1; unsigned dis_del_phy_power_chg_quirk:1; @@ -548,11 +611,52 @@ Signed-off-by: Yangbo Lu unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; ++ unsigned disable_devinit_u1u2_quirk:1; ++ unsigned quirk_reverse_in_out:1; ++ unsigned quirk_stop_transfer_in_block:1; ++ unsigned quirk_stop_ep_in_u1:1; + + u16 imod_interval; }; /* -------------------------------------------------------------------------- */ +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -360,9 +360,9 @@ static int dwc3_ep0_handle_status(struct + if ((dwc->speed == DWC3_DSTS_SUPERSPEED) || + (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) { + reg = dwc3_readl(dwc->regs, DWC3_DCTL); +- if (reg & DWC3_DCTL_INITU1ENA) ++ if ((reg & DWC3_DCTL_INITU1ENA) && !dwc->disable_devinit_u1u2_quirk) + usb_status |= 1 << USB_DEV_STAT_U1_ENABLED; +- if (reg & DWC3_DCTL_INITU2ENA) ++ if ((reg & DWC3_DCTL_INITU2ENA) && !dwc->disable_devinit_u1u2_quirk) + usb_status |= 1 << USB_DEV_STAT_U2_ENABLED; + } + +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -2930,6 +2930,7 @@ static irqreturn_t dwc3_interrupt(int ir + int dwc3_gadget_init(struct dwc3 *dwc) + { + int ret, irq; ++ u32 reg; + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + + irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); +@@ -3044,6 +3045,12 @@ int dwc3_gadget_init(struct dwc3 *dwc) + goto err5; + } + ++ if (dwc->disable_devinit_u1u2_quirk) { ++ reg = dwc3_readl(dwc->regs, DWC3_DCTL); ++ reg &= ~(DWC3_DCTL_INITU1ENA | DWC3_DCTL_INITU2ENA); ++ dwc3_writel(dwc->regs, DWC3_DCTL, reg); ++ } ++ + return 0; + + err5: --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -17,6 +17,8 @@ @@ -588,6 +692,22 @@ Signed-off-by: Yangbo Lu dwc->xhci = xhci; ret = platform_device_add_resources(xhci, dwc->xhci_resources, +@@ -90,6 +101,15 @@ int dwc3_host_init(struct dwc3 *dwc) + + memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); + ++ if (dwc->quirk_reverse_in_out) ++ props[prop_idx++].name = "quirk-reverse-in-out"; ++ ++ if (dwc->quirk_stop_transfer_in_block) ++ props[prop_idx++].name = "quirk-stop-transfer-in-block"; ++ ++ if (dwc->quirk_stop_ep_in_u1) ++ props[prop_idx++].name = "quirk-stop-ep-in-u1"; ++ + if (dwc->usb3_lpm_capable) + props[prop_idx++].name = "usb3-lpm-capable"; + --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ 
b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -198,7 +198,11 @@ __acquires(ep->udc->lock) @@ -815,15 +935,17 @@ Signed-off-by: Yangbo Lu Variation of ARC USB block used in some Freescale chips. --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c -@@ -37,13 +37,141 @@ +@@ -36,15 +36,127 @@ + #include #include #include - ++#include ++ +#ifdef CONFIG_PPC +#include +#include +#endif -+ + #include "ehci.h" #include "ehci-fsl.h" @@ -864,13 +986,23 @@ Signed-off-by: Yangbo Lu #define DRV_NAME "ehci-fsl" static struct hc_driver __read_mostly fsl_ehci_hc_driver; + +struct ehci_fsl { -+ /* store current hcd state for otg; -+ * have_hcd is true when host drv al already part of otg framework, -+ * otherwise false; -+ * hcd_add is true when otg framework wants to add host -+ * drv as part of otg;flase when it wants to remove it -+ */ ++ struct ehci_hcd ehci; ++ ++#ifdef CONFIG_PM ++struct ehci_regs saved_regs; ++struct ccsr_usb_phy saved_phy_regs; ++/* Saved USB PHY settings, need to restore after deep sleep. */ ++u32 usb_ctrl; ++#endif ++ /* ++ * store current hcd state for otg; ++ * have_hcd is true when host drv al already part of otg framework, ++ * otherwise false; ++ * hcd_add is true when otg framework wants to add host ++ * drv as part of otg;flase when it wants to remove it ++ */ +unsigned have_hcd:1; +unsigned hcd_add:1; +}; @@ -897,7 +1029,7 @@ Signed-off-by: Yangbo Lu + /* host, gadget and otg share same int line */ + retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); + if (retval == 0) -+ ehci_fsl->have_hcd = 1; ++ ehci_fsl->have_hcd = 1; + } else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) { + usb_remove_hcd(hcd); + ehci_fsl->have_hcd = 0; @@ -905,59 +1037,33 @@ Signed-off-by: Yangbo Lu +} +#endif + -+struct ehci_fsl { -+ struct ehci_hcd ehci; -+ -+#ifdef CONFIG_PM -+struct ehci_regs saved_regs; -+struct ccsr_usb_phy saved_phy_regs; -+/* Saved USB PHY settings, need to restore after deep sleep. 
*/ -+u32 usb_ctrl; -+#endif -+ /* -+ * store current hcd state for otg; -+ * have_hcd is true when host drv al already part of otg framework, -+ * otherwise false; -+ * hcd_add is true when otg framework wants to add host -+ * drv as part of otg;flase when it wants to remove it -+ */ -+unsigned have_hcd:1; -+unsigned hcd_add:1; -+}; -+ -+static strut ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) -+{ -+struct ehci_hcd *ehci = hcd_to_ehci(hcd); -+ -+return container_of(ehci, struct ehci_fsl, ehci); -+} -+ +#if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE) +static void do_change_hcd(struct work_struct *work) +{ -+struct ehci_hcd *ehci = container_of(work, struct ehci_hcd, -+change_hcd_work); -+struct usb_hcd *hcd = ehci_to_hcd(ehci); -+struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); -+void __iomem *non_ehci = hcd->regs; -+int retval; ++ struct ehci_hcd *ehci = container_of(work, struct ehci_hcd, ++ change_hcd_work); ++ struct usb_hcd *hcd = ehci_to_hcd(ehci); ++ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); ++ void __iomem *non_ehci = hcd->regs; ++ int retval; + -+if (ehci_fsl->hcd_add && !ehci_fsl->have_hcd) { -+writel(USBMODE_CM_HOST, non_ehci + FSL_SOC_USB_USBMODE); -+/* host, gadget and otg share same int line */ -+retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); -+if (retval == 0) -+ehci_fsl->have_hcd = 1; -+} else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) { -+ usb_remove_hcd(hcd); -+ehci_fsl->have_hcd = 0; -+} ++ if (ehci_fsl->hcd_add && !ehci_fsl->have_hcd) { ++ writel(USBMODE_CM_HOST, non_ehci + FSL_SOC_USB_USBMODE); ++ /* host, gadget and otg share same int line */ ++ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); ++ if (retval == 0) ++ ehci_fsl->have_hcd = 1; ++ } else if (!ehci_fsl->hcd_add && ehci_fsl->have_hcd) { ++ usb_remove_hcd(hcd); ++ ehci_fsl->have_hcd = 0; ++ } +} +#endif - ++ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ -@@ -131,6 +259,12 @@ static int fsl_ehci_drv_probe(struct pla + +@@ -131,6 +243,12 @@ static int fsl_ehci_drv_probe(struct pla clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL, CONTROL_REGISTER_W1C_MASK, 0x4); @@ -970,7 +1076,7 @@ Signed-off-by: Yangbo Lu /* * Enable UTMI phy and program PTS field in UTMI mode before asserting * controller reset for USB Controller version 2.5 -@@ -143,16 +277,20 @@ static int fsl_ehci_drv_probe(struct pla +@@ -143,16 +261,20 @@ static int fsl_ehci_drv_probe(struct pla /* Don't need to set host mode here. 
It will be done by tdi_reset() */ @@ -993,7 +1099,7 @@ Signed-off-by: Yangbo Lu dev_dbg(&pdev->dev, "hcd=0x%p ehci=0x%p, phy=0x%p\n", hcd, ehci, hcd->usb_phy); -@@ -168,6 +306,11 @@ static int fsl_ehci_drv_probe(struct pla +@@ -168,6 +290,11 @@ static int fsl_ehci_drv_probe(struct pla retval = -ENODEV; goto err2; } @@ -1005,17 +1111,16 @@ Signed-off-by: Yangbo Lu } #endif return retval; -@@ -181,6 +324,18 @@ static int fsl_ehci_drv_probe(struct pla +@@ -181,6 +308,17 @@ static int fsl_ehci_drv_probe(struct pla return retval; } -+static bool usb_phy_clk_valid(struct usb_hcd *hcd, -+ enum fsl_usb2_phy_modes phy_mode) ++static bool usb_phy_clk_valid(struct usb_hcd *hcd) +{ + void __iomem *non_ehci = hcd->regs; + bool ret = true; + -+ if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) ++ if (!(ioread32be(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) + ret = false; + + return ret; @@ -1024,7 +1129,7 @@ Signed-off-by: Yangbo Lu static int ehci_fsl_setup_phy(struct usb_hcd *hcd, enum fsl_usb2_phy_modes phy_mode, unsigned int port_offset) -@@ -219,6 +374,21 @@ static int ehci_fsl_setup_phy(struct usb +@@ -219,6 +357,21 @@ static int ehci_fsl_setup_phy(struct usb /* fall through */ case FSL_USB2_PHY_UTMI: case FSL_USB2_PHY_UTMI_DUAL: @@ -1046,7 +1151,16 @@ Signed-off-by: Yangbo Lu if (pdata->have_sysif_regs && pdata->controller_ver) { /* controller version 1.6 or above */ clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL, -@@ -292,14 +462,9 @@ static int ehci_fsl_usb_setup(struct ehc +@@ -286,20 +439,18 @@ static int ehci_fsl_usb_setup(struct ehc + if (pdata->has_fsl_erratum_a005275 == 1) + ehci->has_fsl_hs_errata = 1; + ++ if (pdata->has_fsl_erratum_a005697 == 1) ++ ehci->has_fsl_susp_errata = 1; ++ + if ((pdata->operating_mode == FSL_USB2_DR_HOST) || + (pdata->operating_mode == FSL_USB2_DR_OTG)) + if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0)) return -EINVAL; if (pdata->operating_mode == FSL_USB2_MPH_HOST) { @@ -1062,7 +1176,7 @@ Signed-off-by: Yangbo Lu ehci->has_fsl_port_bug = 1; if (pdata->port_enables & FSL_USB2_PORT0_ENABLED) -@@ -379,16 +544,57 @@ static int ehci_fsl_setup(struct usb_hcd +@@ -379,16 +530,57 @@ static int ehci_fsl_setup(struct usb_hcd return retval; } @@ -1127,7 +1241,7 @@ Signed-off-by: Yangbo Lu #ifdef CONFIG_PPC_MPC512x static int ehci_fsl_mpc512x_drv_suspend(struct device *dev) -@@ -535,26 +741,43 @@ static inline int ehci_fsl_mpc512x_drv_r +@@ -535,26 +727,45 @@ static inline int ehci_fsl_mpc512x_drv_r } #endif /* CONFIG_PPC_MPC512x */ @@ -1149,7 +1263,9 @@ Signed-off-by: Yangbo Lu + +#ifdef CONFIG_PPC +suspend_state_t pm_state; -+pm_state = pm_suspend_state(); ++/* FIXME:Need to port fsl_pm.h before enable below code. 
*/ ++/*pm_state = pm_suspend_state();*/ ++pm_state = PM_SUSPEND_MEM; + +if (pm_state == PM_SUSPEND_MEM) + ehci_fsl_save_context(hcd); @@ -1178,7 +1294,7 @@ Signed-off-by: Yangbo Lu if (!fsl_deep_sleep()) return 0; -@@ -568,12 +791,34 @@ static int ehci_fsl_drv_resume(struct de +@@ -568,12 +779,36 @@ static int ehci_fsl_drv_resume(struct de struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; @@ -1188,7 +1304,9 @@ Signed-off-by: Yangbo Lu + +#ifdef CONFIG_PPC +suspend_state_t pm_state; -+pm_state = pm_suspend_state(); ++/* FIXME:Need to port fsl_pm.h before enable below code.*/ ++/* pm_state = pm_suspend_state(); */ ++pm_state = PM_SUSPEND_MEM; + +if (pm_state == PM_SUSPEND_MEM) + ehci_fsl_restore_context(hcd); @@ -1225,7 +1343,16 @@ Signed-off-by: Yangbo Lu #endif /* _EHCI_FSL_H */ --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c -@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_ +@@ -278,6 +278,8 @@ static int ehci_bus_suspend (struct usb_ + else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) { + t2 |= PORT_SUSPEND; + set_bit(port, &ehci->bus_suspended); ++ if (ehci_has_fsl_susp_errata(ehci)) ++ usleep_range(10000, 20000); + } + + /* enable remote wakeup on all ports, if told to do so */ +@@ -305,6 +307,8 @@ static int ehci_bus_suspend (struct usb_ USB_PORT_STAT_HIGH_SPEED) fs_idle_delay = true; ehci_writel(ehci, t2, reg); @@ -1246,8 +1373,21 @@ Signed-off-by: Yangbo Lu /* list of itds & sitds completed while now_frame was still active */ struct list_head cached_itd_list; -@@ -706,8 +709,10 @@ ehci_port_speed(struct ehci_hcd *ehci, u +@@ -219,6 +222,7 @@ struct ehci_hcd { /* one per controlle + unsigned no_selective_suspend:1; + unsigned has_fsl_port_bug:1; /* FreeScale */ + unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */ ++ unsigned has_fsl_susp_errata:1; /*Freescale SUSP quirk*/ + unsigned big_endian_mmio:1; + unsigned big_endian_desc:1; + unsigned big_endian_capbase:1; +@@ -704,10 +708,15 @@ ehci_port_speed(struct ehci_hcd *ehci, u + #if defined(CONFIG_PPC_85xx) + /* Some Freescale processors have an erratum (USB A-005275) in which * incoming packets get corrupted in HS mode ++ * Some Freescale processors have an erratum (USB A-005697) in which ++ * we need to wait for 10ms for bus to fo into suspend mode after ++ * setting SUSP bit */ #define ehci_has_fsl_hs_errata(e) ((e)->has_fsl_hs_errata) +#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata) @@ -1278,6 +1418,149 @@ Signed-off-by: Yangbo Lu /* * Determine whether phy_clk_valid needs to be checked +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -223,6 +223,16 @@ static int xhci_plat_probe(struct platfo + if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) + xhci->quirks |= XHCI_LPM_SUPPORT; + ++ if (device_property_read_bool(&pdev->dev, "quirk-reverse-in-out")) ++ xhci->quirks |= XHCI_REVERSE_IN_OUT; ++ ++ if (device_property_read_bool(&pdev->dev, ++ "quirk-stop-transfer-in-block")) ++ xhci->quirks |= XHCI_STOP_TRANSFER_IN_BLOCK; ++ ++ if (device_property_read_bool(&pdev->dev, "quirk-stop-ep-in-u1")) ++ xhci->quirks |= XHCI_STOP_EP_IN_U1; ++ + if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) + xhci->quirks |= XHCI_BROKEN_PORT_PED; + +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1852,14 +1852,17 @@ static int finish_td(struct xhci_hcd *xh + union xhci_trb *event_trb, struct xhci_transfer_event *event, + struct xhci_virt_ep *ep, 
int *status, bool skip) + { ++ struct xhci_dequeue_state deq_state; + struct xhci_virt_device *xdev; + struct xhci_ring *ep_ring; ++ unsigned int stream_id; + unsigned int slot_id; + int ep_index; + struct urb *urb = NULL; + struct xhci_ep_ctx *ep_ctx; + int ret = 0; + struct urb_priv *urb_priv; ++ u32 remaining; + u32 trb_comp_code; + + slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); +@@ -1885,13 +1888,29 @@ static int finish_td(struct xhci_hcd *xh + if (trb_comp_code == COMP_STALL || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { +- /* Issue a reset endpoint command to clear the host side +- * halt, followed by a set dequeue command to move the +- * dequeue pointer past the TD. +- * The class driver clears the device side halt later. ++ /* ++ * A-007463: After transaction error, controller switches ++ * control transfer data stage from IN to OUT direction. + */ +- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ++ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ++ if (remaining && xhci_requires_manual_halt_cleanup(xhci, ep_ctx, ++ trb_comp_code) && ++ (xhci->quirks & XHCI_REVERSE_IN_OUT)) { ++ memset(&deq_state, 0, sizeof(deq_state)); ++ xhci_find_new_dequeue_state(xhci, slot_id, ++ ep_index, td->urb->stream_id, td, &deq_state); ++ xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, ++ stream_id, &deq_state); ++ xhci_ring_cmd_db(xhci); ++ } else { ++ /* Issue a reset endpoint command to clear the host side ++ * halt, followed by a set dequeue command to move the ++ * dequeue pointer past the TD. ++ * The class driver clears the device side halt later. ++ */ ++ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, + ep_ring->stream_id, td, event_trb); ++ } + } else { + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1570,14 +1570,38 @@ int xhci_urb_dequeue(struct usb_hcd *hcd + ret = -ENOMEM; + goto done; + } +- ep->ep_state |= EP_HALT_PENDING; +- ep->stop_cmds_pending++; +- ep->stop_cmd_timer.expires = jiffies + ++ /* ++ *A-009611: Issuing an End Transfer command on an IN endpoint. ++ *when a transfer is in progress on USB blocks the transmission ++ *Workaround: Software must wait for all existing TRBs to ++ *complete before issuing End transfer command. ++ */ ++ if ((ep_ring->enqueue == ep_ring->dequeue && ++ (xhci->quirks & XHCI_STOP_TRANSFER_IN_BLOCK)) || ++ !(xhci->quirks & XHCI_STOP_TRANSFER_IN_BLOCK)) { ++ ep->ep_state |= EP_HALT_PENDING; ++ ep->stop_cmds_pending++; ++ ep->stop_cmd_timer.expires = jiffies + + XHCI_STOP_EP_CMD_TIMEOUT * HZ; +- add_timer(&ep->stop_cmd_timer); +- xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, +- ep_index, 0); +- xhci_ring_cmd_db(xhci); ++ add_timer(&ep->stop_cmd_timer); ++ xhci_queue_stop_endpoint(xhci, command, ++ urb->dev->slot_id, ++ ep_index, 0); ++ xhci_ring_cmd_db(xhci); ++ } ++ ++ /* ++ *A-009668: Stop Endpoint Command does not complete. ++ *Workaround: Instead of issuing a Stop Endpoint Command, ++ *issue a Disable Slot Command with the corresponding slot ID. 
++ *Alternately, you can issue an Address Device Command with ++ *BSR=1 ++ */ ++ if ((urb->dev->speed <= USB_SPEED_HIGH) && ++ (xhci->quirks & XHCI_STOP_EP_IN_U1)) { ++ xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, ++ urb->dev->slot_id); ++ } + } + done: + spin_unlock_irqrestore(&xhci->lock, flags); +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1621,7 +1621,7 @@ struct xhci_hcd { + #define XHCI_STATE_REMOVING (1 << 2) + /* Statistics */ + int error_bitmask; +- unsigned int quirks; ++ u64 quirks; + #define XHCI_LINK_TRB_QUIRK (1 << 0) + #define XHCI_RESET_EP_QUIRK (1 << 1) + #define XHCI_NEC_HOST (1 << 2) +@@ -1657,6 +1657,9 @@ struct xhci_hcd { + #define XHCI_SSIC_PORT_UNUSED (1 << 22) + #define XHCI_NO_64BIT_SUPPORT (1 << 23) + #define XHCI_MISSING_CAS (1 << 24) ++#define XHCI_REVERSE_IN_OUT (1 << 29) ++#define XHCI_STOP_TRANSFER_IN_BLOCK (1 << 30) ++#define XHCI_STOP_EP_IN_U1 (1 << 31) + /* For controller with a broken Port Disable implementation */ + #define XHCI_BROKEN_PORT_PED (1 << 25) + #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -1,5 +1,5 @@ diff --git a/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch index 4854738d38e..05806a98d01 100644 --- a/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch @@ -1,9 +1,9 @@ -From 8d82d92ea697145c32bb36d9f39afd5bb0927bc2 Mon Sep 17 00:00:00 2001 +From 954edeee88305fecefe3f681e67a298f06e27974 Mon Sep 17 00:00:00 2001 From: Yangbo Lu -Date: Wed, 27 Sep 2017 10:34:46 +0800 -Subject: [PATCH] vfio: support layerscape +Date: Wed, 17 Jan 2018 15:48:47 +0800 +Subject: [PATCH 30/30] vfio: support layerscape -This is a integrated patch for layerscape vfio support. +This is an integrated patch for layerscape vfio support. Signed-off-by: Bharat Bhushan Signed-off-by: Eric Auger diff --git a/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch b/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch deleted file mode 100644 index c2081b2d092..00000000000 --- a/target/linux/layerscape/patches-4.9/819-Revert-usb-kconfig-remove-dependency-FSL_SOC-for-ehc.patch +++ /dev/null @@ -1,28 +0,0 @@ -From ba4f9dd74ccb9da91195b3570310754716064ef2 Mon Sep 17 00:00:00 2001 -From: Yangbo Lu -Date: Tue, 10 Oct 2017 15:55:31 +0800 -Subject: [PATCH] Revert "usb: kconfig: remove dependency FSL_SOC for ehci fsl - driver" - -This reverts commit 92042e8b3622a9bbfce0ebfc90edf6cd14d45708 on -LSDK linux (https://github.com/qoriq-open-source/linux). - -The patch reverted allowed to build ehci-fsl driver for non-PPC -platforms, but actually the driver was not ready. - -Signed-off-by: Yangbo Lu ---- - drivers/usb/host/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/usb/host/Kconfig -+++ b/drivers/usb/host/Kconfig -@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX - - config USB_EHCI_FSL - tristate "Support for Freescale PPC on-chip EHCI USB controller" -- depends on USB_EHCI_HCD -+ depends on FSL_SOC - select USB_EHCI_ROOT_HUB_TT - ---help--- - Variation of ARC USB block used in some Freescale chips. -- 2.30.2