# CONFIG_CAN_SJA1000 is not set
# CONFIG_CAN_SOFTING is not set
# CONFIG_CAN_TI_HECC is not set
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
# CONFIG_CFS_BANDWIDTH is not set
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_GENERIC_IDLE_POLL_SETUP=y
CONFIG_GENERIC_IO=y
CONFIG_GENERIC_IRQ_CHIP=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_GENERIC_MSI_IRQ=y
CONFIG_ARM_GIC_V2M=y
CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_ARM_GIC_V3_ITS_FSL_MC=y
# CONFIG_ARM_PL172_MPMC is not set
CONFIG_ARM_PMU=y
CONFIG_ARM_PSCI_FW=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
CONFIG_CAVIUM_ERRATUM_27456=y
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_CEPH_LIB=y
# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set
CONFIG_CGROUPS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEVICE=y
-# CONFIG_CGROUP_FREEZER is not set
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_PIDS=y
CONFIG_FSL_DPAA2=y
CONFIG_FSL_DPAA2_ETH=y
CONFIG_FSL_DPAA2_ETHSW=y
+# CONFIG_FSL_DPAA2_ETH_CEETM is not set
+# CONFIG_FSL_DPAA2_ETH_DEBUGFS is not set
# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set
CONFIG_FSL_DPAA2_EVB=y
CONFIG_FSL_DPAA2_MAC=y
CONFIG_GENERIC_IDLE_POLL_SETUP=y
CONFIG_GENERIC_IO=y
CONFIG_GENERIC_IRQ_CHIP=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
CONFIG_GENERIC_IRQ_MIGRATION=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_NET_IP_TUNNEL=y
CONFIG_NET_NS=y
CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NET_SWITCHDEV=y
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_QFMT_V2 is not set
CONFIG_QMAN_CEETM_UPDATE_PERIOD=1000
CONFIG_QORIQ_CPUFREQ=y
+CONFIG_QORIQ_THERMAL=y
# CONFIG_QUICC_ENGINE is not set
CONFIG_QUOTA=y
CONFIG_QUOTACTL=y
CONFIG_XZ_DEC_X86=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_ZLIB_INFLATE=y
-CONFIG_QORIQ_THERMAL=y
-From e43dec70614b55ba1ce24dfcdf8f51e36d800af2 Mon Sep 17 00:00:00 2001
+From 0774b97305507af18f8c43efb69aa00e6c57ae90 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:26:46 +0800
-Subject: [PATCH 01/30] config: support layerscape
+Date: Fri, 6 Jul 2018 15:31:14 +0800
+Subject: [PATCH 01/32] config: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
arch/arm/mach-imx/Kconfig | 1 +
drivers/base/Kconfig | 1 +
drivers/crypto/Makefile | 2 +-
- drivers/net/ethernet/freescale/Kconfig | 4 ++-
- drivers/net/ethernet/freescale/Makefile | 2 ++
- drivers/ptp/Kconfig | 29 +++++++++++++++++++
- drivers/rtc/Kconfig | 8 ++++++
+ drivers/net/ethernet/freescale/Kconfig | 4 +-
+ drivers/net/ethernet/freescale/Makefile | 2 +
+ drivers/ptp/Kconfig | 29 +++++++++++
+ drivers/rtc/Kconfig | 8 +++
drivers/rtc/Makefile | 1 +
drivers/soc/Kconfig | 3 +-
- drivers/soc/fsl/Kconfig | 22 ++++++++++++++
- drivers/soc/fsl/Kconfig.arm | 16 +++++++++++
- drivers/soc/fsl/Makefile | 4 +++
- drivers/soc/fsl/layerscape/Kconfig | 10 +++++++
+ drivers/soc/fsl/Kconfig | 30 ++++++++++++
+ drivers/soc/fsl/Kconfig.arm | 16 ++++++
+ drivers/soc/fsl/Makefile | 5 ++
+ drivers/soc/fsl/layerscape/Kconfig | 10 ++++
drivers/soc/fsl/layerscape/Makefile | 1 +
- drivers/staging/Kconfig | 6 ++++
+ drivers/staging/Kconfig | 6 +++
drivers/staging/Makefile | 3 ++
- drivers/staging/fsl-dpaa2/Kconfig | 51 +++++++++++++++++++++++++++++++++
- drivers/staging/fsl-dpaa2/Makefile | 9 ++++++
- 18 files changed, 169 insertions(+), 4 deletions(-)
+ drivers/staging/fsl-dpaa2/Kconfig | 65 +++++++++++++++++++++++++
+ drivers/staging/fsl-dpaa2/Makefile | 9 ++++
+ 18 files changed, 192 insertions(+), 4 deletions(-)
create mode 100644 drivers/soc/fsl/Kconfig
create mode 100644 drivers/soc/fsl/Kconfig.arm
create mode 100644 drivers/soc/fsl/layerscape/Kconfig
source "drivers/soc/rockchip/Kconfig"
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
-@@ -0,0 +1,22 @@
+@@ -0,0 +1,30 @@
+#
+# Freescale SOC drivers
+#
+ Initially only reading SVR and registering soc device are supported.
+ Other guts accesses, such as reading RCW, should eventually be moved
+ into this driver as well.
++
++config FSL_SLEEP_FSM
++ bool
++ help
++ This driver configures a hardware FSM (Finite State Machine) for deep sleep.
++ The FSM is used to finish clean-ups at the last stage of system entering deep
++ sleep, and also wakes up system when a wake up event happens.
++
+if ARM || ARM64
+source "drivers/soc/fsl/Kconfig.arm"
+endif
+endif
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
-@@ -5,3 +5,7 @@
+@@ -5,3 +5,8 @@
obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console/
+obj-$(CONFIG_SUSPEND) += rcpm.o
+obj-$(CONFIG_LS_SOC_DRIVERS) += layerscape/
++obj-$(CONFIG_FSL_SLEEP_FSM) += sleep_fsm.o
--- /dev/null
+++ b/drivers/soc/fsl/layerscape/Kconfig
@@ -0,0 +1,10 @@
+obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/Kconfig
-@@ -0,0 +1,51 @@
+@@ -0,0 +1,65 @@
+#
+# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
+#
+
+# QBMAN_DEBUG requires some additional DPIO APIs
+config FSL_DPAA2_ETH_DEBUGFS
-+ depends on DEBUG_FS && FSL_QBMAN_DEBUG
++ depends on DEBUG_FS
+ bool "Enable debugfs support"
+ default n
+ ---help---
+ (PFC) in the driver.
+
+ If unsure, say N.
++
++config FSL_DPAA2_ETH_CEETM
++ depends on NET_SCHED
++ bool "DPAA2 Ethernet CEETM QoS"
++ default n
++ ---help---
++ Enable QoS offloading support through the CEETM hardware block.
+endif
+
+source "drivers/staging/fsl-dpaa2/mac/Kconfig"
+source "drivers/staging/fsl-dpaa2/evb/Kconfig"
-+source "drivers/staging/fsl-dpaa2/ethsw/Kconfig"
++
++config FSL_DPAA2_ETHSW
++ tristate "Freescale DPAA2 Ethernet Switch"
++ depends on FSL_DPAA2
++ depends on NET_SWITCHDEV
++ ---help---
++ Driver for Freescale DPAA2 Ethernet Switch. Select
++ BRIDGE to have support for bridge tools.
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
+obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
-+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
+obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += rtc/
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
-From 67a2eceebe9dcd92a1a5f3e912340c8975c84434 Mon Sep 17 00:00:00 2001
+From f339945a8e81fff22df95284e142b79c37fd2333 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 14:50:41 +0800
-Subject: [PATCH 02/30] core-linux: support layerscape
+Date: Thu, 5 Jul 2018 16:07:09 +0800
+Subject: [PATCH 02/32] core-linux: support layerscape
This is an integrated patch for layerscape core-linux support.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/base/devres.c | 66 ++++++++++++++++++++++++++++
- drivers/base/soc.c | 70 +++++++++++++++++++++++++++++
- include/linux/device.h | 19 ++++++++
- include/linux/fsl/svr.h | 97 +++++++++++++++++++++++++++++++++++++++++
- include/linux/fsl_devices.h | 3 ++
- include/linux/netdev_features.h | 2 +
- include/linux/netdevice.h | 4 ++
- include/linux/skbuff.h | 2 +
- include/linux/sys_soc.h | 3 ++
- include/uapi/linux/if_ether.h | 1 +
- net/core/dev.c | 13 +++++-
- net/core/skbuff.c | 29 +++++++++++-
- net/sched/sch_generic.c | 7 +++
- 13 files changed, 313 insertions(+), 3 deletions(-)
+ drivers/base/devres.c | 66 ++++++
+ drivers/base/soc.c | 70 ++++++
+ .../net/ethernet/mellanox/mlxsw/spectrum.c | 2 +-
+ .../mellanox/mlxsw/spectrum_switchdev.c | 2 +-
+ drivers/net/ethernet/rocker/rocker_ofdpa.c | 4 +-
+ include/linux/device.h | 19 ++
+ include/linux/dma-mapping.h | 5 +
+ include/linux/fsl/svr.h | 97 ++++++++
+ include/linux/fsl_devices.h | 3 +
+ include/linux/irqdesc.h | 4 +
+ include/linux/irqdomain.h | 13 +-
+ include/linux/netdev_features.h | 2 +
+ include/linux/netdevice.h | 10 +-
+ include/linux/skbuff.h | 2 +
+ include/linux/sys_soc.h | 3 +
+ include/net/switchdev.h | 8 +-
+ include/uapi/linux/if_ether.h | 1 +
+ kernel/irq/Kconfig | 11 +
+ kernel/irq/Makefile | 1 +
+ kernel/irq/debugfs.c | 215 ++++++++++++++++++
+ kernel/irq/internals.h | 22 ++
+ kernel/irq/irqdesc.c | 1 +
+ kernel/irq/irqdomain.c | 171 ++++++++++----
+ kernel/irq/manage.c | 1 +
+ kernel/irq/msi.c | 2 +-
+ net/bridge/br.c | 4 +-
+ net/bridge/br_fdb.c | 2 +
+ net/bridge/br_private.h | 7 +
+ net/bridge/br_switchdev.c | 33 +++
+ net/core/dev.c | 30 ++-
+ net/core/net-sysfs.c | 20 +-
+ net/core/rtnetlink.c | 4 +-
+ net/core/skbuff.c | 29 ++-
+ net/sched/sch_generic.c | 7 +
+ 34 files changed, 809 insertions(+), 62 deletions(-)
create mode 100644 include/linux/fsl/svr.h
+ create mode 100644 kernel/irq/debugfs.c
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -859,7 +859,7 @@ mlxsw_sp_port_get_sw_stats64(const struc
+ return 0;
+ }
+
+-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
++static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
+ {
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -1405,7 +1405,7 @@ static void mlxsw_sp_fdb_call_notifiers(
+ if (learning_sync) {
+ info.addr = mac;
+ info.vid = vid;
+- notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
++ notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ call_switchdev_notifiers(notifier_type, dev, &info.info);
+ }
+ }
+--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
++++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
+@@ -1939,10 +1939,10 @@ static void ofdpa_port_fdb_learn_work(st
+
+ rtnl_lock();
+ if (learned && removing)
+- call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
++ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ lw->ofdpa_port->dev, &info.info);
+ else if (learned && !removing)
+- call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
++ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ lw->ofdpa_port->dev, &info.info);
+ rtnl_unlock();
+
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -688,6 +688,25 @@ void __iomem *devm_ioremap_resource(stru
static inline int devm_add_action_or_reset(struct device *dev,
void (*action)(void *), void *data)
{
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -164,6 +164,11 @@ int dma_mmap_from_coherent(struct device
+
+ #ifdef CONFIG_HAS_DMA
+ #include <asm/dma-mapping.h>
++static inline void set_dma_ops(struct device *dev,
++ struct dma_map_ops *dma_ops)
++{
++ dev->archdata.dma_ops = dma_ops;
++}
+ #else
+ /*
+ * Define the dma api to allow compilation but not linking of
--- /dev/null
+++ b/include/linux/fsl/svr.h
@@ -0,0 +1,97 @@
unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -46,6 +46,7 @@ struct pt_regs;
+ * @rcu: rcu head for delayed free
+ * @kobj: kobject used to represent this struct in sysfs
+ * @dir: /proc/irq/ procfs entry
++ * @debugfs_file: dentry for the debugfs file
+ * @name: flow handler name for /proc/interrupts output
+ */
+ struct irq_desc {
+@@ -88,6 +89,9 @@ struct irq_desc {
+ #ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *dir;
+ #endif
++#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
++ struct dentry *debugfs_file;
++#endif
+ #ifdef CONFIG_SPARSE_IRQ
+ struct rcu_head rcu;
+ struct kobject kobj;
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -138,6 +138,7 @@ struct irq_domain_chip_generic;
+ * setting up one or more generic chips for interrupt controllers
+ * drivers using the generic chip library which uses this pointer.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
++ * @debugfs_file: dentry for the domain debugfs file
+ *
+ * Revmap data, used internally by irq_domain
+ * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
+@@ -160,6 +161,9 @@ struct irq_domain {
+ #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_domain *parent;
+ #endif
++#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
++ struct dentry *debugfs_file;
++#endif
+
+ /* reverse map data. The linear map gets appended to the irq_domain */
+ irq_hw_number_t hwirq_max;
+@@ -174,8 +178,8 @@ enum {
+ /* Irq domain is hierarchical */
+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
+
+- /* Core calls alloc/free recursive through the domain hierarchy. */
+- IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
++ /* Irq domain name was allocated in __irq_domain_add() */
++ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
+
+ /* Irq domain is an IPI domain with virq per cpu */
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
+@@ -231,6 +235,9 @@ static inline bool is_fwnode_irqchip(str
+ return fwnode && fwnode->type == FWNODE_IRQCHIP;
+ }
+
++extern void irq_domain_update_bus_token(struct irq_domain *domain,
++ enum irq_domain_bus_token bus_token);
++
+ static inline
+ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
+@@ -403,7 +410,7 @@ static inline int irq_domain_alloc_irqs(
+ NULL);
+ }
+
+-extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
++extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -74,6 +74,7 @@ enum {
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
+@@ -930,7 +930,7 @@ struct netdev_xdp {
+ * 3. Update dev->stats asynchronously and atomically, and define
+ * neither operation.
+ *
+- * bool (*ndo_has_offload_stats)(int attr_id)
++ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+@@ -1167,7 +1167,7 @@ struct net_device_ops {
+
+ struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
+- bool (*ndo_has_offload_stats)(int attr_id);
++ bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
@@ -1509,6 +1509,8 @@ enum netdev_priv_flags {
* @if_port: Selectable AUI, TP, ...
* @dma: DMA channel
unsigned short type;
unsigned short hard_header_len;
unsigned short min_header_len;
+@@ -1938,6 +1942,8 @@ int netdev_set_prio_tc_map(struct net_de
+ return 0;
+ }
+
++int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
++
+ static inline
+ void netdev_reset_tc(struct net_device *dev)
+ {
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -903,6 +903,7 @@ void kfree_skb(struct sk_buff *skb);
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches);
#endif /* __SOC_BUS_H */
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -46,6 +46,7 @@ enum switchdev_attr_id {
+ SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
+ SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
++ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
+ SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
+ };
+@@ -60,6 +61,7 @@ struct switchdev_attr {
+ struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
+ u8 stp_state; /* PORT_STP_STATE */
+ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
++ unsigned long brport_flags_support; /* PORT_BRIDGE_FLAGS_SUPPORT */
+ clock_t ageing_time; /* BRIDGE_AGEING_TIME */
+ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
+ } u;
+@@ -149,8 +151,10 @@ struct switchdev_ops {
+ };
+
+ enum switchdev_notifier_type {
+- SWITCHDEV_FDB_ADD = 1,
+- SWITCHDEV_FDB_DEL,
++ SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
++ SWITCHDEV_FDB_DEL_TO_BRIDGE,
++ SWITCHDEV_FDB_ADD_TO_DEVICE,
++ SWITCHDEV_FDB_DEL_TO_DEVICE,
+ };
+
+ struct switchdev_notifier_info {
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -36,6 +36,7 @@
/*
* These are the defined Ethernet Protocol ID's.
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -108,4 +108,15 @@ config SPARSE_IRQ
+
+ If you don't know what to do here, say N.
+
++config GENERIC_IRQ_DEBUGFS
++ bool "Expose irq internals in debugfs"
++ depends on DEBUG_FS
++ default n
++ ---help---
++
++ Exposes internal state information through debugfs. Mostly for
++ developers and debugging of hard to diagnose interrupt problems.
++
++ If you don't know what to do here, say N.
++
+ endmenu
+--- a/kernel/irq/Makefile
++++ b/kernel/irq/Makefile
+@@ -10,3 +10,4 @@ obj-$(CONFIG_PM_SLEEP) += pm.o
+ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
+ obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+ obj-$(CONFIG_SMP) += affinity.o
++obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
+--- /dev/null
++++ b/kernel/irq/debugfs.c
+@@ -0,0 +1,215 @@
++/*
++ * Copyright 2017 Thomas Gleixner <tglx@linutronix.de>
++ *
++ * This file is licensed under the GPL V2.
++ */
++#include <linux/debugfs.h>
++#include <linux/irqdomain.h>
++#include <linux/irq.h>
++
++#include "internals.h"
++
++static struct dentry *irq_dir;
++
++struct irq_bit_descr {
++ unsigned int mask;
++ char *name;
++};
++#define BIT_MASK_DESCR(m) { .mask = m, .name = #m }
++
++static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
++ const struct irq_bit_descr *sd, int size)
++{
++ int i;
++
++ for (i = 0; i < size; i++, sd++) {
++ if (state & sd->mask)
++ seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
++ }
++}
++
++#ifdef CONFIG_SMP
++static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
++{
++ struct irq_data *data = irq_desc_get_irq_data(desc);
++ struct cpumask *msk;
++
++ msk = irq_data_get_affinity_mask(data);
++ seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
++#ifdef CONFIG_GENERIC_PENDING_IRQ
++ msk = desc->pending_mask;
++ seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));
++#endif
++}
++#else
++static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
++#endif
++
++static const struct irq_bit_descr irqchip_flags[] = {
++ BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
++ BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
++ BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
++ BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
++ BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
++ BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
++ BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
++};
++
++static void
++irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
++{
++ struct irq_chip *chip = data->chip;
++
++ if (!chip) {
++ seq_printf(m, "chip: None\n");
++ return;
++ }
++ seq_printf(m, "%*schip: %s\n", ind, "", chip->name);
++ seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
++ irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
++ ARRAY_SIZE(irqchip_flags));
++}
++
++static void
++irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
++{
++ seq_printf(m, "%*sdomain: %s\n", ind, "",
++ data->domain ? data->domain->name : "");
++ seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
++ irq_debug_show_chip(m, data, ind + 1);
++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
++ if (!data->parent_data)
++ return;
++ seq_printf(m, "%*sparent:\n", ind + 1, "");
++ irq_debug_show_data(m, data->parent_data, ind + 4);
++#endif
++}
++
++static const struct irq_bit_descr irqdata_states[] = {
++ BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
++ BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
++ BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
++ BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
++ BIT_MASK_DESCR(IRQD_LEVEL),
++
++ BIT_MASK_DESCR(IRQD_ACTIVATED),
++ BIT_MASK_DESCR(IRQD_IRQ_STARTED),
++ BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
++ BIT_MASK_DESCR(IRQD_IRQ_MASKED),
++ BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
++
++ BIT_MASK_DESCR(IRQD_PER_CPU),
++ BIT_MASK_DESCR(IRQD_NO_BALANCING),
++
++ BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
++ BIT_MASK_DESCR(IRQD_AFFINITY_SET),
++ BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
++ BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
++ BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
++
++ BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
++
++ BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
++ BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
++};
++
++static const struct irq_bit_descr irqdesc_states[] = {
++ BIT_MASK_DESCR(_IRQ_NOPROBE),
++ BIT_MASK_DESCR(_IRQ_NOREQUEST),
++ BIT_MASK_DESCR(_IRQ_NOTHREAD),
++ BIT_MASK_DESCR(_IRQ_NOAUTOEN),
++ BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
++ BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
++ BIT_MASK_DESCR(_IRQ_IS_POLLED),
++ BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
++};
++
++static const struct irq_bit_descr irqdesc_istates[] = {
++ BIT_MASK_DESCR(IRQS_AUTODETECT),
++ BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
++ BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
++ BIT_MASK_DESCR(IRQS_ONESHOT),
++ BIT_MASK_DESCR(IRQS_REPLAY),
++ BIT_MASK_DESCR(IRQS_WAITING),
++ BIT_MASK_DESCR(IRQS_PENDING),
++ BIT_MASK_DESCR(IRQS_SUSPENDED),
++};
++
++
++static int irq_debug_show(struct seq_file *m, void *p)
++{
++ struct irq_desc *desc = m->private;
++ struct irq_data *data;
++
++ raw_spin_lock_irq(&desc->lock);
++ data = irq_desc_get_irq_data(desc);
++ seq_printf(m, "handler: %pf\n", desc->handle_irq);
++ seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
++ irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
++ ARRAY_SIZE(irqdesc_states));
++ seq_printf(m, "istate: 0x%08x\n", desc->istate);
++ irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
++ ARRAY_SIZE(irqdesc_istates));
++ seq_printf(m, "ddepth: %u\n", desc->depth);
++ seq_printf(m, "wdepth: %u\n", desc->wake_depth);
++ seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
++ irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
++ ARRAY_SIZE(irqdata_states));
++ seq_printf(m, "node: %d\n", irq_data_get_node(data));
++ irq_debug_show_masks(m, desc);
++ irq_debug_show_data(m, data, 0);
++ raw_spin_unlock_irq(&desc->lock);
++ return 0;
++}
++
++static int irq_debug_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, irq_debug_show, inode->i_private);
++}
++
++static const struct file_operations dfs_irq_ops = {
++ .open = irq_debug_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
++{
++ char name [10];
++
++ if (!irq_dir || !desc || desc->debugfs_file)
++ return;
++
++ sprintf(name, "%d", irq);
++ desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
++ &dfs_irq_ops);
++}
++
++void irq_remove_debugfs_entry(struct irq_desc *desc)
++{
++ if (desc->debugfs_file)
++ debugfs_remove(desc->debugfs_file);
++}
++
++static int __init irq_debugfs_init(void)
++{
++ struct dentry *root_dir;
++ int irq;
++
++ root_dir = debugfs_create_dir("irq", NULL);
++ if (!root_dir)
++ return -ENOMEM;
++
++ irq_domain_debugfs_init(root_dir);
++
++ irq_dir = debugfs_create_dir("irqs", root_dir);
++
++ irq_lock_sparse();
++ for_each_active_irq(irq)
++ irq_add_debugfs_entry(irq, irq_to_desc(irq));
++ irq_unlock_sparse();
++
++ return 0;
++}
++__initcall(irq_debugfs_init);
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -169,6 +169,11 @@ irq_put_desc_unlock(struct irq_desc *des
+
+ #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
++static inline unsigned int irqd_get(struct irq_data *d)
++{
++ return __irqd_to_state(d);
++}
++
+ /*
+ * Manipulation functions for irq_data.state
+ */
+@@ -226,3 +231,20 @@ irq_pm_install_action(struct irq_desc *d
+ static inline void
+ irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
+ #endif
++
++#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
++void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
++void irq_remove_debugfs_entry(struct irq_desc *desc);
++# ifdef CONFIG_IRQ_DOMAIN
++void irq_domain_debugfs_init(struct dentry *root);
++# else
++static inline void irq_domain_debugfs_init(struct dentry *root) { }
++# endif
++#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
++static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
++{
++}
++static inline void irq_remove_debugfs_entry(struct irq_desc *d)
++{
++}
++#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -394,6 +394,7 @@ static void free_desc(unsigned int irq)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+
++ irq_remove_debugfs_entry(desc);
+ unregister_irq_proc(irq, desc);
+
+ /*
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -31,6 +31,14 @@ struct irqchip_fwid {
+ void *data;
+ };
+
++#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
++static void debugfs_add_domain_dir(struct irq_domain *d);
++static void debugfs_remove_domain_dir(struct irq_domain *d);
++#else
++static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
++static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
++#endif
++
+ /**
+ * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
+ * identifying an irq domain
+@@ -117,6 +125,7 @@ struct irq_domain *__irq_domain_add(stru
+ irq_domain_check_hierarchy(domain);
+
+ mutex_lock(&irq_domain_mutex);
++ debugfs_add_domain_dir(domain);
+ list_add(&domain->link, &irq_domain_list);
+ mutex_unlock(&irq_domain_mutex);
+
+@@ -136,6 +145,7 @@ EXPORT_SYMBOL_GPL(__irq_domain_add);
+ void irq_domain_remove(struct irq_domain *domain)
+ {
+ mutex_lock(&irq_domain_mutex);
++ debugfs_remove_domain_dir(domain);
+
+ WARN_ON(!radix_tree_empty(&domain->revmap_tree));
+
+@@ -156,6 +166,37 @@ void irq_domain_remove(struct irq_domain
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_remove);
+
++void irq_domain_update_bus_token(struct irq_domain *domain,
++ enum irq_domain_bus_token bus_token)
++{
++ char *name;
++
++ if (domain->bus_token == bus_token)
++ return;
++
++ mutex_lock(&irq_domain_mutex);
++
++ domain->bus_token = bus_token;
++
++ name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
++ if (!name) {
++ mutex_unlock(&irq_domain_mutex);
++ return;
++ }
++
++ debugfs_remove_domain_dir(domain);
++
++ if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
++ kfree(domain->name);
++ else
++ domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
++
++ domain->name = name;
++ debugfs_add_domain_dir(domain);
++
++ mutex_unlock(&irq_domain_mutex);
++}
++
+ /**
+ * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
+ * @of_node: pointer to interrupt controller's device tree node.
+@@ -1164,43 +1205,18 @@ void irq_domain_free_irqs_top(struct irq
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ }
+
+-static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
+-{
+- return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
+-}
+-
+-static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
++static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs)
+ {
+ domain->ops->free(domain, irq_base, nr_irqs);
+- if (irq_domain_is_auto_recursive(domain)) {
+- BUG_ON(!domain->parent);
+- irq_domain_free_irqs_recursive(domain->parent, irq_base,
+- nr_irqs);
+- }
+ }
+
+-int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
++int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs, void *arg)
+ {
+- int ret = 0;
+- struct irq_domain *parent = domain->parent;
+- bool recursive = irq_domain_is_auto_recursive(domain);
+-
+- BUG_ON(recursive && !parent);
+- if (recursive)
+- ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
+- nr_irqs, arg);
+- if (ret < 0)
+- return ret;
+-
+- ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
+- if (ret < 0 && recursive)
+- irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
+-
+- return ret;
++ return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
+ }
+
+ /**
+@@ -1261,7 +1277,7 @@ int __irq_domain_alloc_irqs(struct irq_d
+ }
+
+ mutex_lock(&irq_domain_mutex);
+- ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
++ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
+ if (ret < 0) {
+ mutex_unlock(&irq_domain_mutex);
+ goto out_free_irq_data;
+@@ -1296,7 +1312,7 @@ void irq_domain_free_irqs(unsigned int v
+ mutex_lock(&irq_domain_mutex);
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_remove_irq(virq + i);
+- irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
++ irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
+ mutex_unlock(&irq_domain_mutex);
+
+ irq_domain_free_irq_data(virq, nr_irqs);
+@@ -1316,15 +1332,11 @@ int irq_domain_alloc_irqs_parent(struct
+ unsigned int irq_base, unsigned int nr_irqs,
+ void *arg)
+ {
+- /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
+- if (irq_domain_is_auto_recursive(domain))
+- return 0;
++ if (!domain->parent)
++ return -ENOSYS;
+
+- domain = domain->parent;
+- if (domain)
+- return irq_domain_alloc_irqs_recursive(domain, irq_base,
+- nr_irqs, arg);
+- return -ENOSYS;
++ return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
++ nr_irqs, arg);
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
+
+@@ -1339,10 +1351,10 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_
+ void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base, unsigned int nr_irqs)
+ {
+- /* irq_domain_free_irqs_recursive() will call parent's free */
+- if (!irq_domain_is_auto_recursive(domain) && domain->parent)
+- irq_domain_free_irqs_recursive(domain->parent, irq_base,
+- nr_irqs);
++ if (!domain->parent)
++ return;
++
++ irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
+
+@@ -1448,3 +1460,78 @@ static void irq_domain_check_hierarchy(s
+ {
+ }
+ #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
++
++#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
++static struct dentry *domain_dir;
++
++static void
++irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
++{
++ seq_printf(m, "%*sname: %s\n", ind, "", d->name);
++ seq_printf(m, "%*ssize: %u\n", ind + 1, "",
++ d->revmap_size + d->revmap_direct_max_irq);
++ seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
++ seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
++ if (!d->parent)
++ return;
++ seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
++ irq_domain_debug_show_one(m, d->parent, ind + 4);
++#endif
++}
++
++static int irq_domain_debug_show(struct seq_file *m, void *p)
++{
++ struct irq_domain *d = m->private;
++
++ /* Default domain? Might be NULL */
++ if (!d) {
++ if (!irq_default_domain)
++ return 0;
++ d = irq_default_domain;
++ }
++ irq_domain_debug_show_one(m, d, 0);
++ return 0;
++}
++
++static int irq_domain_debug_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, irq_domain_debug_show, inode->i_private);
++}
++
++static const struct file_operations dfs_domain_ops = {
++ .open = irq_domain_debug_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static void debugfs_add_domain_dir(struct irq_domain *d)
++{
++ if (!d->name || !domain_dir || d->debugfs_file)
++ return;
++ d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
++ &dfs_domain_ops);
++}
++
++static void debugfs_remove_domain_dir(struct irq_domain *d)
++{
++ if (d->debugfs_file)
++ debugfs_remove(d->debugfs_file);
++}
++
++void __init irq_domain_debugfs_init(struct dentry *root)
++{
++ struct irq_domain *d;
++
++ domain_dir = debugfs_create_dir("domains", root);
++ if (!domain_dir)
++ return;
++
++ debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
++ mutex_lock(&irq_domain_mutex);
++ list_for_each_entry(d, &irq_domain_list, link)
++ debugfs_add_domain_dir(d);
++ mutex_unlock(&irq_domain_mutex);
++}
++#endif
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1387,6 +1387,7 @@ __setup_irq(unsigned int irq, struct irq
+ wake_up_process(new->secondary->thread);
+
+ register_irq_proc(irq, desc);
++ irq_add_debugfs_entry(irq, desc);
+ new->dir = NULL;
+ register_handler_proc(irq, new);
+ free_cpumask_var(mask);
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -310,7 +310,7 @@ int msi_domain_populate_irqs(struct irq_
+
+ ops->set_desc(arg, desc);
+ /* Assumes the domain mutex is held! */
+- ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
++ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+ if (ret)
+ break;
+
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -138,14 +138,14 @@ static int br_switchdev_event(struct not
+ br = p->br;
+
+ switch (event) {
+- case SWITCHDEV_FDB_ADD:
++ case SWITCHDEV_FDB_ADD_TO_BRIDGE:
+ fdb_info = ptr;
+ err = br_fdb_external_learn_add(br, p, fdb_info->addr,
+ fdb_info->vid);
+ if (err)
+ err = notifier_from_errno(err);
+ break;
+- case SWITCHDEV_FDB_DEL:
++ case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ fdb_info = ptr;
+ err = br_fdb_external_learn_del(br, p, fdb_info->addr,
+ fdb_info->vid);
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -688,6 +688,8 @@ static void fdb_notify(struct net_bridge
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
++ br_switchdev_fdb_notify(fdb, type);
++
+ skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -1060,6 +1060,8 @@ void nbp_switchdev_frame_mark(const stru
+ struct sk_buff *skb);
+ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
+ const struct sk_buff *skb);
++void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
++ int type);
+ #else
+ static inline int nbp_switchdev_mark_set(struct net_bridge_port *p)
+ {
+@@ -1076,6 +1078,11 @@ static inline bool nbp_switchdev_allowed
+ {
+ return true;
+ }
++
++static inline void
++br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
++{
++}
+ #endif /* CONFIG_NET_SWITCHDEV */
+
+ #endif
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -55,3 +55,36 @@ bool nbp_switchdev_allowed_egress(const
+ return !skb->offload_fwd_mark ||
+ BR_INPUT_SKB_CB(skb)->offload_fwd_mark != p->offload_fwd_mark;
+ }
++
++static void
++br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
++ u16 vid, struct net_device *dev)
++{
++ struct switchdev_notifier_fdb_info info;
++ unsigned long notifier_type;
++
++ info.addr = mac;
++ info.vid = vid;
++ notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
++ call_switchdev_notifiers(notifier_type, dev, &info.info);
++}
++
++void
++br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
++{
++ if (!fdb->added_by_user)
++ return;
++
++ switch (type) {
++ case RTM_DELNEIGH:
++ br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
++ fdb->vlan_id,
++ fdb->dst->dev);
++ break;
++ case RTM_NEWNEIGH:
++ br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
++ fdb->vlan_id,
++ fdb->dst->dev);
++ break;
++ }
++}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6630,9 +6630,18 @@ int dev_set_mtu(struct net_device *dev,
+@@ -1968,6 +1968,23 @@ static void netif_setup_tc(struct net_de
+ }
+ }
+
++int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
++{
++ if (dev->num_tc) {
++ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
++ int i;
++
++ for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
++ if ((txq - tc->offset) < tc->count)
++ return i;
++ }
++
++ return -1;
++ }
++
++ return 0;
++}
++
+ #ifdef CONFIG_XPS
+ static DEFINE_MUTEX(xps_map_mutex);
+ #define xmap_dereference(P) \
+@@ -6630,9 +6647,18 @@ int dev_set_mtu(struct net_device *dev,
if (new_mtu == dev->mtu)
return 0;
if (!netif_device_present(dev))
return -ENODEV;
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1021,7 +1021,6 @@ static ssize_t show_trans_timeout(struct
+ return sprintf(buf, "%lu", trans_timeout);
+ }
+
+-#ifdef CONFIG_XPS
+ static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+ {
+ struct net_device *dev = queue->dev;
+@@ -1033,6 +1032,21 @@ static unsigned int get_netdev_queue_ind
+ return i;
+ }
+
++static ssize_t show_traffic_class(struct netdev_queue *queue,
++ struct netdev_queue_attribute *attribute,
++ char *buf)
++{
++ struct net_device *dev = queue->dev;
++ int index = get_netdev_queue_index(queue);
++ int tc = netdev_txq_to_tc(dev, index);
++
++ if (tc < 0)
++ return -EINVAL;
++
++ return sprintf(buf, "%u\n", tc);
++}
++
++#ifdef CONFIG_XPS
+ static ssize_t show_tx_maxrate(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ char *buf)
+@@ -1075,6 +1089,9 @@ static struct netdev_queue_attribute que
+ static struct netdev_queue_attribute queue_trans_timeout =
+ __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
++static struct netdev_queue_attribute queue_traffic_class =
++ __ATTR(traffic_class, S_IRUGO, show_traffic_class, NULL);
++
+ #ifdef CONFIG_BQL
+ /*
+ * Byte queue limits sysfs structures and functions.
+@@ -1260,6 +1277,7 @@ static struct netdev_queue_attribute xps
+
+ static struct attribute *netdev_queue_default_attrs[] = {
+ &queue_trans_timeout.attr,
++ &queue_traffic_class.attr,
+ #ifdef CONFIG_XPS
+ &xps_cpus_attribute.attr,
+ &queue_tx_maxrate.attr,
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3690,7 +3690,7 @@ static int rtnl_get_offload_stats(struct
+ if (!size)
+ continue;
+
+- if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
++ if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
+ continue;
+
+ attr = nla_reserve_64bit(skb, attr_id, size,
+@@ -3731,7 +3731,7 @@ static int rtnl_get_offload_stats_size(c
+
+ for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
+ attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
+- if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
++ if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
+ continue;
+ size = rtnl_get_offload_stats_attr_size(attr_id);
+ nla_size += nla_total_size_64bit(size);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -842,6 +842,32 @@ void napi_consume_skb(struct sk_buff *sk
-From 45e934873f9147f692dddbb61abc088f4c8059d7 Mon Sep 17 00:00:00 2001
+From 2f2a0ab9e4b3186be981f7151a4f4f794d4b6caa Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 14:51:29 +0800
-Subject: [PATCH 03/30] arch: support layerscape
+Date: Thu, 5 Jul 2018 16:18:37 +0800
+Subject: [PATCH 03/32] arch: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- arch/arm/include/asm/delay.h | 16 +++++++++
- arch/arm/include/asm/io.h | 31 ++++++++++++++++++
- arch/arm/include/asm/mach/map.h | 4 +--
- arch/arm/include/asm/pgtable.h | 7 ++++
- arch/arm/kernel/bios32.c | 43 ++++++++++++++++++++++++
- arch/arm/mm/dma-mapping.c | 1 +
- arch/arm/mm/ioremap.c | 7 ++++
- arch/arm/mm/mmu.c | 9 +++++
- arch/arm64/include/asm/cache.h | 2 +-
- arch/arm64/include/asm/io.h | 30 +++++++++++++++++
- arch/arm64/include/asm/pci.h | 4 +++
- arch/arm64/include/asm/pgtable-prot.h | 1 +
- arch/arm64/include/asm/pgtable.h | 5 +++
- arch/arm64/kernel/pci.c | 62 +++++++++++++++++++++++++++++++++++
- arch/arm64/mm/dma-mapping.c | 6 ++++
- 15 files changed, 225 insertions(+), 3 deletions(-)
+ arch/arm/include/asm/delay.h | 16 +++++++
+ arch/arm/include/asm/dma-mapping.h | 6 ---
+ arch/arm/include/asm/io.h | 31 +++++++++++++
+ arch/arm/include/asm/mach/map.h | 4 +-
+ arch/arm/include/asm/pgtable.h | 7 +++
+ arch/arm/kernel/bios32.c | 43 ++++++++++++++++++
+ arch/arm/mm/dma-mapping.c | 1 +
+ arch/arm/mm/ioremap.c | 7 +++
+ arch/arm/mm/mmu.c | 9 ++++
+ arch/arm64/include/asm/cache.h | 2 +-
+ arch/arm64/include/asm/io.h | 30 +++++++++++++
+ arch/arm64/include/asm/pci.h | 4 ++
+ arch/arm64/include/asm/pgtable-prot.h | 2 +
+ arch/arm64/include/asm/pgtable.h | 5 +++
+ arch/arm64/kernel/pci.c | 62 ++++++++++++++++++++++++++
+ arch/arm64/mm/dma-mapping.c | 6 +++
+ arch/powerpc/include/asm/dma-mapping.h | 5 ---
+ arch/tile/include/asm/dma-mapping.h | 5 ---
+ 18 files changed, 226 insertions(+), 19 deletions(-)
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
/* Loop-based definitions for assembly code. */
extern void __loop_delay(unsigned long loops);
extern void __loop_udelay(unsigned long usecs);
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -31,12 +31,6 @@ static inline struct dma_map_ops *get_dm
+ return __generic_dma_ops(dev);
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+-{
+- BUG_ON(!dev);
+- dev->archdata.dma_ops = ops;
+-}
+-
+ #define HAVE_ARCH_DMA_SUPPORTED 1
+ extern int dma_supported(struct device *dev, u64 mask);
+
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+@@ -68,6 +69,7 @@
+ #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+ #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
++#define PAGE_S2_NS __pgprot(PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR | PTE_TYPE_PAGE | PTE_AF)
+ #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+
+ #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -370,6 +370,11 @@ static inline int pmd_protnone(pmd_t pmd
#include <linux/swiotlb.h>
#include <asm/cacheflush.h>
-+#include <../../../drivers/staging/fsl-mc/include/mc-bus.h>
++#include <linux/fsl/mc.h>
static int swiotlb __ro_after_init;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
+--- a/arch/powerpc/include/asm/dma-mapping.h
++++ b/arch/powerpc/include/asm/dma-mapping.h
+@@ -91,11 +91,6 @@ static inline struct dma_map_ops *get_dm
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+-{
+- dev->archdata.dma_ops = ops;
+-}
+-
+ /*
+ * get_dma_offset()
+ *
+--- a/arch/tile/include/asm/dma-mapping.h
++++ b/arch/tile/include/asm/dma-mapping.h
+@@ -59,11 +59,6 @@ static inline phys_addr_t dma_to_phys(st
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+-{
+- dev->archdata.dma_ops = ops;
+-}
+-
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (!dev->dma_mask)
-From 1806d342beb334c8cb0a438315ad5529262b2791 Mon Sep 17 00:00:00 2001
+From 2ba4c76bc645b7b4ff04364f294f3022d369108a Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 14:52:50 +0800
-Subject: [PATCH 04/30] dts: support layercape
+Date: Thu, 5 Jul 2018 16:20:56 +0800
Subject: [PATCH 04/32] dts: support layerscape
This is an integrated patch for layerscape dts support.
Signed-off-by: Ran Wang <ran.wang_1@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- arch/arm/boot/dts/alpine.dtsi | 2 +-
- arch/arm/boot/dts/axm55xx.dtsi | 2 +-
- arch/arm/boot/dts/ecx-2000.dts | 2 +-
- arch/arm/boot/dts/imx6ul.dtsi | 4 +-
- arch/arm/boot/dts/keystone.dtsi | 4 +-
- arch/arm/boot/dts/ls1021a-qds.dts | 21 +
- arch/arm/boot/dts/ls1021a-twr.dts | 25 +
- arch/arm/boot/dts/ls1021a.dtsi | 197 +++--
- arch/arm/boot/dts/mt6580.dtsi | 2 +-
- arch/arm/boot/dts/mt6589.dtsi | 2 +-
- arch/arm/boot/dts/mt8127.dtsi | 2 +-
- arch/arm/boot/dts/mt8135.dtsi | 2 +-
- arch/arm/boot/dts/rk3288.dtsi | 2 +-
- arch/arm/boot/dts/sun6i-a31.dtsi | 2 +-
- arch/arm/boot/dts/sun7i-a20.dtsi | 4 +-
- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +-
- arch/arm/boot/dts/sun9i-a80.dtsi | 2 +-
- arch/arm64/boot/dts/freescale/Makefile | 17 +
- .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 123 +++
- arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts | 177 ++++
- arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 202 +++++
- arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts | 138 ++++
- arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 602 ++++++++++++++
- arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 45 +
- .../boot/dts/freescale/fsl-ls1043a-qds-sdk.dts | 69 ++
- arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts | 171 +++-
- .../boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts | 69 ++
- .../boot/dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++
- arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts | 113 ++-
- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 308 ++++++-
- arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi | 48 ++
- .../boot/dts/freescale/fsl-ls1046a-qds-sdk.dts | 110 +++
- arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts | 363 ++++++++
- .../boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts | 83 ++
- .../boot/dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 +++
- arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts | 218 +++++
- arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 800 ++++++++++++++++++
- arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts | 173 ++++
- arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 236 ++++++
- arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 825 ++++++++++++++++++
- arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts | 191 ++---
- arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts | 169 ++--
- arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts | 9 +-
- arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 763 +++--------------
- arch/arm64/boot/dts/freescale/fsl-ls2081a-rdb.dts | 161 ++++
- arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts | 162 ++++
- arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts | 140 ++++
- arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 195 +++++
- arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi | 198 +++++
- arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 161 ++++
- arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 919 +++++++++++++++++++++
- .../boot/dts/freescale/qoriq-bman1-portals.dtsi | 81 ++
- arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi | 73 ++
- .../boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi | 43 +
- .../boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi | 43 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi | 42 +
- .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 ++
- arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi | 130 +++
- .../boot/dts/freescale/qoriq-qman1-portals.dtsi | 104 +++
- arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi | 10 +
- arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +-
- arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +-
- 67 files changed, 8231 insertions(+), 1022 deletions(-)
+ arch/arm/boot/dts/alpine.dtsi | 2 +-
+ arch/arm/boot/dts/axm55xx.dtsi | 2 +-
+ arch/arm/boot/dts/ecx-2000.dts | 2 +-
+ arch/arm/boot/dts/imx6ul.dtsi | 4 +-
+ arch/arm/boot/dts/keystone.dtsi | 4 +-
+ arch/arm/boot/dts/ls1021a-qds.dts | 26 +
+ arch/arm/boot/dts/ls1021a-twr.dts | 25 +
+ arch/arm/boot/dts/ls1021a.dtsi | 284 ++++--
+ arch/arm/boot/dts/mt6580.dtsi | 2 +-
+ arch/arm/boot/dts/mt6589.dtsi | 2 +-
+ arch/arm/boot/dts/mt8127.dtsi | 2 +-
+ arch/arm/boot/dts/mt8135.dtsi | 2 +-
+ arch/arm/boot/dts/rk3288.dtsi | 2 +-
+ arch/arm/boot/dts/sun6i-a31.dtsi | 2 +-
+ arch/arm/boot/dts/sun7i-a20.dtsi | 4 +-
+ arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +-
+ arch/arm/boot/dts/sun9i-a80.dtsi | 2 +-
+ arch/arm64/boot/dts/freescale/Makefile | 18 +
+ .../boot/dts/freescale/fsl-ls1012a-2g5rdb.dts | 123 +++
+ .../boot/dts/freescale/fsl-ls1012a-frdm.dts | 141 +++
+ .../boot/dts/freescale/fsl-ls1012a-frwy.dts | 177 ++++
+ .../boot/dts/freescale/fsl-ls1012a-qds.dts | 166 ++++
+ .../boot/dts/freescale/fsl-ls1012a-rdb.dts | 102 ++
+ .../arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 567 +++++++++++
+ .../boot/dts/freescale/fsl-ls1043-post.dtsi | 44 +
+ .../dts/freescale/fsl-ls1043a-qds-sdk.dts | 71 ++
+ .../boot/dts/freescale/fsl-ls1043a-qds.dts | 210 ++++-
+ .../dts/freescale/fsl-ls1043a-rdb-sdk.dts | 71 ++
+ .../dts/freescale/fsl-ls1043a-rdb-usdpaa.dts | 117 +++
+ .../boot/dts/freescale/fsl-ls1043a-rdb.dts | 152 ++-
+ .../arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 357 +++++--
+ .../boot/dts/freescale/fsl-ls1046-post.dtsi | 48 +
+ .../dts/freescale/fsl-ls1046a-qds-sdk.dts | 112 +++
+ .../boot/dts/freescale/fsl-ls1046a-qds.dts | 326 +++++++
+ .../dts/freescale/fsl-ls1046a-rdb-sdk.dts | 85 ++
+ .../dts/freescale/fsl-ls1046a-rdb-usdpaa.dts | 110 +++
+ .../boot/dts/freescale/fsl-ls1046a-rdb.dts | 181 ++++
+ .../arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 771 +++++++++++++++
+ .../boot/dts/freescale/fsl-ls1088a-qds.dts | 137 +++
+ .../boot/dts/freescale/fsl-ls1088a-rdb.dts | 200 ++++
+ .../arm64/boot/dts/freescale/fsl-ls1088a.dtsi | 800 ++++++++++++++++
+ .../boot/dts/freescale/fsl-ls2080a-qds.dts | 229 +----
+ .../boot/dts/freescale/fsl-ls2080a-rdb.dts | 207 ++--
+ .../boot/dts/freescale/fsl-ls2080a-simu.dts | 47 +-
+ .../arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 803 ++--------------
+ .../boot/dts/freescale/fsl-ls2081a-rdb.dts | 161 ++++
+ .../boot/dts/freescale/fsl-ls2088a-qds.dts | 126 +++
+ .../boot/dts/freescale/fsl-ls2088a-rdb.dts | 104 ++
+ .../arm64/boot/dts/freescale/fsl-ls2088a.dtsi | 159 ++++
+ .../boot/dts/freescale/fsl-ls208xa-qds.dtsi | 162 ++++
+ .../boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 136 +++
+ .../arm64/boot/dts/freescale/fsl-ls208xa.dtsi | 885 ++++++++++++++++++
+ .../dts/freescale/qoriq-bman-portals-sdk.dtsi | 55 ++
+ .../dts/freescale/qoriq-bman-portals.dtsi | 77 ++
+ .../boot/dts/freescale/qoriq-dpaa-eth.dtsi | 73 ++
+ .../dts/freescale/qoriq-fman3-0-10g-0.dtsi | 43 +
+ .../dts/freescale/qoriq-fman3-0-10g-1.dtsi | 43 +
+ .../dts/freescale/qoriq-fman3-0-1g-0.dtsi | 42 +
+ .../dts/freescale/qoriq-fman3-0-1g-1.dtsi | 42 +
+ .../dts/freescale/qoriq-fman3-0-1g-2.dtsi | 42 +
+ .../dts/freescale/qoriq-fman3-0-1g-3.dtsi | 42 +
+ .../dts/freescale/qoriq-fman3-0-1g-4.dtsi | 42 +
+ .../dts/freescale/qoriq-fman3-0-1g-5.dtsi | 42 +
+ .../boot/dts/freescale/qoriq-fman3-0-6oh.dtsi | 47 +
+ .../boot/dts/freescale/qoriq-fman3-0.dtsi | 130 +++
+ .../dts/freescale/qoriq-qman-portals-sdk.dtsi | 38 +
+ .../dts/freescale/qoriq-qman-portals.dtsi | 87 ++
+ .../boot/dts/fsl/qoriq-bman1-portals.dtsi | 10 +
+ .../boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi | 4 +-
+ .../boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi | 4 +-
+ 70 files changed, 8051 insertions(+), 1286 deletions(-)
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-2g5rdb.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+ create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-dpaa-eth.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0-6oh.dtsi
create mode 100644 arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
- create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
+ create mode 100644 arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
--- a/arch/arm/boot/dts/alpine.dtsi
+++ b/arch/arm/boot/dts/alpine.dtsi
&enet0 {
tbi-handle = <&tbi0>;
phy-handle = <&sgmii_phy1c>;
-@@ -331,3 +344,11 @@
+@@ -239,6 +252,11 @@
+ device-width = <1>;
+ };
+
++ nand@2,0 {
++ compatible = "fsl,ifc-nand";
++ reg = <0x2 0x0 0x10000>;
++ };
++
+ fpga: board-control@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -331,3 +349,11 @@
&uart1 {
status = "okay";
};
+};
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
-@@ -74,17 +74,24 @@
+@@ -47,6 +47,7 @@
+
+ #include "skeleton64.dtsi"
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/thermal/thermal.h>
+
+ / {
+ compatible = "fsl,ls1021a";
+@@ -70,21 +71,29 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- cpu@f00 {
++ cpu0: cpu@f00 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <0xf00>;
- clocks = <&cluster1_clk>;
+ clocks = <&clockgen 1 0>;
++ #cooling-cells = <2>;
};
- cpu@f01 {
+- cpu@f01 {
++ cpu1: cpu@f01 {
compatible = "arm,cortex-a7";
device_type = "cpu";
reg = <0xf01>;
timer {
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-@@ -108,11 +115,11 @@
+@@ -108,11 +117,11 @@
ranges;
gic: interrupt-controller@1400000 {
<0x0 0x1404000 0x0 0x2000>,
<0x0 0x1406000 0x0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
-@@ -120,14 +127,14 @@
+@@ -120,14 +129,14 @@
};
msi1: msi-controller@1570e00 {
reg = <0x0 0x1570e08 0x0 0x8>;
msi-controller;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-@@ -137,11 +144,12 @@
+@@ -137,11 +146,12 @@
compatible = "fsl,ifc", "simple-bus";
reg = <0x0 0x1530000 0x0 0x10000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
big-endian;
};
-@@ -163,7 +171,7 @@
+@@ -163,7 +173,7 @@
<0x0 0x20220520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
dma-coherent;
status = "disabled";
};
-@@ -214,41 +222,10 @@
+@@ -214,43 +224,89 @@
};
clockgen: clocking@1ee1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x1ee1000 0x10000>;
--
++ compatible = "fsl,ls1021a-clockgen";
++ reg = <0x0 0x1ee1000 0x0 0x1000>;
++ #clock-cells = <2>;
++ clocks = <&sysclk>;
++ };
+
- sysclk: sysclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-names = "pll1cga", "pll1cga-div2", "pll1cga-div4";
- clocks = <&cga_pll1 0>, <&cga_pll1 1>, <&cga_pll1 2>;
- clock-output-names = "cluster1-clk";
-- };
-+ compatible = "fsl,ls1021a-clockgen";
-+ reg = <0x0 0x1ee1000 0x0 0x1000>;
-+ #clock-cells = <2>;
-+ clocks = <&sysclk>;
++ tmu: tmu@1f00000 {
++ compatible = "fsl,qoriq-tmu";
++ reg = <0x0 0x1f00000 0x0 0x10000>;
++ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
++ fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
++ fsl,tmu-calibration = <0x00000000 0x0000000f
++ 0x00000001 0x00000017
++ 0x00000002 0x0000001e
++ 0x00000003 0x00000026
++ 0x00000004 0x0000002e
++ 0x00000005 0x00000035
++ 0x00000006 0x0000003d
++ 0x00000007 0x00000044
++ 0x00000008 0x0000004c
++ 0x00000009 0x00000053
++ 0x0000000a 0x0000005b
++ 0x0000000b 0x00000064
++
++ 0x00010000 0x00000011
++ 0x00010001 0x0000001c
++ 0x00010002 0x00000024
++ 0x00010003 0x0000002b
++ 0x00010004 0x00000034
++ 0x00010005 0x00000039
++ 0x00010006 0x00000042
++ 0x00010007 0x0000004c
++ 0x00010008 0x00000051
++ 0x00010009 0x0000005a
++ 0x0001000a 0x00000063
++
++ 0x00020000 0x00000013
++ 0x00020001 0x00000019
++ 0x00020002 0x00000024
++ 0x00020003 0x0000002c
++ 0x00020004 0x00000035
++ 0x00020005 0x0000003d
++ 0x00020006 0x00000046
++ 0x00020007 0x00000050
++ 0x00020008 0x00000059
++
++ 0x00030000 0x00000002
++ 0x00030001 0x0000000d
++ 0x00030002 0x00000019
++ 0x00030003 0x00000024>;
++ #thermal-sensor-cells = <1>;
++ };
++
++ thermal-zones {
++ cpu_thermal: cpu-thermal {
++ polling-delay-passive = <1000>;
++ polling-delay = <5000>;
++
++ thermal-sensors = <&tmu 0>;
++
++ trips {
++ cpu_alert: cpu-alert {
++ temperature = <85000>;
++ hysteresis = <2000>;
++ type = "passive";
++ };
++ cpu_crit: cpu-crit {
++ temperature = <95000>;
++ hysteresis = <2000>;
++ type = "critical";
++ };
++ };
++
++ cooling-maps {
++ map0 {
++ trip = <&cpu_alert>;
++ cooling-device =
++ <&cpu0 THERMAL_NO_LIMIT
++ THERMAL_NO_LIMIT>;
++ };
++ };
+ };
};
-
+-
dspi0: dspi@2100000 {
-@@ -258,7 +235,7 @@
+ compatible = "fsl,ls1021a-v1.0-dspi";
+ #address-cells = <1>;
+@@ -258,7 +314,7 @@
reg = <0x0 0x2100000 0x0 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
spi-num-chipselects = <6>;
big-endian;
status = "disabled";
-@@ -271,12 +248,27 @@
+@@ -271,31 +327,48 @@
reg = <0x0 0x2110000 0x0 0x10000>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
+ };
+
i2c0: i2c@2180000 {
- compatible = "fsl,vf610-i2c";
+- compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1021a-vf610-i2c";
#address-cells = <1>;
-@@ -284,7 +276,7 @@
+ #size-cells = <0>;
reg = <0x0 0x2180000 0x0 0x10000>;
interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&platform_clk 1>;
+ clocks = <&clockgen 4 1>;
++ fsl-scl-gpio = <&gpio3 23 0>;
status = "disabled";
};
-@@ -295,7 +287,7 @@
+ i2c1: i2c@2190000 {
+- compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1021a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x0 0x2190000 0x0 0x10000>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
- clocks = <&platform_clk 1>;
+ clocks = <&clockgen 4 1>;
++ fsl-scl-gpio = <&gpio3 23 0>;
status = "disabled";
};
-@@ -306,7 +298,7 @@
+@@ -306,7 +379,7 @@
reg = <0x0 0x21a0000 0x0 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
status = "disabled";
};
-@@ -399,7 +391,7 @@
+@@ -399,7 +472,7 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2960000 0x0 0x1000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ipg";
status = "disabled";
};
-@@ -408,7 +400,7 @@
+@@ -408,7 +481,7 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2970000 0x0 0x1000>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ipg";
status = "disabled";
};
-@@ -417,7 +409,7 @@
+@@ -417,7 +490,7 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2980000 0x0 0x1000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ipg";
status = "disabled";
};
-@@ -426,7 +418,7 @@
+@@ -426,7 +499,7 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2990000 0x0 0x1000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "ipg";
status = "disabled";
};
-@@ -435,16 +427,26 @@
+@@ -435,16 +508,26 @@
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x29a0000 0x0 0x1000>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "wdog-en";
big-endian;
};
-@@ -454,8 +456,8 @@
+@@ -454,8 +537,8 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b50000 0x0 0x10000>;
interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 47>,
-@@ -468,8 +470,8 @@
+@@ -468,8 +551,8 @@
compatible = "fsl,vf610-sai";
reg = <0x0 0x2b60000 0x0 0x10000>;
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 1 45>,
-@@ -489,16 +491,31 @@
+@@ -489,16 +572,31 @@
dma-channels = <32>;
big-endian;
clock-names = "dmamux0", "dmamux1";
clock-names = "dcu", "pix";
big-endian;
status = "disabled";
-@@ -626,6 +643,8 @@
+@@ -626,7 +724,11 @@
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ configure-gfladj;
+ dma-coherent;
snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
};
-@@ -634,7 +653,9 @@
+ pcie@3400000 {
+@@ -634,7 +736,9 @@
reg = <0x00 0x03400000 0x0 0x00010000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
fsl,pcie-scfg = <&scfg 0>;
#address-cells = <3>;
#size-cells = <2>;
-@@ -643,7 +664,7 @@
+@@ -643,7 +747,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
-@@ -657,7 +678,9 @@
+@@ -657,7 +761,9 @@
reg = <0x00 0x03500000 0x0 0x00010000 /* controller registers */
0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
fsl,pcie-scfg = <&scfg 1>;
#address-cells = <3>;
#size-cells = <2>;
-@@ -666,7 +689,7 @@
+@@ -666,7 +772,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
-@@ -674,5 +697,45 @@
+@@ -674,5 +780,45 @@
<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
};
interrupt-controller;
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
-@@ -1,8 +1,25 @@
+@@ -1,8 +1,26 @@
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-2g5rdb.dtb
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
-@@ -0,0 +1,177 @@
+@@ -0,0 +1,141 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Freescale LS1012A Freedom Board.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+
+ status = "okay";
+};
--- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
-@@ -0,0 +1,202 @@
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
+@@ -0,0 +1,177 @@
+/*
-+ * Device Tree file for Freescale LS1012A QDS Board.
++ * Device Tree file for NXP LS1012A FRWY Board.
+ *
-+ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+#include "fsl-ls1012a.dtsi"
+
+/ {
++ model = "LS1012A FRWY Board";
++ compatible = "fsl,ls1012a-frwy", "fsl,ls1012a";
++
++ aliases {
++ ethernet0 = &pfe_mac0;
++ ethernet1 = &pfe_mac1;
++ };
++
++ sys_mclk: clock-mclk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <25000000>;
++ };
++
++ reg_1p8v: regulator-1p8v {
++ compatible = "regulator-fixed";
++ regulator-name = "1P8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
++ };
++
++ sound {
++ compatible = "simple-audio-card";
++ simple-audio-card,format = "i2s";
++ simple-audio-card,widgets =
++ "Microphone", "Microphone Jack",
++ "Headphone", "Headphone Jack",
++ "Speaker", "Speaker Ext",
++ "Line", "Line In Jack";
++ simple-audio-card,routing =
++ "MIC_IN", "Microphone Jack",
++ "Microphone Jack", "Mic Bias",
++ "LINE_IN", "Line In Jack",
++ "Headphone Jack", "HP_OUT",
++ "Speaker Ext", "LINE_OUT";
++
++ simple-audio-card,cpu {
++ sound-dai = <&sai2>;
++ frame-master;
++ bitclock-master;
++ };
++
++ simple-audio-card,codec {
++ sound-dai = <&codec>;
++ frame-master;
++ bitclock-master;
++ system-clock-frequency = <25000000>;
++ };
++ };
++};
++
++&pcie {
++ status = "okay";
++};
++
++&duart0 {
++ status = "okay";
++};
++
++&i2c0 {
++ status = "okay";
++
++ codec: sgtl5000@a {
++ compatible = "fsl,sgtl5000";
++ #sound-dai-cells = <0>;
++ reg = <0xa>;
++ VDDA-supply = <®_1p8v>;
++ VDDIO-supply = <®_1p8v>;
++ clocks = <&sys_mclk>;
++ };
++};
++
++&qspi {
++ num-cs = <1>;
++ bus-num = <0>;
++ status = "okay";
++
++ qflash0: w25q16dw@0 {
++ compatible = "spansion,m25p80";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ m25p,fast-read;
++ spi-max-frequency = <20000000>;
++ reg = <0>;
++ };
++};
++
++&pfe {
++ status = "okay";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ ethernet@0 {
++ compatible = "fsl,pfe-gemac-port";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0>; /* GEM_ID */
++ fsl,gemac-bus-id = <0x0>; /* BUS_ID */
++ fsl,gemac-phy-id = <0x2>; /* PHY_ID */
++ fsl,mdio-mux-val = <0x0>;
++ phy-mode = "sgmii";
++ fsl,pfe-phy-if-flags = <0x0>;
++
++ mdio@0 {
++ reg = <0x1>; /* enabled/disabled */
++ };
++ };
++
++ ethernet@1 {
++ compatible = "fsl,pfe-gemac-port";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x1>; /* GEM_ID */
++ fsl,gemac-bus-id = <0x1>; /* BUS_ID */
++ fsl,gemac-phy-id = <0x1>; /* PHY_ID */
++ fsl,mdio-mux-val = <0x0>;
++ phy-mode = "sgmii";
++ fsl,pfe-phy-if-flags = <0x0>;
++
++ mdio@0 {
++ reg = <0x0>; /* enabled/disabled */
++ };
++ };
++};
++
++&sai2 {
++ status = "okay";
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+@@ -0,0 +1,166 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Device Tree file for Freescale LS1012A QDS Board.
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ *
++ */
++/dts-v1/;
++
++#include "fsl-ls1012a.dtsi"
++
++/ {
+ model = "LS1012A QDS Board";
+ compatible = "fsl,ls1012a-qds", "fsl,ls1012a";
+
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
-@@ -0,0 +1,138 @@
+@@ -0,0 +1,102 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Freescale LS1012A RDB Board.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
-@@ -0,0 +1,602 @@
+@@ -0,0 +1,567 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for Freescale Layerscape-1012A family SoC.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+ };
+
+ i2c0: i2c@2180000 {
-+ compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1012a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2180000 0x0 0x10000>;
+ interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
++ fsl-scl-gpio = <&gpio0 13 0>;
+ status = "disabled";
+ };
+
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
-@@ -0,0 +1,45 @@
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 device tree nodes for ls1043
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
-+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&soc {
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds-sdk.dts
-@@ -0,0 +1,69 @@
+@@ -0,0 +1,71 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+ */
+
+#include "fsl-ls1043a-qds.dts"
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
+
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
-@@ -1,7 +1,7 @@
+@@ -1,51 +1,14 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree Include file for Freescale Layerscape-1043A family SoC.
*
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
*
* Mingkai Hu <Mingkai.hu@freescale.com>
- *
-@@ -45,7 +45,7 @@
+- *
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
/ {
model = "LS1043A QDS Board";
-@@ -60,6 +60,22 @@
+@@ -60,6 +23,22 @@
serial1 = &duart1;
serial2 = &duart2;
serial3 = &duart3;
};
chosen {
-@@ -97,8 +113,11 @@
+@@ -97,8 +76,11 @@
};
fpga: board-control@2,0 {
};
};
-@@ -181,3 +200,149 @@
+@@ -181,3 +163,149 @@
reg = <0>;
};
};
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb-sdk.dts
-@@ -0,0 +1,69 @@
+@@ -0,0 +1,71 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+ */
+
+#include "fsl-ls1043a-rdb.dts"
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
+
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
-@@ -1,7 +1,7 @@
+@@ -1,51 +1,14 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree Include file for Freescale Layerscape-1043A family SoC.
*
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
*
* Mingkai Hu <Mingkai.hu@freescale.com>
- *
-@@ -45,7 +45,7 @@
+- *
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
/ {
model = "LS1043A RDB Board";
-@@ -86,6 +86,10 @@
+@@ -86,6 +49,10 @@
compatible = "pericom,pt7c4338";
reg = <0x68>;
};
};
&ifc {
-@@ -130,6 +134,38 @@
+@@ -130,6 +97,38 @@
reg = <0>;
spi-max-frequency = <1000000>; /* input clock */
};
};
&duart0 {
-@@ -139,3 +175,76 @@
+@@ -139,3 +138,76 @@
&duart1 {
status = "okay";
};
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
-@@ -1,7 +1,7 @@
+@@ -1,55 +1,32 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree Include file for Freescale Layerscape-1043A family SoC.
*
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
*
* Mingkai Hu <Mingkai.hu@freescale.com>
- *
-@@ -44,12 +44,25 @@
- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <dt-bindings/thermal/thermal.h>
++#include <dt-bindings/interrupt-controller/arm-gic.h>
+
/ {
compatible = "fsl,ls1043a";
cpus {
#address-cells = <1>;
#size-cells = <0>;
-@@ -66,6 +79,8 @@
+@@ -66,6 +43,8 @@
reg = <0x0>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
cpu1: cpu@1 {
-@@ -74,6 +89,7 @@
+@@ -74,6 +53,7 @@
reg = <0x1>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
cpu2: cpu@2 {
-@@ -82,6 +98,7 @@
+@@ -82,6 +62,7 @@
reg = <0x2>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
cpu3: cpu@3 {
-@@ -90,6 +107,7 @@
+@@ -90,6 +71,7 @@
reg = <0x3>;
clocks = <&clockgen 1 0>;
next-level-cache = <&l2>;
};
l2: l2-cache {
-@@ -97,12 +115,56 @@
+@@ -97,12 +79,56 @@
};
};
sysclk: sysclk {
compatible = "fixed-clock";
#clock-cells = <0>;
-@@ -149,7 +211,7 @@
+@@ -149,7 +175,7 @@
interrupts = <1 9 0xf08>;
};
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
-@@ -213,13 +275,14 @@
+@@ -213,13 +239,14 @@
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1043a-dcfg", "syscon";
interrupts = <0 43 0x4>;
};
-@@ -255,6 +318,103 @@
+@@ -255,6 +282,103 @@
big-endian;
};
dspi0: dspi@2100000 {
compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi";
#address-cells = <1>;
-@@ -396,6 +556,72 @@
+@@ -282,7 +406,7 @@
+ };
+
+ i2c0: i2c@2180000 {
+- compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1043a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2180000 0x0 0x10000>;
+@@ -292,6 +416,7 @@
+ dmas = <&edma0 1 39>,
+ <&edma0 1 38>;
+ dma-names = "tx", "rx";
++ fsl-scl-gpio = <&gpio4 12 0>;
+ status = "disabled";
+ };
+
+@@ -396,6 +521,72 @@
#interrupt-cells = <2>;
};
lpuart0: serial@2950000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
-@@ -450,6 +676,16 @@
+@@ -450,6 +641,16 @@
status = "disabled";
};
wdog0: wdog@2ad0000 {
compatible = "fsl,ls1043a-wdt", "fsl,imx21-wdt";
reg = <0x0 0x2ad0000 0x0 0x10000>;
-@@ -482,6 +718,8 @@
+@@ -482,6 +683,10 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+ snps,dma-snooping;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
};
usb1: usb3@3000000 {
-@@ -491,6 +729,9 @@
+@@ -491,6 +696,11 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+ snps,dma-snooping;
+ configure-gfladj;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
};
usb2: usb3@3100000 {
-@@ -500,32 +741,52 @@
+@@ -500,32 +710,54 @@
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
snps,dis_rxdet_inp3_quirk;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+ snps,dma-snooping;
+ configure-gfladj;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
};
sata: sata@3200000 {
reg = <0x0 0x1573000 0x0 0x8>;
msi-controller;
interrupts = <0 160 0x4>;
-@@ -536,9 +797,9 @@
+@@ -536,9 +768,9 @@
reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -547,7 +808,7 @@
+@@ -547,7 +779,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
-@@ -561,9 +822,9 @@
+@@ -561,9 +793,9 @@
reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
0x48 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -572,7 +833,7 @@
+@@ -572,7 +804,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
-@@ -586,9 +847,9 @@
+@@ -586,9 +818,9 @@
reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
0x50 0x00000000 0x0 0x00002000>; /* configuration space */
reg-names = "regs", "config";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
-@@ -597,7 +858,7 @@
+@@ -597,7 +829,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
-@@ -607,4 +868,13 @@
+@@ -607,4 +839,13 @@
};
};
+ };
};
+
-+#include "qoriq-qman1-portals.dtsi"
-+#include "qoriq-bman1-portals.dtsi"
++#include "qoriq-qman-portals.dtsi"
++#include "qoriq-bman-portals.dtsi"
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
@@ -0,0 +1,48 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 device tree nodes for ls1046
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&soc {
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds-sdk.dts
-@@ -0,0 +1,110 @@
+@@ -0,0 +1,112 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ */
+
+#include "fsl-ls1046a-qds.dts"
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
+
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
-@@ -0,0 +1,363 @@
+@@ -0,0 +1,326 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * Shaohui Xie <Shaohui.Xie@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb-sdk.dts
-@@ -0,0 +1,83 @@
+@@ -0,0 +1,85 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ */
+
+#include "fsl-ls1046a-rdb.dts"
++#include "qoriq-qman-portals-sdk.dtsi"
++#include "qoriq-bman-portals-sdk.dtsi"
+
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
-@@ -0,0 +1,218 @@
+@@ -0,0 +1,181 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
-+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
-+ *
-+ * Copyright 2016 Freescale Semiconductor, Inc.
-+ *
-+ * Mingkai Hu <mingkai.hu@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
++ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
++ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
++ * Mingkai Hu <mingkai.hu@nxp.com>
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
-@@ -0,0 +1,800 @@
+@@ -0,0 +1,771 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <mingkai.hu@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+ };
+
+ i2c0: i2c@2180000 {
-+ compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1046a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2180000 0x0 0x10000>;
+ dmas = <&edma0 1 39>,
+ <&edma0 1 38>;
+ dma-names = "tx", "rx";
++ fsl-scl-gpio = <&gpio3 12 0>;
+ status = "disabled";
+ };
+
+ };
+
+ i2c3: i2c@21b0000 {
-+ compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1046a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x21b0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>;
++ fsl-scl-gpio = <&gpio3 12 0>;
+ status = "disabled";
+ };
+
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
+ };
+
+ usb1: usb@3000000 {
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
+ };
+
+ usb2: usb@3100000 {
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
++ usb3-lpm-capable;
++ snps,dis-u1u2-when-u3-quirk;
+ };
+
+ sata: sata@3200000 {
+ };
+};
+
-+#include "qoriq-qman1-portals.dtsi"
-+#include "qoriq-bman1-portals.dtsi"
++#include "qoriq-qman-portals.dtsi"
++#include "qoriq-bman-portals.dtsi"
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
-@@ -0,0 +1,173 @@
+@@ -0,0 +1,137 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for NXP LS1088A QDS Board.
+ *
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
-@@ -0,0 +1,236 @@
+@@ -0,0 +1,200 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for NXP LS1088A RDB Board.
+ *
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
-@@ -0,0 +1,825 @@
+@@ -0,0 +1,800 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for NXP Layerscape-1088A family SoC.
+ *
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
+ <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
+ };
+
-+ fsl_mc: fsl-mc@80c000000 {
-+ compatible = "fsl,qoriq-mc";
-+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
-+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
-+ #address-cells = <3>;
-+ #size-cells = <1>;
-+
-+ /*
-+ * Region type 0x0 - MC portals
-+ * Region type 0x1 - QBMAN portals
-+ */
-+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
-+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
-+
-+ dpmacs {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ dpmac1: dpmac@1 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <1>;
-+ };
-+ dpmac2: dpmac@2 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <2>;
-+ };
-+ dpmac3: dpmac@3 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <3>;
-+ };
-+ dpmac4: dpmac@4 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <4>;
-+ };
-+ dpmac5: dpmac@5 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <5>;
-+ };
-+ dpmac6: dpmac@6 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <6>;
-+ };
-+ dpmac7: dpmac@7 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <7>;
-+ };
-+ dpmac8: dpmac@8 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <8>;
-+ };
-+ dpmac9: dpmac@9 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <9>;
-+ };
-+ dpmac10: dpmac@10 {
-+ compatible = "fsl,qoriq-mc-dpmac";
-+ reg = <0xa>;
-+ };
-+ };
-+
-+ };
-+
+ sysclk: sysclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
+
+ clockgen: clocking@1300000 {
+ compatible = "fsl,ls1088a-clockgen";
+ };
+
+ i2c0: i2c@2000000 {
-+ compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls1088a-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2000000 0x0 0x10000>;
+ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 7>;
++ fsl-scl-gpio = <&gpio3 30 0>;
+ status = "disabled";
+ };
+
+ <0000 0 0 4 &gic 0 0 0 117 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
-+ pcie@3600000 {
-+ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie",
-+ "snps,dw-pcie";
-+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
-+ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
-+ reg-names = "regs", "config";
-+ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
-+ interrupt-names = "aer";
-+ #address-cells = <3>;
-+ #size-cells = <2>;
-+ device_type = "pci";
-+ dma-coherent;
-+ num-lanes = <8>;
-+ bus-range = <0x0 0xff>;
-+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
-+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
-+ msi-parent = <&its>;
-+ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
-+ #interrupt-cells = <1>;
-+ interrupt-map-mask = <0 0 0 7>;
-+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
-+ <0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
++ pcie@3600000 {
++ compatible = "fsl,ls2088a-pcie", "fsl,ls1088a-pcie",
++ "snps,dw-pcie";
++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
++ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
++ reg-names = "regs", "config";
++ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; /* aer interrupt */
++ interrupt-names = "aer";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
++ dma-coherent;
++ num-lanes = <8>;
++ bus-range = <0x0 0xff>;
++ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */
++ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 1>; /* This is fixed-up by u-boot */
++ #interrupt-cells = <1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 2 &gic 0 0 0 120 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 3 &gic 0 0 0 121 IRQ_TYPE_LEVEL_HIGH>,
++ <0000 0 0 4 &gic 0 0 0 122 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ fsl_mc: fsl-mc@80c000000 {
++ compatible = "fsl,qoriq-mc";
++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
++ msi-parent = <&its>;
++ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
++ dma-coherent;
++ #address-cells = <3>;
++ #size-cells = <1>;
++
++ /*
++ * Region type 0x0 - MC portals
++ * Region type 0x1 - QBMAN portals
++ */
++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
++
++ dpmacs {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ dpmac1: dpmac@1 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <1>;
++ };
++
++ dpmac2: dpmac@2 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <2>;
++ };
++
++ dpmac3: dpmac@3 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <3>;
++ };
++
++ dpmac4: dpmac@4 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <4>;
++ };
++
++ dpmac5: dpmac@5 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <5>;
++ };
++
++ dpmac6: dpmac@6 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <6>;
++ };
++
++ dpmac7: dpmac@7 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <7>;
++ };
++
++ dpmac8: dpmac@8 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <8>;
++ };
++
++ dpmac9: dpmac@9 {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <9>;
++ };
++
++ dpmac10: dpmac@a {
++ compatible = "fsl,qoriq-mc-dpmac";
++ reg = <0xa>;
++ };
++ };
+ };
+
+ smmu: iommu@5000000 {
+};
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
-@@ -1,8 +1,10 @@
+@@ -1,214 +1,87 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Freescale LS2080a QDS Board.
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
- * This file is dual-licensed: you can use it either under the terms
-@@ -46,169 +48,76 @@
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+ */
/dts-v1/;
- bank-width = <2>;
- device-width = <1>;
- };
-+ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
-+ reg = <3 0 0x300>; /* TODO check address */
-+ ranges = <0 3 0 0x300>;
-
+-
- nand@2,0 {
- compatible = "fsl,ifc-nand";
- reg = <0x2 0x0 0x10000>;
- };
-+ mdio_mux_emi1 {
-+ compatible = "mdio-mux-mmioreg", "mdio-mux";
-+ mdio-parent-bus = <&emdio1>;
-+ reg = <0x54 1>; /* BRDCFG4 */
-+ mux-mask = <0xe0>; /* EMI1_MDIO */
++ compatible = "fsl,tetra-fpga", "fsl,fpga-qixis", "simple-bus";
++ reg = <3 0 0x300>; /* TODO check address */
++ ranges = <0 3 0 0x300>;
- cpld@3,0 {
- reg = <0x3 0x0 0x10000>;
- #size-cells = <0>;
- i2c@0 {
- #address-cells = <1>;
-+ #address-cells=<1>;
- #size-cells = <0>;
+- #size-cells = <0>;
- reg = <0x00>;
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- };
- };
-
+-
- i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- shunt-resistor = <1000>;
- };
- };
--
++ mdio_mux_emi1 {
++ compatible = "mdio-mux-mmioreg", "mdio-mux";
++ mdio-parent-bus = <&emdio1>;
++ reg = <0x54 1>; /* BRDCFG4 */
++ mux-mask = <0xe0>; /* EMI1_MDIO */
+
- i2c@3 {
- #address-cells = <1>;
-- #size-cells = <0>;
++ #address-cells=<1>;
+ #size-cells = <0>;
- reg = <0x3>;
--
+
- adt7481@4c {
- compatible = "adi,adt7461";
- reg = <0x4c>;
-&i2c1 {
- status = "disabled";
--};
++/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
++&dpmac9 {
++ phy-handle = <&mdio0_phy12>;
+ };
-
-&i2c2 {
- status = "disabled";
- spi-max-frequency = <3000000>;
- reg = <2>;
- };
--};
++&dpmac10 {
++ phy-handle = <&mdio0_phy13>;
+ };
-
-&qspi {
- status = "okay";
- spi-max-frequency = <20000000>;
- reg = <0>;
- };
--};
++&dpmac11 {
++ phy-handle = <&mdio0_phy14>;
+ };
-
-&sata0 {
- status = "okay";
-+/* Update DPMAC connections to external PHYs, under SerDes 0x2a_0x49. */
-+&dpmac9 {
-+ phy-handle = <&mdio0_phy12>;
- };
+-};
-
-&sata1 {
- status = "okay";
-+&dpmac10 {
-+ phy-handle = <&mdio0_phy13>;
- };
+-};
-
-&usb0 {
- status = "okay";
-+&dpmac11 {
-+ phy-handle = <&mdio0_phy14>;
- };
+-};
-
-&usb1 {
- status = "okay";
};
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
-@@ -1,8 +1,10 @@
+@@ -1,170 +1,105 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Freescale LS2080a RDB Board.
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
- * This file is dual-licensed: you can use it either under the terms
-@@ -46,125 +48,94 @@
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+ */
/dts-v1/;
};
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
-@@ -1,7 +1,7 @@
+@@ -1,62 +1,21 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Freescale LS2080a software Simulator model
*
*
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
-@@ -46,17 +46,12 @@
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPL or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+ */
/dts-v1/;
reg = <0x0 0x2210000 0x0 0x100>;
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
-@@ -1,8 +1,9 @@
+@@ -1,739 +1,144 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree Include file for Freescale Layerscape-2080A family SoC.
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
- * This file is dual-licensed: you can use it either under the terms
-@@ -44,696 +45,132 @@
- * OTHER DEALINGS IN THE SOFTWARE.
+- * This file is dual-licensed: you can use it either under the terms
+- * of the GPLv2 or the X11 license, at your option. Note that this dual
+- * licensing only applies to this file, and not this project as a
+- * whole.
+- *
+- * a) This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation; either version 2 of the
+- * License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * Or, alternatively,
+- *
+- * b) Permission is hereby granted, free of charge, to any person
+- * obtaining a copy of this software and associated documentation
+- * files (the "Software"), to deal in the Software without
+- * restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or
+- * sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following
+- * conditions:
+- *
+- * The above copyright notice and this permission notice shall be
+- * included in all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
*/
-/ {
+ snps,dma-snooping;
+};
+
++&timer {
++ fsl,erratum-a008585;
++};
++
+&pcie1 {
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
-@@ -0,0 +1,162 @@
+@@ -0,0 +1,126 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Freescale LS2088A QDS Board.
+ *
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
-@@ -0,0 +1,140 @@
+@@ -0,0 +1,104 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Freescale LS2088A RDB Board.
+ *
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
-@@ -0,0 +1,195 @@
+@@ -0,0 +1,159 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for Freescale Layerscape-2088A family SoC.
+ *
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "fsl-ls208xa.dtsi"
+
+ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000
+ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>;
-+};
---- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
-@@ -0,0 +1,198 @@
-+/*
-+ * Device Tree file for Freescale LS2080A QDS Board.
-+ *
-+ * Copyright 2016 Freescale Semiconductor, Inc.
-+ * Copyright 2017 NXP
-+ *
-+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
-+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+@@ -0,0 +1,162 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Device Tree file for Freescale LS2080A QDS Board.
+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&esdhc {
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
-@@ -0,0 +1,161 @@
+@@ -0,0 +1,136 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Freescale LS2080A RDB Board.
+ *
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&esdhc {
+ };
+ };
+
++ i2c@2 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x02>;
++ ina220@40 {
++ compatible = "ti,ina220";
++ reg = <0x40>;
++ shunt-resistor = <500>;
++ };
++ };
++
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
-@@ -0,0 +1,919 @@
+@@ -0,0 +1,885 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree Include file for Freescale Layerscape-2080A family SoC.
+ *
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
-+ * This file is dual-licensed: you can use it either under the terms
-+ * of the GPLv2 or the X11 license, at your option. Note that this dual
-+ * licensing only applies to this file, and not this project as a
-+ * whole.
-+ *
-+ * a) This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation; either version 2 of the
-+ * License, or (at your option) any later version.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * Or, alternatively,
-+ *
-+ * b) Permission is hereby granted, free of charge, to any person
-+ * obtaining a copy of this software and associated documentation
-+ * files (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use,
-+ * copy, modify, merge, publish, distribute, sublicense, and/or
-+ * sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following
-+ * conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be
-+ * included in all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+ mask = <0x2>;
+ };
+
-+ timer {
++ timer: timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+ <1 14 4>, /* Physical Non-Secure PPI, active-low */
+ <1 11 4>, /* Virtual PPI, active-low */
+ <1 10 4>; /* Hypervisor PPI, active-low */
-+ fsl,erratum-a008585;
+ };
+
+ pmu {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
++ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
+
+ clockgen: clocking@1300000 {
+ compatible = "fsl,ls2080a-clockgen";
+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+ msi-parent = <&its>;
+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
++ dma-coherent;
+ #address-cells = <3>;
+ #size-cells = <1>;
+
+
+ i2c0: i2c@2000000 {
+ status = "disabled";
-+ compatible = "fsl,vf610-i2c";
++ compatible = "fsl,vf610-i2c", "fsl,ls208xa-vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2000000 0x0 0x10000>;
+ interrupts = <0 34 0x4>; /* Level high type */
+ clock-names = "i2c";
+ clocks = <&clockgen 4 1>;
++ fsl-scl-gpio = <&gpio3 10 0>;
+ };
+
+ i2c1: i2c@2010000 {
+ };
+};
--- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-bman1-portals.dtsi
-@@ -0,0 +1,81 @@
++++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals-sdk.dtsi
+@@ -0,0 +1,55 @@
+/*
-+ * QorIQ BMan Portals device tree
++ * QorIQ BMan SDK Portals device tree nodes
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&bportals {
++ bman-portal@0 {
++ cell-index = <0>;
++ };
++
++ bman-portal@10000 {
++ cell-index = <1>;
++ };
++
++ bman-portal@20000 {
++ cell-index = <2>;
++ };
++
++ bman-portal@30000 {
++ cell-index = <3>;
++ };
++
++ bman-portal@40000 {
++ cell-index = <4>;
++ };
++
++ bman-portal@50000 {
++ cell-index = <5>;
++ };
++
++ bman-portal@60000 {
++ cell-index = <6>;
++ };
++
++ bman-portal@70000 {
++ cell-index = <7>;
++ };
++
++ bman-portal@80000 {
++ cell-index = <8>;
++ };
++
++ bman-portal@90000 {
++ cell-index = <9>;
++ };
++
++ bman-bpids@0 {
++ compatible = "fsl,bpid-range";
++ fsl,bpid-range = <32 32>;
++ };
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
+@@ -0,0 +1,77 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * QorIQ BMan Portals device tree
++ *
++ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ *
++ */
++
++&bportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
-+ cell-index = <0>;
++ /*
++ * bootloader fix-ups are expected to provide the
++ * "fsl,bman-portal-<hardware revision>" compatible
++ */
+ compatible = "fsl,bman-portal";
-+ reg = <0x0 0x4000 0x4000000 0x4000>;
-+ interrupts = <0 173 0x4>;
++ reg = <0x0 0x4000>, <0x4000000 0x4000>;
++ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@10000 {
-+ cell-index = <1>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x10000 0x4000 0x4010000 0x4000>;
-+ interrupts = <0 175 0x4>;
++ reg = <0x10000 0x4000>, <0x4010000 0x4000>;
++ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@20000 {
-+ cell-index = <2>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x20000 0x4000 0x4020000 0x4000>;
-+ interrupts = <0 177 0x4>;
++ reg = <0x20000 0x4000>, <0x4020000 0x4000>;
++ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@30000 {
-+ cell-index = <3>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x30000 0x4000 0x4030000 0x4000>;
-+ interrupts = <0 179 0x4>;
++ reg = <0x30000 0x4000>, <0x4030000 0x4000>;
++ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@40000 {
-+ cell-index = <4>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x40000 0x4000 0x4040000 0x4000>;
-+ interrupts = <0 181 0x4>;
++ reg = <0x40000 0x4000>, <0x4040000 0x4000>;
++ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@50000 {
-+ cell-index = <5>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x50000 0x4000 0x4050000 0x4000>;
-+ interrupts = <0 183 0x4>;
++ reg = <0x50000 0x4000>, <0x4050000 0x4000>;
++ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@60000 {
-+ cell-index = <6>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x60000 0x4000 0x4060000 0x4000>;
-+ interrupts = <0 185 0x4>;
++ reg = <0x60000 0x4000>, <0x4060000 0x4000>;
++ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@70000 {
-+ cell-index = <7>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x70000 0x4000 0x4070000 0x4000>;
-+ interrupts = <0 187 0x4>;
++ reg = <0x70000 0x4000>, <0x4070000 0x4000>;
++ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ bman-portal@80000 {
-+ cell-index = <8>;
+ compatible = "fsl,bman-portal";
-+ reg = <0x80000 0x4000 0x4080000 0x4000>;
-+ interrupts = <0 189 0x4>;
++ reg = <0x80000 0x4000>, <0x4080000 0x4000>;
++ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
-+ bman-bpids@0 {
-+ compatible = "fsl,bpid-range";
-+ fsl,bpid-range = <32 32>;
++ bman-portal@90000 {
++ compatible = "fsl,bman-portal";
++ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
++ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
--- /dev/null
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
@@ -0,0 +1,43 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 10g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
@@ -0,0 +1,43 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 10g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #2 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #4 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 1g port #5 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -0,0 +1,130 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * QorIQ FMan v3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
-+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman0: fman@1a00000 {
+ };
+};
--- /dev/null
-+++ b/arch/arm64/boot/dts/freescale/qoriq-qman1-portals.dtsi
-@@ -0,0 +1,104 @@
++++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals-sdk.dtsi
+@@ -0,0 +1,38 @@
+/*
-+ * QorIQ QMan Portals device tree
++ * QorIQ QMan SDK Portals device tree nodes
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&qportals {
++ qman-fqids@0 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <256 256>;
++ };
++
++ qman-fqids@1 {
++ compatible = "fsl,fqid-range";
++ fsl,fqid-range = <32768 32768>;
++ };
++
++ qman-pools@0 {
++ compatible = "fsl,pool-channel-range";
++ fsl,pool-channel-range = <0x401 0xf>;
++ };
++
++ qman-cgrids@0 {
++ compatible = "fsl,cgrid-range";
++ fsl,cgrid-range = <0 256>;
++ };
++
++ qman-ceetm@0 {
++ compatible = "fsl,qman-ceetm";
++ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
++ fsl,ceetm-sp-range = <0 16>;
++ fsl,ceetm-lni-range = <0 8>;
++ fsl,ceetm-channel-range = <0 32>;
++ };
++};
+--- /dev/null
++++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
+@@ -0,0 +1,87 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * QorIQ QMan Portals device tree
++ *
++ * Copyright 2011-2016 Freescale Semiconductor Inc.
++ *
++ */
++
++&qportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ qportal0: qman-portal@0 {
++ /*
++ * bootloader fix-ups are expected to provide the
++	 * "fsl,qman-portal-<hardware revision>" compatible
++ */
+ compatible = "fsl,qman-portal";
-+ reg = <0x0 0x4000 0x4000000 0x4000>;
-+ interrupts = <0 172 0x4>;
++ reg = <0x0 0x4000>, <0x4000000 0x4000>;
++ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <0>;
+ };
+
+ qportal1: qman-portal@10000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x10000 0x4000 0x4010000 0x4000>;
-+ interrupts = <0 174 0x4>;
++ reg = <0x10000 0x4000>, <0x4010000 0x4000>;
++ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <1>;
+ };
+
+ qportal2: qman-portal@20000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x20000 0x4000 0x4020000 0x4000>;
-+ interrupts = <0 176 0x4>;
++ reg = <0x20000 0x4000>, <0x4020000 0x4000>;
++ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <2>;
+ };
+
+ qportal3: qman-portal@30000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x30000 0x4000 0x4030000 0x4000>;
-+ interrupts = <0 178 0x4>;
++ reg = <0x30000 0x4000>, <0x4030000 0x4000>;
++ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <3>;
+ };
+
+ qportal4: qman-portal@40000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x40000 0x4000 0x4040000 0x4000>;
-+ interrupts = <0 180 0x4>;
++ reg = <0x40000 0x4000>, <0x4040000 0x4000>;
++ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <4>;
+ };
+
+ qportal5: qman-portal@50000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x50000 0x4000 0x4050000 0x4000>;
-+ interrupts = <0 182 0x4>;
++ reg = <0x50000 0x4000>, <0x4050000 0x4000>;
++ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <5>;
+ };
+
+ qportal6: qman-portal@60000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x60000 0x4000 0x4060000 0x4000>;
-+ interrupts = <0 184 0x4>;
++ reg = <0x60000 0x4000>, <0x4060000 0x4000>;
++ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <6>;
+ };
+
+ qportal7: qman-portal@70000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x70000 0x4000 0x4070000 0x4000>;
-+ interrupts = <0 186 0x4>;
++ reg = <0x70000 0x4000>, <0x4070000 0x4000>;
++ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <7>;
+ };
+
+ qportal8: qman-portal@80000 {
+ compatible = "fsl,qman-portal";
-+ reg = <0x80000 0x4000 0x4080000 0x4000>;
-+ interrupts = <0 188 0x4>;
++ reg = <0x80000 0x4000>, <0x4080000 0x4000>;
++ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ cell-index = <8>;
+ };
+
-+ qman-fqids@0 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <256 256>;
-+ };
-+
-+ qman-fqids@1 {
-+ compatible = "fsl,fqid-range";
-+ fsl,fqid-range = <32768 32768>;
-+ };
-+
-+ qman-pools@0 {
-+ compatible = "fsl,pool-channel-range";
-+ fsl,pool-channel-range = <0x401 0xf>;
-+ };
-+
-+ qman-cgrids@0 {
-+ compatible = "fsl,cgrid-range";
-+ fsl,cgrid-range = <0 256>;
-+ };
-+
-+ qman-ceetm@0 {
-+ compatible = "fsl,qman-ceetm";
-+ fsl,ceetm-lfqid-range = <0xf00000 0x1000>;
-+ fsl,ceetm-sp-range = <0 12>;
-+ fsl,ceetm-lni-range = <0 8>;
-+ fsl,ceetm-channel-range = <0 32>;
++ qportal9: qman-portal@90000 {
++ compatible = "fsl,qman-portal";
++ reg = <0x90000 0x4000>, <0x4090000 0x4000>;
++ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
++ cell-index = <9>;
+ };
+};
--- a/arch/powerpc/boot/dts/fsl/qoriq-bman1-portals.dtsi
--- /dev/null
+From c0612164b379ebc8964da6bc6f6ced9736dce488 Mon Sep 17 00:00:00 2001
+From: Mathew McBride <matt@traverse.com.au>
+Date: Tue, 17 Apr 2018 10:01:03 +1000
+Subject: [PATCH] add DTS for Traverse LS1043 Boards
+
+Signed-off-by: Mathew McBride <matt@traverse.com.au>
+---
+ arch/arm64/boot/dts/freescale/Makefile | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+ create mode 100644 arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
+
+--- a/arch/arm64/boot/dts/freescale/Makefile
++++ b/arch/arm64/boot/dts/freescale/Makefile
+@@ -21,7 +21,10 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
+ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+-
++
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043v.dtb
++dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043s.dtb
++
+ always := $(dtb-y)
+ subdir-y := $(dts-dirs)
+ clean-files := *.dtb
+--- a/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
++++ b/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
+@@ -330,3 +330,29 @@
+ &sata {
+ status = "disabled";
+ };
++
++/* Additions for Layerscape SDK (4.4/4.9) Kernel only
++ * These kernels need additional setup for FMan/QMan DMA shared memory
++ */
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+--- a/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
++++ b/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
+@@ -251,3 +251,29 @@
+ &sata {
+ status = "disabled";
+ };
++
++/* Additions for Layerscape SDK (4.4/4.9) Kernel only
++ * These kernels need additional setup for FMan/QMan DMA shared memory
++ */
++
++&bman_fbpr {
++ compatible = "fsl,bman-fbpr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_fqd {
++ compatible = "fsl,qman-fqd";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++&qman_pfdr {
++ compatible = "fsl,qman-pfdr";
++ alloc-ranges = <0 0 0x10000 0>;
++};
++
++&soc {
++#include "qoriq-dpaa-eth.dtsi"
++#include "qoriq-fman3-0-6oh.dtsi"
++};
++
++&fman0 {
++ compatible = "fsl,fman", "simple-bus";
++};
+++ /dev/null
-From c0612164b379ebc8964da6bc6f6ced9736dce488 Mon Sep 17 00:00:00 2001
-From: Mathew McBride <matt@traverse.com.au>
-Date: Tue, 17 Apr 2018 10:01:03 +1000
-Subject: [PATCH] add DTS for Traverse LS1043 Boards
-
-Signed-off-by: Mathew McBride <matt@traverse.com.au>
----
- arch/arm64/boot/dts/freescale/Makefile | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
- create mode 100644 arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-
---- a/arch/arm64/boot/dts/freescale/Makefile
-+++ b/arch/arm64/boot/dts/freescale/Makefile
-@@ -20,7 +20,10 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
- dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
--
-+
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043v.dtb
-+dtb-$(CONFIG_ARCH_LAYERSCAPE) += traverse-ls1043s.dtb
-+
- always := $(dtb-y)
- subdir-y := $(dts-dirs)
- clean-files := *.dtb
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043s.dts
-@@ -330,3 +330,29 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
---- a/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-+++ b/arch/arm64/boot/dts/freescale/traverse-ls1043v.dts
-@@ -251,3 +251,29 @@
- &sata {
- status = "disabled";
- };
-+
-+/* Additions for Layerscape SDK (4.4/4.9) Kernel only
-+ * These kernels need additional setup for FMan/QMan DMA shared memory
-+ */
-+
-+&bman_fbpr {
-+ compatible = "fsl,bman-fbpr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_fqd {
-+ compatible = "fsl,qman-fqd";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+&qman_pfdr {
-+ compatible = "fsl,qman-pfdr";
-+ alloc-ranges = <0 0 0x10000 0>;
-+};
-+
-+&soc {
-+#include "qoriq-dpaa-eth.dtsi"
-+#include "qoriq-fman3-0-6oh.dtsi"
-+};
-+
-+&fman0 {
-+ compatible = "fsl,fman", "simple-bus";
-+};
-From 825d57369b196b64387348922b47adc5b651622c Mon Sep 17 00:00:00 2001
+From c03c545e064a81515fe109ddcc4ecb3895528e58 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 14:55:47 +0800
-Subject: [PATCH 05/30] mtd: spi-nor: support layerscape
+Date: Fri, 6 Jul 2018 15:32:05 +0800
+Subject: [PATCH] mtd: spi-nor: support layerscape
This is an integrated patch for layerscape qspi support.
Signed-off-by: Ash Benz <ash.benz@bk.ru>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
+ drivers/mtd/devices/m25p80.c | 3 +-
drivers/mtd/mtdchar.c | 2 +-
- drivers/mtd/spi-nor/fsl-quadspi.c | 327 +++++++++++++++++++++++++++++++-------
- drivers/mtd/spi-nor/spi-nor.c | 136 ++++++++++++++--
+ drivers/mtd/spi-nor/fsl-quadspi.c | 327 ++++++++++++++++++++++++------
+ drivers/mtd/spi-nor/spi-nor.c | 141 ++++++++++++-
include/linux/mtd/spi-nor.h | 14 +-
- 4 files changed, 409 insertions(+), 70 deletions(-)
+ 5 files changed, 416 insertions(+), 71 deletions(-)
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
-@@ -1131,6 +1152,9 @@ static const struct flash_info spi_nor_i
+@@ -1130,7 +1151,15 @@ static const struct flash_info spi_nor_i
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
++ {
++ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
++ },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{
"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
-@@ -1197,6 +1221,53 @@ static const struct flash_info *spi_nor_
+@@ -1197,6 +1226,53 @@ static const struct flash_info *spi_nor_
id[0], id[1], id[2]);
return ERR_PTR(-ENODEV);
}
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
-@@ -1416,7 +1487,7 @@ static int macronix_quad_enable(struct s
+@@ -1416,7 +1492,7 @@ static int macronix_quad_enable(struct s
* Write status Register and configuration register with 2 bytes
* The first byte will be written to the status register, while the
* second byte will be written to the configuration register.
*/
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
-@@ -1464,6 +1535,24 @@ static int spansion_quad_enable(struct s
+@@ -1464,6 +1540,24 @@ static int spansion_quad_enable(struct s
return 0;
}
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
-@@ -1610,9 +1699,25 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1610,9 +1704,25 @@ int spi_nor_scan(struct spi_nor *nor, co
write_sr(nor, 0);
spi_nor_wait_till_ready(nor);
}
mtd->priv = nor;
mtd->type = MTD_NORFLASH;
mtd->writesize = 1;
-@@ -1646,6 +1751,8 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1646,6 +1756,8 @@ int spi_nor_scan(struct spi_nor *nor, co
nor->flags |= SNOR_F_USE_FSR;
if (info->flags & SPI_NOR_HAS_TB)
nor->flags |= SNOR_F_HAS_SR_TB;
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
-@@ -1685,9 +1792,15 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1685,9 +1797,15 @@ int spi_nor_scan(struct spi_nor *nor, co
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & SPI_NOR_NO_FR)
nor->flash_read = SPI_NOR_NORMAL;
ret = set_quad_mode(nor, info);
if (ret) {
dev_err(dev, "quad mode not supported\n");
-@@ -1700,6 +1813,9 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1700,6 +1818,9 @@ int spi_nor_scan(struct spi_nor *nor, co
/* Default commands */
switch (nor->flash_read) {
-From 2f887ade916e7e1de2f8a84d3902aaa30af4b163 Mon Sep 17 00:00:00 2001
+From 6cc4cbfd0456c752f9f59d7d07fbb4b514dc6909 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 14:59:15 +0800
-Subject: [PATCH 07/30] sdk_dpaa: support layerscape
+Date: Thu, 5 Jul 2018 16:25:00 +0800
+Subject: [PATCH 07/32] sdk_dpaa: support layerscape
This is an integrated patch for layerscape dpaa1-sdk support.
Signed-off-by: Mathew McBride <matt@traverse.com.au>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/net/ethernet/freescale/sdk_dpaa/Kconfig | 196 +
- drivers/net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
- .../net/ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1224 ++++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth.h | 687 ++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2013 ++++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h | 238 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.c | 1802 +++++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_common.h | 225 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1168 +++
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
- .../ethernet/freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
- .../net/ethernet/freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++
- drivers/net/ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 291 +
- drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c | 907 +++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 ++
- drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 +
- .../net/ethernet/freescale/sdk_dpaa/offline_port.c | 848 +++
- .../net/ethernet/freescale/sdk_dpaa/offline_port.h | 59 +
- drivers/net/ethernet/freescale/sdk_fman/Kconfig | 153 +
- drivers/net/ethernet/freescale/sdk_fman/Makefile | 11 +
- .../freescale/sdk_fman/Peripherals/FM/HC/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 ++++
- .../freescale/sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c | 1465 ++++
- .../freescale/sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
- .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
- .../sdk_fman/Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.c | 674 ++
- .../freescale/sdk_fman/Peripherals/FM/MAC/fm_mac.h | 226 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
- .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
- .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 845 +++
- .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 163 +
- .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 532 ++
- .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 213 +
- .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 1153 +++
- .../freescale/sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
- .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.c | 78 +
- .../sdk_fman/Peripherals/FM/MAC/memac_mii_acc.h | 73 +
- .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.c | 1017 +++
- .../freescale/sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
- .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
- .../sdk_fman/Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
- .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.c | 237 +
- .../sdk_fman/Peripherals/FM/MACSEC/fm_macsec.h | 203 +
- .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
- .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
- .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 +++
- .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
- .../freescale/sdk_fman/Peripherals/FM/Makefile | 23 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 ++++++++++++++++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 ++++++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
- .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 +++++
- .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 ++
- .../freescale/sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
- .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
- .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 888 +++
- .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
- .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
- .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6436 +++++++++++++++++
- .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
- .../sdk_fman/Peripherals/FM/Port/fm_port_dsar.h | 494 ++
- .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
- .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1568 ++++
- .../freescale/sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
- .../freescale/sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
- .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
- .../freescale/sdk_fman/Peripherals/FM/SP/Makefile | 15 +
- .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
- .../freescale/sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
- .../freescale/sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
- .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++++
- .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
- .../freescale/sdk_fman/Peripherals/FM/fm_ipc.h | 465 ++
- .../freescale/sdk_fman/Peripherals/FM/fm_muram.c | 174 +
- .../freescale/sdk_fman/Peripherals/FM/fman.c | 1398 ++++
- .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 ++++
- .../freescale/sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
- .../sdk_fman/Peripherals/FM/inc/fm_sp_common.h | 117 +
- .../net/ethernet/freescale/sdk_fman/etc/Makefile | 12 +
- .../net/ethernet/freescale/sdk_fman/etc/error.c | 95 +
- drivers/net/ethernet/freescale/sdk_fman/etc/list.c | 71 +
- .../net/ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
- drivers/net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
- drivers/net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
- .../net/ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
- .../ethernet/freescale/sdk_fman/fmanv3h_dflags.h | 57 +
- .../ethernet/freescale/sdk_fman/fmanv3l_dflags.h | 56 +
- .../sdk_fman/inc/Peripherals/crc_mac_addr_ext.h | 364 +
- .../freescale/sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
- .../freescale/sdk_fman/inc/Peripherals/fm_ext.h | 1731 +++++
- .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 887 +++
- .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 ++++
- .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
- .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 ++++++++++
- .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 +++++++
- .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
- .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 ++
- .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
- .../net/ethernet/freescale/sdk_fman/inc/core_ext.h | 90 +
- .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
- .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
- .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
- .../ethernet/freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
- .../ethernet/freescale/sdk_fman/inc/debug_ext.h | 233 +
- .../ethernet/freescale/sdk_fman/inc/endian_ext.h | 447 ++
- .../net/ethernet/freescale/sdk_fman/inc/enet_ext.h | 205 +
- .../ethernet/freescale/sdk_fman/inc/error_ext.h | 529 ++
- .../ethernet/freescale/sdk_fman/inc/etc/list_ext.h | 358 +
- .../ethernet/freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
- .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
- .../ethernet/freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
- .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
- .../sdk_fman/inc/flib/common/arch/ppc_access.h | 37 +
- .../freescale/sdk_fman/inc/flib/common/general.h | 52 +
- .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
- .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
- .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 +++
- .../freescale/sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
- .../sdk_fman/inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_memac.h | 434 ++
- .../sdk_fman/inc/flib/fsl_fman_memac_mii_acc.h | 78 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_rtc.h | 449 ++
- .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
- .../freescale/sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
- .../integrations/FMANV3H/dpaa_integration_ext.h | 291 +
- .../sdk_fman/inc/integrations/FMANV3H/part_ext.h | 71 +
- .../integrations/FMANV3H/part_integration_ext.h | 304 +
- .../integrations/FMANV3L/dpaa_integration_ext.h | 293 +
- .../sdk_fman/inc/integrations/FMANV3L/part_ext.h | 59 +
- .../integrations/FMANV3L/part_integration_ext.h | 304 +
- .../inc/integrations/LS1043/dpaa_integration_ext.h | 291 +
- .../sdk_fman/inc/integrations/LS1043/part_ext.h | 64 +
- .../inc/integrations/LS1043/part_integration_ext.h | 185 +
- .../inc/integrations/P1023/dpaa_integration_ext.h | 213 +
- .../sdk_fman/inc/integrations/P1023/part_ext.h | 82 +
- .../inc/integrations/P1023/part_integration_ext.h | 635 ++
- .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
- .../inc/integrations/P3040_P4080_P5020/part_ext.h | 83 +
- .../P3040_P4080_P5020/part_integration_ext.h | 336 +
- .../net/ethernet/freescale/sdk_fman/inc/math_ext.h | 100 +
- .../net/ethernet/freescale/sdk_fman/inc/ncsw_ext.h | 435 ++
- .../net/ethernet/freescale/sdk_fman/inc/net_ext.h | 430 ++
- .../net/ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
- .../ethernet/freescale/sdk_fman/inc/stdarg_ext.h | 49 +
- .../ethernet/freescale/sdk_fman/inc/stdlib_ext.h | 162 +
- .../ethernet/freescale/sdk_fman/inc/string_ext.h | 56 +
- .../ethernet/freescale/sdk_fman/inc/types_ext.h | 62 +
- .../ethernet/freescale/sdk_fman/inc/xx_common.h | 56 +
- .../net/ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
- .../ethernet/freescale/sdk_fman/ls1043_dflags.h | 56 +
- .../net/ethernet/freescale/sdk_fman/ncsw_config.mk | 53 +
- .../net/ethernet/freescale/sdk_fman/p1023_dflags.h | 65 +
- .../freescale/sdk_fman/p3040_4080_5020_dflags.h | 62 +
- .../net/ethernet/freescale/sdk_fman/src/Makefile | 11 +
- .../freescale/sdk_fman/src/inc/system/sys_ext.h | 118 +
- .../freescale/sdk_fman/src/inc/system/sys_io_ext.h | 46 +
- .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
- .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 130 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
- .../sdk_fman/src/inc/wrapper/lnxwrp_fsl_fman.h | 921 +++
- .../ethernet/freescale/sdk_fman/src/inc/xx/xx.h | 50 +
- .../freescale/sdk_fman/src/system/Makefile | 10 +
- .../freescale/sdk_fman/src/system/sys_io.c | 171 +
- .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
- .../freescale/sdk_fman/src/wrapper/fman_test.c | 1665 +++++
- .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.c | 2908 ++++++++
- .../freescale/sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
- .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1480 ++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4854 +++++++++++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 ++++
- .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
- .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.c | 191 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.h | 144 +
- .../sdk_fman/src/wrapper/lnxwrp_resources_ut.make | 28 +
- .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
- .../freescale/sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 +++++
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 ++++
- .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
- .../ethernet/freescale/sdk_fman/src/xx/Makefile | 18 +
- .../freescale/sdk_fman/src/xx/module_strings.c | 46 +
- .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 +++
- .../ethernet/freescale/sdk_fman/src/xx/xx_linux.c | 918 +++
- drivers/staging/fsl_qbman/Kconfig | 228 +
- drivers/staging/fsl_qbman/Makefile | 28 +
- drivers/staging/fsl_qbman/bman_config.c | 720 ++
- drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
- drivers/staging/fsl_qbman/bman_driver.c | 575 ++
- drivers/staging/fsl_qbman/bman_high.c | 1145 +++
- drivers/staging/fsl_qbman/bman_low.h | 565 ++
- drivers/staging/fsl_qbman/bman_private.h | 166 +
- drivers/staging/fsl_qbman/bman_test.c | 56 +
- drivers/staging/fsl_qbman/bman_test.h | 44 +
- drivers/staging/fsl_qbman/bman_test_high.c | 183 +
- drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
- drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
- drivers/staging/fsl_qbman/dpa_sys.h | 259 +
- drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
- drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
- drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
- drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
- drivers/staging/fsl_qbman/fsl_usdpaa.c | 2007 ++++++
- drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
- drivers/staging/fsl_qbman/qbman_driver.c | 88 +
- drivers/staging/fsl_qbman/qman_config.c | 1224 ++++
- drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
- drivers/staging/fsl_qbman/qman_driver.c | 977 +++
- drivers/staging/fsl_qbman/qman_high.c | 5669 +++++++++++++++
- drivers/staging/fsl_qbman/qman_low.h | 1442 ++++
- drivers/staging/fsl_qbman/qman_private.h | 398 +
- drivers/staging/fsl_qbman/qman_test.c | 57 +
- drivers/staging/fsl_qbman/qman_test.h | 45 +
- drivers/staging/fsl_qbman/qman_test_high.c | 216 +
- drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
- drivers/staging/fsl_qbman/qman_utility.c | 129 +
- include/linux/fsl_bman.h | 532 ++
- include/linux/fsl_qman.h | 3888 ++++++++++
- include/linux/fsl_usdpaa.h | 372 +
- include/uapi/linux/fmd/Kbuild | 5 +
- include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
- include/uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
- include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 ++++++++
- .../uapi/linux/fmd/Peripherals/fm_port_ioctls.h | 973 +++
- .../uapi/linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
- include/uapi/linux/fmd/integrations/Kbuild | 1 +
- .../linux/fmd/integrations/integration_ioctls.h | 56 +
- include/uapi/linux/fmd/ioctls.h | 96 +
- include/uapi/linux/fmd/net_ioctls.h | 430 ++
- 257 files changed, 153236 insertions(+)
+ .../net/ethernet/freescale/sdk_dpaa/Kconfig | 196 +
+ .../net/ethernet/freescale/sdk_dpaa/Makefile | 46 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_1588.c | 580 ++
+ .../ethernet/freescale/sdk_dpaa/dpaa_1588.h | 138 +
+ .../freescale/sdk_dpaa/dpaa_debugfs.c | 180 +
+ .../freescale/sdk_dpaa/dpaa_debugfs.h | 43 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth.c | 1224 +++
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth.h | 687 ++
+ .../freescale/sdk_dpaa/dpaa_eth_base.c | 205 +
+ .../freescale/sdk_dpaa/dpaa_eth_base.h | 49 +
+ .../freescale/sdk_dpaa/dpaa_eth_ceetm.c | 2115 +++++
+ .../freescale/sdk_dpaa/dpaa_eth_ceetm.h | 240 +
+ .../freescale/sdk_dpaa/dpaa_eth_common.c | 1802 ++++
+ .../freescale/sdk_dpaa/dpaa_eth_common.h | 225 +
+ .../freescale/sdk_dpaa/dpaa_eth_proxy.c | 381 +
+ .../ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c | 1193 +++
+ .../freescale/sdk_dpaa/dpaa_eth_sysfs.c | 278 +
+ .../freescale/sdk_dpaa/dpaa_eth_trace.h | 144 +
+ .../freescale/sdk_dpaa/dpaa_ethtool.c | 544 ++
+ .../ethernet/freescale/sdk_dpaa/dpaa_ptp.c | 291 +
+ .../net/ethernet/freescale/sdk_dpaa/mac-api.c | 931 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 489 ++
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.h | 135 +
+ .../freescale/sdk_dpaa/offline_port.c | 848 ++
+ .../freescale/sdk_dpaa/offline_port.h | 59 +
+ .../net/ethernet/freescale/sdk_fman/Kconfig | 153 +
+ .../net/ethernet/freescale/sdk_fman/Makefile | 11 +
+ .../sdk_fman/Peripherals/FM/HC/Makefile | 15 +
+ .../freescale/sdk_fman/Peripherals/FM/HC/hc.c | 1232 +++
+ .../sdk_fman/Peripherals/FM/MAC/Makefile | 28 +
+ .../sdk_fman/Peripherals/FM/MAC/dtsec.c | 1504 ++++
+ .../sdk_fman/Peripherals/FM/MAC/dtsec.h | 228 +
+ .../Peripherals/FM/MAC/dtsec_mii_acc.c | 97 +
+ .../Peripherals/FM/MAC/dtsec_mii_acc.h | 42 +
+ .../sdk_fman/Peripherals/FM/MAC/fm_mac.c | 674 ++
+ .../sdk_fman/Peripherals/FM/MAC/fm_mac.h | 226 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.c | 119 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_crc32.h | 43 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_dtsec.c | 845 ++
+ .../Peripherals/FM/MAC/fman_dtsec_mii_acc.c | 163 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_memac.c | 532 ++
+ .../Peripherals/FM/MAC/fman_memac_mii_acc.c | 213 +
+ .../sdk_fman/Peripherals/FM/MAC/fman_tgec.c | 367 +
+ .../sdk_fman/Peripherals/FM/MAC/memac.c | 1153 +++
+ .../sdk_fman/Peripherals/FM/MAC/memac.h | 110 +
+ .../Peripherals/FM/MAC/memac_mii_acc.c | 78 +
+ .../Peripherals/FM/MAC/memac_mii_acc.h | 73 +
+ .../sdk_fman/Peripherals/FM/MAC/tgec.c | 1017 +++
+ .../sdk_fman/Peripherals/FM/MAC/tgec.h | 151 +
+ .../Peripherals/FM/MAC/tgec_mii_acc.c | 139 +
+ .../Peripherals/FM/MAC/tgec_mii_acc.h | 80 +
+ .../sdk_fman/Peripherals/FM/MACSEC/Makefile | 15 +
+ .../Peripherals/FM/MACSEC/fm_macsec.c | 237 +
+ .../Peripherals/FM/MACSEC/fm_macsec.h | 203 +
+ .../Peripherals/FM/MACSEC/fm_macsec_guest.c | 59 +
+ .../Peripherals/FM/MACSEC/fm_macsec_master.c | 1031 +++
+ .../Peripherals/FM/MACSEC/fm_macsec_master.h | 479 ++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.c | 883 ++
+ .../Peripherals/FM/MACSEC/fm_macsec_secy.h | 144 +
+ .../sdk_fman/Peripherals/FM/Makefile | 23 +
+ .../sdk_fman/Peripherals/FM/Pcd/Makefile | 26 +
+ .../sdk_fman/Peripherals/FM/Pcd/crc64.h | 360 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_cc.c | 7582 +++++++++++++++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_cc.h | 399 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_kg.c | 3242 +++++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_kg.h | 206 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.c | 5571 ++++++++++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_manip.h | 555 ++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd.c | 2095 +++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd.h | 543 ++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_pcd_ipc.h | 280 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.c | 1847 ++++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_plcr.h | 165 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_prs.c | 423 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_prs.h | 316 +
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.c | 984 +++
+ .../sdk_fman/Peripherals/FM/Pcd/fm_replic.h | 101 +
+ .../sdk_fman/Peripherals/FM/Pcd/fman_kg.c | 888 ++
+ .../sdk_fman/Peripherals/FM/Pcd/fman_prs.c | 129 +
+ .../sdk_fman/Peripherals/FM/Port/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/Port/fm_port.c | 6436 ++++++++++++++
+ .../sdk_fman/Peripherals/FM/Port/fm_port.h | 999 +++
+ .../Peripherals/FM/Port/fm_port_dsar.h | 494 ++
+ .../sdk_fman/Peripherals/FM/Port/fm_port_im.c | 753 ++
+ .../sdk_fman/Peripherals/FM/Port/fman_port.c | 1568 ++++
+ .../sdk_fman/Peripherals/FM/Rtc/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/Rtc/fm_rtc.c | 692 ++
+ .../sdk_fman/Peripherals/FM/Rtc/fm_rtc.h | 96 +
+ .../sdk_fman/Peripherals/FM/Rtc/fman_rtc.c | 334 +
+ .../sdk_fman/Peripherals/FM/SP/Makefile | 15 +
+ .../sdk_fman/Peripherals/FM/SP/fm_sp.c | 757 ++
+ .../sdk_fman/Peripherals/FM/SP/fm_sp.h | 85 +
+ .../sdk_fman/Peripherals/FM/SP/fman_sp.c | 197 +
+ .../freescale/sdk_fman/Peripherals/FM/fm.c | 5216 ++++++++++++
+ .../freescale/sdk_fman/Peripherals/FM/fm.h | 648 ++
+ .../sdk_fman/Peripherals/FM/fm_ipc.h | 465 +
+ .../sdk_fman/Peripherals/FM/fm_muram.c | 174 +
+ .../freescale/sdk_fman/Peripherals/FM/fman.c | 1398 +++
+ .../sdk_fman/Peripherals/FM/inc/fm_common.h | 1214 +++
+ .../sdk_fman/Peripherals/FM/inc/fm_hc.h | 93 +
+ .../Peripherals/FM/inc/fm_sp_common.h | 117 +
+ .../ethernet/freescale/sdk_fman/etc/Makefile | 12 +
+ .../ethernet/freescale/sdk_fman/etc/error.c | 95 +
+ .../ethernet/freescale/sdk_fman/etc/list.c | 71 +
+ .../ethernet/freescale/sdk_fman/etc/memcpy.c | 620 ++
+ .../net/ethernet/freescale/sdk_fman/etc/mm.c | 1155 +++
+ .../net/ethernet/freescale/sdk_fman/etc/mm.h | 105 +
+ .../ethernet/freescale/sdk_fman/etc/sprint.c | 81 +
+ .../freescale/sdk_fman/fmanv3h_dflags.h | 57 +
+ .../freescale/sdk_fman/fmanv3l_dflags.h | 56 +
+ .../inc/Peripherals/crc_mac_addr_ext.h | 364 +
+ .../sdk_fman/inc/Peripherals/dpaa_ext.h | 210 +
+ .../sdk_fman/inc/Peripherals/fm_ext.h | 1731 ++++
+ .../sdk_fman/inc/Peripherals/fm_mac_ext.h | 887 ++
+ .../sdk_fman/inc/Peripherals/fm_macsec_ext.h | 1271 +++
+ .../sdk_fman/inc/Peripherals/fm_muram_ext.h | 170 +
+ .../sdk_fman/inc/Peripherals/fm_pcd_ext.h | 3974 +++++++++
+ .../sdk_fman/inc/Peripherals/fm_port_ext.h | 2608 ++++++
+ .../sdk_fman/inc/Peripherals/fm_rtc_ext.h | 619 ++
+ .../sdk_fman/inc/Peripherals/fm_vsp_ext.h | 411 +
+ .../sdk_fman/inc/Peripherals/mii_acc_ext.h | 76 +
+ .../freescale/sdk_fman/inc/core_ext.h | 90 +
+ .../freescale/sdk_fman/inc/cores/arm_ext.h | 55 +
+ .../freescale/sdk_fman/inc/cores/e500v2_ext.h | 476 ++
+ .../freescale/sdk_fman/inc/cores/ppc_ext.h | 141 +
+ .../freescale/sdk_fman/inc/ddr_std_ext.h | 77 +
+ .../freescale/sdk_fman/inc/debug_ext.h | 233 +
+ .../freescale/sdk_fman/inc/endian_ext.h | 447 +
+ .../freescale/sdk_fman/inc/enet_ext.h | 205 +
+ .../freescale/sdk_fman/inc/error_ext.h | 529 ++
+ .../freescale/sdk_fman/inc/etc/list_ext.h | 358 +
+ .../freescale/sdk_fman/inc/etc/mem_ext.h | 318 +
+ .../freescale/sdk_fman/inc/etc/memcpy_ext.h | 208 +
+ .../freescale/sdk_fman/inc/etc/mm_ext.h | 310 +
+ .../freescale/sdk_fman/inc/etc/sprint_ext.h | 118 +
+ .../inc/flib/common/arch/ppc_access.h | 37 +
+ .../sdk_fman/inc/flib/common/general.h | 52 +
+ .../freescale/sdk_fman/inc/flib/fman_common.h | 78 +
+ .../freescale/sdk_fman/inc/flib/fsl_enet.h | 273 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman.h | 825 ++
+ .../sdk_fman/inc/flib/fsl_fman_dtsec.h | 1096 +++
+ .../inc/flib/fsl_fman_dtsec_mii_acc.h | 107 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_kg.h | 514 ++
+ .../sdk_fman/inc/flib/fsl_fman_memac.h | 434 +
+ .../inc/flib/fsl_fman_memac_mii_acc.h | 78 +
+ .../sdk_fman/inc/flib/fsl_fman_port.h | 593 ++
+ .../sdk_fman/inc/flib/fsl_fman_prs.h | 102 +
+ .../sdk_fman/inc/flib/fsl_fman_rtc.h | 449 +
+ .../freescale/sdk_fman/inc/flib/fsl_fman_sp.h | 138 +
+ .../sdk_fman/inc/flib/fsl_fman_tgec.h | 479 ++
+ .../FMANV3H/dpaa_integration_ext.h | 291 +
+ .../inc/integrations/FMANV3H/part_ext.h | 71 +
+ .../FMANV3H/part_integration_ext.h | 304 +
+ .../FMANV3L/dpaa_integration_ext.h | 293 +
+ .../inc/integrations/FMANV3L/part_ext.h | 59 +
+ .../FMANV3L/part_integration_ext.h | 304 +
+ .../LS1043/dpaa_integration_ext.h | 291 +
+ .../inc/integrations/LS1043/part_ext.h | 64 +
+ .../LS1043/part_integration_ext.h | 185 +
+ .../integrations/P1023/dpaa_integration_ext.h | 213 +
+ .../inc/integrations/P1023/part_ext.h | 82 +
+ .../integrations/P1023/part_integration_ext.h | 635 ++
+ .../P3040_P4080_P5020/dpaa_integration_ext.h | 276 +
+ .../integrations/P3040_P4080_P5020/part_ext.h | 83 +
+ .../P3040_P4080_P5020/part_integration_ext.h | 336 +
+ .../freescale/sdk_fman/inc/math_ext.h | 100 +
+ .../freescale/sdk_fman/inc/ncsw_ext.h | 435 +
+ .../ethernet/freescale/sdk_fman/inc/net_ext.h | 430 +
+ .../ethernet/freescale/sdk_fman/inc/std_ext.h | 48 +
+ .../freescale/sdk_fman/inc/stdarg_ext.h | 49 +
+ .../freescale/sdk_fman/inc/stdlib_ext.h | 162 +
+ .../freescale/sdk_fman/inc/string_ext.h | 56 +
+ .../freescale/sdk_fman/inc/types_ext.h | 62 +
+ .../freescale/sdk_fman/inc/xx_common.h | 56 +
+ .../ethernet/freescale/sdk_fman/inc/xx_ext.h | 791 ++
+ .../freescale/sdk_fman/ls1043_dflags.h | 56 +
+ .../freescale/sdk_fman/ncsw_config.mk | 53 +
+ .../freescale/sdk_fman/p1023_dflags.h | 65 +
+ .../sdk_fman/p3040_4080_5020_dflags.h | 62 +
+ .../ethernet/freescale/sdk_fman/src/Makefile | 11 +
+ .../sdk_fman/src/inc/system/sys_ext.h | 118 +
+ .../sdk_fman/src/inc/system/sys_io_ext.h | 46 +
+ .../freescale/sdk_fman/src/inc/types_linux.h | 208 +
+ .../sdk_fman/src/inc/wrapper/fsl_fman_test.h | 84 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_exp_sym.h | 130 +
+ .../sdk_fman/src/inc/wrapper/lnxwrp_fm_ext.h | 163 +
+ .../src/inc/wrapper/lnxwrp_fsl_fman.h | 921 ++
+ .../freescale/sdk_fman/src/inc/xx/xx.h | 50 +
+ .../freescale/sdk_fman/src/system/Makefile | 10 +
+ .../freescale/sdk_fman/src/system/sys_io.c | 171 +
+ .../freescale/sdk_fman/src/wrapper/Makefile | 19 +
+ .../sdk_fman/src/wrapper/fman_test.c | 1665 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_fm.c | 2908 +++++++
+ .../sdk_fman/src/wrapper/lnxwrp_fm.h | 294 +
+ .../sdk_fman/src/wrapper/lnxwrp_fm_port.c | 1512 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_ioctls_fm.c | 4854 +++++++++++
+ .../src/wrapper/lnxwrp_ioctls_fm_compat.c | 1297 +++
+ .../src/wrapper/lnxwrp_ioctls_fm_compat.h | 755 ++
+ .../sdk_fman/src/wrapper/lnxwrp_resources.h | 121 +
+ .../src/wrapper/lnxwrp_resources_ut.c | 191 +
+ .../src/wrapper/lnxwrp_resources_ut.h | 144 +
+ .../src/wrapper/lnxwrp_resources_ut.make | 28 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs.c | 60 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs.h | 60 +
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.c | 1855 ++++
+ .../sdk_fman/src/wrapper/lnxwrp_sysfs_fm.h | 136 +
+ .../src/wrapper/lnxwrp_sysfs_fm_port.c | 1268 +++
+ .../src/wrapper/lnxwrp_sysfs_fm_port.h | 56 +
+ .../freescale/sdk_fman/src/xx/Makefile | 18 +
+ .../sdk_fman/src/xx/module_strings.c | 46 +
+ .../freescale/sdk_fman/src/xx/xx_arm_linux.c | 905 ++
+ .../freescale/sdk_fman/src/xx/xx_linux.c | 918 ++
+ drivers/staging/fsl_qbman/Kconfig | 228 +
+ drivers/staging/fsl_qbman/Makefile | 28 +
+ drivers/staging/fsl_qbman/bman_config.c | 720 ++
+ drivers/staging/fsl_qbman/bman_debugfs.c | 119 +
+ drivers/staging/fsl_qbman/bman_driver.c | 575 ++
+ drivers/staging/fsl_qbman/bman_high.c | 1145 +++
+ drivers/staging/fsl_qbman/bman_low.h | 565 ++
+ drivers/staging/fsl_qbman/bman_private.h | 166 +
+ drivers/staging/fsl_qbman/bman_test.c | 56 +
+ drivers/staging/fsl_qbman/bman_test.h | 44 +
+ drivers/staging/fsl_qbman/bman_test_high.c | 183 +
+ drivers/staging/fsl_qbman/bman_test_thresh.c | 196 +
+ drivers/staging/fsl_qbman/dpa_alloc.c | 706 ++
+ drivers/staging/fsl_qbman/dpa_sys.h | 259 +
+ drivers/staging/fsl_qbman/dpa_sys_arm.h | 95 +
+ drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70 +
+ drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79 +
+ drivers/staging/fsl_qbman/fsl_usdpaa.c | 2007 +++++
+ drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289 +
+ drivers/staging/fsl_qbman/qbman_driver.c | 88 +
+ drivers/staging/fsl_qbman/qman_config.c | 1224 +++
+ drivers/staging/fsl_qbman/qman_debugfs.c | 1594 ++++
+ drivers/staging/fsl_qbman/qman_driver.c | 977 +++
+ drivers/staging/fsl_qbman/qman_high.c | 5652 ++++++++++++
+ drivers/staging/fsl_qbman/qman_low.h | 1442 ++++
+ drivers/staging/fsl_qbman/qman_private.h | 398 +
+ drivers/staging/fsl_qbman/qman_test.c | 57 +
+ drivers/staging/fsl_qbman/qman_test.h | 45 +
+ drivers/staging/fsl_qbman/qman_test_high.c | 216 +
+ .../staging/fsl_qbman/qman_test_hotpotato.c | 502 ++
+ drivers/staging/fsl_qbman/qman_utility.c | 129 +
+ include/linux/fsl_bman.h | 532 ++
+ include/linux/fsl_qman.h | 3900 +++++++++
+ include/linux/fsl_usdpaa.h | 372 +
+ include/uapi/linux/fmd/Kbuild | 5 +
+ include/uapi/linux/fmd/Peripherals/Kbuild | 4 +
+ .../uapi/linux/fmd/Peripherals/fm_ioctls.h | 628 ++
+ .../linux/fmd/Peripherals/fm_pcd_ioctls.h | 3084 +++++++
+ .../linux/fmd/Peripherals/fm_port_ioctls.h | 973 +++
+ .../linux/fmd/Peripherals/fm_test_ioctls.h | 208 +
+ include/uapi/linux/fmd/integrations/Kbuild | 1 +
+ .../fmd/integrations/integration_ioctls.h | 56 +
+ include/uapi/linux/fmd/ioctls.h | 96 +
+ include/uapi/linux/fmd/net_ioctls.h | 430 +
+ 257 files changed, 153455 insertions(+)
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Kconfig
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/Makefile
create mode 100644 drivers/net/ethernet/freescale/sdk_dpaa/dpaa_1588.c
+#endif /* __DPAA_ETH_BASE_H */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.c
-@@ -0,0 +1,2013 @@
+@@ -0,0 +1,2115 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ }
+}
+
++/* Wait for the DPAA Eth driver WQ TX FQs to empty */
++static void dpaa_drain_fqs(struct net_device *dev)
++{
++ const struct dpa_priv_s *priv = netdev_priv(dev);
++ struct qm_mcr_queryfq_np np;
++ struct qman_fq *fq;
++ int ret, i;
++
++ for (i = 0; i < DPAA_ETH_TX_QUEUES; i ++) {
++ fq = priv->egress_fqs[i];
++ while (true) {
++ ret = qman_query_fq_np(fq, &np);
++ if (unlikely(ret)) {
++ pr_err(KBUILD_BASENAME
++ " : %s : unable to query FQ %x: %d\n",
++ __func__, fq->fqid, ret);
++ break;
++ }
++
++ if (np.frm_cnt == 0)
++ break;
++ }
++ }
++}
++
++/* Wait for the DPAA CEETM TX CQs to empty */
++static void ceetm_drain_class(struct ceetm_class *cl)
++{
++ struct qm_mcr_ceetm_cq_query cq_query;
++ struct qm_ceetm_cq *cq;
++ unsigned int idx;
++ int ret;
++
++ if (!cl)
++ return;
++
++ switch (cl->type) {
++ case CEETM_ROOT:
++ /* The ROOT classes aren't directly linked to CEETM CQs */
++ return;
++ case CEETM_PRIO:
++ cq = (struct qm_ceetm_cq*)cl->prio.cq;
++ break;
++ case CEETM_WBFS:
++ cq = (struct qm_ceetm_cq*)cl->wbfs.cq;
++ break;
++ }
++
++ if (!cq || !cl->ch)
++ return;
++
++ /* Build the query CQID by merging the channel and the CQ IDs */
++ idx = (cq->parent->idx << 4) | cq->idx;
++
++ while (true) {
++ ret = qman_ceetm_query_cq(idx,
++ cl->ch->dcp_idx,
++ &cq_query);
++ if (unlikely(ret)) {
++ pr_err(KBUILD_BASENAME
++ " : %s : unable to query CQ %x: %d\n",
++ __func__, idx, ret);
++ break;
++ }
++
++ if (cq_query.frm_cnt == 0)
++ break;
++ }
++}
++
+/* Enqueue Rejection Notification callback */
+static void ceetm_ern(struct qman_portal *portal, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+/* Configure a prio ceetm class */
+static int ceetm_config_prio_cls(struct ceetm_class *cls,
+ struct net_device *dev,
-+ struct qm_ceetm_channel *channel,
+ unsigned int id)
+{
+ int err;
+ return err;
+
+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->prio.ccg, channel, id, cls->prio.fq,
++ err = ceetm_config_ccg(&cls->prio.ccg, cls->ch, id, cls->prio.fq,
+ dpa_priv);
+ if (err)
+ return err;
+
+ /* Claim and configure the CQ */
-+ err = qman_ceetm_cq_claim(&cls->prio.cq, channel, id, cls->prio.ccg);
++ err = qman_ceetm_cq_claim(&cls->prio.cq, cls->ch, id, cls->prio.ccg);
+ if (err)
+ return err;
+
+ if (cls->shaped) {
-+ err = qman_ceetm_channel_set_cq_cr_eligibility(channel, id, 1);
++ err = qman_ceetm_channel_set_cq_cr_eligibility(cls->ch, id, 1);
+ if (err)
+ return err;
+
-+ err = qman_ceetm_channel_set_cq_er_eligibility(channel, id, 1);
++ err = qman_ceetm_channel_set_cq_er_eligibility(cls->ch, id, 1);
+ if (err)
+ return err;
+ }
+/* Configure a wbfs ceetm class */
+static int ceetm_config_wbfs_cls(struct ceetm_class *cls,
+ struct net_device *dev,
-+ struct qm_ceetm_channel *channel,
+ unsigned int id, int type)
+{
+ int err;
+ return err;
+
+ /* Claim and configure the CCG */
-+ err = ceetm_config_ccg(&cls->wbfs.ccg, channel, id, cls->wbfs.fq,
++ err = ceetm_config_ccg(&cls->wbfs.ccg, cls->ch, id, cls->wbfs.fq,
+ dpa_priv);
+ if (err)
+ return err;
+
+ /* Claim and configure the CQ */
+ if (type == WBFS_GRP_B)
-+ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, channel, id,
++ err = qman_ceetm_cq_claim_B(&cls->wbfs.cq, cls->ch, id,
+ cls->wbfs.ccg);
+ else
-+ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, channel, id,
++ err = qman_ceetm_cq_claim_A(&cls->wbfs.cq, cls->ch, id,
+ cls->wbfs.ccg);
+ if (err)
+ return err;
+/* Destroy a ceetm class */
+static void ceetm_cls_destroy(struct Qdisc *sch, struct ceetm_class *cl)
+{
++ struct net_device *dev = qdisc_dev(sch);
++
+ if (!cl)
+ return;
+
+ cl->root.child = NULL;
+ }
+
-+ if (cl->root.ch && qman_ceetm_channel_release(cl->root.ch))
++ if (cl->ch && qman_ceetm_channel_release(cl->ch))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the channel %d\n",
-+ __func__, cl->root.ch->idx);
++ __func__, cl->ch->idx);
+
+ break;
+
+ cl->prio.child = NULL;
+ }
+
++ /* We must make sure the CQ is empty before releasing it.
++ * Pause all transmissions while we wait for it to drain.
++ */
++ netif_tx_stop_all_queues(dev);
++ ceetm_drain_class(cl);
++
+ if (cl->prio.lfq && qman_ceetm_lfq_release(cl->prio.lfq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the LFQ %d\n",
+ if (cl->prio.cstats)
+ free_percpu(cl->prio.cstats);
+
++ netif_tx_wake_all_queues(dev);
+ break;
+
+ case CEETM_WBFS:
++ /* We must make sure the CQ is empty before releasing it.
++ * Pause all transmissions while we wait for it to drain.
++ */
++ netif_tx_stop_all_queues(dev);
++ ceetm_drain_class(cl);
++
+ if (cl->wbfs.lfq && qman_ceetm_lfq_release(cl->wbfs.lfq))
+ pr_err(KBUILD_BASENAME
+ " : %s : error releasing the LFQ %d\n",
+
+ if (cl->wbfs.cstats)
+ free_percpu(cl->wbfs.cstats);
++
++ netif_tx_wake_all_queues(dev);
+ }
+
+ tcf_destroy_chain(&cl->filter_list);
+ if (!priv->root.qdiscs)
+ break;
+
-+ /* Remove the pfifo qdiscs */
++ /* Destroy the pfifo qdiscs in case they haven't been attached
++ * to the netdev queues yet.
++ */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
+ if (priv->root.qdiscs[ntx])
+ qdisc_destroy(priv->root.qdiscs[ntx]);
+ break;
+
+ case CEETM_WBFS:
++ /* Reset the WBFS groups and priorities */
++ if (priv->wbfs.ch)
++ qman_ceetm_channel_set_group(priv->wbfs.ch, 1, 0, 0);
++
+ if (priv->wbfs.parent)
+ priv->wbfs.parent->prio.child = NULL;
+ break;
+ goto err_init_root;
+ }
+
-+ /* pre-allocate underlying pfifo qdiscs */
++ /* Pre-allocate underlying pfifo qdiscs.
++ *
++ * We want to offload shaping and scheduling decisions to the hardware.
++ * The pfifo qdiscs will be attached to the netdev queues and will
++ * guide the traffic from the IP stack down to the driver with minimum
++ * interference.
++ *
++ * The CEETM qdiscs and classes will be crossed when the traffic
++ * reaches the driver.
++ */
+ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
+ sizeof(priv->root.qdiscs[0]),
+ GFP_KERNEL);
+
+ priv->shaped = parent_cl->shaped;
+ priv->prio.qcount = qopt->qcount;
++ priv->prio.ch = parent_cl->ch;
+
+ /* Create and configure qcount child classes */
+ for (i = 0; i < priv->prio.qcount; i++) {
+ child_cl->type = CEETM_PRIO;
+ child_cl->shaped = priv->shaped;
+ child_cl->prio.child = NULL;
++ child_cl->ch = priv->prio.ch;
+
+ /* All shaped CQs have CR and ER enabled by default */
+ child_cl->prio.cr = child_cl->shaped;
+ child_cl->prio.cq = NULL;
+
+ /* Configure the corresponding hardware CQ */
-+ err = ceetm_config_prio_cls(child_cl, dev,
-+ parent_cl->root.ch, i);
++ err = ceetm_config_prio_cls(child_cl, dev, i);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm prio class %X\n",
+ __func__, child_cl->common.classid);
+ struct ceetm_class *parent_cl, *child_cl, *root_cl;
+ struct Qdisc *parent_qdisc;
+ struct ceetm_qdisc *parent_priv;
-+ struct qm_ceetm_channel *channel;
+ struct net_device *dev = qdisc_dev(sch);
+
+ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+ priv->wbfs.qcount = qopt->qcount;
+ priv->wbfs.cr = qopt->cr;
+ priv->wbfs.er = qopt->er;
-+
-+ channel = root_cl->root.ch;
++ priv->wbfs.ch = parent_cl->ch;
+
+ /* Configure the hardware wbfs channel groups */
+ if (priv->wbfs.qcount == CEETM_MAX_WBFS_QCOUNT) {
+ /* Configure the group B */
+ priv->wbfs.group_type = WBFS_GRP_B;
+
-+ err = qman_ceetm_channel_get_group(channel, &small_group,
++ err = qman_ceetm_channel_get_group(priv->wbfs.ch, &small_group,
+ &prio_a, &prio_b);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
+ /* Configure the small group A */
+ priv->wbfs.group_type = WBFS_GRP_A;
+
-+ err = qman_ceetm_channel_get_group(channel, &small_group,
++ err = qman_ceetm_channel_get_group(priv->wbfs.ch, &small_group,
+ &prio_a, &prio_b);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to get group details\n",
+ prio_b = prio_b ? : prio_a;
+ }
+
-+ err = qman_ceetm_channel_set_group(channel, small_group, prio_a,
++ err = qman_ceetm_channel_set_group(priv->wbfs.ch, small_group, prio_a,
+ prio_b);
+ if (err)
+ goto err_init_wbfs;
+
+ if (priv->shaped) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
++ err = qman_ceetm_channel_set_group_cr_eligibility(priv->wbfs.ch,
+ group_b,
+ priv->wbfs.cr);
+ if (err) {
+ goto err_init_wbfs;
+ }
+
-+ err = qman_ceetm_channel_set_group_er_eligibility(channel,
++ err = qman_ceetm_channel_set_group_er_eligibility(priv->wbfs.ch,
+ group_b,
+ priv->wbfs.er);
+ if (err) {
+ child_cl->wbfs.fq = NULL;
+ child_cl->wbfs.cq = NULL;
+ child_cl->wbfs.weight = qopt->qweight[i];
++ child_cl->ch = priv->wbfs.ch;
+
+ if (priv->wbfs.group_type == WBFS_GRP_B)
+ id = WBFS_GRP_B_OFFSET + i;
+ else
+ id = WBFS_GRP_A_OFFSET + i;
+
-+ err = ceetm_config_wbfs_cls(child_cl, dev, channel, id,
++ err = ceetm_config_wbfs_cls(child_cl, dev, id,
+ priv->wbfs.group_type);
+ if (err) {
+ pr_err(KBUILD_BASENAME " : %s : failed to configure the ceetm wbfs class %X\n",
+
+ switch (priv->type) {
+ case CEETM_ROOT:
++ netif_tx_stop_all_queues(dev);
++ dpaa_drain_fqs(dev);
+ ret = ceetm_init_root(sch, priv, qopt);
++ netif_tx_wake_all_queues(dev);
+ break;
+ case CEETM_PRIO:
+ ret = ceetm_init_prio(sch, priv, qopt);
+{
+ int err;
+ bool group_b;
-+ struct qm_ceetm_channel *channel;
-+ struct ceetm_class *prio_class, *root_class;
-+ struct ceetm_qdisc *prio_qdisc;
+
+ if (qopt->qcount) {
+ pr_err("CEETM: the qcount can not be modified\n");
+ if (!priv->shaped)
+ return 0;
+
-+ prio_class = priv->wbfs.parent;
-+ prio_qdisc = qdisc_priv(prio_class->parent);
-+ root_class = prio_qdisc->prio.parent;
-+ channel = root_class->root.ch;
+ group_b = priv->wbfs.group_type == WBFS_GRP_B;
+
+ if (qopt->cr != priv->wbfs.cr) {
-+ err = qman_ceetm_channel_set_group_cr_eligibility(channel,
++ err = qman_ceetm_channel_set_group_cr_eligibility(priv->wbfs.ch,
+ group_b,
+ qopt->cr);
+ if (err)
+ }
+
+ if (qopt->er != priv->wbfs.er) {
-+ err = qman_ceetm_channel_set_group_er_eligibility(channel,
++ err = qman_ceetm_channel_set_group_er_eligibility(priv->wbfs.ch,
+ group_b,
+ qopt->er);
+ if (err)
+ return ret;
+}
+
-+/* Attach the underlying pfifo qdiscs */
++/* Graft the underlying pfifo qdiscs to the netdev queues.
++ * It's safe to remove our references at this point, since the kernel will
++ * destroy the qdiscs on its own and no cleanup from our part is required.
++ */
+static void ceetm_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ if (old_qdisc)
+ qdisc_destroy(old_qdisc);
+ }
++
++ kfree(priv->root.qdiscs);
++ priv->root.qdiscs = NULL;
+}
+
+static unsigned long ceetm_cls_get(struct Qdisc *sch, u32 classid)
+
+ if (cl->shaped && cl->root.rate != copt->rate) {
+ bps = copt->rate << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_commit_rate_bps(cl->root.ch, bps,
++ err = qman_ceetm_channel_set_commit_rate_bps(cl->ch, bps,
+ dev->mtu);
+ if (err)
+ goto change_cls_err;
+
+ if (cl->shaped && cl->root.ceil != copt->ceil) {
+ bps = copt->ceil << 3; /* Bps -> bps */
-+ err = qman_ceetm_channel_set_excess_rate_bps(cl->root.ch, bps,
++ err = qman_ceetm_channel_set_excess_rate_bps(cl->ch, bps,
+ dev->mtu);
+ if (err)
+ goto change_cls_err;
+ }
+
+ if (!cl->shaped && cl->root.tbl != copt->tbl) {
-+ err = qman_ceetm_channel_set_weight(cl->root.ch, copt->tbl);
++ err = qman_ceetm_channel_set_weight(cl->ch, copt->tbl);
+ if (err)
+ goto change_cls_err;
+ cl->root.tbl = copt->tbl;
+ goto claim_err;
+ }
+
-+ cl->root.ch = channel;
++ cl->ch = channel;
+
+ if (cl->shaped) {
+ /* Configure the channel shaper */
+module_exit(ceetm_unregister);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_ceetm.h
-@@ -0,0 +1,238 @@
+@@ -0,0 +1,240 @@
+/* Copyright 2008-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+struct prio_q {
+ __u16 qcount;
+ struct ceetm_class *parent;
++ struct qm_ceetm_channel *ch;
+};
+
+struct wbfs_q {
+ __u16 qcount;
+ int group_type;
+ struct ceetm_class *parent;
++ struct qm_ceetm_channel *ch;
+ __u16 cr;
+ __u16 er;
+};
+ bool wbfs_grp_b;
+ bool wbfs_grp_large;
+ struct Qdisc *child;
-+ struct qm_ceetm_channel *ch;
+};
+
+struct prio_c {
+ int refcnt; /* usage count of this class */
+ struct tcf_proto *filter_list; /* class attached filters */
+ struct Qdisc *parent;
++ struct qm_ceetm_channel *ch;
+ bool shaped;
+ int type; /* ROOT/PRIO/WBFS */
+ union {
+module_exit(dpa_proxy_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/dpaa_eth_sg.c
-@@ -0,0 +1,1168 @@
+@@ -0,0 +1,1193 @@
+/* Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ put_page(virt_to_head_page(new_buf));
+ goto build_skb_failed;
+ }
++
++ /* Store the skb back-pointer before the start of the buffer.
++ * Otherwise it will be overwritten by the FMan.
++ */
+ DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);
+
+ addr = dma_map_single(dev, new_buf,
+ * - buffer address aligned to cacheline bytes
+ * - offset of data from start of buffer no lower than a minimum value
+ * - offset of data from start of buffer no higher than a maximum value
++ * - the skb back-pointer is stored safely
+ */
++
++ /* guarantee both the minimum size and the minimum data offset */
+ new = min(skb_end_pointer(skb) - min_size, skb->data - min_offset);
+
+ /* left align to the nearest cacheline */
+ new = (unsigned char *)((unsigned long)new & ~(SMP_CACHE_BYTES - 1));
+
-+ if (likely(new >= skb->head &&
++ /* Make sure there is enough space to store the skb back-pointer in
++ * the headroom, right before the start of the buffer.
++ *
++ * Guarantee that both maximum size and maximum data offsets aren't
++ * crossed.
++ */
++ if (likely(new >= (skb->head + sizeof(void *)) &&
+ new >= (skb->data - DPA_MAX_FD_OFFSET) &&
+ skb_end_pointer(skb) - new <= DPA_RECYCLE_MAX_SIZE)) {
+ *new_buf_start = new;
+ dma_dir = DMA_BIDIRECTIONAL;
+ dma_map_size = dpa_bp->size;
+
++ /* Store the skb back-pointer before the start of the buffer.
++ * Otherwise it will be overwritten by the FMan.
++ */
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, -1);
+ *offset = skb_headroom(skb) - fd->offset;
+ } else
+
+ /* The buffer will be Tx-confirmed, but the TxConf cb must
+ * necessarily look at our Tx private data to retrieve the
-+ * skbuff. (In short: can't use DPA_WRITE_SKB_PTR() here.)
++ * skbuff. Store the back-pointer inside the buffer.
+ */
+ DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
+ }
+ fd->length20 = skb->len;
+ fd->offset = priv->tx_headroom;
+
-+ /* DMA map the SGT page */
++ /* DMA map the SGT page
++ *
++ * It's safe to store the skb back-pointer inside the buffer since
++ * S/G frames are non-recyclable.
++ */
+ DPA_WRITE_SKB_PTR(skb, skbh, sgt_buf, 0);
+ addr = dma_map_single(dpa_bp->dev, sgt_buf,
+ priv->tx_headroom + sgt_size,
+ percpu_stats->tx_errors++;
+ return NETDEV_TX_OK;
+ }
++
++ /* propagate the skb ownership information */
++ if (skb->sk)
++ skb_set_owner_w(skb_new, skb->sk);
++
+ dev_kfree_skb(skb);
+ skb = skb_new;
+ }
+module_exit(dpa_ptp_unload);
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac-api.c
-@@ -0,0 +1,907 @@
+@@ -0,0 +1,931 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ struct mac_device *mac_dev)
+{
+ struct phy_device *phy_dev;
++ void (*adjust_link_handler)(struct net_device *);
+
+ if ((macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) ||
-+ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500) ||
-+ of_phy_is_fixed_link(mac_dev->phy_node)) {
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link_void, 0,
-+ mac_dev->phy_if);
++ (macdev2enetinterface(mac_dev) == e_ENET_MODE_SGMII_2500)) {
++ /* Pass a void link state handler to the PHY state machine
++ * for XGMII (10G) and SGMII 2.5G, as the hardware does not
++ * permit dynamic link speed adjustments. */
++ adjust_link_handler = adjust_link_void;
++ } else if (macdev2enetinterface(mac_dev) & e_ENET_IF_RGMII) {
++ /* Regular RGMII ports connected to a PHY, as well as
++ * ports that are marked as "fixed-link" in the DTS,
++ * will have the adjust_link callback. This calls
++ * fman_memac_adjust_link in order to configure the
++ * IF_MODE register, which is needed in both cases.
++ */
++ adjust_link_handler = adjust_link;
++ } else if (of_phy_is_fixed_link(mac_dev->phy_node)) {
++ /* Pass a void link state handler for fixed-link
++ * interfaces that are not RGMII. Only RGMII has been
++ * tested and confirmed to work with fixed-link. Other
++ * MII interfaces may need further work.
++ * TODO: Change this as needed.
++ */
++ adjust_link_handler = adjust_link_void;
+ } else {
-+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
-+ &adjust_link, 0, mac_dev->phy_if);
++ /* MII, RMII, SMII, GMII, SGMII, BASEX ports,
++ * that are NOT fixed-link.
++ * TODO: May not be needed for interfaces that
++ * pass through the SerDes block (*SGMII, XFI).
++ */
++ adjust_link_handler = adjust_link;
+ }
++ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
++ adjust_link_handler, 0,
++ mac_dev->phy_if);
+
+ if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) {
+ netdev_err(net_dev, "Could not connect to PHY %s\n",
+
+static const struct of_device_id mac_match[] = {
+ [DTSEC] = {
-+ .compatible = "fsl,fman-1g-mac"
++ .compatible = "fsl,fman-dtsec"
+ },
+ [XGMAC] = {
-+ .compatible = "fsl,fman-10g-mac"
++ .compatible = "fsl,fman-xgec"
+ },
+ [MEMAC] = {
+ .compatible = "fsl,fman-memac"
+
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/dtsec.c
-@@ -0,0 +1,1465 @@
+@@ -0,0 +1,1504 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor Inc.
+ *
+static t_Error GracefulStop(t_Dtsec *p_Dtsec, e_CommMode mode)
+{
+ struct dtsec_regs *p_MemMap;
++ int pollTimeout = 0;
+
+ ASSERT_COND(p_Dtsec);
+
+ }
+
+ if (mode & e_COMM_MODE_TX)
-+#if defined(FM_GTS_ERRATA_DTSEC_A004) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012)
-+ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
-+ DBG(INFO, ("GTS not supported due to DTSEC_A004 errata."));
-+#else /* not defined(FM_GTS_ERRATA_DTSEC_A004) ||... */
-+#ifdef FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014
-+ DBG(INFO, ("GTS not supported due to DTSEC_A0014 errata."));
-+#else /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 */
++ {
++#if defined(FM_GTS_ERRATA_DTSEC_A004)
++ if (p_Dtsec->fmMacControllerDriver.fmRevInfo.majorRev == 2)
++ DBG(INFO, ("GTS not supported due to DTSEC_A004 errata."));
++#else /* not defined(FM_GTS_ERRATA_DTSEC_A004) */
++
+ fman_dtsec_stop_tx(p_MemMap);
-+#endif /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 */
-+#endif /* defined(FM_GTS_ERRATA_DTSEC_A004) ||... */
++
++#if defined(FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012)
++ XX_UDelay(10);
++#endif /* FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014 || FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012 */
++#endif /* defined(FM_GTS_ERRATA_DTSEC_A004) */
++ }
++
++ /* Poll GRSC/GTSC bits in IEVENT register until both are set */
++#if defined(FM_GRS_ERRATA_DTSEC_A002) || defined(FM_GTS_ERRATA_DTSEC_A004) || defined(FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012) || defined(FM_GTS_UNDERRUN_ERRATA_DTSEC_A0014) || defined(FM_GTS_AFTER_DROPPED_FRAME_ERRATA_DTSEC_A004839)
++ XX_UDelay(10);
++#else
++ while (fman_dtsec_get_event(p_MemMap, DTSEC_IMASK_GRSCEN | DTSEC_IMASK_GTSCEN) != (DTSEC_IMASK_GRSCEN | DTSEC_IMASK_GTSCEN))
++ {
++ if (pollTimeout == 100)
++ break;
++ XX_UDelay(1);
++ pollTimeout++;
++ }
++#endif
+
+ return E_OK;
+}
+ " value should be greater than 320."));
+#endif /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 */
+
++ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ fman_dtsec_set_tx_pause_frames(p_Dtsec->p_MemMap, pauseTime);
++
++ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ return E_OK;
+}
+
+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
+
++ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ fman_dtsec_handle_rx_pause(p_Dtsec->p_MemMap, accept_pause);
+
++ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ return E_OK;
+}
+
+ /* Initialize MAC Station Address registers (1 & 2) */
+ /* Station address have to be swapped (big endian to little endian */
+ p_Dtsec->addr = ENET_ADDR_TO_UINT64(*p_EnetAddr);
++
++ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ fman_dtsec_set_mac_address(p_Dtsec->p_MemMap, (uint8_t *)(*p_EnetAddr));
+
++ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ return E_OK;
+}
+
+ SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE);
+ SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE);
+
++ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ fman_dtsec_set_wol(p_Dtsec->p_MemMap, en);
+
++ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ return E_OK;
+}
+
+ enet_speed = (enum enet_speed) ENET_SPEED_FROM_MODE(p_Dtsec->enetMode);
+ p_Dtsec->halfDuplex = !fullDuplex;
+
++ GracefulStop(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ err = fman_dtsec_adjust_link(p_Dtsec->p_MemMap, enet_interface, enet_speed, fullDuplex);
+
+ if (err == -EINVAL)
+ RETURN_ERROR(MAJOR, E_CONFLICT, ("Ethernet interface does not support Half Duplex mode"));
+
++ GracefulRestart(p_Dtsec, e_COMM_MODE_RX_AND_TX);
++
+ return (t_Error)err;
+}
+
+#endif /* __LNXWRP_FM_H__ */
--- /dev/null
+++ b/drivers/net/ethernet/freescale/sdk_fman/src/wrapper/lnxwrp_fm_port.c
-@@ -0,0 +1,1480 @@
+@@ -0,0 +1,1512 @@
+/*
+ * Copyright 2008-2012 Freescale Semiconductor Inc.
+ *
+ tmp_prop = be32_to_cpu(*uint32_prop);
+ if (WARN_ON(lenp != sizeof(uint32_t)))
+ return NULL;
-+ if (of_device_is_compatible(port_node, "fsl,fman-port-oh")) {
++ if (of_device_is_compatible(port_node, "fsl,fman-port-oh") ||
++ of_device_is_compatible(port_node, "fsl,fman-v2-port-oh") ||
++ of_device_is_compatible(port_node, "fsl,fman-v3-port-oh")) {
++#ifndef CONFIG_FMAN_ARM
++#ifdef CONFIG_FMAN_P3040_P4080_P5020
++ /* On PPC FMan v2, OH ports start from cell-index 0x1 */
++ tmp_prop -= 0x1;
++#else
++ /* On PPC FMan v3 (Low and High), OH ports start from
++ * cell-index 0x2
++ */
++ tmp_prop -= 0x2;
++#endif // CONFIG_FMAN_P3040_P4080_P5020
++#endif // CONFIG_FMAN_ARM
++
+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_OH_PORTS)) {
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
+ ("of_get_property(%s, cell-index) failed",
+ settings.param.specificParams.nonRxParams.qmChannel =
+ p_LnxWrpFmPortDev->txCh;
+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-tx")) {
++#ifndef CONFIG_FMAN_ARM
++ /* On T102x, the 10G TX port IDs start from 0x28 */
++ if (IS_T1023_T1024)
++ tmp_prop -= 0x28;
++ else
++#endif
+ tmp_prop -= 0x30;
++
+ if (unlikely(tmp_prop>= FM_MAX_NUM_OF_10G_TX_PORTS)) {
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
+ ("of_get_property(%s, cell-index) failed",
+ FM_MAX_NUM_OF_1G_TX_PORTS];
+#ifndef CONFIG_FMAN_ARM
+ if (IS_T1023_T1024)
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[*uint32_prop];
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[tmp_prop];
+#endif
+
+ p_LnxWrpFmPortDev->id = tmp_prop;
+ if (p_LnxWrpFmDev->pcdActive)
+ p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd;
+ } else if (of_device_is_compatible(port_node, "fsl,fman-port-10g-rx")) {
++#ifndef CONFIG_FMAN_ARM
++ /* On T102x, the 10G RX port IDs start from 0x08 */
++ if (IS_T1023_T1024)
++ tmp_prop -= 0x8;
++ else
++#endif
+ tmp_prop -= 0x10;
++
+ if (unlikely(tmp_prop >= FM_MAX_NUM_OF_10G_RX_PORTS)) {
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
+ ("of_get_property(%s, cell-index) failed",
+
+#ifndef CONFIG_FMAN_ARM
+ if (IS_T1023_T1024)
-+ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[*uint32_prop];
++ p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[tmp_prop];
+#endif
+
+ p_LnxWrpFmPortDev->id = tmp_prop;
+ uint32_prop = (uint32_t *)of_get_property(port_node, "ar-tables-sizes",
+ &lenp);
+ if (uint32_prop) {
-+
++
+ if (WARN_ON(lenp != sizeof(uint32_t)*8))
+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
+ if (WARN_ON(p_LnxWrpFmPortDev->settings.param.portType !=
+ if (uint32_prop) {
+ if (WARN_ON(lenp != sizeof(uint32_t)*3))
+ RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
-+
++
+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_ip_prot_filtering =
+ (uint16_t)be32_to_cpu(uint32_prop[0]);
+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_tcp_port_filtering =
+ p_LnxWrpFmPortDev->dsar_table_sizes.max_num_of_udp_port_filtering =
+ (uint16_t)be32_to_cpu(uint32_prop[2]);
+ }
-+
++
+ if ((err = FM_PORT_ConfigDsarSupport(p_LnxWrpFmPortDev->h_Dev,
+ (t_FmPortDsarTablesSizes*)&p_LnxWrpFmPortDev->dsar_table_sizes)) != E_OK)
+ RETURN_ERROR(MINOR, err, NO_MSG);
+ {
+ .compatible = "fsl,fman-port-oh"},
+ {
++ .compatible = "fsl,fman-v2-port-oh"},
++ {
++ .compatible = "fsl,fman-v3-port-oh"},
++ {
+ .compatible = "fsl,fman-port-1g-rx"},
+ {
+ .compatible = "fsl,fman-port-10g-rx"},
+#endif
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_high.c
-@@ -0,0 +1,5669 @@
+@@ -0,0 +1,5652 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ struct completion completion;
+};
+
-+static int qman_delete_cgr_thread(void *p)
++static void qman_delete_cgr_smp_call(void *p)
+{
-+ struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
-+ int res;
-+
-+ res = qman_delete_cgr((struct qman_cgr *)cgr_comp->cgr);
-+ complete(&cgr_comp->completion);
-+
-+ return res;
++ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
-+ struct task_struct *thread;
-+ struct cgr_comp cgr_comp;
-+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
-+ init_completion(&cgr_comp.completion);
-+ cgr_comp.cgr = cgr;
-+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
-+ "cgr_del");
-+
-+ if (likely(!IS_ERR(thread))) {
-+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
-+ wake_up_process(thread);
-+ wait_for_completion(&cgr_comp.completion);
-+ preempt_enable();
-+ return;
-+ }
++ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
++ qman_delete_cgr_smp_call, cgr, true);
++ preempt_enable();
++ return;
+ }
+ qman_delete_cgr(cgr);
+ preempt_enable();
+#endif /* FSL_BMAN_H */
--- /dev/null
+++ b/include/linux/fsl_qman.h
-@@ -0,0 +1,3888 @@
+@@ -0,0 +1,3900 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query);
+
+/**
++ * qman_ceetm_query_cq - Queries a CEETM CQ
++ * @cqid: the channel ID (first byte) followed by the CQ idx
++ * @dcpid: CEETM portal ID
++ * @cq_query: storage for the queried CQ fields
++ *
++ * Returns zero for success or -EIO if the query command returns an error.
++ *
++ */
++int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
++ struct qm_mcr_ceetm_cq_query *cq_query);
++
++/**
+ * qman_ceetm_query_write_statistics - Query (and optionally write) statistics
+ * @cid: Target ID (CQID or CCGRID)
+ * @dcp_idx: CEETM portal ID
-From 5fcb42fbd224e1103bacbae4785745842cfd6304 Mon Sep 17 00:00:00 2001
+From b2ee6e29bad31facbbf5ac1ce98235ac163d9fa9 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:00:43 +0800
-Subject: [PATCH 08/30] pci: support layerscape
+Date: Thu, 5 Jul 2018 16:26:47 +0800
+Subject: [PATCH 08/32] pci: support layerscape
This is an integrated patch for layerscape pcie support.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/irqchip/irq-ls-scfg-msi.c | 257 +++++++--
+ drivers/irqchip/irq-ls-scfg-msi.c | 257 ++++++-
drivers/pci/host/Makefile | 2 +-
- drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++++++++++
- drivers/pci/host/pci-layerscape-ep.c | 309 +++++++++++
- drivers/pci/host/pci-layerscape-ep.h | 115 ++++
+ drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++
+ drivers/pci/host/pci-layerscape-ep.c | 309 ++++++++
+ drivers/pci/host/pci-layerscape-ep.h | 115 +++
drivers/pci/host/pci-layerscape.c | 48 +-
drivers/pci/host/pcie-designware.c | 6 +
drivers/pci/host/pcie-designware.h | 1 +
drivers/pci/pci.c | 2 +-
- drivers/pci/pcie/portdrv_core.c | 181 +++----
- drivers/pci/quirks.c | 8 +
+ drivers/pci/pcie/portdrv_core.c | 181 ++---
+ drivers/pci/quirks.c | 15 +
include/linux/pci.h | 1 +
- 12 files changed, 1539 insertions(+), 149 deletions(-)
+ 12 files changed, 1546 insertions(+), 149 deletions(-)
create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c
create mode 100644 drivers/pci/host/pci-layerscape-ep.c
create mode 100644 drivers/pci/host/pci-layerscape-ep.h
}
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
-@@ -4679,3 +4679,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
+@@ -3329,6 +3329,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_A
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+
++/*
++ * NXP (Freescale Vendor ID) LS1088 chips do not behave correctly after
++ * bus reset. The link state of the device does not come up, so the config
++ * space is never accessible again.
++ */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x80c0, quirk_no_bus_reset);
++
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+ /*
+@@ -4679,3 +4686,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
-From 667f0792b6f6d000c10f21c29c397c84cbe77f4a Mon Sep 17 00:00:00 2001
+From ab7b47676f9334bb55f80e0ac096c7aa289810e2 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:11:45 +0800
-Subject: [PATCH 10/30] fsl-mc: layerscape support
+Date: Thu, 5 Jul 2018 16:44:34 +0800
+Subject: [PATCH 10/32] fsl-mc: layerscape support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/staging/fsl-mc/bus/Kconfig | 41 +-
- drivers/staging/fsl-mc/bus/Makefile | 10 +-
- drivers/staging/fsl-mc/bus/dpbp-cmd.h | 80 ++
- drivers/staging/fsl-mc/bus/dpbp.c | 450 +--------
- drivers/staging/fsl-mc/bus/dpcon-cmd.h | 85 ++
- drivers/staging/fsl-mc/bus/dpcon.c | 317 ++++++
- drivers/staging/fsl-mc/bus/dpio/Makefile | 11 +
- .../{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} | 73 +-
- drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 296 ++++++
- drivers/staging/fsl-mc/bus/dpio/dpio-service.c | 693 +++++++++++++
- drivers/staging/fsl-mc/bus/dpio/dpio.c | 224 +++++
- drivers/staging/fsl-mc/bus/dpio/dpio.h | 109 ++
- drivers/staging/fsl-mc/bus/dpio/qbman-portal.c | 1049 ++++++++++++++++++++
- drivers/staging/fsl-mc/bus/dpio/qbman-portal.h | 662 ++++++++++++
- drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 853 ++++++++++++++++
- drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 +++
- drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 171 ++++
- drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 112 +--
- drivers/staging/fsl-mc/bus/dpmcp.c | 374 +------
- drivers/staging/fsl-mc/bus/dpmcp.h | 127 +--
- drivers/staging/fsl-mc/bus/dpmng-cmd.h | 14 +-
- drivers/staging/fsl-mc/bus/dpmng.c | 37 +-
- drivers/staging/fsl-mc/bus/dprc-cmd.h | 82 +-
- drivers/staging/fsl-mc/bus/dprc-driver.c | 38 +-
- drivers/staging/fsl-mc/bus/dprc.c | 629 +-----------
- drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 78 +-
- drivers/staging/fsl-mc/bus/fsl-mc-bus.c | 318 +++---
- drivers/staging/fsl-mc/bus/fsl-mc-iommu.c | 104 ++
- drivers/staging/fsl-mc/bus/fsl-mc-msi.c | 2 +-
- drivers/staging/fsl-mc/bus/fsl-mc-private.h | 6 +-
- .../staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | 10 +-
- drivers/staging/fsl-mc/bus/mc-io.c | 4 +-
- drivers/staging/fsl-mc/bus/mc-ioctl.h | 22 +
- drivers/staging/fsl-mc/bus/mc-restool.c | 405 ++++++++
- drivers/staging/fsl-mc/bus/mc-sys.c | 14 +-
- drivers/staging/fsl-mc/include/dpaa2-fd.h | 706 +++++++++++++
- drivers/staging/fsl-mc/include/dpaa2-global.h | 202 ++++
- drivers/staging/fsl-mc/include/dpaa2-io.h | 190 ++++
- drivers/staging/fsl-mc/include/dpbp-cmd.h | 185 ----
- drivers/staging/fsl-mc/include/dpbp.h | 158 +--
- drivers/staging/fsl-mc/include/dpcon.h | 115 +++
- drivers/staging/fsl-mc/include/dpmng.h | 16 +-
- drivers/staging/fsl-mc/include/dpopr.h | 110 ++
- drivers/staging/fsl-mc/include/dprc.h | 470 +++------
- drivers/staging/fsl-mc/include/mc-bus.h | 7 +-
- drivers/staging/fsl-mc/include/mc-cmd.h | 44 +-
- drivers/staging/fsl-mc/include/mc-sys.h | 3 +-
- drivers/staging/fsl-mc/include/mc.h | 17 +-
- 48 files changed, 7247 insertions(+), 2612 deletions(-)
- create mode 100644 drivers/staging/fsl-mc/bus/dpbp-cmd.h
- create mode 100644 drivers/staging/fsl-mc/bus/dpcon-cmd.h
- create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c
+ Documentation/ABI/stable/sysfs-bus-fsl-mc | 13 +
+ Documentation/ioctl/ioctl-number.txt | 1 +
+ Documentation/networking/dpaa2/index.rst | 8 +
+ Documentation/networking/dpaa2/overview.rst | 408 +++++
+ MAINTAINERS | 11 +-
+ drivers/bus/Kconfig | 3 +
+ drivers/bus/Makefile | 4 +
+ drivers/bus/fsl-mc/Kconfig | 23 +
+ drivers/bus/fsl-mc/Makefile | 22 +
+ drivers/bus/fsl-mc/dpbp.c | 186 +++
+ drivers/bus/fsl-mc/dpcon.c | 222 +++
+ drivers/bus/fsl-mc/dpmcp.c | 99 ++
+ .../fsl-mc/bus => bus/fsl-mc}/dprc-driver.c | 180 ++-
+ drivers/bus/fsl-mc/dprc.c | 575 +++++++
+ .../bus => bus/fsl-mc}/fsl-mc-allocator.c | 195 ++-
+ .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c | 523 +++++--
+ drivers/bus/fsl-mc/fsl-mc-iommu.c | 78 +
+ .../fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c | 34 +-
+ drivers/bus/fsl-mc/fsl-mc-private.h | 223 +++
+ drivers/bus/fsl-mc/fsl-mc-restool.c | 219 +++
+ .../fsl-mc/bus => bus/fsl-mc}/mc-io.c | 80 +-
+ .../fsl-mc/bus => bus/fsl-mc}/mc-sys.c | 105 +-
+ drivers/irqchip/Kconfig | 6 +
+ drivers/irqchip/Makefile | 1 +
+ .../irq-gic-v3-its-fsl-mc-msi.c | 52 +-
+ drivers/staging/fsl-mc/Kconfig | 1 +
+ drivers/staging/fsl-mc/Makefile | 1 +
+ drivers/staging/fsl-mc/TODO | 18 -
+ drivers/staging/fsl-mc/bus/Kconfig | 37 +-
+ drivers/staging/fsl-mc/bus/Makefile | 17 +-
+ drivers/staging/fsl-mc/bus/dpbp.c | 691 --------
+ drivers/staging/fsl-mc/bus/dpio/Makefile | 8 +
+ drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h | 50 +
+ drivers/staging/fsl-mc/bus/dpio/dpio-driver.c | 278 ++++
+ .../staging/fsl-mc/bus/dpio/dpio-service.c | 780 +++++++++
+ drivers/staging/fsl-mc/bus/dpio/dpio.c | 221 +++
+ drivers/staging/fsl-mc/bus/dpio/dpio.h | 87 ++
+ .../staging/fsl-mc/bus/dpio/qbman-portal.c | 1164 ++++++++++++++
+ .../staging/fsl-mc/bus/dpio/qbman-portal.h | 505 ++++++
+ drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 140 --
+ drivers/staging/fsl-mc/bus/dpmcp.c | 504 ------
+ drivers/staging/fsl-mc/bus/dpmcp.h | 159 --
+ drivers/staging/fsl-mc/bus/dpmng-cmd.h | 58 -
+ drivers/staging/fsl-mc/bus/dpmng.c | 107 --
+ drivers/staging/fsl-mc/bus/dprc-cmd.h | 465 ------
+ drivers/staging/fsl-mc/bus/dprc.c | 1388 -----------------
+ drivers/staging/fsl-mc/bus/fsl-mc-private.h | 52 -
+ drivers/staging/fsl-mc/include/dpaa2-fd.h | 681 ++++++++
+ drivers/staging/fsl-mc/include/dpaa2-global.h | 177 +++
+ drivers/staging/fsl-mc/include/dpaa2-io.h | 178 +++
+ drivers/staging/fsl-mc/include/dpbp-cmd.h | 185 ---
+ drivers/staging/fsl-mc/include/dpbp.h | 220 ---
+ drivers/staging/fsl-mc/include/dpcon-cmd.h | 62 -
+ drivers/staging/fsl-mc/include/dpmng.h | 69 -
+ drivers/staging/fsl-mc/include/dpopr.h | 112 ++
+ drivers/staging/fsl-mc/include/dprc.h | 544 -------
+ drivers/staging/fsl-mc/include/mc-bus.h | 111 --
+ drivers/staging/fsl-mc/include/mc-cmd.h | 108 --
+ drivers/staging/fsl-mc/include/mc-sys.h | 98 --
+ drivers/staging/fsl-mc/include/mc.h | 201 ---
+ include/linux/fsl/mc.h | 1025 ++++++++++++
+ include/uapi/linux/fsl_mc.h | 31 +
+ 62 files changed, 8068 insertions(+), 5736 deletions(-)
+ create mode 100644 Documentation/ABI/stable/sysfs-bus-fsl-mc
+ create mode 100644 Documentation/networking/dpaa2/index.rst
+ create mode 100644 Documentation/networking/dpaa2/overview.rst
+ create mode 100644 drivers/bus/fsl-mc/Kconfig
+ create mode 100644 drivers/bus/fsl-mc/Makefile
+ create mode 100644 drivers/bus/fsl-mc/dpbp.c
+ create mode 100644 drivers/bus/fsl-mc/dpcon.c
+ create mode 100644 drivers/bus/fsl-mc/dpmcp.c
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/dprc-driver.c (84%)
+ create mode 100644 drivers/bus/fsl-mc/dprc.c
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-allocator.c (71%)
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-bus.c (64%)
+ create mode 100644 drivers/bus/fsl-mc/fsl-mc-iommu.c
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/fsl-mc-msi.c (89%)
+ create mode 100644 drivers/bus/fsl-mc/fsl-mc-private.h
+ create mode 100644 drivers/bus/fsl-mc/fsl-mc-restool.c
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-io.c (68%)
+ rename drivers/{staging/fsl-mc/bus => bus/fsl-mc}/mc-sys.c (66%)
+ rename drivers/{staging/fsl-mc/bus => irqchip}/irq-gic-v3-its-fsl-mc-msi.c (60%)
+ delete mode 100644 drivers/staging/fsl-mc/TODO
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpbp.c
create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile
- rename drivers/staging/fsl-mc/{include/dpcon-cmd.h => bus/dpio/dpio-cmd.h} (64%)
+ create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-service.c
create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c
create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.h
create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
- create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
- create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
- create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h
- create mode 100644 drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
- create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h
- create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h
+ delete mode 100644 drivers/staging/fsl-mc/bus/dpmng.c
+ delete mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h
+ delete mode 100644 drivers/staging/fsl-mc/bus/dprc.c
+ delete mode 100644 drivers/staging/fsl-mc/bus/fsl-mc-private.h
create mode 100644 drivers/staging/fsl-mc/include/dpaa2-fd.h
create mode 100644 drivers/staging/fsl-mc/include/dpaa2-global.h
create mode 100644 drivers/staging/fsl-mc/include/dpaa2-io.h
delete mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h
- create mode 100644 drivers/staging/fsl-mc/include/dpcon.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dpbp.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dpmng.h
create mode 100644 drivers/staging/fsl-mc/include/dpopr.h
+ delete mode 100644 drivers/staging/fsl-mc/include/dprc.h
+ delete mode 100644 drivers/staging/fsl-mc/include/mc-bus.h
+ delete mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h
+ delete mode 100644 drivers/staging/fsl-mc/include/mc-sys.h
+ delete mode 100644 drivers/staging/fsl-mc/include/mc.h
+ create mode 100644 include/linux/fsl/mc.h
+ create mode 100644 include/uapi/linux/fsl_mc.h
---- a/drivers/staging/fsl-mc/bus/Kconfig
-+++ b/drivers/staging/fsl-mc/bus/Kconfig
-@@ -1,25 +1,40 @@
- #
--# Freescale Management Complex (MC) bus drivers
+--- /dev/null
++++ b/Documentation/ABI/stable/sysfs-bus-fsl-mc
+@@ -0,0 +1,13 @@
++What: /sys/bus/fsl-mc/devices/dprc.*/rescan
++Date: March. 2018
++KernelVersion: 4.16
++Contact: Ioana Ciornei <ioana.ciornei@nxp.com>
++Description: Root dprc rescan attribute
++Users: Userspace drivers and management tools
++
++What: /sys/bus/fsl-mc/rescan
++Date: March. 2018
++KernelVersion: 4.16
++Contact: Ioana Ciornei <ioana.ciornei@nxp.com>
++Description: Bus rescan attribute
++Users: Userspace drivers and management tools
+--- a/Documentation/ioctl/ioctl-number.txt
++++ b/Documentation/ioctl/ioctl-number.txt
+@@ -170,6 +170,7 @@ Code Seq#(hex) Include File Comments
+ 'R' 00-1F linux/random.h conflict!
+ 'R' 01 linux/rfkill.h conflict!
+ 'R' C0-DF net/bluetooth/rfcomm.h
++'R' E0 uapi/linux/fsl_mc.h
+ 'S' all linux/cdrom.h conflict!
+ 'S' 80-81 scsi/scsi_ioctl.h conflict!
+ 'S' 82-FF scsi/scsi.h conflict!
+--- /dev/null
++++ b/Documentation/networking/dpaa2/index.rst
+@@ -0,0 +1,8 @@
++===================
++DPAA2 Documentation
++===================
++
++.. toctree::
++ :maxdepth: 1
++
++ overview
+--- /dev/null
++++ b/Documentation/networking/dpaa2/overview.rst
+@@ -0,0 +1,408 @@
++.. include:: <isonum.txt>
++
++DPAA2 (Data Path Acceleration Architecture Gen2) Overview
++=========================================================
++
++:Copyright: |copy| 2015 Freescale Semiconductor Inc.
++:Copyright: |copy| 2018 NXP
++
++This document provides an overview of the Freescale DPAA2 architecture
++and how it is integrated into the Linux kernel.
++
++Introduction
++============
++
++DPAA2 is a hardware architecture designed for high-speed network
++packet processing. DPAA2 consists of sophisticated mechanisms for
++processing Ethernet packets, queue management, buffer management,
++autonomous L2 switching, virtual Ethernet bridging, and accelerator
++(e.g. crypto) sharing.
++
++A DPAA2 hardware component called the Management Complex (or MC) manages the
++DPAA2 hardware resources. The MC provides an object-based abstraction for
++software drivers to use the DPAA2 hardware.
++The MC uses DPAA2 hardware resources such as queues, buffer pools, and
++network ports to create functional objects/devices such as network
++interfaces, an L2 switch, or accelerator instances.
++The MC provides memory-mapped I/O command interfaces (MC portals)
++which DPAA2 software drivers use to operate on DPAA2 objects.
++
++The diagram below shows an overview of the DPAA2 resource management
++architecture::
++
++ +--------------------------------------+
++ | OS |
++ | DPAA2 drivers |
++ | | |
++ +-----------------------------|--------+
++ |
++ | (create,discover,connect
++ | config,use,destroy)
++ |
++ DPAA2 |
++ +------------------------| mc portal |-+
++ | | |
++ | +- - - - - - - - - - - - -V- - -+ |
++ | | | |
++ | | Management Complex (MC) | |
++ | | | |
++ | +- - - - - - - - - - - - - - - -+ |
++ | |
++ | Hardware Hardware |
++ | Resources Objects |
++ | --------- ------- |
++ | -queues -DPRC |
++ | -buffer pools -DPMCP |
++ | -Eth MACs/ports -DPIO |
++ | -network interface -DPNI |
++ | profiles -DPMAC |
++ | -queue portals -DPBP |
++ | -MC portals ... |
++ | ... |
++ | |
++ +--------------------------------------+
++
++
++The MC mediates operations such as create, discover,
++connect, configuration, and destroy. Fast-path operations
++on data, such as packet transmit/receive, are not mediated by
++the MC and are done directly using memory mapped regions in
++DPIO objects.
++
++Overview of DPAA2 Objects
++=========================
++
++This section provides a brief overview of some key DPAA2 objects.
++A simple scenario is described illustrating the objects involved
++in creating a network interface.
++
++DPRC (Datapath Resource Container)
++----------------------------------
++
++A DPRC is a container object that holds all the other
++types of DPAA2 objects. In the example diagram below there
++are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
++in the container.
++
++::
++
++ +---------------------------------------------------------+
++ | DPRC |
++ | |
++ | +-------+ +-------+ +-------+ +-------+ +-------+ |
++ | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
++ | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
++ | | DPMCP | | DPIO | |
++ | +-------+ +-------+ |
++ | | DPMCP | |
++ | +-------+ |
++ | |
++ +---------------------------------------------------------+
++
++From the point of view of an OS, a DPRC behaves similar to a plug and
++play bus, like PCI. DPRC commands can be used to enumerate the contents
++of the DPRC, discover the hardware objects present (including mappable
++regions and interrupts).
++
++::
++
++ DPRC.1 (bus)
++ |
++ +--+--------+-------+-------+-------+
++ | | | | |
++ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
++ DPMCP.2 DPIO.2
++ DPMCP.3
++
++Hardware objects can be created and destroyed dynamically, providing
++the ability to hot plug/unplug objects in and out of the DPRC.
++
++A DPRC has a mappable MMIO region (an MC portal) that can be used
++to send MC commands. It has an interrupt for status events (like
++hotplug).
++All objects in a container share the same hardware "isolation context".
++This means that with respect to an IOMMU the isolation granularity
++is at the DPRC (container) level, not at the individual object
++level.
++
++DPRCs can be defined statically and populated with objects
++via a config file passed to the MC when firmware starts it.
++There is also a Linux user space tool called "restool" that can be
++used to create/destroy containers and objects dynamically. The latest
++version of restool can be found at:
++ https://github.com/qoriq-open-source/restool
++
++DPAA2 Objects for an Ethernet Network Interface
++-----------------------------------------------
++
++A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
++queuing mechanisms, configuration mechanisms, buffer management,
++physical ports, and interrupts. DPAA2 uses a more granular approach
++utilizing multiple hardware objects. Each object provides specialized
++functions. Groups of these objects are used by software to provide
++Ethernet network interface functionality. This approach provides
++efficient use of finite hardware resources, flexibility, and
++performance advantages.
++
++The diagram below shows the objects needed for a simple
++network interface configuration on a system with 2 CPUs.
++
++::
++
++ +---+---+ +---+---+
++ CPU0 CPU1
++ +---+---+ +---+---+
++ | |
++ +---+---+ +---+---+
++ DPIO DPIO
++ +---+---+ +---+---+
++ \ /
++ \ /
++ \ /
++ +---+---+
++ DPNI --- DPBP,DPMCP
++ +---+---+
++ |
++ |
++ +---+---+
++ DPMAC
++ +---+---+
++ |
++ port/PHY
++
++Below the objects are described. For each object a brief description
++is provided along with a summary of the kinds of operations the object
++supports and a summary of key resources of the object (MMIO regions
++and IRQs).
++
++DPMAC (Datapath Ethernet MAC)
++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++Represents an Ethernet MAC, a hardware device that connects to an Ethernet
++PHY and allows physical transmission and reception of Ethernet frames.
++
++- MMIO regions: none
++- IRQs: DPNI link change
++- commands: set link up/down, link config, get stats,
++ IRQ config, enable, reset
++
++DPNI (Datapath Network Interface)
++Contains TX/RX queues, network interface configuration, and RX buffer pool
++configuration mechanisms. The TX/RX queues are in memory and are identified
++by queue number.
++
++- MMIO regions: none
++- IRQs: link state
++- commands: port config, offload config, queue config,
++ parse/classify config, IRQ config, enable, reset
++
++DPIO (Datapath I/O)
++~~~~~~~~~~~~~~~~~~~
++Provides interfaces to enqueue and dequeue
++packets and do hardware buffer pool management operations. The DPAA2
++architecture separates the mechanism to access queues (the DPIO object)
++from the queues themselves. The DPIO provides an MMIO interface to
++enqueue/dequeue packets. To enqueue something a descriptor is written
++to the DPIO MMIO region, which includes the target queue number.
++There will typically be one DPIO assigned to each CPU. This allows all
++CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
++expected to be shared by different DPAA2 drivers.
++
++- MMIO regions: queue operations, buffer management
++- IRQs: data availability, congestion notification, buffer
++ pool depletion
++- commands: IRQ config, enable, reset
++
++DPBP (Datapath Buffer Pool)
++~~~~~~~~~~~~~~~~~~~~~~~~~~~
++Represents a hardware buffer pool.
++
++- MMIO regions: none
++- IRQs: none
++- commands: enable, reset
++
++DPMCP (Datapath MC Portal)
++~~~~~~~~~~~~~~~~~~~~~~~~~~
++Provides an MC command portal.
++Used by drivers to send commands to the MC to manage
++objects.
++
++- MMIO regions: MC command portal
++- IRQs: command completion
++- commands: IRQ config, enable, reset
++
++Object Connections
++==================
++Some objects have explicit relationships that must
++be configured:
++
++- DPNI <--> DPMAC
++- DPNI <--> DPNI
++- DPNI <--> L2-switch-port
++
++ A DPNI must be connected to something such as a DPMAC,
++ another DPNI, or L2 switch port. The DPNI connection
++ is made via a DPRC command.
++
++::
++
++ +-------+ +-------+
++ | DPNI | | DPMAC |
++ +---+---+ +---+---+
++ | |
++ +==========+
++
++- DPNI <--> DPBP
++
++ A network interface requires a 'buffer pool' (DPBP
++ object) which provides a list of pointers to memory
++ where received Ethernet data is to be copied. The
++ Ethernet driver configures the DPBPs associated with
++ the network interface.
++
++Interrupts
++==========
++All interrupts generated by DPAA2 objects are message
++interrupts. At the hardware level message interrupts
++generated by devices will normally have 3 components--
++1) a non-spoofable 'device-id' expressed on the hardware
++bus, 2) an address, 3) a data value.
++
++In the case of DPAA2 devices/objects, all objects in the
++same container/DPRC share the same 'device-id'.
++For ARM-based SoC this is the same as the stream ID.
++
++
++DPAA2 Linux Drivers Overview
++============================
++
++This section provides an overview of the Linux kernel drivers for
++DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
++drivers and 2) functional object drivers (such as Ethernet).
++
++As described previously, a DPRC is a container that holds the other
++types of DPAA2 objects. It is functionally similar to a plug-and-play
++bus controller.
++Each object in the DPRC is a Linux "device" and is bound to a driver.
++The diagram below shows the Linux drivers involved in a networking
++scenario and the objects bound to each driver. A brief description
++of each driver follows.
++
++::
++
++ +------------+
++ | OS Network |
++ | Stack |
++ +------------+ +------------+
++ | Allocator |. . . . . . . | Ethernet |
++ |(DPMCP,DPBP)| | (DPNI) |
++ +-.----------+ +---+---+----+
++ . . ^ |
++ . . <data avail, | | <enqueue,
++ . . tx confirm> | | dequeue>
++ +-------------+ . | |
++ | DPRC driver | . +---+---V----+ +---------+
++ | (DPRC) | . . . . . .| DPIO driver| | MAC |
++ +----------+--+ | (DPIO) | | (DPMAC) |
++ | +------+-----+ +-----+---+
++ |<dev add/remove> | |
++ | | |
++ +--------+----------+ | +--+---+
++ | MC-bus driver | | | PHY |
++ | | | |driver|
++ | /bus/fsl-mc | | +--+---+
++ +-------------------+ | |
++ | |
++ ========================= HARDWARE =========|=================|======
++ DPIO |
++ | |
++ DPNI---DPBP |
++ | |
++ DPMAC |
++ | |
++ PHY ---------------+
++ ============================================|========================
++
++A brief description of each driver is provided below.
++
++MC-bus driver
++-------------
++The MC-bus driver is a platform driver and is probed from a
++node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
++firmware. It is responsible for bootstrapping the DPAA2 kernel
++infrastructure.
++Key functions include:
++
++- registering a new bus type named "fsl-mc" with the kernel,
++ and implementing bus call-backs (e.g. match/uevent/dev_groups)
++- implementing APIs for DPAA2 driver registration and for device
++ add/remove
++- creates an MSI IRQ domain
++- doing a 'device add' to expose the 'root' DPRC, in turn triggering
++ a bind of the root DPRC to the DPRC driver
++
++The binding for the MC-bus device-tree node can be consulted at
++*Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt*.
++The sysfs bind/unbind interfaces for the MC-bus can be consulted at
++*Documentation/ABI/testing/sysfs-bus-fsl-mc*.
++
++DPRC driver
++-----------
++The DPRC driver is bound to DPRC objects and does runtime management
++of a bus instance. It performs the initial bus scan of the DPRC
++and handles interrupts for container events such as hot plug by
++re-scanning the DPRC.
++
++Allocator
++---------
++Certain objects such as DPMCP and DPBP are generic and fungible,
++and are intended to be used by other drivers. For example,
++the DPAA2 Ethernet driver needs:
++
++- DPMCPs to send MC commands, to configure network interfaces
++- DPBPs for network buffer pools
++
++The allocator driver registers for these allocatable object types
++and those objects are bound to the allocator when the bus is probed.
++The allocator maintains a pool of objects that are available for
++allocation by other DPAA2 drivers.
++
++DPIO driver
++-----------
++The DPIO driver is bound to DPIO objects and provides services that allow
++other drivers such as the Ethernet driver to enqueue and dequeue data for
++their respective objects.
++Key services include:
++
++- data availability notifications
++- hardware queuing operations (enqueue and dequeue of data)
++- hardware buffer pool management
++
++To transmit a packet the Ethernet driver puts data on a queue and
++invokes a DPIO API. For receive, the Ethernet driver registers
++a data availability notification callback. To dequeue a packet
++a DPIO API is used.
++There is typically one DPIO object per physical CPU for optimum
++performance, allowing different CPUs to simultaneously enqueue
++and dequeue data.
++
++The DPIO driver operates on behalf of all DPAA2 drivers
++active in the kernel-- Ethernet, crypto, compression,
++etc.
++
++Ethernet driver
++---------------
++The Ethernet driver is bound to a DPNI and implements the kernel
++interfaces needed to connect the DPAA2 network interface to
++the network stack.
++Each DPNI corresponds to a Linux network interface.
++
++MAC driver
++----------
++An Ethernet PHY is an off-chip, board specific component and is managed
++by the appropriate PHY driver via an mdio bus. The MAC driver
++plays a role of being a proxy between the PHY driver and the
++MC. It does this proxy via the MC commands to a DPMAC object.
++If the PHY driver signals a link change, the MAC driver notifies
++the MC via a DPMAC command. If a network interface is brought
++up or down, the MC notifies the DPMAC driver via an interrupt and
++the driver can take appropriate action.
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3980,6 +3980,12 @@ S: Maintained
+ F: drivers/char/dtlk.c
+ F: include/linux/dtlk.h
+
++DPAA2 DATAPATH I/O (DPIO) DRIVER
++M: Roy Pledge <Roy.Pledge@nxp.com>
++L: linux-kernel@vger.kernel.org
++S: Maintained
++F: drivers/staging/fsl-mc/bus/dpio
++
+ DPT_I2O SCSI RAID DRIVER
+ M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
+ L: linux-scsi@vger.kernel.org
+@@ -5110,7 +5116,10 @@ M: "J. German Rivera" <German.Rivera@fre
+ M: Stuart Yoder <stuart.yoder@nxp.com>
+ L: linux-kernel@vger.kernel.org
+ S: Maintained
+-F: drivers/staging/fsl-mc/
++F: drivers/bus/fsl-mc/
++F: Documentation/networking/dpaa2/overview.rst
++F: include/uapi/linux/fsl_mc.h
++F: Documentation/ABI/stable/sysfs-bus-fsl-mc
+
+ FREEVXFS FILESYSTEM
+ M: Christoph Hellwig <hch@infradead.org>
+--- a/drivers/bus/Kconfig
++++ b/drivers/bus/Kconfig
+@@ -167,4 +167,7 @@ config VEXPRESS_CONFIG
+ help
+ Platform configuration infrastructure for the ARM Ltd.
+ Versatile Express.
++
++source "drivers/bus/fsl-mc/Kconfig"
++
+ endmenu
+--- a/drivers/bus/Makefile
++++ b/drivers/bus/Makefile
+@@ -7,6 +7,10 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o
+ obj-$(CONFIG_ARM_CCN) += arm-ccn.o
+
+ obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
++
+# DPAA2 fsl-mc bus
- #
--# Copyright (C) 2014 Freescale Semiconductor, Inc.
++obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
++
+ obj-$(CONFIG_IMX_WEIM) += imx-weim.o
+ obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
+ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
+--- /dev/null
++++ b/drivers/bus/fsl-mc/Kconfig
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# DPAA2 fsl-mc bus
++#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- #
- # This file is released under the GPLv2
- #
-
- config FSL_MC_BUS
-- bool "Freescale Management Complex (MC) bus driver"
-- depends on OF && ARM64
++#
++
++config FSL_MC_BUS
+ bool "QorIQ DPAA2 fsl-mc bus driver"
-+ depends on OF && ARCH_LAYERSCAPE
- select GENERIC_MSI_IRQ_DOMAIN
- help
-- Driver to enable the bus infrastructure for the Freescale
-- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
-- module of the QorIQ LS2 SoCs, that does resource management
-- for hardware building-blocks in the SoC that can be used
-- to dynamically create networking hardware objects such as
-- network interfaces (NICs), crypto accelerator instances,
-- or L2 switches.
++ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
++ select GENERIC_MSI_IRQ_DOMAIN
++ help
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
-
-- Only enable this option when building the kernel for
-- Freescale QorQIQ LS2xxxx SoCs.
-+config FSL_MC_DPIO
-+ tristate "QorIQ DPAA2 DPIO driver"
-+ depends on FSL_MC_BUS
-+ help
-+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
-+ buffer management facilities for software to interact with
-+ other DPAA2 objects. This driver does not expose the DPIO
-+ objects individually, but groups them under a service layer
-+ API.
-
-+config FSL_QBMAN_DEBUG
-+ tristate "Freescale QBMAN Debug APIs"
-+ depends on FSL_MC_DPIO
-+ help
-+ QBMan debug assistant APIs.
-
++
+config FSL_MC_RESTOOL
-+ tristate "Freescale Management Complex (MC) restool driver"
-+ depends on FSL_MC_BUS
-+ help
-+ Driver that provides kernel support for the Freescale Management
-+ Complex resource manager user-space tool.
---- a/drivers/staging/fsl-mc/bus/Makefile
-+++ b/drivers/staging/fsl-mc/bus/Makefile
-@@ -17,4 +17,12 @@ mc-bus-driver-objs := fsl-mc-bus.o \
- fsl-mc-msi.o \
- irq-gic-v3-its-fsl-mc-msi.o \
- dpmcp.o \
-- dpbp.o
++ bool "Management Complex (MC) restool support"
++ depends on FSL_MC_BUS
++ help
++ Provides kernel support for the Management Complex resource
++ manager user-space tool - restool.
+--- /dev/null
++++ b/drivers/bus/fsl-mc/Makefile
+@@ -0,0 +1,22 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Freescale Management Complex (MC) bus drivers
++#
++# Copyright (C) 2014 Freescale Semiconductor, Inc.
++#
++obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
++
++mc-bus-driver-objs := fsl-mc-bus.o \
++ mc-sys.o \
++ mc-io.o \
+ dpbp.o \
+ dpcon.o \
++ dprc.o \
++ dprc-driver.o \
++ fsl-mc-allocator.o \
++ fsl-mc-msi.o \
++ dpmcp.o \
+ fsl-mc-iommu.o
+
-+# MC DPIO driver
-+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
-+
+# MC restool kernel support
-+obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o
++obj-$(CONFIG_FSL_MC_RESTOOL) += fsl-mc-restool.o
--- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
-@@ -0,0 +1,80 @@
++++ b/drivers/bus/fsl-mc/dpbp.c
+@@ -0,0 +1,186 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ */
++#include <linux/kernel.h>
++#include <linux/fsl/mc.h>
++#include <linux/fsl/mc.h>
++
++#include "fsl-mc-private.h"
++
++/**
++ * dpbp_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpbp_id: DPBP unique ID
++ * @token: Returned token; use in subsequent API calls
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpbp_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+#ifndef _FSL_DPBP_CMD_H
-+#define _FSL_DPBP_CMD_H
++int dpbp_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpbp_id,
++ u16 *token)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpbp_cmd_open *cmd_params;
++ int err;
+
-+/* DPBP Version */
-+#define DPBP_VER_MAJOR 3
-+#define DPBP_VER_MINOR 2
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
++ cmd_flags, 0);
++ cmd_params = (struct dpbp_cmd_open *)cmd.params;
++ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
+
-+/* Command versioning */
-+#define DPBP_CMD_BASE_VERSION 1
-+#define DPBP_CMD_ID_OFFSET 4
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
+
-+#define DPBP_CMD(id) ((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
++ return err;
++}
++EXPORT_SYMBOL_GPL(dpbp_open);
+
-+/* Command IDs */
-+#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
-+#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
-+#define DPBP_CMDID_GET_API_VERSION DPBP_CMD(0xa04)
++/**
++ * dpbp_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
-+#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
-+#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
-+#define DPBP_CMDID_RESET DPBP_CMD(0x005)
-+#define DPBP_CMDID_IS_ENABLED DPBP_CMD(0x006)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
++ token);
+
-+struct dpbp_cmd_open {
-+ __le32 dpbp_id;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL_GPL(dpbp_close);
+
-+struct dpbp_cmd_destroy {
-+ __le32 object_id;
-+};
++/**
++ * dpbp_enable() - Enable the DPBP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+#define DPBP_ENABLE 0x1
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
++ token);
+
-+struct dpbp_rsp_is_enabled {
-+ u8 enabled;
-+};
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL_GPL(dpbp_enable);
+
-+struct dpbp_rsp_get_attributes {
-+ /* response word 0 */
-+ __le16 pad;
-+ __le16 bpid;
-+ __le32 id;
-+ /* response word 1 */
-+ __le16 version_major;
-+ __le16 version_minor;
-+};
++/**
++ * dpbp_disable() - Disable the DPBP.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+#endif /* _FSL_DPBP_CMD_H */
---- a/drivers/staging/fsl-mc/bus/dpbp.c
-+++ b/drivers/staging/fsl-mc/bus/dpbp.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL_GPL(dpbp_disable);
++
++/**
++ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++EXPORT_SYMBOL_GPL(dpbp_reset);
++
++/**
++ * dpbp_get_attributes - Retrieve DPBP attributes.
++ *
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPBP object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_attr *attr)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpbp_rsp_get_attributes *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
++ attr->bpid = le16_to_cpu(rsp_params->bpid);
++ attr->id = le32_to_cpu(rsp_params->id);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(dpbp_get_attributes);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/dpcon.c
+@@ -0,0 +1,222 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -32,7 +32,8 @@
- #include "../include/mc-sys.h"
- #include "../include/mc-cmd.h"
- #include "../include/dpbp.h"
--#include "../include/dpbp-cmd.h"
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/fsl/mc.h>
++#include <linux/fsl/mc.h>
+
-+#include "dpbp-cmd.h"
-
- /**
- * dpbp_open() - Open a control session for the specified object.
-@@ -105,74 +106,6 @@ int dpbp_close(struct fsl_mc_io *mc_io,
- EXPORT_SYMBOL(dpbp_close);
-
- /**
-- * dpbp_create() - Create the DPBP object.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @cfg: Configuration structure
-- * @token: Returned token; use in subsequent API calls
-- *
-- * Create the DPBP object, allocate required resources and
-- * perform required initialization.
-- *
-- * The object can be created either by declaring it in the
-- * DPL file, or by calling this function.
-- * This function returns a unique authentication token,
-- * associated with the specific object ID and the specific MC
-- * portal; this token must be used in all subsequent calls to
-- * this specific object. For objects that are created using the
-- * DPL file, call dpbp_open function to get an authentication
-- * token first.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_create(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- const struct dpbp_cfg *cfg,
-- u16 *token)
--{
-- struct mc_command cmd = { 0 };
-- int err;
--
-- (void)(cfg); /* unused */
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
-- cmd_flags, 0);
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- *token = mc_cmd_hdr_read_token(&cmd);
--
-- return 0;
--}
--
--/**
-- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- *
-- * Return: '0' on Success; error code otherwise.
-- */
--int dpbp_destroy(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
--{
-- struct mc_command cmd = { 0 };
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
-- cmd_flags, token);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
- * dpbp_enable() - Enable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -250,6 +183,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc
-
- return 0;
- }
-+EXPORT_SYMBOL(dpbp_is_enabled);
-
- /**
- * dpbp_reset() - Reset the DPBP, returns the object to initial state.
-@@ -272,310 +206,7 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
- }
--
--/**
-- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: Identifies the interrupt index to configure
-- * @irq_cfg: IRQ configuration
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dpbp_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_set_irq *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
-- cmd_params->irq_index = irq_index;
-- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-- cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
-- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dpbp_get_irq() - Get IRQ information from the DPBP.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @type: Interrupt type: 0 represents message interrupt
-- * type (both irq_addr and irq_val are valid)
-- * @irq_cfg: IRQ attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dpbp_irq_cfg *irq_cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_get_irq *cmd_params;
-- struct dpbp_rsp_get_irq *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
-- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-- irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
-- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-- *type = le32_to_cpu(rsp_params->type);
--
-- return 0;
--}
--
--/**
-- * dpbp_set_irq_enable() - Set overall interrupt state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @en: Interrupt state - enable = 1, disable = 0
-- *
-- * Allows GPP software to control when interrupts are generated.
-- * Each interrupt can have up to 32 causes. The enable/disable control's the
-- * overall interrupt state. if the interrupt is disabled no causes will cause
-- * an interrupt.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_set_irq_enable *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
-- cmd_params->enable = en & DPBP_ENABLE;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dpbp_get_irq_enable() - Get overall interrupt state
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @en: Returned interrupt state - enable = 1, disable = 0
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_get_irq_enable *cmd_params;
-- struct dpbp_rsp_get_irq_enable *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
-- *en = rsp_params->enabled & DPBP_ENABLE;
-- return 0;
--}
--
--/**
-- * dpbp_set_irq_mask() - Set interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @mask: Event mask to trigger interrupt;
-- * each bit:
-- * 0 = ignore event
-- * 1 = consider event for asserting IRQ
-- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_set_irq_mask *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
-- cmd_params->mask = cpu_to_le32(mask);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dpbp_get_irq_mask() - Get interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @mask: Returned event mask to trigger interrupt
-- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_get_irq_mask *cmd_params;
-- struct dpbp_rsp_get_irq_mask *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
-- *mask = le32_to_cpu(rsp_params->mask);
--
-- return 0;
--}
--
--/**
-- * dpbp_get_irq_status() - Get the current status of any pending interrupts.
-- *
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @status: Returned interrupts status - one bit per cause:
-- * 0 = no interrupt pending
-- * 1 = interrupt pending
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_get_irq_status *cmd_params;
-- struct dpbp_rsp_get_irq_status *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
-- cmd_params->status = cpu_to_le32(*status);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
-- *status = le32_to_cpu(rsp_params->status);
--
-- return 0;
--}
--
--/**
-- * dpbp_clear_irq_status() - Clear a pending interrupt's status
-- *
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @irq_index: The interrupt index to configure
-- * @status: Bits to clear (W1C) - one bit per cause:
-- * 0 = don't change
-- * 1 = clear status bit
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 status)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_clear_irq_status *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
-- cmd_params->status = cpu_to_le32(status);
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
-+EXPORT_SYMBOL(dpbp_reset);
-
- /**
- * dpbp_get_attributes - Retrieve DPBP attributes.
-@@ -609,83 +240,40 @@ int dpbp_get_attributes(struct fsl_mc_io
- rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
- attr->bpid = le16_to_cpu(rsp_params->bpid);
- attr->id = le32_to_cpu(rsp_params->id);
-- attr->version.major = le16_to_cpu(rsp_params->version_major);
-- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
-
- return 0;
- }
- EXPORT_SYMBOL(dpbp_get_attributes);
-
- /**
-- * dpbp_set_notifications() - Set notifications towards software
-- * @mc_io: Pointer to MC portal's I/O object
-+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
-+ * @mc_io: Pointer to Mc portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @cfg: notifications configuration
-+ * @major_ver: Major version of Buffer Pool API
-+ * @minor_ver: Minor version of Buffer Pool API
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dpbp_set_notifications(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_notification_cfg *cfg)
-+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
- {
- struct mc_command cmd = { 0 };
-- struct dpbp_cmd_set_notifications *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
-- cmd_flags, token);
-- cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
-- cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
-- cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
-- cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
-- cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
-- cmd_params->options = cpu_to_le16(cfg->options);
-- cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
-- cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dpbp_get_notifications() - Get the notifications configuration
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPBP object
-- * @cfg: notifications configuration
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpbp_get_notifications(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_notification_cfg *cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dpbp_rsp_get_notifications *rsp_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
-- cmd_flags,
-- token);
-+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-
-- /* send command to mc*/
-+ /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
-- cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
-- cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
-- cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
-- cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
-- cfg->options = le16_to_cpu(rsp_params->options);
-- cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-- cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
- return 0;
- }
-+EXPORT_SYMBOL(dpbp_get_api_version);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
-@@ -0,0 +1,85 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPCON_CMD_H
-+#define _FSL_DPCON_CMD_H
-+
-+/* DPCON Version */
-+#define DPCON_VER_MAJOR 3
-+#define DPCON_VER_MINOR 2
-+
-+/* Command versioning */
-+#define DPCON_CMD_BASE_VERSION 1
-+#define DPCON_CMD_ID_OFFSET 4
-+
-+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
-+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
-+#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
-+
-+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
-+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
-+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
-+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
-+#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
-+
-+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
-+
-+struct dpcon_cmd_open {
-+ __le32 dpcon_id;
-+};
-+
-+#define DPCON_ENABLE 1
-+
-+struct dpcon_rsp_is_enabled {
-+ u8 enabled;
-+};
-+
-+struct dpcon_rsp_get_attr {
-+ /* response word 0 */
-+ __le32 id;
-+ __le16 qbman_ch_id;
-+ u8 num_priorities;
-+ u8 pad;
-+};
-+
-+struct dpcon_cmd_set_notification {
-+ /* cmd word 0 */
-+ __le32 dpio_id;
-+ u8 priority;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le64 user_ctx;
-+};
-+
-+#endif /* _FSL_DPCON_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpcon.c
-@@ -0,0 +1,317 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include "../include/mc-sys.h"
-+#include "../include/mc-cmd.h"
-+#include "../include/dpcon.h"
-+
-+#include "dpcon-cmd.h"
++#include "fsl-mc-private.h"
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ int dpcon_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+
+ return 0;
+}
-+EXPORT_SYMBOL(dpcon_open);
++EXPORT_SYMBOL_GPL(dpcon_open);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpcon_close);
++EXPORT_SYMBOL_GPL(dpcon_close);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpcon_enable);
++EXPORT_SYMBOL_GPL(dpcon_enable);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpcon_disable);
-+
-+/**
-+ * dpcon_is_enabled() - Check if the DPCON is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCON object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpcon_rsp_is_enabled *dpcon_rsp;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
-+ *en = dpcon_rsp->enabled & DPCON_ENABLE;
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(dpcon_is_enabled);
++EXPORT_SYMBOL_GPL(dpcon_disable);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpcon_reset);
++EXPORT_SYMBOL_GPL(dpcon_reset);
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ u16 token,
+ struct dpcon_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ int err;
+
+
+ return 0;
+}
-+EXPORT_SYMBOL(dpcon_get_attributes);
++EXPORT_SYMBOL_GPL(dpcon_get_attributes);
+
+/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ u16 token,
+ struct dpcon_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+
+ /* prepare command */
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpcon_set_notification);
++EXPORT_SYMBOL_GPL(dpcon_set_notification);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/dpmcp.c
+@@ -0,0 +1,99 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/fsl/mc.h>
++
++#include "fsl-mc-private.h"
+
+/**
-+ * dpcon_get_api_version - Get Data Path Concentrator API version
-+ * @mc_io: Pointer to MC portal's DPCON object
++ * dpmcp_open() - Open a control session for the specified object.
++ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of DPCON API
-+ * @minor_ver: Minor version of DPCON API
++ * @dpmcp_id: DPMCP unique ID
++ * @token: Returned token; use in subsequent API calls
+ *
-+ * Return: '0' on Success; Error code otherwise
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpmcp_create function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
++int dpmcp_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpmcp_id,
++ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpmcp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+ cmd_flags, 0);
++ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
++ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+
-+ /* send command to mc */
++ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
-+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++ *token = mc_cmd_hdr_read_token(&cmd);
+
-+ return 0;
++ return err;
+}
-+EXPORT_SYMBOL(dpcon_get_api_version);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
-@@ -0,0 +1,11 @@
-+#
-+# QorIQ DPAA2 DPIO driver
-+#
+
-+subdir-ccflags-y := -Werror
++/**
++ * dpmcp_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
++ cmd_flags, token);
+
-+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
++/**
++ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPMCP object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpmcp_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o
---- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
++ cmd_flags, token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ /dev/null
-@@ -1,62 +0,0 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
+@@ -1,805 +0,0 @@
+-/*
+- * Freescale data path resource container (DPRC) driver
+- *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/interrupt.h>
+-#include <linux/msi.h>
+-#include "../include/mc-bus.h"
+-#include "../include/mc-sys.h"
+-
+-#include "dprc-cmd.h"
+-#include "fsl-mc-private.h"
+-
+-#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+-
+-#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
+- (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
+- (_mc_dev)->obj_desc.id == (_obj_desc)->id)
+-
+-struct dprc_child_objs {
+- int child_count;
+- struct dprc_obj_desc *child_array;
+-};
+-
+-static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+-{
+- int i;
+- struct dprc_child_objs *objs;
+- struct fsl_mc_device *mc_dev;
+-
+- WARN_ON(!dev);
+- WARN_ON(!data);
+- mc_dev = to_fsl_mc_device(dev);
+- objs = data;
+-
+- for (i = 0; i < objs->child_count; i++) {
+- struct dprc_obj_desc *obj_desc = &objs->child_array[i];
+-
+- if (strlen(obj_desc->type) != 0 &&
+- FSL_MC_DEVICE_MATCH(mc_dev, obj_desc))
+- break;
+- }
+-
+- if (i == objs->child_count)
+- fsl_mc_device_remove(mc_dev);
+-
+- return 0;
+-}
+-
+-static int __fsl_mc_device_remove(struct device *dev, void *data)
+-{
+- WARN_ON(!dev);
+- WARN_ON(data);
+- fsl_mc_device_remove(to_fsl_mc_device(dev));
+- return 0;
+-}
+-
+-/**
+- * dprc_remove_devices - Removes devices for objects removed from a DPRC
+- *
+- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+- * @obj_desc_array: array of object descriptors for child objects currently
+- * present in the DPRC in the MC.
+- * @num_child_objects_in_mc: number of entries in obj_desc_array
+- *
+- * Synchronizes the state of the Linux bus driver with the actual state of
+- * the MC by removing devices that represent MC objects that have
+- * been dynamically removed in the physical DPRC.
+- */
+-static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+- struct dprc_obj_desc *obj_desc_array,
+- int num_child_objects_in_mc)
+-{
+- if (num_child_objects_in_mc != 0) {
+- /*
+- * Remove child objects that are in the DPRC in Linux,
+- * but not in the MC:
+- */
+- struct dprc_child_objs objs;
+-
+- objs.child_count = num_child_objects_in_mc;
+- objs.child_array = obj_desc_array;
+- device_for_each_child(&mc_bus_dev->dev, &objs,
+- __fsl_mc_device_remove_if_not_in_mc);
+- } else {
+- /*
+- * There are no child objects for this DPRC in the MC.
+- * So, remove all the child devices from Linux:
+- */
+- device_for_each_child(&mc_bus_dev->dev, NULL,
+- __fsl_mc_device_remove);
+- }
+-}
+-
+-static int __fsl_mc_device_match(struct device *dev, void *data)
+-{
+- struct dprc_obj_desc *obj_desc = data;
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+-
+- return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc);
+-}
+-
+-static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc
+- *obj_desc,
+- struct fsl_mc_device
+- *mc_bus_dev)
+-{
+- struct device *dev;
+-
+- dev = device_find_child(&mc_bus_dev->dev, obj_desc,
+- __fsl_mc_device_match);
+-
+- return dev ? to_fsl_mc_device(dev) : NULL;
+-}
+-
+-/**
+- * check_plugged_state_change - Check change in an MC object's plugged state
+- *
+- * @mc_dev: pointer to the fsl-mc device for a given MC object
+- * @obj_desc: pointer to the MC object's descriptor in the MC
+- *
+- * If the plugged state has changed from unplugged to plugged, the fsl-mc
+- * device is bound to the corresponding device driver.
+- * If the plugged state has changed from plugged to unplugged, the fsl-mc
+- * device is unbound from the corresponding device driver.
+- */
+-static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+- struct dprc_obj_desc *obj_desc)
+-{
+- int error;
+- u32 plugged_flag_at_mc =
+- obj_desc->state & DPRC_OBJ_STATE_PLUGGED;
+-
+- if (plugged_flag_at_mc !=
+- (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
+- if (plugged_flag_at_mc) {
+- mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED;
+- error = device_attach(&mc_dev->dev);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "device_attach() failed: %d\n",
+- error);
+- }
+- } else {
+- mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED;
+- device_release_driver(&mc_dev->dev);
+- }
+- }
+-}
+-
+-/**
+- * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+- *
+- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+- * @obj_desc_array: array of device descriptors for child devices currently
+- * present in the physical DPRC.
+- * @num_child_objects_in_mc: number of entries in obj_desc_array
+- *
+- * Synchronizes the state of the Linux bus driver with the actual
+- * state of the MC by adding objects that have been newly discovered
+- * in the physical DPRC.
+- */
+-static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+- struct dprc_obj_desc *obj_desc_array,
+- int num_child_objects_in_mc)
+-{
+- int error;
+- int i;
+-
+- for (i = 0; i < num_child_objects_in_mc; i++) {
+- struct fsl_mc_device *child_dev;
+- struct dprc_obj_desc *obj_desc = &obj_desc_array[i];
+-
+- if (strlen(obj_desc->type) == 0)
+- continue;
+-
+- /*
+- * Check if device is already known to Linux:
+- */
+- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+- if (child_dev) {
+- check_plugged_state_change(child_dev, obj_desc);
+- continue;
+- }
+-
+- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+- &child_dev);
+- if (error < 0)
+- continue;
+- }
+-}
+-
+-/**
+- * dprc_scan_objects - Discover objects in a DPRC
+- *
+- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+- * @total_irq_count: total number of IRQs needed by objects in the DPRC.
+- *
+- * Detects objects added and removed from a DPRC and synchronizes the
+- * state of the Linux bus driver, MC by adding and removing
+- * devices accordingly.
+- * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
+- * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
+- * All allocatable devices needed to be probed before all non-allocatable
+- * devices, to ensure that device drivers for non-allocatable
+- * devices can allocate any type of allocatable devices.
+- * That is, we need to ensure that the corresponding resource pools are
+- * populated before they can get allocation requests from probe callbacks
+- * of the device drivers for the non-allocatable devices.
+- */
+-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+- unsigned int *total_irq_count)
+-{
+- int num_child_objects;
+- int dprc_get_obj_failures;
+- int error;
+- unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+- struct dprc_obj_desc *child_obj_desc_array = NULL;
+-
+- error = dprc_get_obj_count(mc_bus_dev->mc_io,
+- 0,
+- mc_bus_dev->mc_handle,
+- &num_child_objects);
+- if (error < 0) {
+- dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+- error);
+- return error;
+- }
+-
+- if (num_child_objects != 0) {
+- int i;
+-
+- child_obj_desc_array =
+- devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
+- sizeof(*child_obj_desc_array),
+- GFP_KERNEL);
+- if (!child_obj_desc_array)
+- return -ENOMEM;
+-
+- /*
+- * Discover objects currently present in the physical DPRC:
+- */
+- dprc_get_obj_failures = 0;
+- for (i = 0; i < num_child_objects; i++) {
+- struct dprc_obj_desc *obj_desc =
+- &child_obj_desc_array[i];
+-
+- error = dprc_get_obj(mc_bus_dev->mc_io,
+- 0,
+- mc_bus_dev->mc_handle,
+- i, obj_desc);
+- if (error < 0) {
+- dev_err(&mc_bus_dev->dev,
+- "dprc_get_obj(i=%d) failed: %d\n",
+- i, error);
+- /*
+- * Mark the obj entry as "invalid", by using the
+- * empty string as obj type:
+- */
+- obj_desc->type[0] = '\0';
+- obj_desc->id = error;
+- dprc_get_obj_failures++;
+- continue;
+- }
+-
+- /*
+- * add a quirk for all versions of dpsec < 4.0...none
+- * are coherent regardless of what the MC reports.
+- */
+- if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+- (obj_desc->ver_major < 4))
+- obj_desc->flags |=
+- DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+-
+- irq_count += obj_desc->irq_count;
+- dev_dbg(&mc_bus_dev->dev,
+- "Discovered object: type %s, id %d\n",
+- obj_desc->type, obj_desc->id);
+- }
+-
+- if (dprc_get_obj_failures != 0) {
+- dev_err(&mc_bus_dev->dev,
+- "%d out of %d devices could not be retrieved\n",
+- dprc_get_obj_failures, num_child_objects);
+- }
+- }
+-
+- *total_irq_count = irq_count;
+- dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+- num_child_objects);
+-
+- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
+- num_child_objects);
+-
+- if (child_obj_desc_array)
+- devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(dprc_scan_objects);
+-
+-/**
+- * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
+- *
+- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+- *
+- * Scans the physical DPRC and synchronizes the state of the Linux
+- * bus driver with the actual state of the MC by adding and removing
+- * devices as appropriate.
+- */
+-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+-{
+- int error;
+- unsigned int irq_count;
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+-
+- fsl_mc_init_all_resource_pools(mc_bus_dev);
+-
+- /*
+- * Discover objects in the DPRC:
+- */
+- mutex_lock(&mc_bus->scan_mutex);
+- error = dprc_scan_objects(mc_bus_dev, &irq_count);
+- mutex_unlock(&mc_bus->scan_mutex);
+- if (error < 0)
+- goto error;
+-
+- if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+- dev_warn(&mc_bus_dev->dev,
+- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+- }
+-
+- error = fsl_mc_populate_irq_pool(
+- mc_bus,
+- FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+- if (error < 0)
+- goto error;
+- }
+-
+- return 0;
+-error:
+- fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(dprc_scan_container);
+-
+-/**
+- * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+- *
+- * @irq: IRQ number of the interrupt being handled
+- * @arg: Pointer to device structure
+- */
+-static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+-{
+- return IRQ_WAKE_THREAD;
+-}
+-
+-/**
+- * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+- *
+- * @irq: IRQ number of the interrupt being handled
+- * @arg: Pointer to device structure
+- */
+-static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+-{
+- int error;
+- u32 status;
+- struct device *dev = arg;
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+- struct fsl_mc_io *mc_io = mc_dev->mc_io;
+- struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+-
+- dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+- irq_num, smp_processor_id());
+-
+- if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
+- return IRQ_HANDLED;
+-
+- mutex_lock(&mc_bus->scan_mutex);
+- if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
+- goto out;
+-
+- status = 0;
+- error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+- &status);
+- if (error < 0) {
+- dev_err(dev,
+- "dprc_get_irq_status() failed: %d\n", error);
+- goto out;
+- }
+-
+- error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+- status);
+- if (error < 0) {
+- dev_err(dev,
+- "dprc_clear_irq_status() failed: %d\n", error);
+- goto out;
+- }
+-
+- if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+- DPRC_IRQ_EVENT_OBJ_REMOVED |
+- DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+- DPRC_IRQ_EVENT_OBJ_DESTROYED |
+- DPRC_IRQ_EVENT_OBJ_CREATED)) {
+- unsigned int irq_count;
+-
+- error = dprc_scan_objects(mc_dev, &irq_count);
+- if (error < 0) {
+- /*
+- * If the error is -ENXIO, we ignore it, as it indicates
+- * that the object scan was aborted, as we detected that
+- * an object was removed from the DPRC in the MC, while
+- * we were scanning the DPRC.
+- */
+- if (error != -ENXIO) {
+- dev_err(dev, "dprc_scan_objects() failed: %d\n",
+- error);
+- }
+-
+- goto out;
+- }
+-
+- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+- dev_warn(dev,
+- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+- }
+- }
+-
+-out:
+- mutex_unlock(&mc_bus->scan_mutex);
+- return IRQ_HANDLED;
+-}
+-
+-/*
+- * Disable and clear interrupt for a given DPRC object
+- */
+-static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+- struct fsl_mc_io *mc_io = mc_dev->mc_io;
+-
+- WARN_ON(mc_dev->obj_desc.irq_count != 1);
+-
+- /*
+- * Disable generation of interrupt, while we configure it:
+- */
+- error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+- error);
+- return error;
+- }
+-
+- /*
+- * Disable all interrupt causes for the interrupt:
+- */
+- error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+- error);
+- return error;
+- }
+-
+- /*
+- * Clear any leftover interrupts:
+- */
+- error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+- error);
+- return error;
+- }
+-
+- return 0;
+-}
+-
+-static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+- struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+-
+- WARN_ON(mc_dev->obj_desc.irq_count != 1);
+-
+- /*
+- * NOTE: devm_request_threaded_irq() invokes the device-specific
+- * function that programs the MSI physically in the device
+- */
+- error = devm_request_threaded_irq(&mc_dev->dev,
+- irq->msi_desc->irq,
+- dprc_irq0_handler,
+- dprc_irq0_handler_thread,
+- IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- "FSL MC DPRC irq0",
+- &mc_dev->dev);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "devm_request_threaded_irq() failed: %d\n",
+- error);
+- return error;
+- }
+-
+- return 0;
+-}
+-
+-static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+-
+- /*
+- * Enable all interrupt causes for the interrupt:
+- */
+- error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+- ~0x0u);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+- error);
+-
+- return error;
+- }
+-
+- /*
+- * Enable generation of the interrupt:
+- */
+- error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+- if (error < 0) {
+- dev_err(&mc_dev->dev,
+- "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+- error);
+-
+- return error;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Setup interrupt for a given DPRC device
+- */
+-static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+-
+- error = fsl_mc_allocate_irqs(mc_dev);
+- if (error < 0)
+- return error;
+-
+- error = disable_dprc_irq(mc_dev);
+- if (error < 0)
+- goto error_free_irqs;
+-
+- error = register_dprc_irq_handler(mc_dev);
+- if (error < 0)
+- goto error_free_irqs;
+-
+- error = enable_dprc_irq(mc_dev);
+- if (error < 0)
+- goto error_free_irqs;
+-
+- return 0;
+-
+-error_free_irqs:
+- fsl_mc_free_irqs(mc_dev);
+- return error;
+-}
+-
+-/**
+- * dprc_probe - callback invoked when a DPRC is being bound to this driver
- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of the above-listed copyright holders nor the
-- * names of any contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
+- * @mc_dev: Pointer to fsl-mc device representing a DPRC
- *
+- * It opens the physical DPRC in the MC.
+- * It scans the DPRC to discover the MC objects contained in it.
+- * It creates the interrupt pool for the MC bus associated with the DPRC.
+- * It configures the interrupts for the DPRC device itself.
+- */
+-static int dprc_probe(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+- size_t region_size;
+- struct device *parent_dev = mc_dev->dev.parent;
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+- bool mc_io_created = false;
+- bool msi_domain_set = false;
+-
+- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+- return -EINVAL;
+-
+- if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
+- return -EINVAL;
+-
+- if (!mc_dev->mc_io) {
+- /*
+- * This is a child DPRC:
+- */
+- if (WARN_ON(!dev_is_fsl_mc(parent_dev)))
+- return -EINVAL;
+-
+- if (WARN_ON(mc_dev->obj_desc.region_count == 0))
+- return -EINVAL;
+-
+- region_size = mc_dev->regions[0].end -
+- mc_dev->regions[0].start + 1;
+-
+- error = fsl_create_mc_io(&mc_dev->dev,
+- mc_dev->regions[0].start,
+- region_size,
+- NULL,
+- FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+- &mc_dev->mc_io);
+- if (error < 0)
+- return error;
+-
+- mc_io_created = true;
+-
+- /*
+- * Inherit parent MSI domain:
+- */
+- dev_set_msi_domain(&mc_dev->dev,
+- dev_get_msi_domain(parent_dev));
+- msi_domain_set = true;
+- } else {
+- /*
+- * This is a root DPRC
+- */
+- struct irq_domain *mc_msi_domain;
+-
+- if (WARN_ON(dev_is_fsl_mc(parent_dev)))
+- return -EINVAL;
+-
+- error = fsl_mc_find_msi_domain(parent_dev,
+- &mc_msi_domain);
+- if (error < 0) {
+- dev_warn(&mc_dev->dev,
+- "WARNING: MC bus without interrupt support\n");
+- } else {
+- dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+- msi_domain_set = true;
+- }
+- }
+-
+- error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+- &mc_dev->mc_handle);
+- if (error < 0) {
+- dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+- goto error_cleanup_msi_domain;
+- }
+-
+- error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+- &mc_bus->dprc_attr);
+- if (error < 0) {
+- dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+- error);
+- goto error_cleanup_open;
+- }
+-
+- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+- dev_err(&mc_dev->dev,
+- "ERROR: DPRC version %d.%d not supported\n",
+- mc_bus->dprc_attr.version.major,
+- mc_bus->dprc_attr.version.minor);
+- error = -ENOTSUPP;
+- goto error_cleanup_open;
+- }
+-
+- mutex_init(&mc_bus->scan_mutex);
+-
+- /*
+- * Discover MC objects in DPRC object:
+- */
+- error = dprc_scan_container(mc_dev);
+- if (error < 0)
+- goto error_cleanup_open;
+-
+- /*
+- * Configure interrupt for the DPRC object associated with this MC bus:
+- */
+- error = dprc_setup_irq(mc_dev);
+- if (error < 0)
+- goto error_cleanup_open;
+-
+- dev_info(&mc_dev->dev, "DPRC device bound to driver");
+- return 0;
+-
+-error_cleanup_open:
+- (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+-
+-error_cleanup_msi_domain:
+- if (msi_domain_set)
+- dev_set_msi_domain(&mc_dev->dev, NULL);
+-
+- if (mc_io_created) {
+- fsl_destroy_mc_io(mc_dev->mc_io);
+- mc_dev->mc_io = NULL;
+- }
+-
+- return error;
+-}
+-
+-/*
+- * Tear down interrupt for a given DPRC object
+- */
+-static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+-{
+- struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+-
+- (void)disable_dprc_irq(mc_dev);
+-
+- devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+-
+- fsl_mc_free_irqs(mc_dev);
+-}
+-
+-/**
+- * dprc_remove - callback invoked when a DPRC is being unbound from this driver
- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
+- * @mc_dev: Pointer to fsl-mc device representing the DPRC
- *
-- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-- * POSSIBILITY OF SUCH DAMAGE.
+- * It removes the DPRC's child objects from Linux (not from the MC) and
+- * closes the DPRC device in the MC.
+- * It tears down the interrupts that were configured for the DPRC device.
+- * It destroys the interrupt pool associated with this MC bus.
- */
--#ifndef _FSL_DPCON_CMD_H
--#define _FSL_DPCON_CMD_H
+-static int dprc_remove(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
-
--/* DPCON Version */
--#define DPCON_VER_MAJOR 2
--#define DPCON_VER_MINOR 1
+- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
+- return -EINVAL;
+- if (WARN_ON(!mc_dev->mc_io))
+- return -EINVAL;
-
--/* Command IDs */
--#define DPCON_CMDID_CLOSE 0x800
--#define DPCON_CMDID_OPEN 0x808
--#define DPCON_CMDID_CREATE 0x908
--#define DPCON_CMDID_DESTROY 0x900
+- if (WARN_ON(!mc_bus->irq_resources))
+- return -EINVAL;
-
--#define DPCON_CMDID_ENABLE 0x002
--#define DPCON_CMDID_DISABLE 0x003
--#define DPCON_CMDID_GET_ATTR 0x004
--#define DPCON_CMDID_RESET 0x005
--#define DPCON_CMDID_IS_ENABLED 0x006
+- if (dev_get_msi_domain(&mc_dev->dev))
+- dprc_teardown_irq(mc_dev);
-
--#define DPCON_CMDID_SET_IRQ 0x010
--#define DPCON_CMDID_GET_IRQ 0x011
--#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
--#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
--#define DPCON_CMDID_SET_IRQ_MASK 0x014
--#define DPCON_CMDID_GET_IRQ_MASK 0x015
--#define DPCON_CMDID_GET_IRQ_STATUS 0x016
--#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
+- device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
-
--#define DPCON_CMDID_SET_NOTIFICATION 0x100
+- if (dev_get_msi_domain(&mc_dev->dev)) {
+- fsl_mc_cleanup_irq_pool(mc_bus);
+- dev_set_msi_domain(&mc_dev->dev, NULL);
+- }
-
--#endif /* _FSL_DPCON_CMD_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
-@@ -0,0 +1,75 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPIO_CMD_H
-+#define _FSL_DPIO_CMD_H
-+
-+/* DPIO Version */
-+#define DPIO_VER_MAJOR 4
-+#define DPIO_VER_MINOR 2
-+
-+/* Command Versioning */
-+
-+#define DPIO_CMD_ID_OFFSET 4
-+#define DPIO_CMD_BASE_VERSION 1
-+
-+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
-+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
-+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
-+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
-+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
-+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
-+
-+struct dpio_cmd_open {
-+ __le32 dpio_id;
-+};
-+
-+#define DPIO_CHANNEL_MODE_MASK 0x3
-+
-+struct dpio_rsp_get_attr {
-+ /* cmd word 0 */
-+ __le32 id;
-+ __le16 qbman_portal_id;
-+ u8 num_priorities;
-+ u8 channel_mode;
-+ /* cmd word 1 */
-+ __le64 qbman_portal_ce_addr;
-+ /* cmd word 2 */
-+ __le64 qbman_portal_ci_addr;
-+ /* cmd word 3 */
-+ __le32 qbman_version;
-+};
-+
-+#endif /* _FSL_DPIO_CMD_H */
+- fsl_mc_cleanup_all_resource_pools(mc_dev);
+-
+- error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+- if (error < 0)
+- dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+-
+- if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+- fsl_destroy_mc_io(mc_dev->mc_io);
+- mc_dev->mc_io = NULL;
+- }
+-
+- dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+- return 0;
+-}
+-
+-static const struct fsl_mc_device_id match_id_table[] = {
+- {
+- .vendor = FSL_MC_VENDOR_FREESCALE,
+- .obj_type = "dprc"},
+- {.vendor = 0x0},
+-};
+-
+-static struct fsl_mc_driver dprc_driver = {
+- .driver = {
+- .name = FSL_MC_DPRC_DRIVER_NAME,
+- .owner = THIS_MODULE,
+- .pm = NULL,
+- },
+- .match_id_table = match_id_table,
+- .probe = dprc_probe,
+- .remove = dprc_remove,
+-};
+-
+-int __init dprc_driver_init(void)
+-{
+- return fsl_mc_driver_register(&dprc_driver);
+-}
+-
+-void dprc_driver_exit(void)
+-{
+- fsl_mc_driver_unregister(&dprc_driver);
+-}
--- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
-@@ -0,0 +1,296 @@
++++ b/drivers/bus/fsl-mc/dprc-driver.c
+@@ -0,0 +1,815 @@
++// SPDX-License-Identifier: GPL-2.0
+/*
-+ * Copyright 2014-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * Freescale data path resource container (DPRC) driver
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera@freescale.com>
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include <linux/types.h>
-+#include <linux/init.h>
+#include <linux/module.h>
-+#include <linux/platform_device.h>
++#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/delay.h>
-+
-+#include "../../include/mc.h"
-+#include "../../include/dpaa2-io.h"
++#include <linux/fsl/mc.h>
+
-+#include "qbman-portal.h"
-+#include "dpio.h"
-+#include "dpio-cmd.h"
++#include "fsl-mc-private.h"
+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Freescale Semiconductor, Inc");
-+MODULE_DESCRIPTION("DPIO Driver");
++#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
-+struct dpio_priv {
-+ struct dpaa2_io *io;
++struct fsl_mc_child_objs {
++ int child_count;
++ struct fsl_mc_obj_desc *child_array;
+};
+
-+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
++static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
++ struct fsl_mc_obj_desc *obj_desc)
+{
-+ struct device *dev = (struct device *)arg;
-+ struct dpio_priv *priv = dev_get_drvdata(dev);
++ return mc_dev->obj_desc.id == obj_desc->id &&
++ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+
-+ return dpaa2_io_irq(priv->io);
+}
+
-+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
++static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+{
-+ struct fsl_mc_device_irq *irq;
++ int i;
++ struct fsl_mc_child_objs *objs;
++ struct fsl_mc_device *mc_dev;
+
-+ irq = dpio_dev->irqs[0];
++ mc_dev = to_fsl_mc_device(dev);
++ objs = data;
+
-+ /* clear the affinity hint */
-+ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
++ for (i = 0; i < objs->child_count; i++) {
++ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
++
++ if (strlen(obj_desc->type) != 0 &&
++ fsl_mc_device_match(mc_dev, obj_desc))
++ break;
++ }
++
++ if (i == objs->child_count)
++ fsl_mc_device_remove(mc_dev);
++
++ return 0;
+}
+
-+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
++static int __fsl_mc_device_remove(struct device *dev, void *data)
++{
++ fsl_mc_device_remove(to_fsl_mc_device(dev));
++ return 0;
++}
++
++/**
++ * dprc_remove_devices - Removes devices for objects removed from a DPRC
++ *
++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @obj_desc_array: array of object descriptors for child objects currently
++ * present in the DPRC in the MC.
++ * @num_child_objects_in_mc: number of entries in obj_desc_array
++ *
++ * Synchronizes the state of the Linux bus driver with the actual state of
++ * the MC by removing devices that represent MC objects that have
++ * been dynamically removed in the physical DPRC.
++ */
++static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
++ struct fsl_mc_obj_desc *obj_desc_array,
++ int num_child_objects_in_mc)
++{
++ if (num_child_objects_in_mc != 0) {
++ /*
++ * Remove child objects that are in the DPRC in Linux,
++ * but not in the MC:
++ */
++ struct fsl_mc_child_objs objs;
++
++ objs.child_count = num_child_objects_in_mc;
++ objs.child_array = obj_desc_array;
++ device_for_each_child(&mc_bus_dev->dev, &objs,
++ __fsl_mc_device_remove_if_not_in_mc);
++ } else {
++ /*
++ * There are no child objects for this DPRC in the MC.
++ * So, remove all the child devices from Linux:
++ */
++ device_for_each_child(&mc_bus_dev->dev, NULL,
++ __fsl_mc_device_remove);
++ }
++}
++
++static int __fsl_mc_device_match(struct device *dev, void *data)
++{
++ struct fsl_mc_obj_desc *obj_desc = data;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return fsl_mc_device_match(mc_dev, obj_desc);
++}
++
++static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
++ *obj_desc,
++ struct fsl_mc_device
++ *mc_bus_dev)
++{
++ struct device *dev;
++
++ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
++ __fsl_mc_device_match);
++
++ return dev ? to_fsl_mc_device(dev) : NULL;
++}
++
++/**
++ * check_plugged_state_change - Check change in an MC object's plugged state
++ *
++ * @mc_dev: pointer to the fsl-mc device for a given MC object
++ * @obj_desc: pointer to the MC object's descriptor in the MC
++ *
++ * If the plugged state has changed from unplugged to plugged, the fsl-mc
++ * device is bound to the corresponding device driver.
++ * If the plugged state has changed from plugged to unplugged, the fsl-mc
++ * device is unbound from the corresponding device driver.
++ */
++static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
++ struct fsl_mc_obj_desc *obj_desc)
+{
-+ struct dpio_priv *priv;
+ int error;
-+ struct fsl_mc_device_irq *irq;
-+ cpumask_t mask;
++ u32 plugged_flag_at_mc =
++ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
++
++ if (plugged_flag_at_mc !=
++ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
++ if (plugged_flag_at_mc) {
++ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
++ error = device_attach(&mc_dev->dev);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "device_attach() failed: %d\n",
++ error);
++ }
++ } else {
++ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
++ device_release_driver(&mc_dev->dev);
++ }
++ }
++}
+
-+ priv = dev_get_drvdata(&dpio_dev->dev);
++/**
++ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
++ *
++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL, if none.
++ * @obj_desc_array: array of device descriptors for child devices currently
++ * present in the physical DPRC.
++ * @num_child_objects_in_mc: number of entries in obj_desc_array
++ *
++ * Synchronizes the state of the Linux bus driver with the actual
++ * state of the MC by adding objects that have been newly discovered
++ * in the physical DPRC.
++ */
++static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
++ struct fsl_mc_obj_desc *obj_desc_array,
++ int num_child_objects_in_mc)
++{
++ int error;
++ int i;
+
-+ irq = dpio_dev->irqs[0];
-+ error = devm_request_irq(&dpio_dev->dev,
-+ irq->msi_desc->irq,
-+ dpio_irq_handler,
-+ 0,
-+ dev_name(&dpio_dev->dev),
-+ &dpio_dev->dev);
++ for (i = 0; i < num_child_objects_in_mc; i++) {
++ struct fsl_mc_device *child_dev;
++ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
++
++ if (strlen(obj_desc->type) == 0)
++ continue;
++
++ /*
++ * Check if device is already known to Linux:
++ */
++ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
++ if (child_dev) {
++ check_plugged_state_change(child_dev, obj_desc);
++ put_device(&child_dev->dev);
++ continue;
++ }
++
++ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
++ driver_override, &child_dev);
++ if (error < 0)
++ continue;
++ }
++}
++
++/**
++ * dprc_scan_objects - Discover objects in a DPRC
++ *
++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ * @driver_override: driver override to apply to new objects found in the
++ * DPRC, or NULL, if none.
++ * @total_irq_count: If argument is provided the function populates the
++ * total number of IRQs created by objects in the DPRC.
++ *
++ * Detects objects added and removed from a DPRC and synchronizes the
++ * state of the Linux bus driver, MC by adding and removing
++ * devices accordingly.
++ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
++ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
++ * All allocatable devices needed to be probed before all non-allocatable
++ * devices, to ensure that device drivers for non-allocatable
++ * devices can allocate any type of allocatable devices.
++ * That is, we need to ensure that the corresponding resource pools are
++ * populated before they can get allocation requests from probe callbacks
++ * of the device drivers for the non-allocatable devices.
++ */
++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
++ unsigned int *total_irq_count)
++{
++ int num_child_objects;
++ int dprc_get_obj_failures;
++ int error;
++ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
++ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
++
++ error = dprc_get_obj_count(mc_bus_dev->mc_io,
++ 0,
++ mc_bus_dev->mc_handle,
++ &num_child_objects);
+ if (error < 0) {
-+ dev_err(&dpio_dev->dev,
-+ "devm_request_irq() failed: %d\n",
++ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+ error);
+ return error;
+ }
+
-+ /* set the affinity hint */
-+ cpumask_clear(&mask);
-+ cpumask_set_cpu(cpu, &mask);
-+ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
-+ dev_err(&dpio_dev->dev,
-+ "irq_set_affinity failed irq %d cpu %d\n",
-+ irq->msi_desc->irq, cpu);
++ if (num_child_objects != 0) {
++ int i;
++
++ child_obj_desc_array =
++ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
++ sizeof(*child_obj_desc_array),
++ GFP_KERNEL);
++ if (!child_obj_desc_array)
++ return -ENOMEM;
++
++ /*
++ * Discover objects currently present in the physical DPRC:
++ */
++ dprc_get_obj_failures = 0;
++ for (i = 0; i < num_child_objects; i++) {
++ struct fsl_mc_obj_desc *obj_desc =
++ &child_obj_desc_array[i];
++
++ error = dprc_get_obj(mc_bus_dev->mc_io,
++ 0,
++ mc_bus_dev->mc_handle,
++ i, obj_desc);
++ if (error < 0) {
++ dev_err(&mc_bus_dev->dev,
++ "dprc_get_obj(i=%d) failed: %d\n",
++ i, error);
++ /*
++ * Mark the obj entry as "invalid", by using the
++ * empty string as obj type:
++ */
++ obj_desc->type[0] = '\0';
++ obj_desc->id = error;
++ dprc_get_obj_failures++;
++ continue;
++ }
++
++ /*
++ * add a quirk for all versions of dpsec < 4.0...none
++ * are coherent regardless of what the MC reports.
++ */
++ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
++ (obj_desc->ver_major < 4))
++ obj_desc->flags |=
++ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
++
++ irq_count += obj_desc->irq_count;
++ dev_dbg(&mc_bus_dev->dev,
++ "Discovered object: type %s, id %d\n",
++ obj_desc->type, obj_desc->id);
++ }
++
++ if (dprc_get_obj_failures != 0) {
++ dev_err(&mc_bus_dev->dev,
++ "%d out of %d devices could not be retrieved\n",
++ dprc_get_obj_failures, num_child_objects);
++ }
++ }
++
++ /*
++ * Allocate IRQ's before binding the scanned devices with their
++ * respective drivers.
++ */
++ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
++ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++ dev_warn(&mc_bus_dev->dev,
++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ }
++
++ error = fsl_mc_populate_irq_pool(mc_bus,
++ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ if (error < 0)
++ return error;
++ }
++
++ if (total_irq_count)
++ *total_irq_count = irq_count;
++
++ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
++ num_child_objects);
++
++ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
++ num_child_objects);
++
++ if (child_obj_desc_array)
++ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+
+ return 0;
+}
+
-+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
++/**
++ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
++ *
++ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
++ *
++ * Scans the physical DPRC and synchronizes the state of the Linux
++ * bus driver with the actual state of the MC by adding and removing
++ * devices as appropriate.
++ */
++static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+{
-+ struct dpio_attr dpio_attrs;
-+ struct dpaa2_io_desc desc;
-+ struct dpio_priv *priv;
-+ int err = -ENOMEM;
-+ struct device *dev = &dpio_dev->dev;
-+ static int next_cpu = -1;
++ int error;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
-+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-+ if (!priv)
-+ goto err_priv_alloc;
++ fsl_mc_init_all_resource_pools(mc_bus_dev);
+
-+ dev_set_drvdata(dev, priv);
++ /*
++ * Discover objects in the DPRC:
++ */
++ mutex_lock(&mc_bus->scan_mutex);
++ error = dprc_scan_objects(mc_bus_dev, NULL, NULL);
++ mutex_unlock(&mc_bus->scan_mutex);
++ if (error < 0) {
++ fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
++ return error;
++ }
+
-+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
-+ if (err) {
-+ dev_dbg(dev, "MC portal allocation failed\n");
-+ err = -EPROBE_DEFER;
-+ goto err_mcportal;
++ return 0;
++}
++
++/**
++ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
++ *
++ * @irq: IRQ number of the interrupt being handled
++ * @arg: Pointer to device structure
++ */
++static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
++}
++
++/**
++ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
++ *
++ * @irq: IRQ number of the interrupt being handled
++ * @arg: Pointer to device structure
++ */
++static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
++{
++ int error;
++ u32 status;
++ struct device *dev = arg;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
++ struct fsl_mc_io *mc_io = mc_dev->mc_io;
++ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
++
++ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
++ irq_num, smp_processor_id());
++
++ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
++ return IRQ_HANDLED;
++
++ mutex_lock(&mc_bus->scan_mutex);
++ if (!msi_desc || msi_desc->irq != (u32)irq_num)
++ goto out;
++
++ status = 0;
++ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
++ &status);
++ if (error < 0) {
++ dev_err(dev,
++ "dprc_get_irq_status() failed: %d\n", error);
++ goto out;
+ }
+
-+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
-+ &dpio_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpio_open() failed\n");
-+ goto err_open;
++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
++ status);
++ if (error < 0) {
++ dev_err(dev,
++ "dprc_clear_irq_status() failed: %d\n", error);
++ goto out;
+ }
+
-+ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
-+ &dpio_attrs);
-+ if (err) {
-+ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
-+ goto err_get_attr;
++ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
++ DPRC_IRQ_EVENT_OBJ_REMOVED |
++ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
++ DPRC_IRQ_EVENT_OBJ_DESTROYED |
++ DPRC_IRQ_EVENT_OBJ_CREATED)) {
++ unsigned int irq_count;
++
++ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
++ if (error < 0) {
++ /*
++ * If the error is -ENXIO, we ignore it, as it indicates
++ * that the object scan was aborted, as we detected that
++ * an object was removed from the DPRC in the MC, while
++ * we were scanning the DPRC.
++ */
++ if (error != -ENXIO) {
++ dev_err(dev, "dprc_scan_objects() failed: %d\n",
++ error);
++ }
++
++ goto out;
++ }
++
++ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
++ dev_warn(dev,
++ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
++ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
++ }
++ }
++
++out:
++ mutex_unlock(&mc_bus->scan_mutex);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Disable and clear interrupt for a given DPRC object
++ */
++static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
++{
++ int error;
++ struct fsl_mc_io *mc_io = mc_dev->mc_io;
++
++ /*
++ * Disable generation of interrupt, while we configure it:
++ */
++ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /*
++ * Disable all interrupt causes for the interrupt:
++ */
++ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
++ error);
++ return error;
+ }
-+ desc.qman_version = dpio_attrs.qbman_version;
+
-+ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpio_enable() failed %d\n", err);
-+ goto err_get_attr;
++ /*
++ * Clear any leftover interrupts:
++ */
++ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
++ error);
++ return error;
+ }
+
-+ /* initialize DPIO descriptor */
-+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
-+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
-+ desc.dpio_id = dpio_dev->obj_desc.id;
++ return 0;
++}
+
-+ /* get the cpu to use for the affinity hint */
-+ if (next_cpu == -1)
-+ next_cpu = cpumask_first(cpu_online_mask);
-+ else
-+ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
++static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
++{
++ int error;
++ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
-+ if (!cpu_possible(next_cpu)) {
-+ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
-+ err = -ERANGE;
-+ goto err_allocate_irqs;
++ /*
++ * NOTE: devm_request_threaded_irq() invokes the device-specific
++ * function that programs the MSI physically in the device
++ */
++ error = devm_request_threaded_irq(&mc_dev->dev,
++ irq->msi_desc->irq,
++ dprc_irq0_handler,
++ dprc_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&mc_dev->dev),
++ &mc_dev->dev);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "devm_request_threaded_irq() failed: %d\n",
++ error);
++ return error;
+ }
-+ desc.cpu = next_cpu;
++
++ return 0;
++}
++
++static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
++{
++ int error;
+
+ /*
-+ * Set the CENA regs to be the cache enabled area of the portal to
-+ * achieve the best performance.
++ * Enable all interrupt causes for the interrupt:
+ */
-+ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
-+ resource_size(&dpio_dev->regions[0]));
-+ desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
-+ resource_size(&dpio_dev->regions[1]));
++ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
++ ~0x0u);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
++ error);
+
-+ err = fsl_mc_allocate_irqs(dpio_dev);
-+ if (err) {
-+ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
-+ goto err_allocate_irqs;
++ return error;
+ }
+
-+ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
-+ if (err)
-+ goto err_register_dpio_irq;
++ /*
++ * Enable generation of the interrupt:
++ */
++ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
++ if (error < 0) {
++ dev_err(&mc_dev->dev,
++ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
++ error);
+
-+ priv->io = dpaa2_io_create(&desc);
-+ if (!priv->io) {
-+ dev_err(dev, "dpaa2_io_create failed\n");
-+ goto err_dpaa2_io_create;
++ return error;
+ }
+
-+ dev_info(dev, "probed\n");
-+ dev_dbg(dev, " receives_notifications = %d\n",
-+ desc.receives_notifications);
-+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-+ fsl_mc_portal_free(dpio_dev->mc_io);
-+
+ return 0;
-+
-+err_dpaa2_io_create:
-+ unregister_dpio_irq_handlers(dpio_dev);
-+err_register_dpio_irq:
-+ fsl_mc_free_irqs(dpio_dev);
-+err_allocate_irqs:
-+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-+err_get_attr:
-+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
-+err_open:
-+ fsl_mc_portal_free(dpio_dev->mc_io);
-+err_mcportal:
-+ dev_set_drvdata(dev, NULL);
-+err_priv_alloc:
-+ return err;
+}
+
-+/* Tear down interrupts for a given DPIO object */
-+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
++/*
++ * Setup interrupt for a given DPRC device
++ */
++static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
-+ unregister_dpio_irq_handlers(dpio_dev);
-+ fsl_mc_free_irqs(dpio_dev);
-+}
++ int error;
+
-+static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
-+{
-+ struct device *dev;
-+ struct dpio_priv *priv;
-+ int err;
++ error = fsl_mc_allocate_irqs(mc_dev);
++ if (error < 0)
++ return error;
+
-+ dev = &dpio_dev->dev;
-+ priv = dev_get_drvdata(dev);
++ error = disable_dprc_irq(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
+
-+ dpaa2_io_down(priv->io);
++ error = register_dprc_irq_handler(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
+
-+ dpio_teardown_irqs(dpio_dev);
++ error = enable_dprc_irq(mc_dev);
++ if (error < 0)
++ goto error_free_irqs;
+
-+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
-+ if (err) {
-+ dev_err(dev, "MC portal allocation failed\n");
-+ goto err_mcportal;
-+ }
++ return 0;
+
-+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
-+ &dpio_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpio_open() failed\n");
-+ goto err_open;
-+ }
++error_free_irqs:
++ fsl_mc_free_irqs(mc_dev);
++ return error;
++}
+
-+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++/**
++ * dprc_probe - callback invoked when a DPRC is being bound to this driver
++ *
++ * @mc_dev: Pointer to fsl-mc device representing a DPRC
++ *
++ * It opens the physical DPRC in the MC.
++ * It scans the DPRC to discover the MC objects contained in it.
++ * It creates the interrupt pool for the MC bus associated with the DPRC.
++ * It configures the interrupts for the DPRC device itself.
++ */
++static int dprc_probe(struct fsl_mc_device *mc_dev)
++{
++ int error;
++ size_t region_size;
++ struct device *parent_dev = mc_dev->dev.parent;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
++ bool mc_io_created = false;
++ bool msi_domain_set = false;
++ u16 major_ver, minor_ver;
+
-+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ if (!is_fsl_mc_bus_dprc(mc_dev))
++ return -EINVAL;
+
-+ fsl_mc_portal_free(dpio_dev->mc_io);
++ if (dev_get_msi_domain(&mc_dev->dev))
++ return -EINVAL;
+
-+ dev_set_drvdata(dev, NULL);
++ if (!mc_dev->mc_io) {
++ /*
++ * This is a child DPRC:
++ */
++ if (!dev_is_fsl_mc(parent_dev))
++ return -EINVAL;
+
-+ return 0;
++ if (mc_dev->obj_desc.region_count == 0)
++ return -EINVAL;
+
-+err_open:
-+ fsl_mc_portal_free(dpio_dev->mc_io);
-+err_mcportal:
-+ return err;
-+}
++ region_size = resource_size(mc_dev->regions);
+
-+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpio",
-+ },
-+ { .vendor = 0x0 }
-+};
++ error = fsl_create_mc_io(&mc_dev->dev,
++ mc_dev->regions[0].start,
++ region_size,
++ NULL,
++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
++ if (error < 0)
++ return error;
+
-+static struct fsl_mc_driver dpaa2_dpio_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_dpio_probe,
-+ .remove = dpaa2_dpio_remove,
-+ .match_id_table = dpaa2_dpio_match_id_table
-+};
++ mc_io_created = true;
+
-+static int dpio_driver_init(void)
-+{
-+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
-+}
++ /*
++ * Inherit parent MSI domain:
++ */
++ dev_set_msi_domain(&mc_dev->dev,
++ dev_get_msi_domain(parent_dev));
++ msi_domain_set = true;
++ } else {
++ /*
++ * This is a root DPRC
++ */
++ struct irq_domain *mc_msi_domain;
+
-+static void dpio_driver_exit(void)
-+{
-+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
-+}
-+module_init(dpio_driver_init);
-+module_exit(dpio_driver_exit);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
-@@ -0,0 +1,693 @@
-+/*
-+ * Copyright 2014-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/types.h>
-+#include "../../include/mc.h"
-+#include "../../include/dpaa2-io.h"
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/slab.h>
++ if (dev_is_fsl_mc(parent_dev))
++ return -EINVAL;
+
-+#include "dpio.h"
-+#include "qbman-portal.h"
-+#include "qbman_debug.h"
++ error = fsl_mc_find_msi_domain(parent_dev,
++ &mc_msi_domain);
++ if (error < 0) {
++ dev_warn(&mc_dev->dev,
++ "WARNING: MC bus without interrupt support\n");
++ } else {
++ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
++ msi_domain_set = true;
++ }
++ }
+
-+struct dpaa2_io {
-+ atomic_t refs;
-+ struct dpaa2_io_desc dpio_desc;
-+ struct qbman_swp_desc swp_desc;
-+ struct qbman_swp *swp;
-+ struct list_head node;
-+ /* protect against multiple management commands */
-+ spinlock_t lock_mgmt_cmd;
-+ /* protect notifications list */
-+ spinlock_t lock_notifications;
-+ struct list_head notifications;
-+};
++ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
++ &mc_dev->mc_handle);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
++ goto error_cleanup_msi_domain;
++ }
+
-+struct dpaa2_io_store {
-+ unsigned int max;
-+ dma_addr_t paddr;
-+ struct dpaa2_dq *vaddr;
-+ void *alloced_addr; /* unaligned value from kmalloc() */
-+ unsigned int idx; /* position of the next-to-be-returned entry */
-+ struct qbman_swp *swp; /* portal used to issue VDQCR */
-+ struct device *dev; /* device used for DMA mapping */
-+};
++ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
++ &mc_bus->dprc_attr);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
++ error);
++ goto error_cleanup_open;
++ }
+
-+/* keep a per cpu array of DPIOs for fast access */
-+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
-+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
-+static DEFINE_SPINLOCK(dpio_list_lock);
++ error = dprc_get_api_version(mc_dev->mc_io, 0,
++ &major_ver,
++ &minor_ver);
++ if (error < 0) {
++ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
++ error);
++ goto error_cleanup_open;
++ }
+
-+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
-+ int cpu)
-+{
-+ if (d)
-+ return d;
++ if (major_ver < DPRC_MIN_VER_MAJOR ||
++ (major_ver == DPRC_MIN_VER_MAJOR &&
++ minor_ver < DPRC_MIN_VER_MINOR)) {
++ dev_err(&mc_dev->dev,
++ "ERROR: DPRC version %d.%d not supported\n",
++ major_ver, minor_ver);
++ error = -ENOTSUPP;
++ goto error_cleanup_open;
++ }
+
-+ if (unlikely(cpu >= (int)num_possible_cpus()))
-+ return NULL;
++ mutex_init(&mc_bus->scan_mutex);
+
+ /*
-+ * If cpu == -1, choose the current cpu, with no guarantees about
-+ * potentially being migrated away.
++ * Discover MC objects in DPRC object:
+ */
-+ if (cpu < 0)
-+ cpu = smp_processor_id();
++ error = dprc_scan_container(mc_dev);
++ if (error < 0)
++ goto error_cleanup_open;
+
-+ /* If a specific cpu was requested, pick it up immediately */
-+ return dpio_by_cpu[cpu];
++ /*
++ * Configure interrupt for the DPRC object associated with this MC bus:
++ */
++ error = dprc_setup_irq(mc_dev);
++ if (error < 0)
++ goto error_cleanup_open;
++
++ dev_info(&mc_dev->dev, "DPRC device bound to driver");
++ return 0;
++
++error_cleanup_open:
++ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++
++error_cleanup_msi_domain:
++ if (msi_domain_set)
++ dev_set_msi_domain(&mc_dev->dev, NULL);
++
++ if (mc_io_created) {
++ fsl_destroy_mc_io(mc_dev->mc_io);
++ mc_dev->mc_io = NULL;
++ }
++
++ return error;
+}
+
-+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
++/*
++ * Tear down interrupt for a given DPRC object
++ */
++static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
-+ if (d)
-+ return d;
++ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
-+ d = service_select_by_cpu(d, -1);
-+ if (d)
-+ return d;
++ (void)disable_dprc_irq(mc_dev);
+
-+ spin_lock(&dpio_list_lock);
-+ d = list_entry(dpio_list.next, struct dpaa2_io, node);
-+ list_del(&d->node);
-+ list_add_tail(&d->node, &dpio_list);
-+ spin_unlock(&dpio_list_lock);
++ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
-+ return d;
++ fsl_mc_free_irqs(mc_dev);
+}
+
+/**
-+ * dpaa2_io_create() - create a dpaa2_io object.
-+ * @desc: the dpaa2_io descriptor
++ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
-+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
-+ * DPIO object.
++ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
-+ * Return a valid dpaa2_io object for success, or NULL for failure.
++ * It removes the DPRC's child objects from Linux (not from the MC) and
++ * closes the DPRC device in the MC.
++ * It tears down the interrupts that were configured for the DPRC device.
++ * It destroys the interrupt pool associated with this MC bus.
+ */
-+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++static int dprc_remove(struct fsl_mc_device *mc_dev)
+{
-+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
++ int error;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
-+ if (!obj)
-+ return NULL;
++ if (!is_fsl_mc_bus_dprc(mc_dev))
++ return -EINVAL;
++ if (!mc_dev->mc_io)
++ return -EINVAL;
+
-+ /* check if CPU is out of range (-1 means any cpu) */
-+ if (desc->cpu >= (int)num_possible_cpus()) {
-+ kfree(obj);
-+ return NULL;
++ if (!mc_bus->irq_resources)
++ return -EINVAL;
++
++ if (dev_get_msi_domain(&mc_dev->dev))
++ dprc_teardown_irq(mc_dev);
++
++ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
++
++ if (dev_get_msi_domain(&mc_dev->dev)) {
++ fsl_mc_cleanup_irq_pool(mc_bus);
++ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
-+ atomic_set(&obj->refs, 1);
-+ obj->dpio_desc = *desc;
-+ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
-+ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
-+ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
-+ obj->swp = qbman_swp_init(&obj->swp_desc);
++ fsl_mc_cleanup_all_resource_pools(mc_dev);
+
-+ if (!obj->swp) {
-+ kfree(obj);
-+ return NULL;
++ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
++ if (error < 0)
++ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
++
++ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
++ fsl_destroy_mc_io(mc_dev->mc_io);
++ mc_dev->mc_io = NULL;
+ }
+
-+ INIT_LIST_HEAD(&obj->node);
-+ spin_lock_init(&obj->lock_mgmt_cmd);
-+ spin_lock_init(&obj->lock_notifications);
-+ INIT_LIST_HEAD(&obj->notifications);
++ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
++ return 0;
++}
+
-+ /* For now only enable DQRR interrupts */
-+ qbman_swp_interrupt_set_trigger(obj->swp,
-+ QBMAN_SWP_INTERRUPT_DQRI);
-+ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
-+ if (obj->dpio_desc.receives_notifications)
-+ qbman_swp_push_set(obj->swp, 0, 1);
++static const struct fsl_mc_device_id match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dprc"},
++ {.vendor = 0x0},
++};
+
-+ spin_lock(&dpio_list_lock);
-+ list_add_tail(&obj->node, &dpio_list);
-+ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
-+ dpio_by_cpu[desc->cpu] = obj;
-+ spin_unlock(&dpio_list_lock);
++static struct fsl_mc_driver dprc_driver = {
++ .driver = {
++ .name = FSL_MC_DPRC_DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .pm = NULL,
++ },
++ .match_id_table = match_id_table,
++ .probe = dprc_probe,
++ .remove = dprc_remove,
++};
+
-+ return obj;
++int __init dprc_driver_init(void)
++{
++ return fsl_mc_driver_register(&dprc_driver);
+}
-+EXPORT_SYMBOL(dpaa2_io_create);
+
-+/**
-+ * dpaa2_io_down() - release the dpaa2_io object.
-+ * @d: the dpaa2_io object to be released.
-+ *
-+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
-+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
-+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
-+ * each handle obtained should be released using this function.
-+ */
-+void dpaa2_io_down(struct dpaa2_io *d)
++void dprc_driver_exit(void)
+{
-+ if (!atomic_dec_and_test(&d->refs))
-+ return;
-+ kfree(d);
++ fsl_mc_driver_unregister(&dprc_driver);
+}
-+EXPORT_SYMBOL(dpaa2_io_down);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/dprc.c
+@@ -0,0 +1,575 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/fsl/mc.h>
+
-+#define DPAA_POLL_MAX 32
++#include "fsl-mc-private.h"
+
+/**
-+ * dpaa2_io_irq() - ISR for DPIO interrupts
++ * dprc_open() - Open DPRC object for use
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Container ID to open
++ * @token: Returned token of DPRC object
+ *
-+ * @obj: the given DPIO object.
++ * Return: '0' on Success; Error code otherwise.
+ *
-+ * Return IRQ_HANDLED for success or IRQ_NONE if there
-+ * were no pending interrupts.
++ * @warning Required before any operation on the object.
+ */
-+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
++int dprc_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int container_id,
++ u16 *token)
+{
-+ const struct dpaa2_dq *dq;
-+ int max = 0;
-+ struct qbman_swp *swp;
-+ u32 status;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_open *cmd_params;
++ int err;
+
-+ swp = obj->swp;
-+ status = qbman_swp_interrupt_read_status(swp);
-+ if (!status)
-+ return IRQ_NONE;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
++ 0);
++ cmd_params = (struct dprc_cmd_open *)cmd.params;
++ cmd_params->container_id = cpu_to_le32(container_id);
+
-+ dq = qbman_swp_dqrr_next(swp);
-+ while (dq) {
-+ if (qbman_result_is_SCN(dq)) {
-+ struct dpaa2_io_notification_ctx *ctx;
-+ u64 q64;
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ q64 = qbman_result_SCN_ctx(dq);
-+ ctx = (void *)q64;
-+ ctx->cb(ctx);
-+ } else {
-+ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
-+ }
-+ qbman_swp_dqrr_consume(swp, dq);
-+ ++max;
-+ if (max > DPAA_POLL_MAX)
-+ goto done;
-+ dq = qbman_swp_dqrr_next(swp);
-+ }
-+done:
-+ qbman_swp_interrupt_clear_status(swp, status);
-+ qbman_swp_interrupt_set_inhibit(swp, 0);
-+ return IRQ_HANDLED;
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_irq);
++EXPORT_SYMBOL_GPL(dprc_open);
+
+/**
-+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
-+ * notifications on the given DPIO service.
-+ * @d: the given DPIO service.
-+ * @ctx: the notification context.
++ * dprc_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
+ *
-+ * The caller should make the MC command to attach a DPAA2 object to
-+ * a DPIO after this function completes successfully. In that way:
-+ * (a) The DPIO service is "ready" to handle a notification arrival
-+ * (which might happen before the "attach" command to MC has
-+ * returned control of execution back to the caller)
-+ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
-+ * 'qman64' parameters that it should pass along in the MC command
-+ * in order for the object to be configured to produce the right
-+ * notification fields to the DPIO service.
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
+ *
-+ * Return 0 for success, or -ENODEV for failure.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_register(struct dpaa2_io *d,
-+ struct dpaa2_io_notification_ctx *ctx)
++int dprc_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ unsigned long irqflags;
-+
-+ d = service_select_by_cpu(d, ctx->desired_cpu);
-+ if (!d)
-+ return -ENODEV;
++ struct fsl_mc_command cmd = { 0 };
+
-+ ctx->dpio_id = d->dpio_desc.dpio_id;
-+ ctx->qman64 = (u64)ctx;
-+ ctx->dpio_private = d;
-+ spin_lock_irqsave(&d->lock_notifications, irqflags);
-+ list_add(&ctx->node, &d->notifications);
-+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
++ token);
+
-+ /* Enable the generation of CDAN notifications */
-+ if (ctx->is_cdan)
-+ qbman_swp_CDAN_set_context_enable(d->swp,
-+ (u16)ctx->id,
-+ ctx->qman64);
-+ return 0;
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_register);
++EXPORT_SYMBOL_GPL(dprc_close);
+
+/**
-+ * dpaa2_io_service_deregister - The opposite of 'register'.
-+ * @service: the given DPIO service.
-+ * @ctx: the notification context.
++ * dprc_reset_container - Reset child container.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @child_container_id: ID of the container to reset
+ *
-+ * This function should be called only after sending the MC command to
-+ * to detach the notification-producing device from the DPIO.
++ * In case a software context crashes or becomes non-responsive, the parent
++ * may wish to reset its resources container before the software context is
++ * restarted.
++ *
++ * This routine informs all objects assigned to the child container that the
++ * container is being reset, so they may perform any cleanup operations that are
++ * needed. All objects handles that were owned by the child container shall be
++ * closed.
++ *
++ * Note that such request may be submitted even if the child software context
++ * has not crashed, but the resulting object cleanup operations will not be
++ * aware of that.
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+void dpaa2_io_service_deregister(struct dpaa2_io *service,
-+ struct dpaa2_io_notification_ctx *ctx)
++int dprc_reset_container(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int child_container_id)
+{
-+ struct dpaa2_io *d = ctx->dpio_private;
-+ unsigned long irqflags;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_reset_container *cmd_params;
+
-+ if (ctx->is_cdan)
-+ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
++ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+
-+ spin_lock_irqsave(&d->lock_notifications, irqflags);
-+ list_del(&ctx->node);
-+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_deregister);
++EXPORT_SYMBOL_GPL(dprc_reset_container);
+
+/**
-+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
-+ * @d: the given DPIO service.
-+ * @ctx: the notification context.
-+ *
-+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
-+ * considered "disarmed". Ie. the user can issue pull dequeue operations on that
-+ * traffic source for as long as it likes. Eventually it may wish to "rearm"
-+ * that source to allow it to produce another FQDAN/CDAN, that's what this
-+ * function achieves.
++ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: Identifies the interrupt index to configure
++ * @irq_cfg: IRQ configuration
+ *
-+ * Return 0 for success.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_rearm(struct dpaa2_io *d,
-+ struct dpaa2_io_notification_ctx *ctx)
++int dprc_set_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg)
+{
-+ unsigned long irqflags;
-+ int err;
-+
-+ d = service_select_by_cpu(d, ctx->desired_cpu);
-+ if (!unlikely(d))
-+ return -ENODEV;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_set_irq *cmd_params;
+
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ if (ctx->is_cdan)
-+ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
-+ else
-+ err = qbman_swp_fq_schedule(d->swp, ctx->id);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
++ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
++ cmd_params->irq_index = irq_index;
++ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
++ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+
-+ return err;
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_rearm);
+
+/**
-+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
-+ * @d: the given DPIO service.
-+ * @fqid: the given frame queue id.
-+ * @s: the dpaa2_io_store object for the result.
++ * dprc_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
+ *
-+ * Return 0 for success, or error code for failure.
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled no causes will cause
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
-+ struct dpaa2_io_store *s)
++int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
+{
-+ struct qbman_pull_desc pd;
-+ int err;
-+
-+ qbman_pull_desc_clear(&pd);
-+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
-+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
-+ qbman_pull_desc_set_fq(&pd, fqid);
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_set_irq_enable *cmd_params;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+ s->swp = d->swp;
-+ err = qbman_swp_pull(d->swp, &pd);
-+ if (err)
-+ s->swp = NULL;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
++ cmd_params->enable = en & DPRC_ENABLE;
++ cmd_params->irq_index = irq_index;
+
-+ return err;
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
-+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
-+ * @d: the given DPIO service.
-+ * @channelid: the given channel id.
-+ * @s: the dpaa2_io_store object for the result.
++ * dprc_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting irq
+ *
-+ * Return 0 for success, or error code for failure.
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
-+ struct dpaa2_io_store *s)
++int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
+{
-+ struct qbman_pull_desc pd;
-+ int err;
-+
-+ qbman_pull_desc_clear(&pd);
-+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
-+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
-+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
-+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_set_irq_mask *cmd_params;
+
-+ s->swp = d->swp;
-+ err = qbman_swp_pull(d->swp, &pd);
-+ if (err)
-+ s->swp = NULL;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
+
-+ return err;
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
+
+/**
-+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
-+ * @d: the given DPIO service.
-+ * @fqid: the given frame queue id.
-+ * @fd: the frame descriptor which is enqueued.
++ * dprc_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
+ *
-+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
-+ * or -ENODEV if there is no dpio service.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
-+ u32 fqid,
-+ const struct dpaa2_fd *fd)
++int dprc_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
+{
-+ struct qbman_eq_desc ed;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_get_irq_status *cmd_params;
++ struct dprc_rsp_get_irq_status *rsp_params;
++ int err;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
+
-+ qbman_eq_desc_clear(&ed);
-+ qbman_eq_desc_set_no_orp(&ed, 0);
-+ qbman_eq_desc_set_fq(&ed, fqid);
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ return qbman_swp_enqueue(d->swp, &ed, fd);
++ /* retrieve response parameters */
++ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
-+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
-+ * @d: the given DPIO service.
-+ * @qdid: the given queuing destination id.
-+ * @prio: the given queuing priority.
-+ * @qdbin: the given queuing destination bin.
-+ * @fd: the frame descriptor which is enqueued.
++ * dprc_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
+ *
-+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
-+ * or -ENODEV if there is no dpio service.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
-+ u32 qdid, u8 prio, u16 qdbin,
-+ const struct dpaa2_fd *fd)
++int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
+{
-+ struct qbman_eq_desc ed;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_clear_irq_status *cmd_params;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
-+
-+ qbman_eq_desc_clear(&ed);
-+ qbman_eq_desc_set_no_orp(&ed, 0);
-+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
+
-+ return qbman_swp_enqueue(d->swp, &ed, fd);
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
+
+/**
-+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
-+ * @d: the given DPIO object.
-+ * @bpid: the buffer pool id.
-+ * @buffers: the buffers to be released.
-+ * @num_buffers: the number of the buffers to be released.
++ * dprc_get_attributes() - Obtains container attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @attr:	Returned container attributes
+ *
-+ * Return 0 for success, and negative error code for failure.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_release(struct dpaa2_io *d,
-+ u32 bpid,
-+ const u64 *buffers,
-+ unsigned int num_buffers)
++int dprc_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dprc_attributes *attr)
+{
-+ struct qbman_release_desc rd;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_rsp_get_attributes *rsp_params;
++ int err;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
+
-+ qbman_release_desc_clear(&rd);
-+ qbman_release_desc_set_bpid(&rd, bpid);
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
++ /* retrieve response parameters */
++ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
++ attr->container_id = le32_to_cpu(rsp_params->container_id);
++ attr->icid = le32_to_cpu(rsp_params->icid);
++ attr->options = le32_to_cpu(rsp_params->options);
++ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
++
++ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_service_release);
+
+/**
-+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
-+ * @d: the given DPIO object.
-+ * @bpid: the buffer pool id.
-+ * @buffers: the buffer addresses for acquired buffers.
-+ * @num_buffers: the expected number of the buffers to acquire.
++ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_count: Number of objects assigned to the DPRC
+ *
-+ * Return a negative error code if the command failed, otherwise it returns
-+ * the number of buffers acquired, which may be less than the number requested.
-+ * Eg. if the buffer pool is empty, this will return zero.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_service_acquire(struct dpaa2_io *d,
-+ u32 bpid,
-+ u64 *buffers,
-+ unsigned int num_buffers)
++int dprc_get_obj_count(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *obj_count)
+{
-+ unsigned long irqflags;
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_rsp_get_obj_count *rsp_params;
+ int err;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
++ cmd_flags, token);
+
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ return err;
-+}
-+EXPORT_SYMBOL(dpaa2_io_service_acquire);
++ /* retrieve response parameters */
++ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
++ *obj_count = le32_to_cpu(rsp_params->obj_count);
+
-+/*
-+ * 'Stores' are reusable memory blocks for holding dequeue results, and to
-+ * assist with parsing those results.
-+ */
++ return 0;
++}
++EXPORT_SYMBOL_GPL(dprc_get_obj_count);
+
+/**
-+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
-+ * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
-+ * @dev: the device to allow mapping/unmapping the DMAable region.
++ * dprc_get_obj() - Get general information on an object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_index: Index of the object to be queried (< obj_count)
++ * @obj_desc: Returns the requested object descriptor
+ *
-+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
-+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
++ * The object descriptors are retrieved one by one by incrementing
++ * obj_index up to (not including) the value of obj_count returned
++ * from dprc_get_obj_count(). dprc_get_obj_count() must
++ * be called prior to dprc_get_obj().
+ *
-+ * Return pointer to dpaa2_io_store struct for successfuly created storage
-+ * memory, or NULL on error.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
-+ struct device *dev)
-+{
-+ struct dpaa2_io_store *ret;
-+ size_t size;
-+
-+ if (!max_frames || (max_frames > 16))
-+ return NULL;
-+
-+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
-+ if (!ret)
-+ return NULL;
-+
-+ ret->max = max_frames;
-+ size = max_frames * sizeof(struct dpaa2_dq) + 64;
-+ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
-+ if (!ret->alloced_addr) {
-+ kfree(ret);
-+ return NULL;
-+ }
-+
-+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
-+ ret->paddr = dma_map_single(dev, ret->vaddr,
-+ sizeof(struct dpaa2_dq) * max_frames,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, ret->paddr)) {
-+ kfree(ret->alloced_addr);
-+ kfree(ret);
-+ return NULL;
-+ }
++int dprc_get_obj(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int obj_index,
++ struct fsl_mc_obj_desc *obj_desc)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_get_obj *cmd_params;
++ struct dprc_rsp_get_obj *rsp_params;
++ int err;
+
-+ ret->idx = 0;
-+ ret->dev = dev;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
++ cmd_params->obj_index = cpu_to_le32(obj_index);
+
-+ return ret;
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
++ obj_desc->id = le32_to_cpu(rsp_params->id);
++ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
++ obj_desc->irq_count = rsp_params->irq_count;
++ obj_desc->region_count = rsp_params->region_count;
++ obj_desc->state = le32_to_cpu(rsp_params->state);
++ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
++ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
++ obj_desc->flags = le16_to_cpu(rsp_params->flags);
++ strncpy(obj_desc->type, rsp_params->type, 16);
++ obj_desc->type[15] = '\0';
++ strncpy(obj_desc->label, rsp_params->label, 16);
++ obj_desc->label[15] = '\0';
++ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_store_create);
++EXPORT_SYMBOL_GPL(dprc_get_obj);
+
+/**
-+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
-+ * result.
-+ * @s: the storage memory to be destroyed.
++ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type: Type of the object to set its IRQ
++ * @obj_id: ID of the object to set its IRQ
++ * @irq_index: The interrupt index to configure
++ * @irq_cfg: IRQ configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
++int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg)
+{
-+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
-+ DMA_FROM_DEVICE);
-+ kfree(s->alloced_addr);
-+ kfree(s);
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_set_obj_irq *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
++ cmd_flags,
++ token);
++ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
++ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
++ cmd_params->irq_index = irq_index;
++ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
++ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
++ cmd_params->obj_id = cpu_to_le32(obj_id);
++ strncpy(cmd_params->obj_type, obj_type, 16);
++ cmd_params->obj_type[15] = '\0';
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
-+EXPORT_SYMBOL(dpaa2_io_store_destroy);
++EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
+
+/**
-+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
-+ * @s: the dpaa2_io_store object.
-+ * @is_last: indicate whether this is the last frame in the pull command.
-+ *
-+ * When an object driver performs dequeues to a dpaa2_io_store, this function
-+ * can be used to determine when the next frame result is available. Once
-+ * this function returns non-NULL, a subsequent call to it will try to find
-+ * the next dequeue result.
-+ *
-+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
-+ * was empty, then this function will also return NULL (rather than expecting
-+ * the caller to always check for this. As such, "is_last" can be used to
-+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ * dprc_get_obj_region() - Get region information for a specified object.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPRC object
++ * @obj_type:	Object type as returned in dprc_get_obj()
++ * @obj_id: Unique object instance as returned in dprc_get_obj()
++ * @region_index: The specific region to query
++ * @region_desc: Returns the requested region descriptor
+ *
-+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
++int dprc_get_obj_region(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ char *obj_type,
++ int obj_id,
++ u8 region_index,
++ struct dprc_region_desc *region_desc)
+{
-+ int match;
-+ struct dpaa2_dq *ret = &s->vaddr[s->idx];
++ struct fsl_mc_command cmd = { 0 };
++ struct dprc_cmd_get_obj_region *cmd_params;
++ struct dprc_rsp_get_obj_region *rsp_params;
++ int err;
+
-+ match = qbman_result_has_new_result(s->swp, ret);
-+ if (!match) {
-+ *is_last = 0;
-+ return NULL;
-+ }
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
++ cmd_flags, token);
++ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
++ cmd_params->obj_id = cpu_to_le32(obj_id);
++ cmd_params->region_index = region_index;
++ strncpy(cmd_params->obj_type, obj_type, 16);
++ cmd_params->obj_type[15] = '\0';
+
-+ s->idx++;
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ if (dpaa2_dq_is_pull_complete(ret)) {
-+ *is_last = 1;
-+ s->idx = 0;
-+ /*
-+ * If we get an empty dequeue result to terminate a zero-results
-+ * vdqcr, return NULL to the caller rather than expecting him to
-+ * check non-NULL results every time.
-+ */
-+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
-+ ret = NULL;
-+ } else {
-+ *is_last = 0;
-+ }
++ /* retrieve response parameters */
++ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
++ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr);
++ region_desc->size = le32_to_cpu(rsp_params->size);
++ region_desc->type = rsp_params->type;
++ region_desc->flags = le32_to_cpu(rsp_params->flags);
+
-+ return ret;
++ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_store_next);
++EXPORT_SYMBOL_GPL(dprc_get_obj_region);
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
+/**
-+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
-+ * @d: the given DPIO object.
-+ * @fqid: the id of frame queue to be queried.
-+ * @fcnt: the queried frame count.
-+ * @bcnt: the queried byte count.
-+ *
-+ * Knowing the FQ count at run-time can be useful in debugging situations.
-+ * The instantaneous frame- and byte-count are hereby returned.
++ * dprc_get_api_version - Get Data Path Resource Container API version
++ * @mc_io: Pointer to Mc portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of Data Path Resource Container API
++ * @minor_ver: Minor version of Data Path Resource Container API
+ *
-+ * Return 0 for a successful query, and negative error code if query fails.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
-+ u32 *fcnt, u32 *bcnt)
++int dprc_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
+{
-+ struct qbman_attr state;
-+ struct qbman_swp *swp;
-+ unsigned long irqflags;
-+ int ret;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
-+ swp = d->swp;
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ ret = qbman_fq_query_state(swp, fqid, &state);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
-+ if (ret)
-+ return ret;
-+ *fcnt = qbman_fq_state_frame_count(&state);
-+ *bcnt = qbman_fq_state_byte_count(&state);
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_query_fq_count);
+
+/**
-+ * dpaa2_io_query_bp_count() - Query the number of buffers currenty in a
-+ * buffer pool.
-+ * @d: the given DPIO object.
-+ * @bpid: the index of buffer pool to be queried.
-+ * @num: the queried number of buffers in the buffer pool.
++ * dprc_get_container_id - Get container ID associated with a given portal.
++ * @mc_io: Pointer to Mc portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @container_id: Requested container id
+ *
-+ * Return 0 for a sucessful query, and negative error code if query fails.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, u32 *num)
++int dprc_get_container_id(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int *container_id)
+{
-+ struct qbman_attr state;
-+ struct qbman_swp *swp;
-+ unsigned long irqflags;
-+ int ret;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
+
-+ d = service_select(d);
-+ if (!d)
-+ return -ENODEV;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
++ cmd_flags,
++ 0);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
-+ swp = d->swp;
-+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
-+ ret = qbman_bp_query(swp, bpid, &state);
-+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
-+ if (ret)
-+ return ret;
-+ *num = qbman_bp_info_num_free_bufs(&state);
+ return 0;
+}
-+EXPORT_SYMBOL(dpaa2_io_query_bp_count);
-+#endif
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
++++ /dev/null
+@@ -1,668 +0,0 @@
+-/*
+- * Freescale MC object device allocator driver
+- *
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/msi.h>
+-#include "../include/mc-bus.h"
+-#include "../include/mc-sys.h"
+-#include "../include/dpbp-cmd.h"
+-#include "../include/dpcon-cmd.h"
+-
+-#include "fsl-mc-private.h"
+-
+-#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
+- (strcmp(_obj_type, "dpbp") == 0 || \
+- strcmp(_obj_type, "dpmcp") == 0 || \
+- strcmp(_obj_type, "dpcon") == 0)
+-
+-/**
+- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
+- * pool of a given MC bus
+- *
+- * @mc_bus: pointer to the MC bus
+- * @pool_type: MC bus pool type
+- * @mc_dev: Pointer to allocatable MC object device
+- *
+- * It adds an allocatable MC object device to a container's resource pool of
+- * the given resource type
+- */
+-static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+- *mc_bus,
+- enum fsl_mc_pool_type
+- pool_type,
+- struct fsl_mc_device
+- *mc_dev)
+-{
+- struct fsl_mc_resource_pool *res_pool;
+- struct fsl_mc_resource *resource;
+- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+- int error = -EINVAL;
+-
+- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+- goto out;
+- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+- goto out;
+- if (WARN_ON(mc_dev->resource))
+- goto out;
+-
+- res_pool = &mc_bus->resource_pools[pool_type];
+- if (WARN_ON(res_pool->type != pool_type))
+- goto out;
+- if (WARN_ON(res_pool->mc_bus != mc_bus))
+- goto out;
+-
+- mutex_lock(&res_pool->mutex);
+-
+- if (WARN_ON(res_pool->max_count < 0))
+- goto out_unlock;
+- if (WARN_ON(res_pool->free_count < 0 ||
+- res_pool->free_count > res_pool->max_count))
+- goto out_unlock;
+-
+- resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+- GFP_KERNEL);
+- if (!resource) {
+- error = -ENOMEM;
+- dev_err(&mc_bus_dev->dev,
+- "Failed to allocate memory for fsl_mc_resource\n");
+- goto out_unlock;
+- }
+-
+- resource->type = pool_type;
+- resource->id = mc_dev->obj_desc.id;
+- resource->data = mc_dev;
+- resource->parent_pool = res_pool;
+- INIT_LIST_HEAD(&resource->node);
+- list_add_tail(&resource->node, &res_pool->free_list);
+- mc_dev->resource = resource;
+- res_pool->free_count++;
+- res_pool->max_count++;
+- error = 0;
+-out_unlock:
+- mutex_unlock(&res_pool->mutex);
+-out:
+- return error;
+-}
+-
+-/**
+- * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+- * resource pool
+- *
+- * @mc_dev: Pointer to allocatable MC object device
+- *
+- * It permanently removes an allocatable MC object device from the resource
+- * pool, the device is currently in, as long as it is in the pool's free list.
+- */
+-static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+- *mc_dev)
+-{
+- struct fsl_mc_device *mc_bus_dev;
+- struct fsl_mc_bus *mc_bus;
+- struct fsl_mc_resource_pool *res_pool;
+- struct fsl_mc_resource *resource;
+- int error = -EINVAL;
+-
+- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+- goto out;
+-
+- resource = mc_dev->resource;
+- if (WARN_ON(!resource || resource->data != mc_dev))
+- goto out;
+-
+- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+- mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- res_pool = resource->parent_pool;
+- if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type]))
+- goto out;
+-
+- mutex_lock(&res_pool->mutex);
+-
+- if (WARN_ON(res_pool->max_count <= 0))
+- goto out_unlock;
+- if (WARN_ON(res_pool->free_count <= 0 ||
+- res_pool->free_count > res_pool->max_count))
+- goto out_unlock;
+-
+- /*
+- * If the device is currently allocated, its resource is not
+- * in the free list and thus, the device cannot be removed.
+- */
+- if (list_empty(&resource->node)) {
+- error = -EBUSY;
+- dev_err(&mc_bus_dev->dev,
+- "Device %s cannot be removed from resource pool\n",
+- dev_name(&mc_dev->dev));
+- goto out_unlock;
+- }
+-
+- list_del_init(&resource->node);
+- res_pool->free_count--;
+- res_pool->max_count--;
+-
+- devm_kfree(&mc_bus_dev->dev, resource);
+- mc_dev->resource = NULL;
+- error = 0;
+-out_unlock:
+- mutex_unlock(&res_pool->mutex);
+-out:
+- return error;
+-}
+-
+-static const char *const fsl_mc_pool_type_strings[] = {
+- [FSL_MC_POOL_DPMCP] = "dpmcp",
+- [FSL_MC_POOL_DPBP] = "dpbp",
+- [FSL_MC_POOL_DPCON] = "dpcon",
+- [FSL_MC_POOL_IRQ] = "irq",
+-};
+-
+-static int __must_check object_type_to_pool_type(const char *object_type,
+- enum fsl_mc_pool_type
+- *pool_type)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
+- if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
+- *pool_type = i;
+- return 0;
+- }
+- }
+-
+- return -EINVAL;
+-}
+-
+-int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+- enum fsl_mc_pool_type pool_type,
+- struct fsl_mc_resource **new_resource)
+-{
+- struct fsl_mc_resource_pool *res_pool;
+- struct fsl_mc_resource *resource;
+- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+- int error = -EINVAL;
+-
+- BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+- FSL_MC_NUM_POOL_TYPES);
+-
+- *new_resource = NULL;
+- if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
+- goto out;
+-
+- res_pool = &mc_bus->resource_pools[pool_type];
+- if (WARN_ON(res_pool->mc_bus != mc_bus))
+- goto out;
+-
+- mutex_lock(&res_pool->mutex);
+- resource = list_first_entry_or_null(&res_pool->free_list,
+- struct fsl_mc_resource, node);
+-
+- if (!resource) {
+- WARN_ON(res_pool->free_count != 0);
+- error = -ENXIO;
+- dev_err(&mc_bus_dev->dev,
+- "No more resources of type %s left\n",
+- fsl_mc_pool_type_strings[pool_type]);
+- goto out_unlock;
+- }
+-
+- if (WARN_ON(resource->type != pool_type))
+- goto out_unlock;
+- if (WARN_ON(resource->parent_pool != res_pool))
+- goto out_unlock;
+- if (WARN_ON(res_pool->free_count <= 0 ||
+- res_pool->free_count > res_pool->max_count))
+- goto out_unlock;
+-
+- list_del_init(&resource->node);
+-
+- res_pool->free_count--;
+- error = 0;
+-out_unlock:
+- mutex_unlock(&res_pool->mutex);
+- *new_resource = resource;
+-out:
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+-
+-void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+-{
+- struct fsl_mc_resource_pool *res_pool;
+-
+- res_pool = resource->parent_pool;
+- if (WARN_ON(resource->type != res_pool->type))
+- return;
+-
+- mutex_lock(&res_pool->mutex);
+- if (WARN_ON(res_pool->free_count < 0 ||
+- res_pool->free_count >= res_pool->max_count))
+- goto out_unlock;
+-
+- if (WARN_ON(!list_empty(&resource->node)))
+- goto out_unlock;
+-
+- list_add_tail(&resource->node, &res_pool->free_list);
+- res_pool->free_count++;
+-out_unlock:
+- mutex_unlock(&res_pool->mutex);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+-
+-/**
+- * fsl_mc_object_allocate - Allocates a MC object device of the given
+- * pool type from a given MC bus
+- *
+- * @mc_dev: MC device for which the MC object device is to be allocated
+- * @pool_type: MC bus resource pool type
+- * @new_mc_dev: Pointer to area where the pointer to the allocated
+- * MC object device is to be returned
+- *
+- * This function allocates a MC object device from the device's parent DPRC,
+- * from the corresponding MC bus' pool of allocatable MC object devices of
+- * the given resource type. mc_dev cannot be a DPRC itself.
+- *
+- * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
+- * portals are allocated using fsl_mc_portal_allocate(), instead of
+- * this function.
+- */
+-int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+- enum fsl_mc_pool_type pool_type,
+- struct fsl_mc_device **new_mc_adev)
+-{
+- struct fsl_mc_device *mc_bus_dev;
+- struct fsl_mc_bus *mc_bus;
+- struct fsl_mc_device *mc_adev;
+- int error = -EINVAL;
+- struct fsl_mc_resource *resource = NULL;
+-
+- *new_mc_adev = NULL;
+- if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC))
+- goto error;
+-
+- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
+- goto error;
+-
+- if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP))
+- goto error;
+-
+- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+- mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
+- if (error < 0)
+- goto error;
+-
+- mc_adev = resource->data;
+- if (WARN_ON(!mc_adev))
+- goto error;
+-
+- *new_mc_adev = mc_adev;
+- return 0;
+-error:
+- if (resource)
+- fsl_mc_resource_free(resource);
+-
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+-
+-/**
+- * fsl_mc_object_free - Returns an allocatable MC object device to the
+- * corresponding resource pool of a given MC bus.
+- *
+- * @mc_adev: Pointer to the MC object device
+- */
+-void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+-{
+- struct fsl_mc_resource *resource;
+-
+- resource = mc_adev->resource;
+- if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP))
+- return;
+- if (WARN_ON(resource->data != mc_adev))
+- return;
+-
+- fsl_mc_resource_free(resource);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+-
+-/*
+- * Initialize the interrupt pool associated with a MC bus.
+- * It allocates a block of IRQs from the GIC-ITS
+- */
+-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+- unsigned int irq_count)
+-{
+- unsigned int i;
+- struct msi_desc *msi_desc;
+- struct fsl_mc_device_irq *irq_resources;
+- struct fsl_mc_device_irq *mc_dev_irq;
+- int error;
+- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+- struct fsl_mc_resource_pool *res_pool =
+- &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+-
+- if (WARN_ON(irq_count == 0 ||
+- irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
+- return -EINVAL;
+-
+- error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+- if (error < 0)
+- return error;
+-
+- irq_resources = devm_kzalloc(&mc_bus_dev->dev,
+- sizeof(*irq_resources) * irq_count,
+- GFP_KERNEL);
+- if (!irq_resources) {
+- error = -ENOMEM;
+- goto cleanup_msi_irqs;
+- }
+-
+- for (i = 0; i < irq_count; i++) {
+- mc_dev_irq = &irq_resources[i];
+-
+- /*
+- * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+- * by the fsl_mc_msi_write_msg() callback
+- */
+- mc_dev_irq->resource.type = res_pool->type;
+- mc_dev_irq->resource.data = mc_dev_irq;
+- mc_dev_irq->resource.parent_pool = res_pool;
+- INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+- list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+- }
+-
+- for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
+- mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
+- mc_dev_irq->msi_desc = msi_desc;
+- mc_dev_irq->resource.id = msi_desc->irq;
+- }
+-
+- res_pool->max_count = irq_count;
+- res_pool->free_count = irq_count;
+- mc_bus->irq_resources = irq_resources;
+- return 0;
+-
+-cleanup_msi_irqs:
+- fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+-
+-/**
+- * Teardown the interrupt pool associated with an MC bus.
+- * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+- */
+-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+-{
+- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+- struct fsl_mc_resource_pool *res_pool =
+- &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+-
+- if (WARN_ON(!mc_bus->irq_resources))
+- return;
+-
+- if (WARN_ON(res_pool->max_count == 0))
+- return;
+-
+- if (WARN_ON(res_pool->free_count != res_pool->max_count))
+- return;
+-
+- INIT_LIST_HEAD(&res_pool->free_list);
+- res_pool->max_count = 0;
+- res_pool->free_count = 0;
+- mc_bus->irq_resources = NULL;
+- fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+-
+-/**
+- * It allocates the IRQs required by a given MC object device. The
+- * IRQs are allocated from the interrupt pool associated with the
+- * MC bus that contains the device, if the device is not a DPRC device.
+- * Otherwise, the IRQs are allocated from the interrupt pool associated
+- * with the MC bus that represents the DPRC device itself.
+- */
+-int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+-{
+- int i;
+- int irq_count;
+- int res_allocated_count = 0;
+- int error = -EINVAL;
+- struct fsl_mc_device_irq **irqs = NULL;
+- struct fsl_mc_bus *mc_bus;
+- struct fsl_mc_resource_pool *res_pool;
+-
+- if (WARN_ON(mc_dev->irqs))
+- return -EINVAL;
+-
+- irq_count = mc_dev->obj_desc.irq_count;
+- if (WARN_ON(irq_count == 0))
+- return -EINVAL;
+-
+- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+- mc_bus = to_fsl_mc_bus(mc_dev);
+- else
+- mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+-
+- if (WARN_ON(!mc_bus->irq_resources))
+- return -EINVAL;
+-
+- res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+- if (res_pool->free_count < irq_count) {
+- dev_err(&mc_dev->dev,
+- "Not able to allocate %u irqs for device\n", irq_count);
+- return -ENOSPC;
+- }
+-
+- irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
+- GFP_KERNEL);
+- if (!irqs)
+- return -ENOMEM;
+-
+- for (i = 0; i < irq_count; i++) {
+- struct fsl_mc_resource *resource;
+-
+- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+- &resource);
+- if (error < 0)
+- goto error_resource_alloc;
+-
+- irqs[i] = to_fsl_mc_irq(resource);
+- res_allocated_count++;
+-
+- WARN_ON(irqs[i]->mc_dev);
+- irqs[i]->mc_dev = mc_dev;
+- irqs[i]->dev_irq_index = i;
+- }
+-
+- mc_dev->irqs = irqs;
+- return 0;
+-
+-error_resource_alloc:
+- for (i = 0; i < res_allocated_count; i++) {
+- irqs[i]->mc_dev = NULL;
+- fsl_mc_resource_free(&irqs[i]->resource);
+- }
+-
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+-
+-/*
+- * It frees the IRQs that were allocated for a MC object device, by
+- * returning them to the corresponding interrupt pool.
+- */
+-void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+-{
+- int i;
+- int irq_count;
+- struct fsl_mc_bus *mc_bus;
+- struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+-
+- if (WARN_ON(!irqs))
+- return;
+-
+- irq_count = mc_dev->obj_desc.irq_count;
+-
+- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+- mc_bus = to_fsl_mc_bus(mc_dev);
+- else
+- mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+-
+- if (WARN_ON(!mc_bus->irq_resources))
+- return;
+-
+- for (i = 0; i < irq_count; i++) {
+- WARN_ON(!irqs[i]->mc_dev);
+- irqs[i]->mc_dev = NULL;
+- fsl_mc_resource_free(&irqs[i]->resource);
+- }
+-
+- mc_dev->irqs = NULL;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+-
+-void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+-{
+- int pool_type;
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+-
+- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+- struct fsl_mc_resource_pool *res_pool =
+- &mc_bus->resource_pools[pool_type];
+-
+- res_pool->type = pool_type;
+- res_pool->max_count = 0;
+- res_pool->free_count = 0;
+- res_pool->mc_bus = mc_bus;
+- INIT_LIST_HEAD(&res_pool->free_list);
+- mutex_init(&res_pool->mutex);
+- }
+-}
+-
+-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+- enum fsl_mc_pool_type pool_type)
+-{
+- struct fsl_mc_resource *resource;
+- struct fsl_mc_resource *next;
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- struct fsl_mc_resource_pool *res_pool =
+- &mc_bus->resource_pools[pool_type];
+- int free_count = 0;
+-
+- WARN_ON(res_pool->type != pool_type);
+- WARN_ON(res_pool->free_count != res_pool->max_count);
+-
+- list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
+- free_count++;
+- WARN_ON(resource->type != res_pool->type);
+- WARN_ON(resource->parent_pool != res_pool);
+- devm_kfree(&mc_bus_dev->dev, resource);
+- }
+-
+- WARN_ON(free_count != res_pool->free_count);
+-}
+-
+-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+-{
+- int pool_type;
+-
+- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+-}
+-
+-/**
+- * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+- * being added to the system
+- */
+-static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+-{
+- enum fsl_mc_pool_type pool_type;
+- struct fsl_mc_device *mc_bus_dev;
+- struct fsl_mc_bus *mc_bus;
+- int error;
+-
+- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+- return -EINVAL;
+-
+- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+- if (WARN_ON(!dev_is_fsl_mc(&mc_bus_dev->dev)))
+- return -EINVAL;
+-
+- mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
+- if (error < 0)
+- return error;
+-
+- error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
+- if (error < 0)
+- return error;
+-
+- dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device bound to fsl_mc_allocator driver");
+- return 0;
+-}
+-
+-/**
+- * fsl_mc_allocator_remove - callback invoked when an allocatable device is
+- * being removed from the system
+- */
+-static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+-{
+- int error;
+-
+- if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+- return -EINVAL;
+-
+- if (mc_dev->resource) {
+- error = fsl_mc_resource_pool_remove_device(mc_dev);
+- if (error < 0)
+- return error;
+- }
+-
+- dev_dbg(&mc_dev->dev,
+- "Allocatable MC object device unbound from fsl_mc_allocator driver");
+- return 0;
+-}
+-
+-static const struct fsl_mc_device_id match_id_table[] = {
+- {
+- .vendor = FSL_MC_VENDOR_FREESCALE,
+- .obj_type = "dpbp",
+- },
+- {
+- .vendor = FSL_MC_VENDOR_FREESCALE,
+- .obj_type = "dpmcp",
+- },
+- {
+- .vendor = FSL_MC_VENDOR_FREESCALE,
+- .obj_type = "dpcon",
+- },
+- {.vendor = 0x0},
+-};
+-
+-static struct fsl_mc_driver fsl_mc_allocator_driver = {
+- .driver = {
+- .name = "fsl_mc_allocator",
+- .pm = NULL,
+- },
+- .match_id_table = match_id_table,
+- .probe = fsl_mc_allocator_probe,
+- .remove = fsl_mc_allocator_remove,
+-};
+-
+-int __init fsl_mc_allocator_driver_init(void)
+-{
+- return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+-}
+-
+-void fsl_mc_allocator_driver_exit(void)
+-{
+- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
+-}
--- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
-@@ -0,0 +1,224 @@
++++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
+@@ -0,0 +1,655 @@
++// SPDX-License-Identifier: GPL-2.0
+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * fsl-mc object allocator driver
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../include/mc-sys.h"
-+#include "../../include/mc-cmd.h"
+
-+#include "dpio.h"
-+#include "dpio-cmd.h"
++#include <linux/module.h>
++#include <linux/msi.h>
++#include <linux/fsl/mc.h>
+
-+/*
-+ * Data Path I/O Portal API
-+ * Contains initialization APIs and runtime control APIs for DPIO
-+ */
++#include "fsl-mc-private.h"
+
-+/**
-+ * dpio_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpio_id: DPIO unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpio_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpio_id,
-+ u16 *token)
++static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpio_cmd_open *dpio_cmd;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
-+ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
-+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
-+
-+ return 0;
++ return is_fsl_mc_bus_dpbp(mc_dev) ||
++ is_fsl_mc_bus_dpmcp(mc_dev) ||
++ is_fsl_mc_bus_dpcon(mc_dev);
+}
+
+/**
-+ * dpio_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
++ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
++ * pool of a given fsl-mc bus
+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpio_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
++ * @mc_bus: pointer to the fsl-mc bus
++ * @pool_type: pool type
++ * @mc_dev: pointer to allocatable fsl-mc device
++ */
++static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
++ *mc_bus,
++ enum fsl_mc_pool_type
++ pool_type,
++ struct fsl_mc_device
++ *mc_dev)
++{
++ struct fsl_mc_resource_pool *res_pool;
++ struct fsl_mc_resource *resource;
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ int error = -EINVAL;
++
++ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
++ goto out;
++ if (!fsl_mc_is_allocatable(mc_dev))
++ goto out;
++ if (mc_dev->resource)
++ goto out;
++
++ res_pool = &mc_bus->resource_pools[pool_type];
++ if (res_pool->type != pool_type)
++ goto out;
++ if (res_pool->mc_bus != mc_bus)
++ goto out;
++
++ mutex_lock(&res_pool->mutex);
++
++ if (res_pool->max_count < 0)
++ goto out_unlock;
++ if (res_pool->free_count < 0 ||
++ res_pool->free_count > res_pool->max_count)
++ goto out_unlock;
++
++ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
++ GFP_KERNEL);
++ if (!resource) {
++ error = -ENOMEM;
++ dev_err(&mc_bus_dev->dev,
++ "Failed to allocate memory for fsl_mc_resource\n");
++ goto out_unlock;
++ }
+
-+ return mc_send_command(mc_io, &cmd);
++ resource->type = pool_type;
++ resource->id = mc_dev->obj_desc.id;
++ resource->data = mc_dev;
++ resource->parent_pool = res_pool;
++ INIT_LIST_HEAD(&resource->node);
++ list_add_tail(&resource->node, &res_pool->free_list);
++ mc_dev->resource = resource;
++ res_pool->free_count++;
++ res_pool->max_count++;
++ error = 0;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
++out:
++ return error;
+}
+
+/**
-+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
++ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
++ * resource pool
+ *
-+ * Return: '0' on Success; Error code otherwise
++ * @mc_dev: pointer to allocatable fsl-mc device
++ *
++ * It permanently removes an allocatable fsl-mc device from the resource
++ * pool. It's an error if the device is in use.
+ */
-+int dpio_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
++ *mc_dev)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_device *mc_bus_dev;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_resource_pool *res_pool;
++ struct fsl_mc_resource *resource;
++ int error = -EINVAL;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
++ if (!fsl_mc_is_allocatable(mc_dev))
++ goto out;
+
-+ return mc_send_command(mc_io, &cmd);
++ resource = mc_dev->resource;
++ if (!resource || resource->data != mc_dev)
++ goto out;
++
++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++ mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ res_pool = resource->parent_pool;
++ if (res_pool != &mc_bus->resource_pools[resource->type])
++ goto out;
++
++ mutex_lock(&res_pool->mutex);
++
++ if (res_pool->max_count <= 0)
++ goto out_unlock;
++ if (res_pool->free_count <= 0 ||
++ res_pool->free_count > res_pool->max_count)
++ goto out_unlock;
++
++ /*
++ * If the device is currently allocated, its resource is not
++ * in the free list and thus, the device cannot be removed.
++ */
++ if (list_empty(&resource->node)) {
++ error = -EBUSY;
++ dev_err(&mc_bus_dev->dev,
++ "Device %s cannot be removed from resource pool\n",
++ dev_name(&mc_dev->dev));
++ goto out_unlock;
++ }
++
++ list_del_init(&resource->node);
++ res_pool->free_count--;
++ res_pool->max_count--;
++
++ devm_kfree(&mc_bus_dev->dev, resource);
++ mc_dev->resource = NULL;
++ error = 0;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
++out:
++ return error;
+}
+
-+/**
-+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static const char *const fsl_mc_pool_type_strings[] = {
++ [FSL_MC_POOL_DPMCP] = "dpmcp",
++ [FSL_MC_POOL_DPBP] = "dpbp",
++ [FSL_MC_POOL_DPCON] = "dpcon",
++ [FSL_MC_POOL_IRQ] = "irq",
++};
++
++static int __must_check object_type_to_pool_type(const char *object_type,
++ enum fsl_mc_pool_type
++ *pool_type)
+{
-+ struct mc_command cmd = { 0 };
++ unsigned int i;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
++ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
++ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
++ *pool_type = i;
++ return 0;
++ }
++ }
+
-+ return mc_send_command(mc_io, &cmd);
++ return -EINVAL;
+}
+
-+/**
-+ * dpio_get_attributes() - Retrieve DPIO attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPIO object
-+ * @attr: Returned object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpio_attr *attr)
++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
++ enum fsl_mc_pool_type pool_type,
++ struct fsl_mc_resource **new_resource)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpio_rsp_get_attr *dpio_rsp;
-+ int err;
++ struct fsl_mc_resource_pool *res_pool;
++ struct fsl_mc_resource *resource;
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ int error = -EINVAL;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
++ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
++ FSL_MC_NUM_POOL_TYPES);
+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ *new_resource = NULL;
++ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
++ goto out;
+
-+ /* retrieve response parameters */
-+ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
-+ attr->id = le32_to_cpu(dpio_rsp->id);
-+ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
-+ attr->num_priorities = dpio_rsp->num_priorities;
-+ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
-+ attr->qbman_portal_ce_offset =
-+ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
-+ attr->qbman_portal_ci_offset =
-+ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
-+ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
++ res_pool = &mc_bus->resource_pools[pool_type];
++ if (res_pool->mc_bus != mc_bus)
++ goto out;
+
-+ return 0;
++ mutex_lock(&res_pool->mutex);
++ resource = list_first_entry_or_null(&res_pool->free_list,
++ struct fsl_mc_resource, node);
++
++ if (!resource) {
++ error = -ENXIO;
++ dev_err(&mc_bus_dev->dev,
++ "No more resources of type %s left\n",
++ fsl_mc_pool_type_strings[pool_type]);
++ goto out_unlock;
++ }
++
++ if (resource->type != pool_type)
++ goto out_unlock;
++ if (resource->parent_pool != res_pool)
++ goto out_unlock;
++ if (res_pool->free_count <= 0 ||
++ res_pool->free_count > res_pool->max_count)
++ goto out_unlock;
++
++ list_del_init(&resource->node);
++
++ res_pool->free_count--;
++ error = 0;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
++ *new_resource = resource;
++out:
++ return error;
+}
++EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+
-+/**
-+ * dpio_get_api_version - Get Data Path I/O API version
-+ * @mc_io: Pointer to MC portal's DPIO object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @major_ver: Major version of DPIO API
-+ * @minor_ver: Minor version of DPIO API
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpio_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
++void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+{
-+ struct mc_command cmd = { 0 };
-+ int err;
++ struct fsl_mc_resource_pool *res_pool;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
++ res_pool = resource->parent_pool;
++ if (resource->type != res_pool->type)
++ return;
+
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ mutex_lock(&res_pool->mutex);
++ if (res_pool->free_count < 0 ||
++ res_pool->free_count >= res_pool->max_count)
++ goto out_unlock;
+
-+ /* retrieve response parameters */
-+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
++ if (!list_empty(&resource->node))
++ goto out_unlock;
+
-+ return 0;
++ list_add_tail(&resource->node, &res_pool->free_list);
++ res_pool->free_count++;
++out_unlock:
++ mutex_unlock(&res_pool->mutex);
+}
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
-@@ -0,0 +1,109 @@
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
++EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
++
++/**
++ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
++ * pool type from a given fsl-mc bus instance
+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * @mc_dev: fsl-mc device which is used in conjunction with the
++ * allocated object
++ * @pool_type: pool type
++ * @new_mc_dev: pointer to area where the pointer to the allocated device
++ * is to be returned
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * Allocatable objects are always used in conjunction with some functional
++ * device. This function allocates an object of the specified type from
++ * the DPRC containing the functional device.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
++ * portals are allocated using fsl_mc_portal_allocate(), instead of
++ * this function.
+ */
-+#ifndef __FSL_DPIO_H
-+#define __FSL_DPIO_H
++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
++ enum fsl_mc_pool_type pool_type,
++ struct fsl_mc_device **new_mc_adev)
++{
++ struct fsl_mc_device *mc_bus_dev;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_device *mc_adev;
++ int error = -EINVAL;
++ struct fsl_mc_resource *resource = NULL;
+
-+struct fsl_mc_io;
++ *new_mc_adev = NULL;
++ if (mc_dev->flags & FSL_MC_IS_DPRC)
++ goto error;
+
-+int dpio_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpio_id,
-+ u16 *token);
++ if (!dev_is_fsl_mc(mc_dev->dev.parent))
++ goto error;
+
-+int dpio_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ if (pool_type == FSL_MC_POOL_DPMCP)
++ goto error;
+
-+/**
-+ * enum dpio_channel_mode - DPIO notification channel mode
-+ * @DPIO_NO_CHANNEL: No support for notification channel
-+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
-+ * dedicated channel in the DPIO; user should point the queue's
-+ * destination in the relevant interface to this DPIO
-+ */
-+enum dpio_channel_mode {
-+ DPIO_NO_CHANNEL = 0,
-+ DPIO_LOCAL_CHANNEL = 1,
-+};
++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++ mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
++ if (error < 0)
++ goto error;
++
++ mc_adev = resource->data;
++ if (!mc_adev)
++ goto error;
++
++ *new_mc_adev = mc_adev;
++ return 0;
++error:
++ if (resource)
++ fsl_mc_resource_free(resource);
++
++ return error;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+
+/**
-+ * struct dpio_cfg - Structure representing DPIO configuration
-+ * @channel_mode: Notification channel mode
-+ * @num_priorities: Number of priorities for the notification channel (1-8);
-+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * fsl_mc_object_free - Returns an fsl-mc object to the resource
++ * pool where it came from.
++ * @mc_adev: Pointer to the fsl-mc device
+ */
-+struct dpio_cfg {
-+ enum dpio_channel_mode channel_mode;
-+ u8 num_priorities;
-+};
++void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
++{
++ struct fsl_mc_resource *resource;
+
-+int dpio_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ resource = mc_adev->resource;
++ if (resource->type == FSL_MC_POOL_DPMCP)
++ return;
++ if (resource->data != mc_adev)
++ return;
+
-+int dpio_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ fsl_mc_resource_free(resource);
++}
++EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
-+/**
-+ * struct dpio_attr - Structure representing DPIO attributes
-+ * @id: DPIO object ID
-+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
-+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
-+ * @qbman_portal_id: Software portal ID
-+ * @channel_mode: Notification channel mode
-+ * @num_priorities: Number of priorities for the notification channel (1-8);
-+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
-+ * @qbman_version: QBMAN version
++/*
++ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
++ * ID. A block of IRQs is pre-allocated and maintained in a pool
++ * from which devices can allocate them when needed.
+ */
-+struct dpio_attr {
-+ int id;
-+ u64 qbman_portal_ce_offset;
-+ u64 qbman_portal_ci_offset;
-+ u16 qbman_portal_id;
-+ enum dpio_channel_mode channel_mode;
-+ u8 num_priorities;
-+ u32 qbman_version;
-+};
+
-+int dpio_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpio_attr *attr);
++/*
++ * Initialize the interrupt pool associated with an fsl-mc bus.
++ * It allocates a block of IRQs from the GIC-ITS.
++ */
++int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
++ unsigned int irq_count)
++{
++ unsigned int i;
++ struct msi_desc *msi_desc;
++ struct fsl_mc_device_irq *irq_resources;
++ struct fsl_mc_device_irq *mc_dev_irq;
++ int error;
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
-+int dpio_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
++ if (irq_count == 0 ||
++ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
++ return -EINVAL;
++
++ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
++ if (error < 0)
++ return error;
++
++ irq_resources = devm_kzalloc(&mc_bus_dev->dev,
++ sizeof(*irq_resources) * irq_count,
++ GFP_KERNEL);
++ if (!irq_resources) {
++ error = -ENOMEM;
++ goto cleanup_msi_irqs;
++ }
++
++ for (i = 0; i < irq_count; i++) {
++ mc_dev_irq = &irq_resources[i];
++
++ /*
++ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
++ * by the fsl_mc_msi_write_msg() callback
++ */
++ mc_dev_irq->resource.type = res_pool->type;
++ mc_dev_irq->resource.data = mc_dev_irq;
++ mc_dev_irq->resource.parent_pool = res_pool;
++ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
++ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
++ }
++
++ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
++ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
++ mc_dev_irq->msi_desc = msi_desc;
++ mc_dev_irq->resource.id = msi_desc->irq;
++ }
++
++ res_pool->max_count = irq_count;
++ res_pool->free_count = irq_count;
++ mc_bus->irq_resources = irq_resources;
++ return 0;
+
-+#endif /* __FSL_DPIO_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
-@@ -0,0 +1,1049 @@
-+/*
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++cleanup_msi_irqs:
++ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
++ return error;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
++
++/**
++ * Teardown the interrupt pool associated with an fsl-mc bus.
++ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
++{
++ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
-+#include <asm/cacheflush.h>
-+#include <linux/io.h>
-+#include <linux/slab.h>
-+#include "../../include/dpaa2-global.h"
++ if (!mc_bus->irq_resources)
++ return;
+
-+#include "qbman-portal.h"
++ if (res_pool->max_count == 0)
++ return;
+
-+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
-+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
++ if (res_pool->free_count != res_pool->max_count)
++ return;
+
-+#define QMAN_REV_4000 0x04000000
-+#define QMAN_REV_4100 0x04010000
-+#define QMAN_REV_4101 0x04010001
-+#define QMAN_REV_MASK 0xffff0000
++ INIT_LIST_HEAD(&res_pool->free_list);
++ res_pool->max_count = 0;
++ res_pool->free_count = 0;
++ mc_bus->irq_resources = NULL;
++ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
++}
++EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
-+/* All QBMan command and result structures use this "valid bit" encoding */
-+#define QB_VALID_BIT ((u32)0x80)
++/**
++ * Allocate the IRQs required by a given fsl-mc device.
++ */
++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
++{
++ int i;
++ int irq_count;
++ int res_allocated_count = 0;
++ int error = -EINVAL;
++ struct fsl_mc_device_irq **irqs = NULL;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_resource_pool *res_pool;
++
++ if (mc_dev->irqs)
++ return -EINVAL;
+
-+/* QBMan portal management command codes */
-+#define QBMAN_MC_ACQUIRE 0x30
-+#define QBMAN_WQCHAN_CONFIGURE 0x46
++ irq_count = mc_dev->obj_desc.irq_count;
++ if (irq_count == 0)
++ return -EINVAL;
+
-+/* CINH register offsets */
-+#define QBMAN_CINH_SWP_EQAR 0x8c0
-+#define QBMAN_CINH_SWP_DQPI 0xa00
-+#define QBMAN_CINH_SWP_DCAP 0xac0
-+#define QBMAN_CINH_SWP_SDQCR 0xb00
-+#define QBMAN_CINH_SWP_RAR 0xcc0
-+#define QBMAN_CINH_SWP_ISR 0xe00
-+#define QBMAN_CINH_SWP_IER 0xe40
-+#define QBMAN_CINH_SWP_ISDR 0xe80
-+#define QBMAN_CINH_SWP_IIR 0xec0
++ if (is_fsl_mc_bus_dprc(mc_dev))
++ mc_bus = to_fsl_mc_bus(mc_dev);
++ else
++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
-+/* CENA register offsets */
-+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
-+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
-+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
-+#define QBMAN_CENA_SWP_CR 0x600
-+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
-+#define QBMAN_CENA_SWP_VDQCR 0x780
++ if (!mc_bus->irq_resources)
++ return -EINVAL;
+
-+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
-+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
++ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
++ if (res_pool->free_count < irq_count) {
++ dev_err(&mc_dev->dev,
++ "Not able to allocate %u irqs for device\n", irq_count);
++ return -ENOSPC;
++ }
+
-+/* Define token used to determine if response written to memory is valid */
-+#define QMAN_DQ_TOKEN_VALID 1
++ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
++ GFP_KERNEL);
++ if (!irqs)
++ return -ENOMEM;
+
-+/* SDQCR attribute codes */
-+#define QB_SDQCR_FC_SHIFT 29
-+#define QB_SDQCR_FC_MASK 0x1
-+#define QB_SDQCR_DCT_SHIFT 24
-+#define QB_SDQCR_DCT_MASK 0x3
-+#define QB_SDQCR_TOK_SHIFT 16
-+#define QB_SDQCR_TOK_MASK 0xff
-+#define QB_SDQCR_SRC_SHIFT 0
-+#define QB_SDQCR_SRC_MASK 0xffff
++ for (i = 0; i < irq_count; i++) {
++ struct fsl_mc_resource *resource;
+
-+/* opaque token for static dequeues */
-+#define QMAN_SDQCR_TOKEN 0xbb
++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
++ &resource);
++ if (error < 0)
++ goto error_resource_alloc;
+
-+enum qbman_sdqcr_dct {
-+ qbman_sdqcr_dct_null = 0,
-+ qbman_sdqcr_dct_prio_ics,
-+ qbman_sdqcr_dct_active_ics,
-+ qbman_sdqcr_dct_active
-+};
++ irqs[i] = to_fsl_mc_irq(resource);
++ res_allocated_count++;
+
-+enum qbman_sdqcr_fc {
-+ qbman_sdqcr_fc_one = 0,
-+ qbman_sdqcr_fc_up_to_3 = 1
-+};
++ irqs[i]->mc_dev = mc_dev;
++ irqs[i]->dev_irq_index = i;
++ }
+
-+#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
-+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
-+static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
-+{
-+ dcivac(p->addr_cena + offset);
-+ prefetch(p->addr_cena + offset);
-+}
++ mc_dev->irqs = irqs;
++ return 0;
+
-+/* Portal Access */
++error_resource_alloc:
++ for (i = 0; i < res_allocated_count; i++) {
++ irqs[i]->mc_dev = NULL;
++ fsl_mc_resource_free(&irqs[i]->resource);
++ }
+
-+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
-+{
-+ return readl_relaxed(p->addr_cinh + offset);
++ return error;
+}
++EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
-+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
-+ u32 value)
++/*
++ * Frees the IRQs that were allocated for an fsl-mc device.
++ */
++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
-+ writel_relaxed(value, p->addr_cinh + offset);
-+}
++ int i;
++ int irq_count;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
-+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
-+{
-+ return p->addr_cena + offset;
-+}
++ if (!irqs)
++ return;
+
-+#define QBMAN_CINH_SWP_CFG 0xd00
++ irq_count = mc_dev->obj_desc.irq_count;
+
-+#define SWP_CFG_DQRR_MF_SHIFT 20
-+#define SWP_CFG_EST_SHIFT 16
-+#define SWP_CFG_WN_SHIFT 14
-+#define SWP_CFG_RPM_SHIFT 12
-+#define SWP_CFG_DCM_SHIFT 10
-+#define SWP_CFG_EPM_SHIFT 8
-+#define SWP_CFG_SD_SHIFT 5
-+#define SWP_CFG_SP_SHIFT 4
-+#define SWP_CFG_SE_SHIFT 3
-+#define SWP_CFG_DP_SHIFT 2
-+#define SWP_CFG_DE_SHIFT 1
-+#define SWP_CFG_EP_SHIFT 0
++ if (is_fsl_mc_bus_dprc(mc_dev))
++ mc_bus = to_fsl_mc_bus(mc_dev);
++ else
++ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
-+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
-+ u8 epm, int sd, int sp, int se,
-+ int dp, int de, int ep)
-+{
-+ return cpu_to_le32 (max_fill << SWP_CFG_DQRR_MF_SHIFT |
-+ est << SWP_CFG_EST_SHIFT |
-+ wn << SWP_CFG_WN_SHIFT |
-+ rpm << SWP_CFG_RPM_SHIFT |
-+ dcm << SWP_CFG_DCM_SHIFT |
-+ epm << SWP_CFG_EPM_SHIFT |
-+ sd << SWP_CFG_SD_SHIFT |
-+ sp << SWP_CFG_SP_SHIFT |
-+ se << SWP_CFG_SE_SHIFT |
-+ dp << SWP_CFG_DP_SHIFT |
-+ de << SWP_CFG_DE_SHIFT |
-+ ep << SWP_CFG_EP_SHIFT);
++ if (!mc_bus->irq_resources)
++ return;
++
++ for (i = 0; i < irq_count; i++) {
++ irqs[i]->mc_dev = NULL;
++ fsl_mc_resource_free(&irqs[i]->resource);
++ }
++
++ mc_dev->irqs = NULL;
+}
++EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
-+/**
-+ * qbman_swp_init() - Create a functional object representing the given
-+ * QBMan portal descriptor.
-+ * @d: the given qbman swp descriptor
-+ *
-+ * Return qbman_swp portal for success, NULL if the object cannot
-+ * be created.
-+ */
-+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
-+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
-+ u32 reg;
-+
-+ if (!p)
-+ return NULL;
-+ p->desc = d;
-+ p->mc.valid_bit = QB_VALID_BIT;
-+ p->sdq = 0;
-+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
-+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
-+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++ int pool_type;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
-+ atomic_set(&p->vdq.available, 1);
-+ p->vdq.valid_bit = QB_VALID_BIT;
-+ p->dqrr.next_idx = 0;
-+ p->dqrr.valid_bit = QB_VALID_BIT;
++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[pool_type];
+
-+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
-+ p->dqrr.dqrr_size = 4;
-+ p->dqrr.reset_bug = 1;
-+ } else {
-+ p->dqrr.dqrr_size = 8;
-+ p->dqrr.reset_bug = 0;
++ res_pool->type = pool_type;
++ res_pool->max_count = 0;
++ res_pool->free_count = 0;
++ res_pool->mc_bus = mc_bus;
++ INIT_LIST_HEAD(&res_pool->free_list);
++ mutex_init(&res_pool->mutex);
+ }
++}
++EXPORT_SYMBOL_GPL(fsl_mc_init_all_resource_pools);
+
-+ p->addr_cena = d->cena_bar;
-+ p->addr_cinh = d->cinh_bar;
-+
-+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
-+ 0, /* Writes cacheable */
-+ 0, /* EQCR_CI stashing threshold */
-+ 3, /* RPM: Valid bit mode, RCR in array mode */
-+ 2, /* DCM: Discrete consumption ack mode */
-+ 3, /* EPM: Valid bit mode, EQCR in array mode */
-+ 0, /* mem stashing drop enable == FALSE */
-+ 1, /* mem stashing priority == TRUE */
-+ 0, /* mem stashing enable == FALSE */
-+ 1, /* dequeue stashing priority == TRUE */
-+ 0, /* dequeue stashing enable == FALSE */
-+ 0); /* EQCR_CI stashing priority == FALSE */
++static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
++ enum fsl_mc_pool_type pool_type)
++{
++ struct fsl_mc_resource *resource;
++ struct fsl_mc_resource *next;
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ struct fsl_mc_resource_pool *res_pool =
++ &mc_bus->resource_pools[pool_type];
++ int free_count = 0;
+
-+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
-+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
-+ if (!reg) {
-+ pr_err("qbman: the portal is not enabled!\n");
-+ return NULL;
++ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
++ free_count++;
++ devm_kfree(&mc_bus_dev->dev, resource);
+ }
++}
+
-+ /*
-+ * SDQCR needs to be initialized to 0 when no channels are
-+ * being dequeued from or else the QMan HW will indicate an
-+ * error. The values that were calculated above will be
-+ * applied when dequeues from a specific channel are enabled.
-+ */
-+ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
-+ return p;
++void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
++{
++ int pool_type;
++
++ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
++ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
++EXPORT_SYMBOL_GPL(fsl_mc_cleanup_all_resource_pools);
+
+/**
-+ * qbman_swp_finish() - Create and destroy a functional object representing
-+ * the given QBMan portal descriptor.
-+ * @p: the qbman_swp object to be destroyed
++ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
++ * being added to the system
+ */
-+void qbman_swp_finish(struct qbman_swp *p)
++static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+{
-+ kfree(p);
++ enum fsl_mc_pool_type pool_type;
++ struct fsl_mc_device *mc_bus_dev;
++ struct fsl_mc_bus *mc_bus;
++ int error;
++
++ if (!fsl_mc_is_allocatable(mc_dev))
++ return -EINVAL;
++
++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
++ return -EINVAL;
++
++ mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
++ if (error < 0)
++ return error;
++
++ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
++ if (error < 0)
++ return error;
++
++ dev_dbg(&mc_dev->dev,
++ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
++ return 0;
+}
+
+/**
-+ * qbman_swp_interrupt_read_status()
-+ * @p: the given software portal
-+ *
-+ * Return the value in the SWP_ISR register.
++ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
++ * being removed from the system
+ */
-+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
++static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+{
-+ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
++ int error;
++
++ if (!fsl_mc_is_allocatable(mc_dev))
++ return -EINVAL;
++
++ if (mc_dev->resource) {
++ error = fsl_mc_resource_pool_remove_device(mc_dev);
++ if (error < 0)
++ return error;
++ }
++
++ dev_dbg(&mc_dev->dev,
++ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
++ return 0;
+}
+
-+/**
-+ * qbman_swp_interrupt_clear_status()
-+ * @p: the given software portal
-+ * @mask: The mask to clear in SWP_ISR register
-+ */
-+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
++static const struct fsl_mc_device_id match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpbp",
++ },
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpmcp",
++ },
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpcon",
++ },
++ {.vendor = 0x0},
++};
++
++static struct fsl_mc_driver fsl_mc_allocator_driver = {
++ .driver = {
++ .name = "fsl_mc_allocator",
++ .pm = NULL,
++ },
++ .match_id_table = match_id_table,
++ .probe = fsl_mc_allocator_probe,
++ .remove = fsl_mc_allocator_remove,
++};
++
++int __init fsl_mc_allocator_driver_init(void)
+{
-+ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
++ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+}
+
-+/**
-+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
-+ * @p: the given software portal
++void fsl_mc_allocator_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
++}
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
++++ /dev/null
+@@ -1,920 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus driver
+- *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/ioport.h>
+-#include <linux/slab.h>
+-#include <linux/limits.h>
+-#include <linux/bitops.h>
+-#include <linux/msi.h>
+-#include <linux/dma-mapping.h>
+-#include "../include/mc-bus.h"
+-#include "../include/dpmng.h"
+-#include "../include/mc-sys.h"
+-
+-#include "fsl-mc-private.h"
+-#include "dprc-cmd.h"
+-
+-static struct kmem_cache *mc_dev_cache;
+-
+-/**
+- * Default DMA mask for devices on a fsl-mc bus
+- */
+-#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+-
+-/**
+- * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+- * @root_mc_bus_dev: MC object device representing the root DPRC
+- * @num_translation_ranges: number of entries in addr_translation_ranges
+- * @translation_ranges: array of bus to system address translation ranges
+- */
+-struct fsl_mc {
+- struct fsl_mc_device *root_mc_bus_dev;
+- u8 num_translation_ranges;
+- struct fsl_mc_addr_translation_range *translation_ranges;
+-};
+-
+-/**
+- * struct fsl_mc_addr_translation_range - bus to system address translation
+- * range
+- * @mc_region_type: Type of MC region for the range being translated
+- * @start_mc_offset: Start MC offset of the range being translated
+- * @end_mc_offset: MC offset of the first byte after the range (last MC
+- * offset of the range is end_mc_offset - 1)
+- * @start_phys_addr: system physical address corresponding to start_mc_addr
+- */
+-struct fsl_mc_addr_translation_range {
+- enum dprc_region_type mc_region_type;
+- u64 start_mc_offset;
+- u64 end_mc_offset;
+- phys_addr_t start_phys_addr;
+-};
+-
+-/**
+- * fsl_mc_bus_match - device to driver matching callback
+- * @dev: the MC object device structure to match against
+- * @drv: the device driver to search for matching MC object device id
+- * structures
+- *
+- * Returns 1 on success, 0 otherwise.
+- */
+-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+-{
+- const struct fsl_mc_device_id *id;
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+- bool found = false;
+-
+- if (WARN_ON(!fsl_mc_bus_exists()))
+- goto out;
+-
+- if (!mc_drv->match_id_table)
+- goto out;
+-
+- /*
+- * If the object is not 'plugged' don't match.
+- * Only exception is the root DPRC, which is a special case.
+- */
+- if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 &&
+- !fsl_mc_is_root_dprc(&mc_dev->dev))
+- goto out;
+-
+- /*
+- * Traverse the match_id table of the given driver, trying to find
+- * a matching for the given MC object device.
+- */
+- for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+- if (id->vendor == mc_dev->obj_desc.vendor &&
+- strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+- found = true;
+-
+- break;
+- }
+- }
+-
+-out:
+- dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+- return found;
+-}
+-
+-/**
+- * fsl_mc_bus_uevent - callback invoked when a device is added
+- */
+-static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+-{
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+-
+- if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+- mc_dev->obj_desc.vendor,
+- mc_dev->obj_desc.type))
+- return -ENOMEM;
+-
+- return 0;
+-}
+-
+-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+- char *buf)
+-{
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+-
+- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+- mc_dev->obj_desc.type);
+-}
+-static DEVICE_ATTR_RO(modalias);
+-
+-static struct attribute *fsl_mc_dev_attrs[] = {
+- &dev_attr_modalias.attr,
+- NULL,
+-};
+-
+-ATTRIBUTE_GROUPS(fsl_mc_dev);
+-
+-struct bus_type fsl_mc_bus_type = {
+- .name = "fsl-mc",
+- .match = fsl_mc_bus_match,
+- .uevent = fsl_mc_bus_uevent,
+- .dev_groups = fsl_mc_dev_groups,
+-};
+-EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+-
+-static atomic_t root_dprc_count = ATOMIC_INIT(0);
+-
+-static int fsl_mc_driver_probe(struct device *dev)
+-{
+- struct fsl_mc_driver *mc_drv;
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+- int error;
+-
+- if (WARN_ON(!dev->driver))
+- return -EINVAL;
+-
+- mc_drv = to_fsl_mc_driver(dev->driver);
+- if (WARN_ON(!mc_drv->probe))
+- return -EINVAL;
+-
+- error = mc_drv->probe(mc_dev);
+- if (error < 0) {
+- dev_err(dev, "MC object device probe callback failed: %d\n",
+- error);
+- return error;
+- }
+-
+- return 0;
+-}
+-
+-static int fsl_mc_driver_remove(struct device *dev)
+-{
+- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+- int error;
+-
+- if (WARN_ON(!dev->driver))
+- return -EINVAL;
+-
+- error = mc_drv->remove(mc_dev);
+- if (error < 0) {
+- dev_err(dev,
+- "MC object device remove callback failed: %d\n",
+- error);
+- return error;
+- }
+-
+- return 0;
+-}
+-
+-static void fsl_mc_driver_shutdown(struct device *dev)
+-{
+- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+-
+- mc_drv->shutdown(mc_dev);
+-}
+-
+-/**
+- * __fsl_mc_driver_register - registers a child device driver with the
+- * MC bus
+- *
+- * This function is implicitly invoked from the registration function of
+- * fsl_mc device drivers, which is generated by the
+- * module_fsl_mc_driver() macro.
+- */
+-int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+- struct module *owner)
+-{
+- int error;
+-
+- mc_driver->driver.owner = owner;
+- mc_driver->driver.bus = &fsl_mc_bus_type;
+-
+- if (mc_driver->probe)
+- mc_driver->driver.probe = fsl_mc_driver_probe;
+-
+- if (mc_driver->remove)
+- mc_driver->driver.remove = fsl_mc_driver_remove;
+-
+- if (mc_driver->shutdown)
+- mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
+-
+- error = driver_register(&mc_driver->driver);
+- if (error < 0) {
+- pr_err("driver_register() failed for %s: %d\n",
+- mc_driver->driver.name, error);
+- return error;
+- }
+-
+- pr_info("MC object device driver %s registered\n",
+- mc_driver->driver.name);
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
+-
+-/**
+- * fsl_mc_driver_unregister - unregisters a device driver from the
+- * MC bus
+- */
+-void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+-{
+- driver_unregister(&mc_driver->driver);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+-
+-/**
+- * fsl_mc_bus_exists - check if a root dprc exists
+- */
+-bool fsl_mc_bus_exists(void)
+-{
+- return atomic_read(&root_dprc_count) > 0;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
+-
+-/**
+- * fsl_mc_get_root_dprc - function to traverse to the root dprc
+- */
+-void fsl_mc_get_root_dprc(struct device *dev,
+- struct device **root_dprc_dev)
+-{
+- if (WARN_ON(!dev)) {
+- *root_dprc_dev = NULL;
+- } else if (WARN_ON(!dev_is_fsl_mc(dev))) {
+- *root_dprc_dev = NULL;
+- } else {
+- *root_dprc_dev = dev;
+- while (dev_is_fsl_mc((*root_dprc_dev)->parent))
+- *root_dprc_dev = (*root_dprc_dev)->parent;
+- }
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
+-
+-static int get_dprc_attr(struct fsl_mc_io *mc_io,
+- int container_id, struct dprc_attributes *attr)
+-{
+- u16 dprc_handle;
+- int error;
+-
+- error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+- if (error < 0) {
+- dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+- return error;
+- }
+-
+- memset(attr, 0, sizeof(struct dprc_attributes));
+- error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+- if (error < 0) {
+- dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+- error);
+- goto common_cleanup;
+- }
+-
+- error = 0;
+-
+-common_cleanup:
+- (void)dprc_close(mc_io, 0, dprc_handle);
+- return error;
+-}
+-
+-static int get_dprc_icid(struct fsl_mc_io *mc_io,
+- int container_id, u16 *icid)
+-{
+- struct dprc_attributes attr;
+- int error;
+-
+- error = get_dprc_attr(mc_io, container_id, &attr);
+- if (error == 0)
+- *icid = attr.icid;
+-
+- return error;
+-}
+-
+-static int get_dprc_version(struct fsl_mc_io *mc_io,
+- int container_id, u16 *major, u16 *minor)
+-{
+- struct dprc_attributes attr;
+- int error;
+-
+- error = get_dprc_attr(mc_io, container_id, &attr);
+- if (error == 0) {
+- *major = attr.version.major;
+- *minor = attr.version.minor;
+- }
+-
+- return error;
+-}
+-
+-static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+- enum dprc_region_type mc_region_type,
+- u64 mc_offset, phys_addr_t *phys_addr)
+-{
+- int i;
+- struct device *root_dprc_dev;
+- struct fsl_mc *mc;
+-
+- fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
+- if (WARN_ON(!root_dprc_dev))
+- return -EINVAL;
+- mc = dev_get_drvdata(root_dprc_dev->parent);
+-
+- if (mc->num_translation_ranges == 0) {
+- /*
+- * Do identity mapping:
+- */
+- *phys_addr = mc_offset;
+- return 0;
+- }
+-
+- for (i = 0; i < mc->num_translation_ranges; i++) {
+- struct fsl_mc_addr_translation_range *range =
+- &mc->translation_ranges[i];
+-
+- if (mc_region_type == range->mc_region_type &&
+- mc_offset >= range->start_mc_offset &&
+- mc_offset < range->end_mc_offset) {
+- *phys_addr = range->start_phys_addr +
+- (mc_offset - range->start_mc_offset);
+- return 0;
+- }
+- }
+-
+- return -EFAULT;
+-}
+-
+-static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
+- struct fsl_mc_device *mc_bus_dev)
+-{
+- int i;
+- int error;
+- struct resource *regions;
+- struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc;
+- struct device *parent_dev = mc_dev->dev.parent;
+- enum dprc_region_type mc_region_type;
+-
+- if (strcmp(obj_desc->type, "dprc") == 0 ||
+- strcmp(obj_desc->type, "dpmcp") == 0) {
+- mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
+- } else if (strcmp(obj_desc->type, "dpio") == 0) {
+- mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
+- } else {
+- /*
+- * This function should not have been called for this MC object
+- * type, as this object type is not supposed to have MMIO
+- * regions
+- */
+- WARN_ON(true);
+- return -EINVAL;
+- }
+-
+- regions = kmalloc_array(obj_desc->region_count,
+- sizeof(regions[0]), GFP_KERNEL);
+- if (!regions)
+- return -ENOMEM;
+-
+- for (i = 0; i < obj_desc->region_count; i++) {
+- struct dprc_region_desc region_desc;
+-
+- error = dprc_get_obj_region(mc_bus_dev->mc_io,
+- 0,
+- mc_bus_dev->mc_handle,
+- obj_desc->type,
+- obj_desc->id, i, ®ion_desc);
+- if (error < 0) {
+- dev_err(parent_dev,
+- "dprc_get_obj_region() failed: %d\n", error);
+- goto error_cleanup_regions;
+- }
+-
+- WARN_ON(region_desc.size == 0);
+- error = translate_mc_addr(mc_dev, mc_region_type,
+- region_desc.base_offset,
+- ®ions[i].start);
+- if (error < 0) {
+- dev_err(parent_dev,
+- "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+- region_desc.base_offset,
+- obj_desc->type, obj_desc->id, i);
+- goto error_cleanup_regions;
+- }
+-
+- regions[i].end = regions[i].start + region_desc.size - 1;
+- regions[i].name = "fsl-mc object MMIO region";
+- regions[i].flags = IORESOURCE_IO;
+- if (region_desc.flags & DPRC_REGION_CACHEABLE)
+- regions[i].flags |= IORESOURCE_CACHEABLE;
+- }
+-
+- mc_dev->regions = regions;
+- return 0;
+-
+-error_cleanup_regions:
+- kfree(regions);
+- return error;
+-}
+-
+-/**
+- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+- */
+-bool fsl_mc_is_root_dprc(struct device *dev)
+-{
+- struct device *root_dprc_dev;
+-
+- fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+- if (!root_dprc_dev)
+- return false;
+- return dev == root_dprc_dev;
+-}
+-
+-/**
+- * Add a newly discovered MC object device to be visible in Linux
+- */
+-int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+- struct fsl_mc_io *mc_io,
+- struct device *parent_dev,
+- struct fsl_mc_device **new_mc_dev)
+-{
+- int error;
+- struct fsl_mc_device *mc_dev = NULL;
+- struct fsl_mc_bus *mc_bus = NULL;
+- struct fsl_mc_device *parent_mc_dev;
+-
+- if (dev_is_fsl_mc(parent_dev))
+- parent_mc_dev = to_fsl_mc_device(parent_dev);
+- else
+- parent_mc_dev = NULL;
+-
+- if (strcmp(obj_desc->type, "dprc") == 0) {
+- /*
+- * Allocate an MC bus device object:
+- */
+- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL);
+- if (!mc_bus)
+- return -ENOMEM;
+-
+- mc_dev = &mc_bus->mc_dev;
+- } else {
+- /*
+- * Allocate a regular fsl_mc_device object:
+- */
+- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL);
+- if (!mc_dev)
+- return -ENOMEM;
+- }
+-
+- mc_dev->obj_desc = *obj_desc;
+- mc_dev->mc_io = mc_io;
+- device_initialize(&mc_dev->dev);
+- mc_dev->dev.parent = parent_dev;
+- mc_dev->dev.bus = &fsl_mc_bus_type;
+- dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+-
+- if (strcmp(obj_desc->type, "dprc") == 0) {
+- struct fsl_mc_io *mc_io2;
+-
+- mc_dev->flags |= FSL_MC_IS_DPRC;
+-
+- /*
+- * To get the DPRC's ICID, we need to open the DPRC
+- * in get_dprc_icid(). For child DPRCs, we do so using the
+- * parent DPRC's MC portal instead of the child DPRC's MC
+- * portal, in case the child DPRC is already opened with
+- * its own portal (e.g., the DPRC used by AIOP).
+- *
+- * NOTE: There cannot be more than one active open for a
+- * given MC object, using the same MC portal.
+- */
+- if (parent_mc_dev) {
+- /*
+- * device being added is a child DPRC device
+- */
+- mc_io2 = parent_mc_dev->mc_io;
+- } else {
+- /*
+- * device being added is the root DPRC device
+- */
+- if (WARN_ON(!mc_io)) {
+- error = -EINVAL;
+- goto error_cleanup_dev;
+- }
+-
+- mc_io2 = mc_io;
+-
+- atomic_inc(&root_dprc_count);
+- }
+-
+- error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+- if (error < 0)
+- goto error_cleanup_dev;
+- } else {
+- /*
+- * A non-DPRC MC object device has to be a child of another
+- * MC object (specifically a DPRC object)
+- */
+- mc_dev->icid = parent_mc_dev->icid;
+- mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+- mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+- dev_set_msi_domain(&mc_dev->dev,
+- dev_get_msi_domain(&parent_mc_dev->dev));
+- }
+-
+- /*
+- * Get MMIO regions for the device from the MC:
+- *
+- * NOTE: the root DPRC is a special case as its MMIO region is
+- * obtained from the device tree
+- */
+- if (parent_mc_dev && obj_desc->region_count != 0) {
+- error = fsl_mc_device_get_mmio_regions(mc_dev,
+- parent_mc_dev);
+- if (error < 0)
+- goto error_cleanup_dev;
+- }
+-
+- /* Objects are coherent, unless 'no shareability' flag set. */
+- if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
+-
+- /*
+- * The device-specific probe callback will get invoked by device_add()
+- */
+- error = device_add(&mc_dev->dev);
+- if (error < 0) {
+- dev_err(parent_dev,
+- "device_add() failed for device %s: %d\n",
+- dev_name(&mc_dev->dev), error);
+- goto error_cleanup_dev;
+- }
+-
+- (void)get_device(&mc_dev->dev);
+- dev_dbg(parent_dev, "Added MC object device %s\n",
+- dev_name(&mc_dev->dev));
+-
+- *new_mc_dev = mc_dev;
+- return 0;
+-
+-error_cleanup_dev:
+- kfree(mc_dev->regions);
+- if (mc_bus)
+- devm_kfree(parent_dev, mc_bus);
+- else
+- kmem_cache_free(mc_dev_cache, mc_dev);
+-
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+-
+-/**
+- * fsl_mc_device_remove - Remove a MC object device from being visible to
+- * Linux
+- *
+- * @mc_dev: Pointer to a MC object device object
+- */
+-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+-{
+- struct fsl_mc_bus *mc_bus = NULL;
+-
+- kfree(mc_dev->regions);
+-
+- /*
+- * The device-specific remove callback will get invoked by device_del()
+- */
+- device_del(&mc_dev->dev);
+- put_device(&mc_dev->dev);
+-
+- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
+- mc_bus = to_fsl_mc_bus(mc_dev);
+-
+- if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
+- if (atomic_read(&root_dprc_count) > 0)
+- atomic_dec(&root_dprc_count);
+- else
+- WARN_ON(1);
+- }
+- }
+-
+- if (mc_bus)
+- devm_kfree(mc_dev->dev.parent, mc_bus);
+- else
+- kmem_cache_free(mc_dev_cache, mc_dev);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+-
+-static int parse_mc_ranges(struct device *dev,
+- int *paddr_cells,
+- int *mc_addr_cells,
+- int *mc_size_cells,
+- const __be32 **ranges_start,
+- u8 *num_ranges)
+-{
+- const __be32 *prop;
+- int range_tuple_cell_count;
+- int ranges_len;
+- int tuple_len;
+- struct device_node *mc_node = dev->of_node;
+-
+- *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
+- if (!(*ranges_start) || !ranges_len) {
+- dev_warn(dev,
+- "missing or empty ranges property for device tree node '%s'\n",
+- mc_node->name);
+-
+- *num_ranges = 0;
+- return 0;
+- }
+-
+- *paddr_cells = of_n_addr_cells(mc_node);
+-
+- prop = of_get_property(mc_node, "#address-cells", NULL);
+- if (prop)
+- *mc_addr_cells = be32_to_cpup(prop);
+- else
+- *mc_addr_cells = *paddr_cells;
+-
+- prop = of_get_property(mc_node, "#size-cells", NULL);
+- if (prop)
+- *mc_size_cells = be32_to_cpup(prop);
+- else
+- *mc_size_cells = of_n_size_cells(mc_node);
+-
+- range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
+- *mc_size_cells;
+-
+- tuple_len = range_tuple_cell_count * sizeof(__be32);
+- if (ranges_len % tuple_len != 0) {
+- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
+- return -EINVAL;
+- }
+-
+- *num_ranges = ranges_len / tuple_len;
+- return 0;
+-}
+-
+-static int get_mc_addr_translation_ranges(struct device *dev,
+- struct fsl_mc_addr_translation_range
+- **ranges,
+- u8 *num_ranges)
+-{
+- int error;
+- int paddr_cells;
+- int mc_addr_cells;
+- int mc_size_cells;
+- int i;
+- const __be32 *ranges_start;
+- const __be32 *cell;
+-
+- error = parse_mc_ranges(dev,
+- &paddr_cells,
+- &mc_addr_cells,
+- &mc_size_cells,
+- &ranges_start,
+- num_ranges);
+- if (error < 0)
+- return error;
+-
+- if (!(*num_ranges)) {
+- /*
+- * Missing or empty ranges property ("ranges;") for the
+- * 'fsl,qoriq-mc' node. In this case, identity mapping
+- * will be used.
+- */
+- *ranges = NULL;
+- return 0;
+- }
+-
+- *ranges = devm_kcalloc(dev, *num_ranges,
+- sizeof(struct fsl_mc_addr_translation_range),
+- GFP_KERNEL);
+- if (!(*ranges))
+- return -ENOMEM;
+-
+- cell = ranges_start;
+- for (i = 0; i < *num_ranges; ++i) {
+- struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
+-
+- range->mc_region_type = of_read_number(cell, 1);
+- range->start_mc_offset = of_read_number(cell + 1,
+- mc_addr_cells - 1);
+- cell += mc_addr_cells;
+- range->start_phys_addr = of_read_number(cell, paddr_cells);
+- cell += paddr_cells;
+- range->end_mc_offset = range->start_mc_offset +
+- of_read_number(cell, mc_size_cells);
+-
+- cell += mc_size_cells;
+- }
+-
+- return 0;
+-}
+-
+-/**
+- * fsl_mc_bus_probe - callback invoked when the root MC bus is being
+- * added
+- */
+-static int fsl_mc_bus_probe(struct platform_device *pdev)
+-{
+- struct dprc_obj_desc obj_desc;
+- int error;
+- struct fsl_mc *mc;
+- struct fsl_mc_device *mc_bus_dev = NULL;
+- struct fsl_mc_io *mc_io = NULL;
+- int container_id;
+- phys_addr_t mc_portal_phys_addr;
+- u32 mc_portal_size;
+- struct mc_version mc_version;
+- struct resource res;
+-
+- dev_info(&pdev->dev, "Root MC bus device probed");
+-
+- mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+- if (!mc)
+- return -ENOMEM;
+-
+- platform_set_drvdata(pdev, mc);
+-
+- /*
+- * Get physical address of MC portal for the root DPRC:
+- */
+- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
+- if (error < 0) {
+- dev_err(&pdev->dev,
+- "of_address_to_resource() failed for %s\n",
+- pdev->dev.of_node->full_name);
+- return error;
+- }
+-
+- mc_portal_phys_addr = res.start;
+- mc_portal_size = resource_size(&res);
+- error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+- mc_portal_size, NULL,
+- FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+- if (error < 0)
+- return error;
+-
+- error = mc_get_version(mc_io, 0, &mc_version);
+- if (error != 0) {
+- dev_err(&pdev->dev,
+- "mc_get_version() failed with error %d\n", error);
+- goto error_cleanup_mc_io;
+- }
+-
+- dev_info(&pdev->dev,
+- "Freescale Management Complex Firmware version: %u.%u.%u\n",
+- mc_version.major, mc_version.minor, mc_version.revision);
+-
+- error = get_mc_addr_translation_ranges(&pdev->dev,
+- &mc->translation_ranges,
+- &mc->num_translation_ranges);
+- if (error < 0)
+- goto error_cleanup_mc_io;
+-
+- error = dpmng_get_container_id(mc_io, 0, &container_id);
+- if (error < 0) {
+- dev_err(&pdev->dev,
+- "dpmng_get_container_id() failed: %d\n", error);
+- goto error_cleanup_mc_io;
+- }
+-
+- memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+- error = get_dprc_version(mc_io, container_id,
+- &obj_desc.ver_major, &obj_desc.ver_minor);
+- if (error < 0)
+- goto error_cleanup_mc_io;
+-
+- obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+- strcpy(obj_desc.type, "dprc");
+- obj_desc.id = container_id;
+- obj_desc.irq_count = 1;
+- obj_desc.region_count = 0;
+-
+- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
+- if (error < 0)
+- goto error_cleanup_mc_io;
+-
+- mc->root_mc_bus_dev = mc_bus_dev;
+- return 0;
+-
+-error_cleanup_mc_io:
+- fsl_destroy_mc_io(mc_io);
+- return error;
+-}
+-
+-/**
+- * fsl_mc_bus_remove - callback invoked when the root MC bus is being
+- * removed
+- */
+-static int fsl_mc_bus_remove(struct platform_device *pdev)
+-{
+- struct fsl_mc *mc = platform_get_drvdata(pdev);
+-
+- if (WARN_ON(!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)))
+- return -EINVAL;
+-
+- fsl_mc_device_remove(mc->root_mc_bus_dev);
+-
+- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+- mc->root_mc_bus_dev->mc_io = NULL;
+-
+- dev_info(&pdev->dev, "Root MC bus device removed");
+- return 0;
+-}
+-
+-static const struct of_device_id fsl_mc_bus_match_table[] = {
+- {.compatible = "fsl,qoriq-mc",},
+- {},
+-};
+-
+-MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+-
+-static struct platform_driver fsl_mc_bus_driver = {
+- .driver = {
+- .name = "fsl_mc_bus",
+- .pm = NULL,
+- .of_match_table = fsl_mc_bus_match_table,
+- },
+- .probe = fsl_mc_bus_probe,
+- .remove = fsl_mc_bus_remove,
+-};
+-
+-static int __init fsl_mc_bus_driver_init(void)
+-{
+- int error;
+-
+- mc_dev_cache = kmem_cache_create("fsl_mc_device",
+- sizeof(struct fsl_mc_device), 0, 0,
+- NULL);
+- if (!mc_dev_cache) {
+- pr_err("Could not create fsl_mc_device cache\n");
+- return -ENOMEM;
+- }
+-
+- error = bus_register(&fsl_mc_bus_type);
+- if (error < 0) {
+- pr_err("fsl-mc bus type registration failed: %d\n", error);
+- goto error_cleanup_cache;
+- }
+-
+- pr_info("fsl-mc bus type registered\n");
+-
+- error = platform_driver_register(&fsl_mc_bus_driver);
+- if (error < 0) {
+- pr_err("platform_driver_register() failed: %d\n", error);
+- goto error_cleanup_bus;
+- }
+-
+- error = dprc_driver_init();
+- if (error < 0)
+- goto error_cleanup_driver;
+-
+- error = fsl_mc_allocator_driver_init();
+- if (error < 0)
+- goto error_cleanup_dprc_driver;
+-
+- error = its_fsl_mc_msi_init();
+- if (error < 0)
+- goto error_cleanup_mc_allocator;
+-
+- return 0;
+-
+-error_cleanup_mc_allocator:
+- fsl_mc_allocator_driver_exit();
+-
+-error_cleanup_dprc_driver:
+- dprc_driver_exit();
+-
+-error_cleanup_driver:
+- platform_driver_unregister(&fsl_mc_bus_driver);
+-
+-error_cleanup_bus:
+- bus_unregister(&fsl_mc_bus_type);
+-
+-error_cleanup_cache:
+- kmem_cache_destroy(mc_dev_cache);
+- return error;
+-}
+-postcore_initcall(fsl_mc_bus_driver_init);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
+@@ -0,0 +1,1151 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Freescale Management Complex (MC) bus driver
+ *
-+ * Return the value in the SWP_IER register.
-+ */
-+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
-+{
-+ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
-+}
-+
-+/**
-+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
-+ * @p: the given software portal
-+ * @mask: The mask of bits to enable in SWP_IER
-+ */
-+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
-+{
-+ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
-+}
-+
-+/**
-+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
-+ * @p: the given software portal object
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera@freescale.com>
+ *
-+ * Return the value in the SWP_IIR register.
-+ */
-+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
-+{
-+ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
-+}
-+
-+/**
-+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
-+ * @p: the given software portal object
-+ * @mask: The mask to set in SWP_IIR register
+ */
-+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
-+{
-+ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
-+}
-+
-+/*
-+ * Different management commands all use this common base layer of code to issue
-+ * commands and poll for results.
-+ */
-+
-+/*
-+ * Returns a pointer to where the caller should fill in their management command
-+ * (caller should ignore the verb byte)
-+ */
-+void *qbman_swp_mc_start(struct qbman_swp *p)
-+{
-+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
-+}
-+
-+/*
-+ * Commits merges in the caller-supplied command verb (which should not include
-+ * the valid-bit) and submits the command to hardware
-+ */
-+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
-+{
-+ u8 *v = cmd;
-+
-+ dma_wmb();
-+ *v = cmd_verb | p->mc.valid_bit;
-+ dccvac(cmd);
-+}
-+
-+/*
-+ * Checks for a completed response (returns non-NULL if only if the response
-+ * is complete).
-+ */
-+void *qbman_swp_mc_result(struct qbman_swp *p)
-+{
-+ u32 *ret, verb;
-+
-+ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
-+
-+ /* Remove the valid-bit - command completed if the rest is non-zero */
-+ verb = ret[0] & ~QB_VALID_BIT;
-+ if (!verb)
-+ return NULL;
-+ p->mc.valid_bit ^= QB_VALID_BIT;
-+ return ret;
-+}
+
-+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
-+enum qb_enqueue_commands {
-+ enqueue_empty = 0,
-+ enqueue_response_always = 1,
-+ enqueue_rejects_to_fq = 2
-+};
++#define pr_fmt(fmt) "fsl-mc: " fmt
+
-+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
-+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
-+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/ioport.h>
++#include <linux/slab.h>
++#include <linux/limits.h>
++#include <linux/bitops.h>
++#include <linux/msi.h>
++#include <linux/dma-mapping.h>
++#include <linux/fsl/mc.h>
+
-+/**
-+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ */
-+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
-+{
-+ memset(d, 0, sizeof(*d));
-+}
++#include "fsl-mc-private.h"
+
+/**
-+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
-+ * @d: the enqueue descriptor.
-+ * @response_success: 1 = enqueue with response always; 0 = enqueue with
-+ * rejections returned on a FQ.
++ * Default DMA mask for devices on a fsl-mc bus
+ */
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
-+{
-+ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
-+ if (respond_success)
-+ d->verb |= enqueue_response_always;
-+ else
-+ d->verb |= enqueue_rejects_to_fq;
-+}
++#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
-+/*
-+ * Exactly one of the following descriptor "targets" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * -enqueue to a frame queue
-+ * -enqueue to a queuing destination
++/**
++ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
++ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
++ * @num_translation_ranges: number of entries in addr_translation_ranges
++ * @translation_ranges: array of bus to system address translation ranges
+ */
++struct fsl_mc {
++ struct fsl_mc_device *root_mc_bus_dev;
++ u8 num_translation_ranges;
++ struct fsl_mc_addr_translation_range *translation_ranges;
++};
+
+/**
-+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @fqid: the id of the frame queue to be enqueued
++ * struct fsl_mc_addr_translation_range - bus to system address translation
++ * range
++ * @mc_region_type: Type of MC region for the range being translated
++ * @start_mc_offset: Start MC offset of the range being translated
++ * @end_mc_offset: MC offset of the first byte after the range (last MC
++ * offset of the range is end_mc_offset - 1)
++ * @start_phys_addr: system physical address corresponding to start_mc_addr
+ */
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
-+{
-+ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
-+ d->tgtid = cpu_to_le32(fqid);
-+}
++struct fsl_mc_addr_translation_range {
++ enum dprc_region_type mc_region_type;
++ u64 start_mc_offset;
++ u64 end_mc_offset;
++ phys_addr_t start_phys_addr;
++};
+
+/**
-+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
-+ * @d: the enqueue descriptor
-+ * @qdid: the id of the queuing destination to be enqueued
-+ * @qd_bin: the queuing destination bin
-+ * @qd_prio: the queuing destination priority
++ * struct mc_version
++ * @major: Major version number: incremented on API compatibility changes
++ * @minor: Minor version number: incremented on API additions (that are
++ * backward compatible); reset when major version is incremented
++ * @revision: Internal revision number: incremented on implementation changes
++ * and/or bug fixes that have no impact on API
+ */
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
-+ u32 qd_bin, u32 qd_prio)
-+{
-+ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
-+ d->tgtid = cpu_to_le32(qdid);
-+ d->qdbin = cpu_to_le16(qd_bin);
-+ d->qpri = qd_prio;
-+}
-+
-+#define EQAR_IDX(eqar) ((eqar) & 0x7)
-+#define EQAR_VB(eqar) ((eqar) & 0x80)
-+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++struct mc_version {
++ u32 major;
++ u32 minor;
++ u32 revision;
++};
+
+/**
-+ * qbman_swp_enqueue() - Issue an enqueue command
-+ * @s: the software portal used for enqueue
-+ * @d: the enqueue descriptor
-+ * @fd: the frame descriptor to be enqueued
-+ *
-+ * Please note that 'fd' should only be NULL if the "action" of the
-+ * descriptor is "orp_hole" or "orp_nesn".
++ * fsl_mc_bus_match - device to driver matching callback
++ * @dev: the fsl-mc device to match against
++ * @drv: the device driver to search for matching fsl-mc object type
++ * structures
+ *
-+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ * Returns 1 on success, 0 otherwise.
+ */
-+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
-+ const struct dpaa2_fd *fd)
++static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+{
-+ struct qbman_eq_desc *p;
-+ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
++ const struct fsl_mc_device_id *id;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
++ bool found = false;
+
-+ if (!EQAR_SUCCESS(eqar))
-+ return -EBUSY;
++ /* When driver_override is set, only bind to the matching driver */
++ if (mc_dev->driver_override) {
++ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
++ goto out;
++ }
+
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
-+ memcpy(&p->dca, &d->dca, 31);
-+ memcpy(&p->fd, fd, sizeof(*fd));
++ if (!mc_drv->match_id_table)
++ goto out;
+
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ dma_wmb();
-+ p->verb = d->verb | EQAR_VB(eqar);
-+ dccvac(p);
++ /*
++ * If the object is not 'plugged' don't match.
++ * Only exception is the root DPRC, which is a special case.
++ */
++ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
++ !fsl_mc_is_root_dprc(&mc_dev->dev))
++ goto out;
+
-+ return 0;
-+}
++ /*
++ * Traverse the match_id table of the given driver, trying to find
++ * a matching for the given device.
++ */
++ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
++ if (id->vendor == mc_dev->obj_desc.vendor &&
++ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
++ found = true;
+
-+/* Static (push) dequeue */
++ break;
++ }
++ }
++
++out:
++ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
++ return found;
++}
+
+/**
-+ * qbman_swp_push_get() - Get the push dequeue setup
-+ * @p: the software portal object
-+ * @channel_idx: the channel index to query
-+ * @enabled: returned boolean to show whether the push dequeue is enabled
-+ * for the given channel
++ * fsl_mc_bus_uevent - callback invoked when a device is added
+ */
-+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
++static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
-+ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
-+ WARN_ON(channel_idx > 15);
-+ *enabled = src | (1 << channel_idx);
++ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
++ mc_dev->obj_desc.vendor,
++ mc_dev->obj_desc.type))
++ return -ENOMEM;
++
++ return 0;
+}
+
-+/**
-+ * qbman_swp_push_set() - Enable or disable push dequeue
-+ * @p: the software portal object
-+ * @channel_idx: the channel index (0 to 15)
-+ * @enable: enable or disable push dequeue
-+ */
-+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
++static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
+{
-+ u16 dqsrc;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
-+ WARN_ON(channel_idx > 15);
-+ if (enable)
-+ s->sdq |= 1 << channel_idx;
-+ else
-+ s->sdq &= ~(1 << channel_idx);
++ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
++ mc_dev->obj_desc.type);
++}
++static DEVICE_ATTR_RO(modalias);
+
-+ /* Read make the complete src map. If no channels are enabled
-+ * the SDQCR must be 0 or else QMan will assert errors
-+ */
-+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
-+ if (dqsrc != 0)
-+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
-+ else
-+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
++static ssize_t rescan_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
++ unsigned long val;
++
++ if (!fsl_mc_is_root_dprc(dev))
++ return -EINVAL;
++
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
++
++ if (val) {
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, NULL);
++ mutex_unlock(&root_mc_bus->scan_mutex);
++ }
++
++ return count;
+}
++static DEVICE_ATTR_WO(rescan);
+
-+#define QB_VDQCR_VERB_DCT_SHIFT 0
-+#define QB_VDQCR_VERB_DT_SHIFT 2
-+#define QB_VDQCR_VERB_RLS_SHIFT 4
-+#define QB_VDQCR_VERB_WAE_SHIFT 5
++static ssize_t driver_override_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ const char *driver_override, *old = mc_dev->driver_override;
++ char *cp;
+
-+enum qb_pull_dt_e {
-+ qb_pull_dt_channel,
-+ qb_pull_dt_workqueue,
-+ qb_pull_dt_framequeue
-+};
++ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
++ return -EINVAL;
+
-+/**
-+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state
-+ * @d: the pull dequeue descriptor to be cleared
-+ */
-+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++ if (count >= (PAGE_SIZE - 1))
++ return -EINVAL;
++
++ driver_override = kstrndup(buf, count, GFP_KERNEL);
++ if (!driver_override)
++ return -ENOMEM;
++
++ cp = strchr(driver_override, '\n');
++ if (cp)
++ *cp = '\0';
++
++ if (strlen(driver_override)) {
++ mc_dev->driver_override = driver_override;
++ } else {
++ kfree(driver_override);
++ mc_dev->driver_override = NULL;
++ }
++
++ kfree(old);
++
++ return count;
++}
++
++static ssize_t driver_override_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+{
-+ memset(d, 0, sizeof(*d));
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++
++ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
++static DEVICE_ATTR_RW(driver_override);
+
-+/**
-+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
-+ * @d: the pull dequeue descriptor to be set
-+ * @storage: the pointer of the memory to store the dequeue result
-+ * @storage_phys: the physical address of the storage memory
-+ * @stash: to indicate whether write allocate is enabled
-+ *
-+ * If not called, or if called with 'storage' as NULL, the result pull dequeues
-+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
-+ * produced to the given memory location (using the DMA address which
-+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
-+ * those writes to main-memory express a cache-warming attribute.
-+ */
-+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
-+ struct dpaa2_dq *storage,
-+ dma_addr_t storage_phys,
-+ int stash)
++static struct attribute *fsl_mc_dev_attrs[] = {
++ &dev_attr_modalias.attr,
++ &dev_attr_rescan.attr,
++ &dev_attr_driver_override.attr,
++ NULL,
++};
++
++ATTRIBUTE_GROUPS(fsl_mc_dev);
++
++static int scan_fsl_mc_bus(struct device *dev, void *data)
+{
-+ /* save the virtual address */
-+ d->rsp_addr_virt = (u64)storage;
++ struct fsl_mc_device *root_mc_dev;
++ struct fsl_mc_bus *root_mc_bus;
+
-+ if (!storage) {
-+ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
-+ return;
-+ }
-+ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
-+ if (stash)
-+ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
-+ else
-+ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
++ if (!fsl_mc_is_root_dprc(dev))
++ goto exit;
++
++ root_mc_dev = to_fsl_mc_device(dev);
++ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++ mutex_lock(&root_mc_bus->scan_mutex);
++ dprc_scan_objects(root_mc_dev, NULL, NULL);
++ mutex_unlock(&root_mc_bus->scan_mutex);
+
-+ d->rsp_addr = cpu_to_le64(storage_phys);
++exit:
++ return 0;
+}
+
-+/**
-+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
-+ * @d: the pull dequeue descriptor to be set
-+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
-+ */
-+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
++static ssize_t bus_rescan_store(struct bus_type *bus,
++ const char *buf, size_t count)
+{
-+ d->numf = numframes - 1;
-+}
++ unsigned long val;
+
-+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
-+{
-+ d->tok = token;
-+}
++ if (kstrtoul(buf, 0, &val) < 0)
++ return -EINVAL;
+
-+/*
-+ * Exactly one of the following descriptor "actions" should be set. (Calling any
-+ * one of these will replace the effect of any prior call to one of these.)
-+ * - pull dequeue from the given frame queue (FQ)
-+ * - pull dequeue from any FQ in the given work queue (WQ)
-+ * - pull dequeue from any FQ in any WQ in the given channel
-+ */
++ if (val)
++ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
+
-+/**
-+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
-+ * @fqid: the frame queue index of the given FQ
-+ */
-+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
-+{
-+ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
-+ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
-+ d->dq_src = cpu_to_le32(fqid);
++ return count;
+}
++static BUS_ATTR(rescan, 0220, NULL, bus_rescan_store);
+
-+/**
-+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
-+ * @wqid: composed of channel id and wqid within the channel
-+ * @dct: the dequeue command type
-+ */
-+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
-+ enum qbman_pull_type_e dct)
-+{
-+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-+ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
-+ d->dq_src = cpu_to_le32(wqid);
-+}
++static struct attribute *fsl_mc_bus_attrs[] = {
++ &bus_attr_rescan.attr,
++ NULL,
++};
+
-+/**
-+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
-+ * dequeues
-+ * @chid: the channel id to be dequeued
-+ * @dct: the dequeue command type
-+ */
-+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
-+ enum qbman_pull_type_e dct)
-+{
-+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
-+ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
-+ d->dq_src = cpu_to_le32(chid);
-+}
++static const struct attribute_group fsl_mc_bus_group = {
++ .attrs = fsl_mc_bus_attrs,
++};
+
-+/**
-+ * qbman_swp_pull() - Issue the pull dequeue command
-+ * @s: the software portal object
-+ * @d: the software portal descriptor which has been configured with
-+ * the set of qbman_pull_desc_set_*() calls
-+ *
-+ * Return 0 for success, and -EBUSY if the software portal is not ready
-+ * to do pull dequeue.
-+ */
-+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
-+{
-+ struct qbman_pull_desc *p;
++static const struct attribute_group *fsl_mc_bus_groups[] = {
++ &fsl_mc_bus_group,
++ NULL,
++};
+
-+ if (!atomic_dec_and_test(&s->vdq.available)) {
-+ atomic_inc(&s->vdq.available);
-+ return -EBUSY;
-+ }
-+ s->vdq.storage = (void *)d->rsp_addr_virt;
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
-+ p->numf = d->numf;
-+ p->tok = QMAN_DQ_TOKEN_VALID;
-+ p->dq_src = d->dq_src;
-+ p->rsp_addr = d->rsp_addr;
-+ p->rsp_addr_virt = d->rsp_addr_virt;
-+ dma_wmb();
++struct bus_type fsl_mc_bus_type = {
++ .name = "fsl-mc",
++ .match = fsl_mc_bus_match,
++ .uevent = fsl_mc_bus_uevent,
++ .dev_groups = fsl_mc_dev_groups,
++ .bus_groups = fsl_mc_bus_groups,
++};
++EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
-+ /* Set the verb byte, have to substitute in the valid-bit */
-+ p->verb = d->verb | s->vdq.valid_bit;
-+ s->vdq.valid_bit ^= QB_VALID_BIT;
-+ dccvac(p);
++struct device_type fsl_mc_bus_dprc_type = {
++ .name = "fsl_mc_bus_dprc"
++};
+
-+ return 0;
-+}
++struct device_type fsl_mc_bus_dpni_type = {
++ .name = "fsl_mc_bus_dpni"
++};
+
-+#define QMAN_DQRR_PI_MASK 0xf
++struct device_type fsl_mc_bus_dpio_type = {
++ .name = "fsl_mc_bus_dpio"
++};
+
-+/**
-+ * qbman_swp_dqrr_next() - Get an valid DQRR entry
-+ * @s: the software portal object
-+ *
-+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
-+ * only once, so repeated calls can return a sequence of DQRR entries, without
-+ * requiring they be consumed immediately or in any particular order.
-+ */
-+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
-+{
-+ u32 verb;
-+ u32 response_verb;
-+ u32 flags;
-+ struct dpaa2_dq *p;
++struct device_type fsl_mc_bus_dpsw_type = {
++ .name = "fsl_mc_bus_dpsw"
++};
+
-+ /* Before using valid-bit to detect if something is there, we have to
-+ * handle the case of the DQRR reset bug...
-+ */
-+ if (unlikely(s->dqrr.reset_bug)) {
-+ /*
-+ * We pick up new entries by cache-inhibited producer index,
-+ * which means that a non-coherent mapping would require us to
-+ * invalidate and read *only* once that PI has indicated that
-+ * there's an entry here. The first trip around the DQRR ring
-+ * will be much less efficient than all subsequent trips around
-+ * it...
-+ */
-+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
-+ QMAN_DQRR_PI_MASK;
++struct device_type fsl_mc_bus_dpdmux_type = {
++ .name = "fsl_mc_bus_dpdmux"
++};
+
-+ /* there are new entries if pi != next_idx */
-+ if (pi == s->dqrr.next_idx)
-+ return NULL;
++struct device_type fsl_mc_bus_dpbp_type = {
++ .name = "fsl_mc_bus_dpbp"
++};
+
-+ /*
-+ * if next_idx is/was the last ring index, and 'pi' is
-+ * different, we can disable the workaround as all the ring
-+ * entries have now been DMA'd to so valid-bit checking is
-+ * repaired. Note: this logic needs to be based on next_idx
-+ * (which increments one at a time), rather than on pi (which
-+ * can burst and wrap-around between our snapshots of it).
-+ */
-+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
-+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
-+ s->dqrr.next_idx, pi);
-+ s->dqrr.reset_bug = 0;
-+ }
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ }
++struct device_type fsl_mc_bus_dpcon_type = {
++ .name = "fsl_mc_bus_dpcon"
++};
+
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ verb = p->dq.verb;
++struct device_type fsl_mc_bus_dpmcp_type = {
++ .name = "fsl_mc_bus_dpmcp"
++};
+
-+ /*
-+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
-+ * in the DQRR reset bug workaround, we shouldn't need to skip these
-+ * check, because we've already determined that a new entry is available
-+ * and we've invalidated the cacheline before reading it, so the
-+ * valid-bit behaviour is repaired and should tell us what we already
-+ * knew from reading PI.
-+ */
-+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
-+ return NULL;
-+ }
-+ /*
-+ * There's something there. Move "next_idx" attention to the next ring
-+ * entry (and prefetch it) before returning what we found.
-+ */
-+ s->dqrr.next_idx++;
-+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
-+ if (!s->dqrr.next_idx)
-+ s->dqrr.valid_bit ^= QB_VALID_BIT;
++struct device_type fsl_mc_bus_dpmac_type = {
++ .name = "fsl_mc_bus_dpmac"
++};
+
-+ /*
-+ * If this is the final response to a volatile dequeue command
-+ * indicate that the vdq is available
-+ */
-+ flags = p->dq.stat;
-+ response_verb = verb & QBMAN_RESULT_MASK;
-+ if ((response_verb == QBMAN_RESULT_DQ) &&
-+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
-+ (flags & DPAA2_DQ_STAT_EXPIRED))
-+ atomic_inc(&s->vdq.available);
++struct device_type fsl_mc_bus_dprtc_type = {
++ .name = "fsl_mc_bus_dprtc"
++};
+
-+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++struct device_type fsl_mc_bus_dpseci_type = {
++ .name = "fsl_mc_bus_dpseci"
++};
+
-+ return p;
-+}
++struct device_type fsl_mc_bus_dpdcei_type = {
++ .name = "fsl_mc_bus_dpdcei"
++};
+
-+/**
-+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
-+ * qbman_swp_dqrr_next().
-+ * @s: the software portal object
-+ * @dq: the DQRR entry to be consumed
-+ */
-+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
-+{
-+ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
-+}
++struct device_type fsl_mc_bus_dpaiop_type = {
++ .name = "fsl_mc_bus_dpaiop"
++};
+
-+/**
-+ * qbman_result_has_new_result() - Check and get the dequeue response from the
-+ * dq storage memory set in pull dequeue command
-+ * @s: the software portal object
-+ * @dq: the dequeue result read from the memory
-+ *
-+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
-+ * dequeue result.
-+ *
-+ * Only used for user-provided storage of dequeue results, not DQRR. For
-+ * efficiency purposes, the driver will perform any required endianness
-+ * conversion to ensure that the user's dequeue result storage is in host-endian
-+ * format. As such, once the user has called qbman_result_has_new_result() and
-+ * been returned a valid dequeue result, they should not call it again on
-+ * the same memory location (except of course if another dequeue command has
-+ * been executed to produce a new result to that location).
-+ */
-+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
-+{
-+ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
-+ return 0;
++struct device_type fsl_mc_bus_dpci_type = {
++ .name = "fsl_mc_bus_dpci"
++};
+
-+ /*
-+ * Set token to be 0 so we will detect change back to 1
-+ * next time the looping is traversed. Const is cast away here
-+ * as we want users to treat the dequeue responses as read only.
-+ */
-+ ((struct dpaa2_dq *)dq)->dq.tok = 0;
++struct device_type fsl_mc_bus_dpdmai_type = {
++ .name = "fsl_mc_bus_dpdmai"
++};
+
-+ /*
-+ * Determine whether VDQCR is available based on whether the
-+ * current result is sitting in the first storage location of
-+ * the busy command.
-+ */
-+ if (s->vdq.storage == dq) {
-+ s->vdq.storage = NULL;
-+ atomic_inc(&s->vdq.available);
++static struct device_type *fsl_mc_get_device_type(const char *type)
++{
++ static const struct {
++ struct device_type *dev_type;
++ const char *type;
++ } dev_types[] = {
++ { &fsl_mc_bus_dprc_type, "dprc" },
++ { &fsl_mc_bus_dpni_type, "dpni" },
++ { &fsl_mc_bus_dpio_type, "dpio" },
++ { &fsl_mc_bus_dpsw_type, "dpsw" },
++ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
++ { &fsl_mc_bus_dpbp_type, "dpbp" },
++ { &fsl_mc_bus_dpcon_type, "dpcon" },
++ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
++ { &fsl_mc_bus_dpmac_type, "dpmac" },
++ { &fsl_mc_bus_dprtc_type, "dprtc" },
++ { &fsl_mc_bus_dpseci_type, "dpseci" },
++ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
++ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
++ { &fsl_mc_bus_dpci_type, "dpci" },
++ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
++ { NULL, NULL }
++ };
++ int i;
++
++ for (i = 0; dev_types[i].dev_type; i++)
++ if (!strcmp(dev_types[i].type, type))
++ return dev_types[i].dev_type;
++
++ return NULL;
++}
++
++static int fsl_mc_driver_probe(struct device *dev)
++{
++ struct fsl_mc_driver *mc_drv;
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ int error;
++
++ mc_drv = to_fsl_mc_driver(dev->driver);
++
++ error = mc_drv->probe(mc_dev);
++ if (error < 0) {
++ if (error != -EPROBE_DEFER)
++ dev_err(dev, "%s failed: %d\n", __func__, error);
++ return error;
+ }
+
-+ return 1;
++ return 0;
+}
+
-+/**
-+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
-+ * default/starting state.
-+ */
-+void qbman_release_desc_clear(struct qbman_release_desc *d)
++static int fsl_mc_driver_remove(struct device *dev)
+{
-+ memset(d, 0, sizeof(*d));
-+ d->verb = 1 << 5; /* Release Command Valid */
-+}
++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ int error;
+
-+/**
-+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
-+ */
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
-+{
-+ d->bpid = cpu_to_le16(bpid);
++ error = mc_drv->remove(mc_dev);
++ if (error < 0) {
++ dev_err(dev, "%s failed: %d\n", __func__, error);
++ return error;
++ }
++
++ return 0;
+}
+
-+/**
-+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
-+ * interrupt source should be asserted after the release command is completed.
-+ */
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++static void fsl_mc_driver_shutdown(struct device *dev)
+{
-+ if (enable)
-+ d->verb |= 1 << 6;
-+ else
-+ d->verb &= ~(1 << 6);
-+}
++ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
-+#define RAR_IDX(rar) ((rar) & 0x7)
-+#define RAR_VB(rar) ((rar) & 0x80)
-+#define RAR_SUCCESS(rar) ((rar) & 0x100)
++ mc_drv->shutdown(mc_dev);
++}
+
+/**
-+ * qbman_swp_release() - Issue a buffer release command
-+ * @s: the software portal object
-+ * @d: the release descriptor
-+ * @buffers: a pointer pointing to the buffer address to be released
-+ * @num_buffers: number of buffers to be released, must be less than 8
++ * __fsl_mc_driver_register - registers a child device driver with the
++ * MC bus
+ *
-+ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ * This function is implicitly invoked from the registration function of
++ * fsl_mc device drivers, which is generated by the
++ * module_fsl_mc_driver() macro.
+ */
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+ const u64 *buffers, unsigned int num_buffers)
++int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
++ struct module *owner)
+{
-+ int i;
-+ struct qbman_release_desc *p;
-+ u32 rar;
++ int error;
+
-+ if (!num_buffers || (num_buffers > 7))
-+ return -EINVAL;
++ mc_driver->driver.owner = owner;
++ mc_driver->driver.bus = &fsl_mc_bus_type;
+
-+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
-+ if (!RAR_SUCCESS(rar))
-+ return -EBUSY;
++ if (mc_driver->probe)
++ mc_driver->driver.probe = fsl_mc_driver_probe;
+
-+ /* Start the release command */
-+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
-+ /* Copy the caller's buffer pointers to the command */
-+ for (i = 0; i < num_buffers; i++)
-+ p->buf[i] = cpu_to_le64(buffers[i]);
-+ p->bpid = d->bpid;
++ if (mc_driver->remove)
++ mc_driver->driver.remove = fsl_mc_driver_remove;
+
-+ /*
-+ * Set the verb byte, have to substitute in the valid-bit and the number
-+ * of buffers.
-+ */
-+ dma_wmb();
-+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
-+ dccvac(p);
++ if (mc_driver->shutdown)
++ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
++
++ error = driver_register(&mc_driver->driver);
++ if (error < 0) {
++ pr_err("driver_register() failed for %s: %d\n",
++ mc_driver->driver.name, error);
++ return error;
++ }
+
+ return 0;
+}
++EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
+
-+struct qbman_acquire_desc {
-+ u8 verb;
-+ u8 reserved;
-+ u16 bpid;
-+ u8 num;
-+ u8 reserved2[59];
-+};
-+
-+struct qbman_acquire_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u16 reserved;
-+ u8 num;
-+ u8 reserved2[3];
-+ u64 buf[7];
-+};
++/**
++ * fsl_mc_driver_unregister - unregisters a device driver from the
++ * MC bus
++ */
++void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
++{
++ driver_unregister(&mc_driver->driver);
++}
++EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+/**
-+ * qbman_swp_acquire() - Issue a buffer acquire command
-+ * @s: the software portal object
-+ * @bpid: the buffer pool index
-+ * @buffers: a pointer pointing to the acquired buffer addresses
-+ * @num_buffers: number of buffers to be acquired, must be less than 8
++ * mc_get_version() - Retrieves the Management Complex firmware
++ * version information
++ * @mc_io: Pointer to opaque I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @mc_ver_info: Returned version information structure
+ *
-+ * Return 0 for success, or negative error code if the acquire command
-+ * fails.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
-+ unsigned int num_buffers)
++static int mc_get_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ struct mc_version *mc_ver_info)
+{
-+ struct qbman_acquire_desc *p;
-+ struct qbman_acquire_rslt *r;
-+ int i;
-+
-+ if (!num_buffers || (num_buffers > 7))
-+ return -EINVAL;
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
++ struct fsl_mc_command cmd = { 0 };
++ struct dpmng_rsp_get_version *rsp_params;
++ int err;
+
-+ if (!p)
-+ return -EBUSY;
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
++ cmd_flags,
++ 0);
+
-+ /* Encode the caller-provided attributes */
-+ p->bpid = cpu_to_le16(bpid);
-+ p->num = num_buffers;
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ /* Complete the management command */
-+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
-+ if (unlikely(!r)) {
-+ pr_err("qbman: acquire from BPID %d failed, no response\n",
-+ bpid);
-+ return -EIO;
-+ }
++ /* retrieve response parameters */
++ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
++ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
++ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
++ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
-+ /* Decode the outcome */
-+ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
++ return 0;
++}
+
-+ /* Determine success or failure */
-+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
-+ bpid, r->rslt);
-+ return -EIO;
++/**
++ * fsl_mc_get_root_dprc - function to traverse to the root dprc
++ */
++void fsl_mc_get_root_dprc(struct device *dev,
++ struct device **root_dprc_dev)
++{
++ if (!dev) {
++ *root_dprc_dev = NULL;
++ } else if (!dev_is_fsl_mc(dev)) {
++ *root_dprc_dev = NULL;
++ } else {
++ *root_dprc_dev = dev;
++ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
++ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
-+
-+ WARN_ON(r->num > num_buffers);
-+
-+ /* Copy the acquired buffers to the caller's array */
-+ for (i = 0; i < r->num; i++)
-+ buffers[i] = le64_to_cpu(r->buf[i]);
-+
-+ return (int)r->num;
+}
++EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
+
-+struct qbman_alt_fq_state_desc {
-+ u8 verb;
-+ u8 reserved[3];
-+ u32 fqid;
-+ u8 reserved2[56];
-+};
-+
-+struct qbman_alt_fq_state_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u8 reserved[62];
-+};
-+
-+#define ALT_FQ_FQID_MASK 0x00FFFFFF
-+
-+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
-+ u8 alt_fq_verb)
++static int get_dprc_attr(struct fsl_mc_io *mc_io,
++ int container_id, struct dprc_attributes *attr)
+{
-+ struct qbman_alt_fq_state_desc *p;
-+ struct qbman_alt_fq_state_rslt *r;
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+
-+ p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK;
++ u16 dprc_handle;
++ int error;
+
-+ /* Complete the management command */
-+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
-+ if (unlikely(!r)) {
-+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
-+ alt_fq_verb);
-+ return -EIO;
++ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
++ if (error < 0) {
++ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
++ return error;
+ }
+
-+ /* Decode the outcome */
-+ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
-+
-+ /* Determine success or failure */
-+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
-+ fqid, r->verb, r->rslt);
-+ return -EIO;
++ memset(attr, 0, sizeof(struct dprc_attributes));
++ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
++ if (error < 0) {
++ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
++ error);
++ goto common_cleanup;
+ }
+
-+ return 0;
++ error = 0;
++
++common_cleanup:
++ (void)dprc_close(mc_io, 0, dprc_handle);
++ return error;
+}
+
-+struct qbman_cdan_ctrl_desc {
-+ u8 verb;
-+ u8 reserved;
-+ u16 ch;
-+ u8 we;
-+ u8 ctrl;
-+ u16 reserved2;
-+ u64 cdan_ctx;
-+ u8 reserved3[48];
++static int get_dprc_icid(struct fsl_mc_io *mc_io,
++ int container_id, u32 *icid)
++{
++ struct dprc_attributes attr;
++ int error;
+
-+};
++ error = get_dprc_attr(mc_io, container_id, &attr);
++ if (error == 0)
++ *icid = attr.icid;
+
-+struct qbman_cdan_ctrl_rslt {
-+ u8 verb;
-+ u8 rslt;
-+ u16 ch;
-+ u8 reserved[60];
-+};
++ return error;
++}
+
-+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
-+ u8 we_mask, u8 cdan_en,
-+ u64 ctx)
++static int translate_mc_addr(struct fsl_mc_device *mc_dev,
++ enum dprc_region_type mc_region_type,
++ u64 mc_offset, phys_addr_t *phys_addr)
+{
-+ struct qbman_cdan_ctrl_desc *p = NULL;
-+ struct qbman_cdan_ctrl_rslt *r = NULL;
-+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
++ int i;
++ struct device *root_dprc_dev;
++ struct fsl_mc *mc;
+
-+ /* Encode the caller-provided attributes */
-+ p->ch = cpu_to_le16(channelid);
-+ p->we = we_mask;
-+ if (cdan_en)
-+ p->ctrl = 1;
-+ else
-+ p->ctrl = 0;
-+ p->cdan_ctx = cpu_to_le64(ctx);
++ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
++ mc = dev_get_drvdata(root_dprc_dev->parent);
+
-+ /* Complete the management command */
-+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
-+ if (unlikely(!r)) {
-+ pr_err("qbman: wqchan config failed, no response\n");
-+ return -EIO;
++ if (mc->num_translation_ranges == 0) {
++ /*
++ * Do identity mapping:
++ */
++ *phys_addr = mc_offset;
++ return 0;
+ }
+
-+ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
++ for (i = 0; i < mc->num_translation_ranges; i++) {
++ struct fsl_mc_addr_translation_range *range =
++ &mc->translation_ranges[i];
+
-+ /* Determine success or failure */
-+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
-+ channelid, r->rslt);
-+ return -EIO;
++ if (mc_region_type == range->mc_region_type &&
++ mc_offset >= range->start_mc_offset &&
++ mc_offset < range->end_mc_offset) {
++ *phys_addr = range->start_phys_addr +
++ (mc_offset - range->start_mc_offset);
++ return 0;
++ }
+ }
+
-+ return 0;
++ return -EFAULT;
+}
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
-@@ -0,0 +1,662 @@
-+/*
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_QBMAN_PORTAL_H
-+#define __FSL_QBMAN_PORTAL_H
+
-+#include "qbman_private.h"
-+#include "../../include/dpaa2-fd.h"
++static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
++ struct fsl_mc_device *mc_bus_dev)
++{
++ int i;
++ int error;
++ struct resource *regions;
++ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
++ struct device *parent_dev = mc_dev->dev.parent;
++ enum dprc_region_type mc_region_type;
++
++ if (is_fsl_mc_bus_dprc(mc_dev) ||
++ is_fsl_mc_bus_dpmcp(mc_dev)) {
++ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
++ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
++ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
++ } else {
++ /*
++ * This function should not have been called for this MC object
++ * type, as this object type is not supposed to have MMIO
++ * regions
++ */
++ return -EINVAL;
++ }
+
-+struct dpaa2_dq;
-+struct qbman_swp;
++ regions = kmalloc_array(obj_desc->region_count,
++ sizeof(regions[0]), GFP_KERNEL);
++ if (!regions)
++ return -ENOMEM;
+
-+/* qbman software portal descriptor structure */
-+struct qbman_swp_desc {
-+ void *cena_bar; /* Cache-enabled portal base address */
-+ void *cinh_bar; /* Cache-inhibited portal base address */
-+ u32 qman_version;
-+};
++ for (i = 0; i < obj_desc->region_count; i++) {
++ struct dprc_region_desc region_desc;
+
-+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
-+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
-+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
-+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
-+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
-+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
++ error = dprc_get_obj_region(mc_bus_dev->mc_io,
++ 0,
++ mc_bus_dev->mc_handle,
++ obj_desc->type,
++ obj_desc->id, i, ®ion_desc);
++ if (error < 0) {
++ dev_err(parent_dev,
++ "dprc_get_obj_region() failed: %d\n", error);
++ goto error_cleanup_regions;
++ }
+
-+/* the structure for pull dequeue descriptor */
-+struct qbman_pull_desc {
-+ u8 verb;
-+ u8 numf;
-+ u8 tok;
-+ u8 reserved;
-+ u32 dq_src;
-+ u64 rsp_addr;
-+ u64 rsp_addr_virt;
-+ u8 padding[40];
-+};
++ error = translate_mc_addr(mc_dev, mc_region_type,
++ region_desc.base_offset,
++ ®ions[i].start);
++ if (error < 0) {
++ dev_err(parent_dev,
++ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
++ region_desc.base_offset,
++ obj_desc->type, obj_desc->id, i);
++ goto error_cleanup_regions;
++ }
+
-+enum qbman_pull_type_e {
-+ /* dequeue with priority precedence, respect intra-class scheduling */
-+ qbman_pull_type_prio = 1,
-+ /* dequeue with active FQ precedence, respect ICS */
-+ qbman_pull_type_active,
-+ /* dequeue with active FQ precedence, no ICS */
-+ qbman_pull_type_active_noics
-+};
++ regions[i].end = regions[i].start + region_desc.size - 1;
++ regions[i].name = "fsl-mc object MMIO region";
++ regions[i].flags = IORESOURCE_IO;
++ if (region_desc.flags & DPRC_REGION_CACHEABLE)
++ regions[i].flags |= IORESOURCE_CACHEABLE;
++ }
+
-+/* Definitions for parsing dequeue entries */
-+#define QBMAN_RESULT_MASK 0x7f
-+#define QBMAN_RESULT_DQ 0x60
-+#define QBMAN_RESULT_FQRN 0x21
-+#define QBMAN_RESULT_FQRNI 0x22
-+#define QBMAN_RESULT_FQPN 0x24
-+#define QBMAN_RESULT_FQDAN 0x25
-+#define QBMAN_RESULT_CDAN 0x26
-+#define QBMAN_RESULT_CSCN_MEM 0x27
-+#define QBMAN_RESULT_CGCU 0x28
-+#define QBMAN_RESULT_BPSCN 0x29
-+#define QBMAN_RESULT_CSCN_WQ 0x2a
++ mc_dev->regions = regions;
++ return 0;
+
-+/* QBMan FQ management command codes */
-+#define QBMAN_FQ_SCHEDULE 0x48
-+#define QBMAN_FQ_FORCE 0x49
-+#define QBMAN_FQ_XON 0x4d
-+#define QBMAN_FQ_XOFF 0x4e
++error_cleanup_regions:
++ kfree(regions);
++ return error;
++}
+
-+/* structure of enqueue descriptor */
-+struct qbman_eq_desc {
-+ u8 verb;
-+ u8 dca;
-+ u16 seqnum;
-+ u16 orpid;
-+ u16 reserved1;
-+ u32 tgtid;
-+ u32 tag;
-+ u16 qdbin;
-+ u8 qpri;
-+ u8 reserved[3];
-+ u8 wae;
-+ u8 rspid;
-+ u64 rsp_addr;
-+ u8 fd[32];
-+};
++/**
++ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
++ */
++bool fsl_mc_is_root_dprc(struct device *dev)
++{
++ struct device *root_dprc_dev;
+
-+/* buffer release descriptor */
-+struct qbman_release_desc {
-+ u8 verb;
-+ u8 reserved;
-+ u16 bpid;
-+ u32 reserved2;
-+ u64 buf[7];
-+};
++ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
++ if (!root_dprc_dev)
++ return false;
++ return dev == root_dprc_dev;
++}
+
-+/* Management command result codes */
-+#define QBMAN_MC_RSLT_OK 0xf0
++static void fsl_mc_device_release(struct device *dev)
++{
++ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
-+#define CODE_CDAN_WE_EN 0x1
-+#define CODE_CDAN_WE_CTX 0x4
++ kfree(mc_dev->regions);
+
-+/* portal data structure */
-+struct qbman_swp {
-+ const struct qbman_swp_desc *desc;
-+ void __iomem *addr_cena;
-+ void __iomem *addr_cinh;
++ if (is_fsl_mc_bus_dprc(mc_dev))
++ kfree(to_fsl_mc_bus(mc_dev));
++ else
++ kfree(mc_dev);
++}
+
-+ /* Management commands */
-+ struct {
-+ u32 valid_bit; /* 0x00 or 0x80 */
-+ } mc;
++/**
++ * Add a newly discovered fsl-mc device to be visible in Linux
++ */
++int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
++ struct fsl_mc_io *mc_io,
++ struct device *parent_dev,
++ const char *driver_override,
++ struct fsl_mc_device **new_mc_dev)
++{
++ int error;
++ struct fsl_mc_device *mc_dev = NULL;
++ struct fsl_mc_bus *mc_bus = NULL;
++ struct fsl_mc_device *parent_mc_dev;
++ struct device *fsl_mc_platform_dev;
++ struct device_node *fsl_mc_platform_node;
+
-+ /* Push dequeues */
-+ u32 sdq;
++ if (dev_is_fsl_mc(parent_dev))
++ parent_mc_dev = to_fsl_mc_device(parent_dev);
++ else
++ parent_mc_dev = NULL;
+
-+ /* Volatile dequeues */
-+ struct {
-+ atomic_t available; /* indicates if a command can be sent */
-+ u32 valid_bit; /* 0x00 or 0x80 */
-+ struct dpaa2_dq *storage; /* NULL if DQRR */
-+ } vdq;
++ if (strcmp(obj_desc->type, "dprc") == 0) {
++ /*
++ * Allocate an MC bus device object:
++ */
++ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
++ if (!mc_bus)
++ return -ENOMEM;
+
-+ /* DQRR */
-+ struct {
-+ u32 next_idx;
-+ u32 valid_bit;
-+ u8 dqrr_size;
-+ int reset_bug; /* indicates dqrr reset workaround is needed */
-+ } dqrr;
-+};
++ mc_dev = &mc_bus->mc_dev;
++ } else {
++ /*
++ * Allocate a regular fsl_mc_device object:
++ */
++ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
++ if (!mc_dev)
++ return -ENOMEM;
++ }
+
-+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
-+void qbman_swp_finish(struct qbman_swp *p);
-+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
-+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
-+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
-+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
-+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
-+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
++ mc_dev->obj_desc = *obj_desc;
++ mc_dev->mc_io = mc_io;
+
-+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
-+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
++ if (driver_override) {
++ /*
++ * We trust driver_override, so we don't need to use
++ * kstrndup() here
++ */
++ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
++ if (!mc_dev->driver_override) {
++ error = -ENOMEM;
++ goto error_cleanup_dev;
++ }
++ }
+
-+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
-+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
-+ struct dpaa2_dq *storage,
-+ dma_addr_t storage_phys,
-+ int stash);
-+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
-+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
-+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
-+ enum qbman_pull_type_e dct);
-+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
-+ enum qbman_pull_type_e dct);
++ device_initialize(&mc_dev->dev);
++ mc_dev->dev.parent = parent_dev;
++ mc_dev->dev.bus = &fsl_mc_bus_type;
++ mc_dev->dev.release = fsl_mc_device_release;
++ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
++ if (!mc_dev->dev.type) {
++ error = -ENODEV;
++ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
++ goto error_cleanup_dev;
++ }
++ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
-+int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
++ if (strcmp(obj_desc->type, "dprc") == 0) {
++ struct fsl_mc_io *mc_io2;
+
-+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
-+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
++ mc_dev->flags |= FSL_MC_IS_DPRC;
+
-+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
++ /*
++ * To get the DPRC's ICID, we need to open the DPRC
++ * in get_dprc_icid(). For child DPRCs, we do so using the
++ * parent DPRC's MC portal instead of the child DPRC's MC
++ * portal, in case the child DPRC is already opened with
++ * its own portal (e.g., the DPRC used by AIOP).
++ *
++ * NOTE: There cannot be more than one active open for a
++ * given MC object, using the same MC portal.
++ */
++ if (parent_mc_dev) {
++ /*
++ * device being added is a child DPRC device
++ */
++ mc_io2 = parent_mc_dev->mc_io;
++ } else {
++ /*
++ * device being added is the root DPRC device
++ */
++ if (!mc_io) {
++ error = -EINVAL;
++ goto error_cleanup_dev;
++ }
+
-+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
-+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
-+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
-+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
-+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
-+ u32 qd_bin, u32 qd_prio);
++ mc_io2 = mc_io;
++ }
+
-+int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
-+ const struct dpaa2_fd *fd);
++ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
++ if (error < 0)
++ goto error_cleanup_dev;
++ } else {
++ /*
++ * A non-DPRC object has to be a child of a DPRC, use the
++ * parent's ICID and interrupt domain.
++ */
++ mc_dev->icid = parent_mc_dev->icid;
++ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
++ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
++ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
++ dev_set_msi_domain(&mc_dev->dev,
++ dev_get_msi_domain(&parent_mc_dev->dev));
++ }
+
-+void qbman_release_desc_clear(struct qbman_release_desc *d);
-+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
-+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
++ /*
++ * Get MMIO regions for the device from the MC:
++ *
++ * NOTE: the root DPRC is a special case as its MMIO region is
++ * obtained from the device tree
++ */
++ if (parent_mc_dev && obj_desc->region_count != 0) {
++ error = fsl_mc_device_get_mmio_regions(mc_dev,
++ parent_mc_dev);
++ if (error < 0)
++ goto error_cleanup_dev;
++ }
+
-+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
-+ const u64 *buffers, unsigned int num_buffers);
-+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
-+ unsigned int num_buffers);
-+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
-+ u8 alt_fq_verb);
-+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
-+ u8 we_mask, u8 cdan_en,
-+ u64 ctx);
++ fsl_mc_platform_dev = &mc_dev->dev;
++ while (dev_is_fsl_mc(fsl_mc_platform_dev))
++ fsl_mc_platform_dev = fsl_mc_platform_dev->parent;
++ fsl_mc_platform_node = fsl_mc_platform_dev->of_node;
+
-+void *qbman_swp_mc_start(struct qbman_swp *p);
-+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
-+void *qbman_swp_mc_result(struct qbman_swp *p);
++ /* Set up the iommu configuration for the devices. */
++ fsl_mc_dma_configure(mc_dev, fsl_mc_platform_node,
++ !(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY));
+
-+/**
-+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
-+ * @dq: the dequeue result to be checked
-+ *
-+ * DQRR entries may contain non-dequeue results, ie. notifications
-+ */
-+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
++ /*
++ * The device-specific probe callback will get invoked by device_add()
++ */
++ error = device_add(&mc_dev->dev);
++ if (error < 0) {
++ dev_err(parent_dev,
++ "device_add() failed for device %s: %d\n",
++ dev_name(&mc_dev->dev), error);
++ goto error_cleanup_dev;
++ }
++
++ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
++
++ *new_mc_dev = mc_dev;
++ return 0;
++
++error_cleanup_dev:
++ kfree(mc_dev->regions);
++ kfree(mc_bus);
++ kfree(mc_dev);
++
++ return error;
+}
++EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+/**
-+ * qbman_result_is_SCN() - Check the dequeue result is notification or not
-+ * @dq: the dequeue result to be checked
++ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
++ * Linux
+ *
++ * @mc_dev: Pointer to an fsl-mc device
+ */
-+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+{
-+ return !qbman_result_is_DQ(dq);
-+}
++ kfree(mc_dev->driver_override);
++ mc_dev->driver_override = NULL;
+
-+/* FQ Data Availability */
-+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
++ /*
++ * The device-specific remove callback will get invoked by device_del()
++ */
++ device_del(&mc_dev->dev);
++ put_device(&mc_dev->dev);
+}
++EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
-+/* Channel Data Availability */
-+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++static int parse_mc_ranges(struct device *dev,
++ int *paddr_cells,
++ int *mc_addr_cells,
++ int *mc_size_cells,
++ const __be32 **ranges_start)
+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
-+}
++ const __be32 *prop;
++ int range_tuple_cell_count;
++ int ranges_len;
++ int tuple_len;
++ struct device_node *mc_node = dev->of_node;
++
++ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
++ if (!(*ranges_start) || !ranges_len) {
++ dev_warn(dev,
++ "missing or empty ranges property for device tree node '%s'\n",
++ mc_node->name);
++ return 0;
++ }
+
-+/* Congestion State Change */
-+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
-+}
++ *paddr_cells = of_n_addr_cells(mc_node);
+
-+/* Buffer Pool State Change */
-+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
-+}
++ prop = of_get_property(mc_node, "#address-cells", NULL);
++ if (prop)
++ *mc_addr_cells = be32_to_cpup(prop);
++ else
++ *mc_addr_cells = *paddr_cells;
+
-+/* Congestion Group Count Update */
-+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
-+}
++ prop = of_get_property(mc_node, "#size-cells", NULL);
++ if (prop)
++ *mc_size_cells = be32_to_cpup(prop);
++ else
++ *mc_size_cells = of_n_size_cells(mc_node);
+
-+/* Retirement */
-+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
-+}
++ range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
++ *mc_size_cells;
+
-+/* Retirement Immediate */
-+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
-+}
++ tuple_len = range_tuple_cell_count * sizeof(__be32);
++ if (ranges_len % tuple_len != 0) {
++ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
++ return -EINVAL;
++ }
+
-+ /* Park */
-+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
-+{
-+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
++ return ranges_len / tuple_len;
+}
+
-+/**
-+ * qbman_result_SCN_state() - Get the state field in State-change notification
-+ */
-+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
++static int get_mc_addr_translation_ranges(struct device *dev,
++ struct fsl_mc_addr_translation_range
++ **ranges,
++ u8 *num_ranges)
+{
-+ return scn->scn.state;
-+}
++ int ret;
++ int paddr_cells;
++ int mc_addr_cells;
++ int mc_size_cells;
++ int i;
++ const __be32 *ranges_start;
++ const __be32 *cell;
+
-+#define SCN_RID_MASK 0x00FFFFFF
++ ret = parse_mc_ranges(dev,
++ &paddr_cells,
++ &mc_addr_cells,
++ &mc_size_cells,
++ &ranges_start);
++ if (ret < 0)
++ return ret;
+
-+/**
-+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
-+ */
-+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
-+{
-+ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
++ *num_ranges = ret;
++ if (!ret) {
++ /*
++ * Missing or empty ranges property ("ranges;") for the
++ * 'fsl,qoriq-mc' node. In this case, identity mapping
++ * will be used.
++ */
++ *ranges = NULL;
++ return 0;
++ }
++
++ *ranges = devm_kcalloc(dev, *num_ranges,
++ sizeof(struct fsl_mc_addr_translation_range),
++ GFP_KERNEL);
++ if (!(*ranges))
++ return -ENOMEM;
++
++ cell = ranges_start;
++ for (i = 0; i < *num_ranges; ++i) {
++ struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
++
++ range->mc_region_type = of_read_number(cell, 1);
++ range->start_mc_offset = of_read_number(cell + 1,
++ mc_addr_cells - 1);
++ cell += mc_addr_cells;
++ range->start_phys_addr = of_read_number(cell, paddr_cells);
++ cell += paddr_cells;
++ range->end_mc_offset = range->start_mc_offset +
++ of_read_number(cell, mc_size_cells);
++
++ cell += mc_size_cells;
++ }
++
++ return 0;
+}
+
+/**
-+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
++ * added
+ */
-+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
++static int fsl_mc_bus_probe(struct platform_device *pdev)
+{
-+ return le64_to_cpu(scn->scn.ctx);
++ struct fsl_mc_obj_desc obj_desc;
++ int error;
++ struct fsl_mc *mc;
++ struct fsl_mc_device *mc_bus_dev = NULL;
++ struct fsl_mc_io *mc_io = NULL;
++ struct fsl_mc_bus *mc_bus = NULL;
++ int container_id;
++ phys_addr_t mc_portal_phys_addr;
++ u32 mc_portal_size;
++ struct mc_version mc_version;
++ struct resource res;
++
++ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
++ if (!mc)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, mc);
++
++ /*
++ * Get physical address of MC portal for the root DPRC:
++ */
++ error = of_address_to_resource(pdev->dev.of_node, 0, &res);
++ if (error < 0) {
++ dev_err(&pdev->dev,
++ "of_address_to_resource() failed for %pOF\n",
++ pdev->dev.of_node);
++ return error;
++ }
++
++ mc_portal_phys_addr = res.start;
++ mc_portal_size = resource_size(&res);
++ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
++ mc_portal_size, NULL,
++ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
++ if (error < 0)
++ return error;
++
++ error = mc_get_version(mc_io, 0, &mc_version);
++ if (error != 0) {
++ dev_err(&pdev->dev,
++ "mc_get_version() failed with error %d\n", error);
++ goto error_cleanup_mc_io;
++ }
++
++ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
++ mc_version.major, mc_version.minor, mc_version.revision);
++
++ error = get_mc_addr_translation_ranges(&pdev->dev,
++ &mc->translation_ranges,
++ &mc->num_translation_ranges);
++ if (error < 0)
++ goto error_cleanup_mc_io;
++
++ error = dprc_get_container_id(mc_io, 0, &container_id);
++ if (error < 0) {
++ dev_err(&pdev->dev,
++ "dprc_get_container_id() failed: %d\n", error);
++ goto error_cleanup_mc_io;
++ }
++
++ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
++ error = dprc_get_api_version(mc_io, 0,
++ &obj_desc.ver_major,
++ &obj_desc.ver_minor);
++ if (error < 0)
++ goto error_cleanup_mc_io;
++
++ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
++ strcpy(obj_desc.type, "dprc");
++ obj_desc.id = container_id;
++ obj_desc.irq_count = 1;
++ obj_desc.region_count = 0;
++
++ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
++ &mc_bus_dev);
++ if (error < 0)
++ goto error_cleanup_mc_io;
++
++ mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ error = fsl_mc_restool_create_device_file(mc_bus);
++ if (error < 0)
++ goto error_cleanup_device;
++
++ mc->root_mc_bus_dev = mc_bus_dev;
++
++ return 0;
++
++error_cleanup_device:
++ fsl_mc_device_remove(mc_bus_dev);
++
++error_cleanup_mc_io:
++ fsl_destroy_mc_io(mc_io);
++ return error;
+}
+
+/**
-+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
-+ * @s: the software portal object
-+ * @fqid: the index of frame queue to be scheduled
-+ *
-+ * There are a couple of different ways that a FQ can end up parked state,
-+ * This schedules it.
-+ *
-+ * Return 0 for success, or negative error code for failure.
++ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
++ * removed
+ */
-+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
++static int fsl_mc_bus_remove(struct platform_device *pdev)
+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++ struct fsl_mc *mc = platform_get_drvdata(pdev);
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc->root_mc_bus_dev);
++
++ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
++ return -EINVAL;
++
++ fsl_mc_restool_remove_device_file(mc_bus);
++ fsl_mc_device_remove(mc->root_mc_bus_dev);
++
++ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
++ mc->root_mc_bus_dev->mc_io = NULL;
++
++ return 0;
+}
+
-+/**
-+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
-+ * @s: the software portal object
-+ * @fqid: the index of frame queue to be forced
-+ *
-+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
-+ * and thus be available for selection by any channel-dequeuing behaviour (push
-+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
-+ * empty at the time this happens, the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
++static const struct of_device_id fsl_mc_bus_match_table[] = {
++ {.compatible = "fsl,qoriq-mc",},
++ {},
++};
++
++MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
++
++static struct platform_driver fsl_mc_bus_driver = {
++ .driver = {
++ .name = "fsl_mc_bus",
++ .pm = NULL,
++ .of_match_table = fsl_mc_bus_match_table,
++ },
++ .probe = fsl_mc_bus_probe,
++ .remove = fsl_mc_bus_remove,
++};
++
++static int __init fsl_mc_bus_driver_init(void)
+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
-+}
++ int error;
+
-+/**
-+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
-+ * @s: the software portal object
-+ * @fqid: the index of frame queue
-+ *
-+ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ error = bus_register(&fsl_mc_bus_type);
++ if (error < 0) {
++ pr_err("bus type registration failed: %d\n", error);
++ goto error_cleanup_cache;
++ }
++
++ error = platform_driver_register(&fsl_mc_bus_driver);
++ if (error < 0) {
++ pr_err("platform_driver_register() failed: %d\n", error);
++ goto error_cleanup_bus;
++ }
++
++ error = dprc_driver_init();
++ if (error < 0)
++ goto error_cleanup_driver;
++
++ error = fsl_mc_allocator_driver_init();
++ if (error < 0)
++ goto error_cleanup_dprc_driver;
++
++ error = fsl_mc_restool_init();
++ if (error < 0)
++ goto error_cleanup_mc_allocator;
++
++ return 0;
++
++error_cleanup_mc_allocator:
++ fsl_mc_allocator_driver_exit();
++
++error_cleanup_dprc_driver:
++ dprc_driver_exit();
++
++error_cleanup_driver:
++ platform_driver_unregister(&fsl_mc_bus_driver);
++
++error_cleanup_bus:
++ bus_unregister(&fsl_mc_bus_type);
++
++error_cleanup_cache:
++ return error;
++}
++postcore_initcall(fsl_mc_bus_driver_init);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/fsl-mc-iommu.c
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ * Author: Nipun Gupta <nipun.gupta@nxp.com>
+ *
-+ * Return 0 for success, or negative error code for failure.
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
+ */
-+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
++
++#include <linux/iommu.h>
++#include <linux/of.h>
++#include <linux/of_iommu.h>
++#include <linux/fsl/mc.h>
++
++/* Setup the IOMMU for the DPRC container */
++static const struct iommu_ops
++*fsl_mc_iommu_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node)
+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
++ struct of_phandle_args iommu_spec;
++ const struct iommu_ops *ops;
++ u32 iommu_phandle;
++ struct device_node *iommu_node;
++ const __be32 *map = NULL;
++ int iommu_cells, map_len, ret;
++
++ map = of_get_property(fsl_mc_platform_node, "iommu-map", &map_len);
++ if (!map)
++ return NULL;
++
++ ops = mc_dev->dev.bus->iommu_ops;
++ if (!ops || !ops->of_xlate)
++ return NULL;
++
++ iommu_phandle = be32_to_cpup(map + 1);
++ iommu_node = of_find_node_by_phandle(iommu_phandle);
++
++ if (of_property_read_u32(iommu_node, "#iommu-cells", &iommu_cells)) {
++ pr_err("%s: missing #iommu-cells property\n", iommu_node->name);
++ return NULL;
++ }
++
++ /* Initialize the fwspec */
++ ret = iommu_fwspec_init(&mc_dev->dev, &iommu_node->fwnode, ops);
++ if (ret)
++ return NULL;
++
++ /*
++ * Fill in the required stream-id before calling the iommu's
++ * ops->of_xlate callback.
++ */
++ iommu_spec.np = iommu_node;
++ iommu_spec.args[0] = mc_dev->icid;
++ iommu_spec.args_count = 1;
++
++ ret = ops->of_xlate(&mc_dev->dev, &iommu_spec);
++ if (ret)
++ return NULL;
++
++ of_node_put(iommu_spec.np);
++
++ return ops;
+}
+
-+/**
-+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
-+ * @s: the software portal object
-+ * @fqid: the index of frame queue
-+ *
-+ * This setting doesn't affect enqueues to the FQ, just dequeues.
-+ * XOFF FQs will remain in the tenatively-scheduled state, even when
-+ * non-empty, meaning they won't be selected for scheduled dequeuing.
-+ * If a FQ is changed to XOFF after it had already become truly-scheduled
-+ * to a channel, and a pull dequeue of that channel occurs that selects
-+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
-+ * (qbman_result_DQ_fd() will return NULL.)
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
++/* Set up DMA configuration for fsl-mc devices */
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent)
+{
-+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
-+}
++ const struct iommu_ops *ops;
+
-+/* If the user has been allocated a channel object that is going to generate
-+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
-+ * necessary.
-+ *
-+ * CDAN-enabled channels only generate a single CDAN notification, after which
-+ * they need to be reenabled before they'll generate another. The idea is
-+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
-+ * reenable step. Each function generates a distinct command to hardware, so a
-+ * combination function is provided if the user wishes to modify the "context"
-+ * (which shows up in each CDAN message) each time they reenable, as a single
-+ * command to hardware.
-+ */
++ ops = fsl_mc_iommu_configure(mc_dev, fsl_mc_platform_node);
+
-+/**
-+ * qbman_swp_CDAN_set_context() - Set CDAN context
-+ * @s: the software portal object
-+ * @channelid: the channel index
-+ * @ctx: the context to be set in CDAN
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
-+ u64 ctx)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_CTX,
-+ 0, ctx);
++ mc_dev->dev.coherent_dma_mask = DMA_BIT_MASK(48);
++ mc_dev->dev.dma_mask = &mc_dev->dev.coherent_dma_mask;
++ arch_setup_dma_ops(&mc_dev->dev, 0,
++ mc_dev->dev.coherent_dma_mask + 1, ops, coherent);
+}
-+
-+/**
-+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
-+ * @s: the software portal object
-+ * @channelid: the index of the channel to generate CDAN
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
++++ /dev/null
+@@ -1,285 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus driver MSI support
+- *
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/irqchip/arm-gic-v3.h>
+-#include <linux/of_irq.h>
+-#include <linux/irq.h>
+-#include <linux/irqdomain.h>
+-#include <linux/msi.h>
+-#include "../include/mc-bus.h"
+-#include "fsl-mc-private.h"
+-
+-/*
+- * Generate a unique ID identifying the interrupt (only used within the MSI
+- * irqdomain. Combine the icid with the interrupt index.
+- */
+-static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+- struct msi_desc *desc)
+-{
+- /*
+- * Make the base hwirq value for ICID*10000 so it is readable
+- * as a decimal value in /proc/interrupts.
+- */
+- return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+-}
+-
+-static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+- struct msi_desc *desc)
+-{
+- arg->desc = desc;
+- arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+- desc);
+-}
+-
+-static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+-{
+- struct msi_domain_ops *ops = info->ops;
+-
+- if (WARN_ON(!ops))
+- return;
+-
+- /*
+- * set_desc should not be set by the caller
+- */
+- if (!ops->set_desc)
+- ops->set_desc = fsl_mc_msi_set_desc;
+-}
+-
+-static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+- struct fsl_mc_device_irq *mc_dev_irq)
+-{
+- int error;
+- struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+- struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
+- struct dprc_irq_cfg irq_cfg;
+-
+- /*
+- * msi_desc->msg.address is 0x0 when this function is invoked in
+- * the free_irq() code path. In this case, for the MC, we don't
+- * really need to "unprogram" the MSI, so we just return.
+- */
+- if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+- return;
+-
+- if (WARN_ON(!owner_mc_dev))
+- return;
+-
+- irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+- msi_desc->msg.address_lo;
+- irq_cfg.val = msi_desc->msg.data;
+- irq_cfg.irq_num = msi_desc->irq;
+-
+- if (owner_mc_dev == mc_bus_dev) {
+- /*
+- * IRQ is for the mc_bus_dev's DPRC itself
+- */
+- error = dprc_set_irq(mc_bus_dev->mc_io,
+- MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+- mc_bus_dev->mc_handle,
+- mc_dev_irq->dev_irq_index,
+- &irq_cfg);
+- if (error < 0) {
+- dev_err(&owner_mc_dev->dev,
+- "dprc_set_irq() failed: %d\n", error);
+- }
+- } else {
+- /*
+- * IRQ is for for a child device of mc_bus_dev
+- */
+- error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+- MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+- mc_bus_dev->mc_handle,
+- owner_mc_dev->obj_desc.type,
+- owner_mc_dev->obj_desc.id,
+- mc_dev_irq->dev_irq_index,
+- &irq_cfg);
+- if (error < 0) {
+- dev_err(&owner_mc_dev->dev,
+- "dprc_obj_set_irq() failed: %d\n", error);
+- }
+- }
+-}
+-
+-/*
+- * NOTE: This function is invoked with interrupts disabled
+- */
+-static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+- struct msi_msg *msg)
+-{
+- struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+- struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- struct fsl_mc_device_irq *mc_dev_irq =
+- &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+-
+- WARN_ON(mc_dev_irq->msi_desc != msi_desc);
+- msi_desc->msg = *msg;
+-
+- /*
+- * Program the MSI (paddr, value) pair in the device:
+- */
+- __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+-}
+-
+-static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+-{
+- struct irq_chip *chip = info->chip;
+-
+- if (WARN_ON((!chip)))
+- return;
+-
+- /*
+- * irq_write_msi_msg should not be set by the caller
+- */
+- if (!chip->irq_write_msi_msg)
+- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+-}
+-
+-/**
+- * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+- * @np: Optional device-tree node of the interrupt controller
+- * @info: MSI domain info
+- * @parent: Parent irq domain
+- *
+- * Updates the domain and chip ops and creates a fsl-mc MSI
+- * interrupt domain.
+- *
+- * Returns:
+- * A domain pointer or NULL in case of failure.
+- */
+-struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+- struct msi_domain_info *info,
+- struct irq_domain *parent)
+-{
+- struct irq_domain *domain;
+-
+- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+- fsl_mc_msi_update_dom_ops(info);
+- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+- fsl_mc_msi_update_chip_ops(info);
+-
+- domain = msi_create_irq_domain(fwnode, info, parent);
+- if (domain)
+- domain->bus_token = DOMAIN_BUS_FSL_MC_MSI;
+-
+- return domain;
+-}
+-
+-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+- struct irq_domain **mc_msi_domain)
+-{
+- struct irq_domain *msi_domain;
+- struct device_node *mc_of_node = mc_platform_dev->of_node;
+-
+- msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
+- DOMAIN_BUS_FSL_MC_MSI);
+- if (!msi_domain) {
+- pr_err("Unable to find fsl-mc MSI domain for %s\n",
+- mc_of_node->full_name);
+-
+- return -ENOENT;
+- }
+-
+- *mc_msi_domain = msi_domain;
+- return 0;
+-}
+-
+-static void fsl_mc_msi_free_descs(struct device *dev)
+-{
+- struct msi_desc *desc, *tmp;
+-
+- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+- list_del(&desc->list);
+- free_msi_entry(desc);
+- }
+-}
+-
+-static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+-
+-{
+- unsigned int i;
+- int error;
+- struct msi_desc *msi_desc;
+-
+- for (i = 0; i < irq_count; i++) {
+- msi_desc = alloc_msi_entry(dev, 1, NULL);
+- if (!msi_desc) {
+- dev_err(dev, "Failed to allocate msi entry\n");
+- error = -ENOMEM;
+- goto cleanup_msi_descs;
+- }
+-
+- msi_desc->fsl_mc.msi_index = i;
+- INIT_LIST_HEAD(&msi_desc->list);
+- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+- }
+-
+- return 0;
+-
+-cleanup_msi_descs:
+- fsl_mc_msi_free_descs(dev);
+- return error;
+-}
+-
+-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+- unsigned int irq_count)
+-{
+- struct irq_domain *msi_domain;
+- int error;
+-
+- if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
+- return -EINVAL;
+-
+- error = fsl_mc_msi_alloc_descs(dev, irq_count);
+- if (error < 0)
+- return error;
+-
+- msi_domain = dev_get_msi_domain(dev);
+- if (WARN_ON(!msi_domain)) {
+- error = -EINVAL;
+- goto cleanup_msi_descs;
+- }
+-
+- /*
+- * NOTE: Calling this function will trigger the invocation of the
+- * its_fsl_mc_msi_prepare() callback
+- */
+- error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+-
+- if (error) {
+- dev_err(dev, "Failed to allocate IRQs\n");
+- goto cleanup_msi_descs;
+- }
+-
+- return 0;
+-
+-cleanup_msi_descs:
+- fsl_mc_msi_free_descs(dev);
+- return error;
+-}
+-
+-void fsl_mc_msi_domain_free_irqs(struct device *dev)
+-{
+- struct irq_domain *msi_domain;
+-
+- msi_domain = dev_get_msi_domain(dev);
+- if (WARN_ON(!msi_domain))
+- return;
+-
+- msi_domain_free_irqs(msi_domain, dev);
+-
+- if (WARN_ON(list_empty(dev_to_msi_list(dev))))
+- return;
+-
+- fsl_mc_msi_free_descs(dev);
+-}
+--- /dev/null
++++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
+@@ -0,0 +1,285 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Freescale Management Complex (MC) bus driver MSI support
++ *
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera@freescale.com>
+ *
-+ * Return 0 for success, or negative error code for failure.
+ */
-+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
-+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN,
-+ 1, 0);
-+}
+
-+/**
-+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
-+ * @s: the software portal object
-+ * @channelid: the index of the channel to generate CDAN
-+ *
-+ * Return 0 for success, or negative error code for failure.
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/irq.h>
++#include <linux/irqdomain.h>
++#include <linux/msi.h>
++
++#include "fsl-mc-private.h"
++
++#ifdef GENERIC_MSI_DOMAIN_OPS
++/*
++ * Generate a unique ID identifying the interrupt (only used within the MSI
++ * irqdomain). Combine the icid with the interrupt index.
+ */
-+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
++static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
++ struct msi_desc *desc)
+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN,
-+ 0, 0);
++ /*
++ * Make the base hwirq value for ICID*10000 so it is readable
++ * as a decimal value in /proc/interrupts.
++ */
++ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
+
-+/**
-+ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
-+ * @s: the software portal object
-+ * @channelid: the index of the channel to generate CDAN
-+ * @ctx:i the context set in CDAN
-+ *
-+ * Return 0 for success, or negative error code for failure.
-+ */
-+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
-+ u16 channelid,
-+ u64 ctx)
++static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
++ struct msi_desc *desc)
+{
-+ return qbman_swp_CDAN_set(s, channelid,
-+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
-+ 1, ctx);
++ arg->desc = desc;
++ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
++ desc);
+}
++#else
++#define fsl_mc_msi_set_desc NULL
++#endif
+
-+/* Wraps up submit + poll-for-result */
-+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
-+ u8 cmd_verb)
++static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
-+ int loopvar = 1000;
-+
-+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
-+
-+ do {
-+ cmd = qbman_swp_mc_result(swp);
-+ } while (!cmd && loopvar--);
++ struct msi_domain_ops *ops = info->ops;
+
-+ WARN_ON(!loopvar);
++ if (!ops)
++ return;
+
-+ return cmd;
++ /*
++ * set_desc should not be set by the caller
++ */
++ if (!ops->set_desc)
++ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
-+/* ------------ */
-+/* qb_attr_code */
-+/* ------------ */
++static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
++ struct fsl_mc_device_irq *mc_dev_irq)
++{
++ int error;
++ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
++ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
++ struct dprc_irq_cfg irq_cfg;
+
-+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
-+ * is either serving as a configuration command or a query result. The
-+ * representation is inherently little-endian, as the indexing of the words is
-+ * itself little-endian in nature and layerscape is little endian for anything
-+ * that crosses a word boundary too (64-bit fields are the obvious examples).
-+ */
-+struct qb_attr_code {
-+ unsigned int word; /* which u32[] array member encodes the field */
-+ unsigned int lsoffset; /* encoding offset from ls-bit */
-+ unsigned int width; /* encoding width. (bool must be 1.) */
-+};
++ /*
++ * msi_desc->msg.address is 0x0 when this function is invoked in
++ * the free_irq() code path. In this case, for the MC, we don't
++ * really need to "unprogram" the MSI, so we just return.
++ */
++ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
++ return;
+
-+/* Some pre-defined codes */
-+extern struct qb_attr_code code_generic_verb;
-+extern struct qb_attr_code code_generic_rslt;
++ if (!owner_mc_dev)
++ return;
+
-+/* Macros to define codes */
-+#define QB_CODE(a, b, c) { a, b, c}
-+#define QB_CODE_NULL \
-+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
++ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
++ msi_desc->msg.address_lo;
++ irq_cfg.val = msi_desc->msg.data;
++ irq_cfg.irq_num = msi_desc->irq;
+
-+/* Rotate a code "ms", meaning that it moves from less-significant bytes to
-+ * more-significant, from less-significant words to more-significant, etc. The
-+ * "ls" version does the inverse, from more-significant towards
-+ * less-significant.
-+ */
-+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
-+ unsigned int bits)
-+{
-+ code->lsoffset += bits;
-+ while (code->lsoffset > 31) {
-+ code->word++;
-+ code->lsoffset -= 32;
++ if (owner_mc_dev == mc_bus_dev) {
++ /*
++ * IRQ is for the mc_bus_dev's DPRC itself
++ */
++ error = dprc_set_irq(mc_bus_dev->mc_io,
++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
++ mc_bus_dev->mc_handle,
++ mc_dev_irq->dev_irq_index,
++ &irq_cfg);
++ if (error < 0) {
++ dev_err(&owner_mc_dev->dev,
++ "dprc_set_irq() failed: %d\n", error);
++ }
++ } else {
++ /*
++ * IRQ is for a child device of mc_bus_dev
++ */
++ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
++ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
++ mc_bus_dev->mc_handle,
++ owner_mc_dev->obj_desc.type,
++ owner_mc_dev->obj_desc.id,
++ mc_dev_irq->dev_irq_index,
++ &irq_cfg);
++ if (error < 0) {
++ dev_err(&owner_mc_dev->dev,
++ "dprc_obj_set_irq() failed: %d\n", error);
++ }
+ }
+}
+
-+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
-+ unsigned int bits)
++/*
++ * NOTE: This function is invoked with interrupts disabled
++ */
++static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
++ struct msi_msg *msg)
+{
-+ /* Don't be fooled, this trick should work because the types are
-+ * unsigned. So the case that interests the while loop (the rotate has
-+ * gone too far and the word count needs to compensate for it), is
-+ * manifested when lsoffset is negative. But that equates to a really
-+ * large unsigned value, starting with lots of "F"s. As such, we can
-+ * continue adding 32 back to it until it wraps back round above zero,
-+ * to a value of 31 or less...
++ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
++ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
++ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ struct fsl_mc_device_irq *mc_dev_irq =
++ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
++
++ msi_desc->msg = *msg;
++
++ /*
++ * Program the MSI (paddr, value) pair in the device:
+ */
-+ code->lsoffset -= bits;
-+ while (code->lsoffset > 31) {
-+ code->word--;
-+ code->lsoffset += 32;
-+ }
++ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+}
+
-+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
-+#define qb_attr_code_for_ms(code, bits, expr) \
-+ for (; expr; qb_attr_code_rotate_ms(code, bits))
-+#define qb_attr_code_for_ls(code, bits, expr) \
-+ for (; expr; qb_attr_code_rotate_ls(code, bits))
-+
-+static inline void word_copy(void *d, const void *s, unsigned int cnt)
++static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
-+ u32 *dd = d;
-+ const u32 *ss = s;
++ struct irq_chip *chip = info->chip;
++
++ if (!chip)
++ return;
+
-+ while (cnt--)
-+ *(dd++) = *(ss++);
++ /*
++ * irq_write_msi_msg should not be set by the caller
++ */
++ if (!chip->irq_write_msi_msg)
++ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
-+/*
-+ * Currently, the CENA support code expects each 32-bit word to be written in
-+ * host order, and these are converted to hardware (little-endian) order on
-+ * command submission. However, 64-bit quantities are must be written (and read)
-+ * as two 32-bit words with the least-significant word first, irrespective of
-+ * host endianness.
++/**
++ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
++ * @np: Optional device-tree node of the interrupt controller
++ * @info: MSI domain info
++ * @parent: Parent irq domain
++ *
++ * Updates the domain and chip ops and creates a fsl-mc MSI
++ * interrupt domain.
++ *
++ * Returns:
++ * A domain pointer or NULL in case of failure.
+ */
-+static inline void u64_to_le32_copy(void *d, const u64 *s,
-+ unsigned int cnt)
++struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
++ struct msi_domain_info *info,
++ struct irq_domain *parent)
+{
-+ u32 *dd = d;
-+ const u32 *ss = (const u32 *)s;
++ struct irq_domain *domain;
+
-+ while (cnt--) {
-+ /*
-+ * TBD: the toolchain was choking on the use of 64-bit types up
-+ * until recently so this works entirely with 32-bit variables.
-+ * When 64-bit types become usable again, investigate better
-+ * ways of doing this.
-+ */
-+#if defined(__BIG_ENDIAN)
-+ *(dd++) = ss[1];
-+ *(dd++) = ss[0];
-+ ss += 2;
-+#else
-+ *(dd++) = *(ss++);
-+ *(dd++) = *(ss++);
-+#endif
-+ }
-+}
++ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
++ fsl_mc_msi_update_dom_ops(info);
++ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
++ fsl_mc_msi_update_chip_ops(info);
+
-+static inline void u64_from_le32_copy(u64 *d, const void *s,
-+ unsigned int cnt)
-+{
-+ const u32 *ss = s;
-+ u32 *dd = (u32 *)d;
++ domain = msi_create_irq_domain(fwnode, info, parent);
++ if (domain)
++ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
+
-+ while (cnt--) {
-+#if defined(__BIG_ENDIAN)
-+ dd[1] = *(ss++);
-+ dd[0] = *(ss++);
-+ dd += 2;
-+#else
-+ *(dd++) = *(ss++);
-+ *(dd++) = *(ss++);
-+#endif
-+ }
++ return domain;
+}
+
-+/* decode a field from a cacheline */
-+static inline u32 qb_attr_code_decode(const struct qb_attr_code *code,
-+ const u32 *cacheline)
++int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
++ struct irq_domain **mc_msi_domain)
+{
-+ return d32_u32(code->lsoffset, code->width, cacheline[code->word]);
-+}
++ struct irq_domain *msi_domain;
++ struct device_node *mc_of_node = mc_platform_dev->of_node;
+
-+static inline u64 qb_attr_code_decode_64(const struct qb_attr_code *code,
-+ const u64 *cacheline)
-+{
-+ u64 res;
++ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
++ DOMAIN_BUS_FSL_MC_MSI);
++ if (!msi_domain) {
++ pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
++ mc_of_node);
+
-+ u64_from_le32_copy(&res, &cacheline[code->word / 2], 1);
-+ return res;
-+}
++ return -ENOENT;
++ }
+
-+/* encode a field to a cacheline */
-+static inline void qb_attr_code_encode(const struct qb_attr_code *code,
-+ u32 *cacheline, u32 val)
-+{
-+ cacheline[code->word] =
-+ r32_u32(code->lsoffset, code->width, cacheline[code->word])
-+ | e32_u32(code->lsoffset, code->width, val);
++ *mc_msi_domain = msi_domain;
++ return 0;
+}
+
-+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
-+ u64 *cacheline, u64 val)
++static void fsl_mc_msi_free_descs(struct device *dev)
+{
-+ u64_to_le32_copy(&cacheline[code->word / 2], &val, 1);
-+}
++ struct msi_desc *desc, *tmp;
+
-+/* Small-width signed values (two's-complement) will decode into medium-width
-+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
-+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
-+ * 249. Likewise -120 would decode as 136.) This function allows the caller to
-+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
-+ * encoding, will become 0xfffffff9 if you cast the return value to u32).
-+ */
-+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
-+ u32 val)
-+{
-+ WARN_ON(val >= (1 << code->width));
-+ /* If the high bit was set, it was encoding a negative */
-+ if (val >= (1 << (code->width - 1)))
-+ return (int32_t)0 - (int32_t)(((u32)1 << code->width) -
-+ val);
-+ /* Otherwise, it was encoding a positive */
-+ return (int32_t)val;
++ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
++ list_del(&desc->list);
++ free_msi_entry(desc);
++ }
+}
+
-+/* ---------------------- */
-+/* Descriptors/cachelines */
-+/* ---------------------- */
-+
-+/* To avoid needless dynamic allocation, the driver API often gives the caller
-+ * a "descriptor" type that the caller can instantiate however they like.
-+ * Ultimately though, it is just a cacheline of binary storage (or something
-+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
-+ * holding pre-formatted pieces of hardware commands. The performance-critical
-+ * code can then copy these descriptors directly into hardware command
-+ * registers more efficiently than trying to construct/format commands
-+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
-+ * order for the compiler to know its size, but the internal details are not
-+ * exposed. The following macro is used within the driver for converting *any*
-+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
-+ * an inline) is necessary to work with different descriptor types and to work
-+ * correctly with const and non-const inputs (and similarly-qualified outputs).
-+ */
-+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
++static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+
-+#endif /* __FSL_QBMAN_PORTAL_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c
-@@ -0,0 +1,853 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++{
++ unsigned int i;
++ int error;
++ struct msi_desc *msi_desc;
+
-+#include <linux/errno.h>
++ for (i = 0; i < irq_count; i++) {
++ msi_desc = alloc_msi_entry(dev, 1, NULL);
++ if (!msi_desc) {
++ dev_err(dev, "Failed to allocate msi entry\n");
++ error = -ENOMEM;
++ goto cleanup_msi_descs;
++ }
+
-+#include "../../include/dpaa2-global.h"
-+#include "qbman-portal.h"
-+#include "qbman_debug.h"
-+
-+/* QBMan portal management command code */
-+#define QBMAN_BP_QUERY 0x32
-+#define QBMAN_FQ_QUERY 0x44
-+#define QBMAN_FQ_QUERY_NP 0x45
-+#define QBMAN_CGR_QUERY 0x51
-+#define QBMAN_WRED_QUERY 0x54
-+#define QBMAN_CGR_STAT_QUERY 0x55
-+#define QBMAN_CGR_STAT_QUERY_CLR 0x56
-+
-+enum qbman_attr_usage_e {
-+ qbman_attr_usage_fq,
-+ qbman_attr_usage_bpool,
-+ qbman_attr_usage_cgr,
-+};
++ msi_desc->fsl_mc.msi_index = i;
++ INIT_LIST_HEAD(&msi_desc->list);
++ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
++ }
+
-+struct int_qbman_attr {
-+ u32 words[32];
-+ enum qbman_attr_usage_e usage;
-+};
++ return 0;
+
-+#define attr_type_set(a, e) \
-+{ \
-+ struct qbman_attr *__attr = a; \
-+ enum qbman_attr_usage_e __usage = e; \
-+ ((struct int_qbman_attr *)__attr)->usage = __usage; \
-+}
-+
-+#define ATTR32(d) (&(d)->dont_manipulate_directly[0])
-+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16])
-+
-+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1);
-+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1);
-+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1);
-+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16);
-+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16);
-+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16);
-+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16);
-+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14);
-+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32);
-+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16);
-+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3);
-+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32);
-+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32);
-+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8);
-+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8);
-+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8);
-+
-+void qbman_bp_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_bpool);
++cleanup_msi_descs:
++ fsl_mc_msi_free_descs(dev);
++ return error;
+}
+
-+int qbman_bp_query(struct qbman_swp *s, u32 bpid,
-+ struct qbman_attr *a)
++int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
++ unsigned int irq_count)
+{
-+ u32 *p;
-+ u32 verb, rslt;
-+ u32 *attr = ATTR32(a);
-+
-+ qbman_bp_attr_clear(a);
++ struct irq_domain *msi_domain;
++ int error;
+
-+ /* Start the management command */
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
++ if (!list_empty(dev_to_msi_list(dev)))
++ return -EINVAL;
+
-+ /* Encode the caller-provided attributes */
-+ qb_attr_code_encode(&code_bp_bpid, p, bpid);
++ error = fsl_mc_msi_alloc_descs(dev, irq_count);
++ if (error < 0)
++ return error;
+
-+ /* Complete the management command */
-+ p = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
++ msi_domain = dev_get_msi_domain(dev);
++ if (!msi_domain) {
++ error = -EINVAL;
++ goto cleanup_msi_descs;
++ }
+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ WARN_ON(verb != QBMAN_BP_QUERY);
++ /*
++ * NOTE: Calling this function will trigger the invocation of the
++ * its_fsl_mc_msi_prepare() callback
++ */
++ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt);
-+ return -EIO;
++ if (error) {
++ dev_err(dev, "Failed to allocate IRQs\n");
++ goto cleanup_msi_descs;
+ }
+
-+ /* For the query, word[0] of the result contains only the
-+ * verb/rslt fields, so skip word[0].
-+ */
-+ word_copy(&attr[1], &p[1], 15);
+ return 0;
++
++cleanup_msi_descs:
++ fsl_mc_msi_free_descs(dev);
++ return error;
+}
+
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae)
++void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
-+ u32 *p = ATTR32(a);
++ struct irq_domain *msi_domain;
+
-+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p);
-+ *va = !!qb_attr_code_decode(&code_bp_va, p);
-+ *wae = !!qb_attr_code_decode(&code_bp_wae, p);
-+}
++ msi_domain = dev_get_msi_domain(dev);
++ if (!msi_domain)
++ return;
+
-+static u32 qbman_bp_thresh_to_value(u32 val)
-+{
-+ return (val & 0xff) << ((val & 0xf00) >> 8);
-+}
++ msi_domain_free_irqs(msi_domain, dev);
+
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet)
-+{
-+ u32 *p = ATTR32(a);
++ if (list_empty(dev_to_msi_list(dev)))
++ return;
+
-+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet,
-+ p));
++ fsl_mc_msi_free_descs(dev);
+}
+--- /dev/null
++++ b/drivers/bus/fsl-mc/fsl-mc-private.h
+@@ -0,0 +1,223 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Freescale Management Complex (MC) bus private declarations
++ *
++ * Copyright (C) 2016 Freescale Semiconductor, Inc.
++ *
++ */
++#ifndef _FSL_MC_PRIVATE_H_
++#define _FSL_MC_PRIVATE_H_
+
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt)
-+{
-+ u32 *p = ATTR32(a);
++#include <linux/fsl/mc.h>
++#include <linux/mutex.h>
++#include <linux/cdev.h>
++#include <linux/ioctl.h>
+
-+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt,
-+ p));
-+}
++/*
++ * Data Path Management Complex (DPMNG) General API
++ */
+
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet)
-+{
-+ u32 *p = ATTR32(a);
++/* DPMNG command versioning */
++#define DPMNG_CMD_BASE_VERSION 1
++#define DPMNG_CMD_ID_OFFSET 4
+
-+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet,
-+ p));
-+}
++#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt)
-+{
-+ u32 *p = ATTR32(a);
++/* DPMNG command IDs */
++#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
+
-+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt,
-+ p));
-+}
++struct dpmng_rsp_get_version {
++ __le32 revision;
++ __le32 version_major;
++ __le32 version_minor;
++};
+
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset)
-+{
-+ u32 *p = ATTR32(a);
++/*
++ * Data Path Management Command Portal (DPMCP) API
++ */
+
-+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset,
-+ p));
-+}
++/* Minimal supported DPMCP Version */
++#define DPMCP_MIN_VER_MAJOR 3
++#define DPMCP_MIN_VER_MINOR 0
+
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt)
-+{
-+ u32 *p = ATTR32(a);
++/* DPMCP command versioning */
++#define DPMCP_CMD_BASE_VERSION 1
++#define DPMCP_CMD_ID_OFFSET 4
+
-+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt,
-+ p));
-+}
++#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid)
-+{
-+ u32 *p = ATTR32(a);
++/* DPMCP command IDs */
++#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
++#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
++#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
-+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p);
-+}
++struct dpmcp_cmd_open {
++ __le32 dpmcp_id;
++};
+
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl)
-+{
-+ u32 *p = ATTR32(a);
++/*
++ * Initialization and runtime control APIs for DPMCP
++ */
++int dpmcp_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpmcp_id,
++ u16 *token);
+
-+ *icid = qb_attr_code_decode(&code_bp_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_bp_pl, p);
-+}
++int dpmcp_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr)
-+{
-+ u32 *p = ATTR32(a);
++int dpmcp_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+ *bpscn_addr = ((u64)qb_attr_code_decode(&code_bp_bpscn_addr_hi,
-+ p) << 32) |
-+ (u64)qb_attr_code_decode(&code_bp_bpscn_addr_lo,
-+ p);
-+}
++/*
++ * Data Path Buffer Pool (DPBP) API
++ */
+
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx)
-+{
-+ u32 *p = ATTR32(a);
++/* DPBP Version */
++#define DPBP_VER_MAJOR 3
++#define DPBP_VER_MINOR 2
+
-+ *bpscn_ctx = ((u64)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p)
-+ << 32) |
-+ (u64)qb_attr_code_decode(&code_bp_bpscn_ctx_lo,
-+ p);
-+}
++/* Command versioning */
++#define DPBP_CMD_BASE_VERSION 1
++#define DPBP_CMD_ID_OFFSET 4
+
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ)
-+{
-+ u32 *p = ATTR32(a);
++#define DPBP_CMD(id) (((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
-+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p);
-+}
++/* Command IDs */
++#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
++#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
+
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
++#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
++#define DPBP_CMDID_GET_ATTR DPBP_CMD(0x004)
++#define DPBP_CMDID_RESET DPBP_CMD(0x005)
+
-+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1);
-+}
++struct dpbp_cmd_open {
++ __le32 dpbp_id;
++};
+
-+int qbman_bp_info_is_depleted(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++#define DPBP_ENABLE 0x1
+
-+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2);
-+}
++struct dpbp_rsp_get_attributes {
++ /* response word 0 */
++ __le16 pad;
++ __le16 bpid;
++ __le32 id;
++ /* response word 1 */
++ __le16 version_major;
++ __le16 version_minor;
++};
+
-+int qbman_bp_info_is_surplus(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++/*
++ * Data Path Concentrator (DPCON) API
++ */
+
-+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4);
-+}
++/* DPCON Version */
++#define DPCON_VER_MAJOR 3
++#define DPCON_VER_MINOR 2
+
-+u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++/* Command versioning */
++#define DPCON_CMD_BASE_VERSION 1
++#define DPCON_CMD_ID_OFFSET 4
+
-+ return qb_attr_code_decode(&code_bp_fill, p);
-+}
++#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
+
-+u32 qbman_bp_info_hdptr(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++/* Command IDs */
++#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
++#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+
-+ return qb_attr_code_decode(&code_bp_hdptr, p);
-+}
++#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
++#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
++#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
++#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+
-+u32 qbman_bp_info_sdcnt(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
-+ return qb_attr_code_decode(&code_bp_sdcnt, p);
-+}
++struct dpcon_cmd_open {
++ __le32 dpcon_id;
++};
+
-+u32 qbman_bp_info_hdcnt(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++#define DPCON_ENABLE 1
+
-+ return qb_attr_code_decode(&code_bp_hdcnt, p);
-+}
++struct dpcon_rsp_get_attr {
++ /* response word 0 */
++ __le32 id;
++ __le16 qbman_ch_id;
++ u8 num_priorities;
++ u8 pad;
++};
+
-+u32 qbman_bp_info_sscnt(struct qbman_attr *a)
-+{
-+ u32 *p = ATTR32(a);
++struct dpcon_cmd_set_notification {
++ /* cmd word 0 */
++ __le32 dpio_id;
++ u8 priority;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le64 user_ctx;
++};
+
-+ return qb_attr_code_decode(&code_bp_sscnt, p);
-+}
++int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
++ struct fsl_mc_io *mc_io,
++ struct device *parent_dev,
++ const char *driver_override,
++ struct fsl_mc_device **new_mc_dev);
+
-+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24);
-+static struct qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16);
-+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15);
-+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8);
-+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15);
-+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12);
-+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1);
-+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1);
-+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1);
-+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1);
-+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1);
-+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1);
-+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32);
-+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15);
-+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1);
-+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24);
-+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24);
++int __init dprc_driver_init(void);
+
-+void qbman_fq_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_fq);
-+}
++void dprc_driver_exit(void);
+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, u32 fqid, struct qbman_attr *desc)
-+{
-+ u32 *p;
-+ u32 verb, rslt;
-+ u32 *d = ATTR32(desc);
++int __init fsl_mc_allocator_driver_init(void);
+
-+ qbman_fq_attr_clear(desc);
++void fsl_mc_allocator_driver_exit(void);
+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY);
++int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
++ enum fsl_mc_pool_type pool_type,
++ struct fsl_mc_resource
++ **new_resource);
+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ WARN_ON(verb != QBMAN_FQ_QUERY);
++void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query of FQID 0x%x failed, code=0x%02x\n",
-+ fqid, rslt);
-+ return -EIO;
-+ }
-+ /*
-+ * For the configure, word[0] of the command contains only the WE-mask.
-+ * For the query, word[0] of the result contains only the verb/rslt
-+ * fields. Skip word[0] in the latter case.
-+ */
-+ word_copy(&d[1], &p[1], 15);
-+ return 0;
-+}
++int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
++ unsigned int irq_count);
+
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl)
-+{
-+ u32 *p = ATTR32(d);
++void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
-+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p);
-+}
++int __must_check fsl_create_mc_io(struct device *dev,
++ phys_addr_t mc_portal_phys_addr,
++ u32 mc_portal_size,
++ struct fsl_mc_device *dpmcp_dev,
++ u32 flags, struct fsl_mc_io **new_mc_io);
+
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid)
-+{
-+ u32 *p = ATTR32(d);
++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
-+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p);
-+}
++bool fsl_mc_is_root_dprc(struct device *dev);
+
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq)
-+{
-+ u32 *p = ATTR32(d);
++#ifdef CONFIG_FSL_MC_RESTOOL
+
-+ *destwq = qb_attr_code_decode(&code_fq_destwq, p);
-+}
++int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus);
+
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred)
-+{
-+ u32 *p = ATTR32(d);
++void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus);
+
-+ *icscred = qb_attr_code_decode(&code_fq_icscred, p);
-+}
++int fsl_mc_restool_init(void);
+
-+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5);
-+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8);
-+static u32 qbman_thresh_to_value(u32 val)
-+{
-+ u32 m, e;
++#else
+
-+ m = qb_attr_code_decode(&code_tdthresh_mant, &val);
-+ e = qb_attr_code_decode(&code_tdthresh_exp, &val);
-+ return m << e;
++static inline int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus)
++{
++ return 0;
+}
+
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh)
++static inline void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
-+ u32 *p = ATTR32(d);
-+
-+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh,
-+ p));
+}
+
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+ int *oa_ics, int *oa_cgr, int32_t *oa_len)
++static inline int fsl_mc_restool_init(void)
+{
-+ u32 *p = ATTR32(d);
-+
-+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p);
-+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p);
-+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len,
-+ qb_attr_code_decode(&code_fq_oa_len, p));
++ return 0;
+}
+
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+ int *bdi, int *ff, int *va, int *ps)
-+{
-+ u32 *p = ATTR32(d);
++#endif
+
-+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p);
-+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p);
-+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p);
-+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p);
-+}
++#endif /* _FSL_MC_PRIVATE_H_ */
+--- /dev/null
++++ b/drivers/bus/fsl-mc/fsl-mc-restool.c
+@@ -0,0 +1,219 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Management Complex (MC) restool support
++ *
++ * Copyright 2018 NXP
++ *
++ */
+
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo)
-+{
-+ u32 *p = ATTR32(d);
++#include <linux/slab.h>
++#include <linux/cdev.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
+
-+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p);
-+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p);
-+}
++#include "fsl-mc-private.h"
+
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl)
-+{
-+ u32 *p = ATTR32(d);
++#define FSL_MC_BUS_MAX_MINORS 1
+
-+ *icid = qb_attr_code_decode(&code_fq_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_fq_pl, p);
-+}
++static struct class *fsl_mc_bus_class;
++static int fsl_mc_bus_major;
+
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid)
++static int fsl_mc_restool_send_command(unsigned long arg,
++ struct fsl_mc_io *mc_io)
+{
-+ u32 *p = ATTR32(d);
++ struct fsl_mc_command mc_cmd;
++ int error;
+
-+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p);
-+}
++ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd));
++ if (error)
++ return -EFAULT;
+
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid)
-+{
-+ u32 *p = ATTR32(d);
++ error = mc_send_command(mc_io, &mc_cmd);
++ if (error)
++ return error;
+
-+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p);
-+}
++ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd));
++ if (error)
++ return -EFAULT;
+
-+/* Query FQ Non-Programmalbe Fields */
-+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3);
-+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1);
-+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1);
-+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1);
-+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1);
-+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24);
-+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32);
++ return 0;
++}
+
-+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
-+ struct qbman_attr *state)
++int fsl_mc_restool_init(void)
+{
-+ u32 *p;
-+ u32 verb, rslt;
-+ u32 *d = ATTR32(state);
++ dev_t dev;
++ int error;
+
-+ qbman_fq_attr_clear(state);
++ fsl_mc_bus_class = class_create(THIS_MODULE, "fsl_mc_bus");
++ if (IS_ERR(fsl_mc_bus_class)) {
++ error = PTR_ERR(fsl_mc_bus_class);
++ return error;
++ }
+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ qb_attr_code_encode(&code_fq_fqid, p, fqid);
-+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++ error = alloc_chrdev_region(&dev, 0,
++ FSL_MC_BUS_MAX_MINORS,
++ "fsl_mc_bus");
++ if (error < 0)
++ return error;
+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ WARN_ON(verb != QBMAN_FQ_QUERY_NP);
++ fsl_mc_bus_major = MAJOR(dev);
+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
-+ fqid, rslt);
-+ return -EIO;
-+ }
-+ word_copy(&d[0], &p[0], 16);
+ return 0;
+}
+
-+u32 qbman_fq_state_schedstate(const struct qbman_attr *state)
++static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
+{
-+ const u32 *p = ATTR32(state);
++ struct fsl_mc_device *root_mc_device;
++ struct fsl_mc_restool *mc_restool;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_io *dynamic_mc_io;
++ int error;
+
-+ return qb_attr_code_decode(&code_fq_np_state, p);
-+}
++ mc_restool = container_of(inode->i_cdev, struct fsl_mc_restool, cdev);
++ mc_bus = container_of(mc_restool, struct fsl_mc_bus, restool_misc);
++ root_mc_device = &mc_bus->mc_dev;
+
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state)
-+{
-+ const u32 *p = ATTR32(state);
++ mutex_lock(&mc_restool->mutex);
+
-+ return !!qb_attr_code_decode(&code_fq_np_fe, p);
-+}
++ if (!mc_restool->local_instance_in_use) {
++ filep->private_data = root_mc_device->mc_io;
++ mc_restool->local_instance_in_use = true;
++ } else {
++ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
++ if (!dynamic_mc_io) {
++ error = -ENOMEM;
++ goto error_alloc_mc_io;
++ }
+
-+int qbman_fq_state_xoff(const struct qbman_attr *state)
-+{
-+ const u32 *p = ATTR32(state);
++ error = fsl_mc_portal_allocate(root_mc_device, 0,
++ &dynamic_mc_io);
++ if (error) {
++ pr_err("Could not allocate MC portal\n");
++ goto error_portal_allocate;
++ }
+
-+ return !!qb_attr_code_decode(&code_fq_np_x, p);
-+}
++ mc_restool->dynamic_instance_count++;
++ filep->private_data = dynamic_mc_io;
++ }
+
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state)
-+{
-+ const u32 *p = ATTR32(state);
++ mutex_unlock(&mc_restool->mutex);
+
-+ return !!qb_attr_code_decode(&code_fq_np_r, p);
-+}
++ return 0;
+
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state)
-+{
-+ const u32 *p = ATTR32(state);
++error_portal_allocate:
++ kfree(dynamic_mc_io);
++
++error_alloc_mc_io:
++ mutex_unlock(&mc_restool->mutex);
+
-+ return !!qb_attr_code_decode(&code_fq_np_oe, p);
++ return error;
+}
+
-+u32 qbman_fq_state_frame_count(const struct qbman_attr *state)
++static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
+{
-+ const u32 *p = ATTR32(state);
++ struct fsl_mc_device *root_mc_device;
++ struct fsl_mc_restool *mc_restool;
++ struct fsl_mc_bus *mc_bus;
++ struct fsl_mc_io *mc_io;
+
-+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p);
-+}
++ mc_restool = container_of(inode->i_cdev, struct fsl_mc_restool, cdev);
++ mc_bus = container_of(mc_restool, struct fsl_mc_bus, restool_misc);
++ root_mc_device = &mc_bus->mc_dev;
++ mc_io = filep->private_data;
+
-+u32 qbman_fq_state_byte_count(const struct qbman_attr *state)
-+{
-+ const u32 *p = ATTR32(state);
++ mutex_lock(&mc_restool->mutex);
+
-+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p);
-+}
++ if (WARN_ON(!mc_restool->local_instance_in_use &&
++ mc_restool->dynamic_instance_count == 0)) {
++ mutex_unlock(&mc_restool->mutex);
++ return -EINVAL;
++ }
+
-+/* Query CGR */
-+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1);
-+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1);
-+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2);
-+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1);
-+static struct qb_attr_code code_cgr_cscn_bdi = QB_CODE(3, 19, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1);
-+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1);
-+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1);
-+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1);
-+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1);
-+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5);
-+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1);
-+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13);
-+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13);
-+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13);
-+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16);
-+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16);
-+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16);
-+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15);
-+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1);
-+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32);
-+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32);
-+static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32);
++ if (filep->private_data == root_mc_device->mc_io) {
++ mc_restool->local_instance_in_use = false;
++ } else {
++ fsl_mc_portal_free(mc_io);
++ kfree(mc_io);
++ mc_restool->dynamic_instance_count--;
++ }
+
-+void qbman_cgr_attr_clear(struct qbman_attr *a)
-+{
-+ memset(a, 0, sizeof(*a));
-+ attr_type_set(a, qbman_attr_usage_cgr);
++ filep->private_data = NULL;
++ mutex_unlock(&mc_restool->mutex);
++
++ return 0;
+}
+
-+int qbman_cgr_query(struct qbman_swp *s, u32 cgid, struct qbman_attr *attr)
++static long fsl_mc_restool_dev_ioctl(struct file *file,
++ unsigned int cmd,
++ unsigned long arg)
+{
-+ u32 *p;
-+ u32 verb, rslt;
-+ u32 *d[2];
-+ int i;
-+ u32 query_verb;
++ int error;
++
++ switch (cmd) {
++ case RESTOOL_SEND_MC_COMMAND:
++ error = fsl_mc_restool_send_command(arg, file->private_data);
++ break;
++ default:
++ pr_err("%s: unexpected ioctl call number\n", __func__);
++ error = -EINVAL;
++ }
+
-+ d[0] = ATTR32(attr);
-+ d[1] = ATTR32_1(attr);
++ return error;
++}
+
-+ qbman_cgr_attr_clear(attr);
++static const struct file_operations fsl_mc_restool_dev_fops = {
++ .owner = THIS_MODULE,
++ .open = fsl_mc_restool_dev_open,
++ .release = fsl_mc_restool_dev_release,
++ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
++};
+
-+ for (i = 0; i < 2; i++) {
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
-+ return -EBUSY;
-+ query_verb = i ? QBMAN_WRED_QUERY : QBMAN_CGR_QUERY;
++int fsl_mc_restool_create_device_file(struct fsl_mc_bus *mc_bus)
++{
++ struct fsl_mc_device *mc_dev = &mc_bus->mc_dev;
++ struct fsl_mc_restool *mc_restool = &mc_bus->restool_misc;
++ int error;
+
-+ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
++ mc_restool = &mc_bus->restool_misc;
++ mc_restool->dev = MKDEV(fsl_mc_bus_major, 0);
++ cdev_init(&mc_restool->cdev, &fsl_mc_restool_dev_fops);
+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ WARN_ON(verb != query_verb);
++ error = cdev_add(&mc_restool->cdev,
++ mc_restool->dev,
++ FSL_MC_BUS_MAX_MINORS);
++ if (error)
++ return error;
+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query CGID 0x%x failed,", cgid);
-+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt);
-+ return -EIO;
-+ }
-+ /* For the configure, word[0] of the command contains only the
-+ * verb/cgid. For the query, word[0] of the result contains
-+ * only the verb/rslt fields. Skip word[0] in the latter case.
-+ */
-+ word_copy(&d[i][1], &p[1], 15);
++ mc_restool->device = device_create(fsl_mc_bus_class,
++ NULL,
++ mc_restool->dev,
++ NULL,
++ "%s",
++ dev_name(&mc_dev->dev));
++ if (IS_ERR(mc_restool->device)) {
++ error = PTR_ERR(mc_restool->device);
++ goto error_device_create;
+ }
-+ return 0;
-+}
+
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+ int *cscn_wq_en_exit, int *cscn_wq_icd)
-+ {
-+ u32 *p = ATTR32(d);
-+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter,
-+ p);
-+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p);
-+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p);
-+}
++ mutex_init(&mc_restool->mutex);
+
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
-+ int *rej_cnt_mode, int *cscn_bdi)
-+{
-+ u32 *p = ATTR32(d);
-+ *mode = qb_attr_code_decode(&code_cgr_mode, p);
-+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p);
-+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p);
-+}
++ return 0;
+
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+ int *cscn_wr_en_exit, int *cg_wr_ae,
-+ int *cscn_dcp_en, int *cg_wr_va)
-+{
-+ u32 *p = ATTR32(d);
-+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter,
-+ p);
-+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p);
-+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p);
-+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p);
-+ *cg_wr_va = !!qb_attr_code_decode(&code_cgr_cg_wr_va, p);
-+}
++error_device_create:
++ cdev_del(&mc_restool->cdev);
+
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+ u32 *i_cnt_wr_bnd)
-+{
-+ u32 *p = ATTR32(d);
-+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p);
-+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p);
++ return error;
+}
+
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en)
++void fsl_mc_restool_remove_device_file(struct fsl_mc_bus *mc_bus)
+{
-+ u32 *p = ATTR32(d);
-+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p);
-+}
++ struct fsl_mc_restool *mc_restool = &mc_bus->restool_misc;
+
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres)
-+{
-+ u32 *p = ATTR32(d);
-+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_cs_thres, p));
-+}
++ if (WARN_ON(mc_restool->local_instance_in_use))
++ return;
+
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+ u32 *cs_thres_x)
-+{
-+ u32 *p = ATTR32(d);
-+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_cs_thres_x, p));
-+}
++ if (WARN_ON(mc_restool->dynamic_instance_count != 0))
++ return;
+
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres)
-+{
-+ u32 *p = ATTR32(d);
-+ *td_thres = qbman_thresh_to_value(qb_attr_code_decode(
-+ &code_cgr_td_thres, p));
++ cdev_del(&mc_restool->cdev);
+}
+--- a/drivers/staging/fsl-mc/bus/mc-io.c
++++ /dev/null
+@@ -1,320 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-#include <linux/io.h>
+-#include "../include/mc-bus.h"
+-#include "../include/mc-sys.h"
+-
+-#include "fsl-mc-private.h"
+-#include "dpmcp.h"
+-#include "dpmcp-cmd.h"
+-
+-static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+- struct fsl_mc_device *dpmcp_dev)
+-{
+- int error;
+-
+- if (WARN_ON(!dpmcp_dev))
+- return -EINVAL;
+-
+- if (WARN_ON(mc_io->dpmcp_dev))
+- return -EINVAL;
+-
+- if (WARN_ON(dpmcp_dev->mc_io))
+- return -EINVAL;
+-
+- error = dpmcp_open(mc_io,
+- 0,
+- dpmcp_dev->obj_desc.id,
+- &dpmcp_dev->mc_handle);
+- if (error < 0)
+- return error;
+-
+- mc_io->dpmcp_dev = dpmcp_dev;
+- dpmcp_dev->mc_io = mc_io;
+- return 0;
+-}
+-
+-static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+-{
+- int error;
+- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+-
+- if (WARN_ON(!dpmcp_dev))
+- return;
+-
+- if (WARN_ON(dpmcp_dev->mc_io != mc_io))
+- return;
+-
+- error = dpmcp_close(mc_io,
+- 0,
+- dpmcp_dev->mc_handle);
+- if (error < 0) {
+- dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+- error);
+- }
+-
+- mc_io->dpmcp_dev = NULL;
+- dpmcp_dev->mc_io = NULL;
+-}
+-
+-/**
+- * Creates an MC I/O object
+- *
+- * @dev: device to be associated with the MC I/O object
+- * @mc_portal_phys_addr: physical address of the MC portal to use
+- * @mc_portal_size: size in bytes of the MC portal
+- * @dpmcp-dev: Pointer to the DPMCP object associated with this MC I/O
+- * object or NULL if none.
+- * @flags: flags for the new MC I/O object
+- * @new_mc_io: Area to return pointer to newly created MC I/O object
+- *
+- * Returns '0' on Success; Error code otherwise.
+- */
+-int __must_check fsl_create_mc_io(struct device *dev,
+- phys_addr_t mc_portal_phys_addr,
+- u32 mc_portal_size,
+- struct fsl_mc_device *dpmcp_dev,
+- u32 flags, struct fsl_mc_io **new_mc_io)
+-{
+- int error;
+- struct fsl_mc_io *mc_io;
+- void __iomem *mc_portal_virt_addr;
+- struct resource *res;
+-
+- mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+- if (!mc_io)
+- return -ENOMEM;
+-
+- mc_io->dev = dev;
+- mc_io->flags = flags;
+- mc_io->portal_phys_addr = mc_portal_phys_addr;
+- mc_io->portal_size = mc_portal_size;
+- if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+- spin_lock_init(&mc_io->spinlock);
+- else
+- mutex_init(&mc_io->mutex);
+-
+- res = devm_request_mem_region(dev,
+- mc_portal_phys_addr,
+- mc_portal_size,
+- "mc_portal");
+- if (!res) {
+- dev_err(dev,
+- "devm_request_mem_region failed for MC portal %#llx\n",
+- mc_portal_phys_addr);
+- return -EBUSY;
+- }
+-
+- mc_portal_virt_addr = devm_ioremap_nocache(dev,
+- mc_portal_phys_addr,
+- mc_portal_size);
+- if (!mc_portal_virt_addr) {
+- dev_err(dev,
+- "devm_ioremap_nocache failed for MC portal %#llx\n",
+- mc_portal_phys_addr);
+- return -ENXIO;
+- }
+-
+- mc_io->portal_virt_addr = mc_portal_virt_addr;
+- if (dpmcp_dev) {
+- error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+- if (error < 0)
+- goto error_destroy_mc_io;
+- }
+-
+- *new_mc_io = mc_io;
+- return 0;
+-
+-error_destroy_mc_io:
+- fsl_destroy_mc_io(mc_io);
+- return error;
+-}
+-
+-/**
+- * Destroys an MC I/O object
+- *
+- * @mc_io: MC I/O object to destroy
+- */
+-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+-{
+- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+-
+- if (dpmcp_dev)
+- fsl_mc_io_unset_dpmcp(mc_io);
+-
+- devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+- devm_release_mem_region(mc_io->dev,
+- mc_io->portal_phys_addr,
+- mc_io->portal_size);
+-
+- mc_io->portal_virt_addr = NULL;
+- devm_kfree(mc_io->dev, mc_io);
+-}
+-
+-/**
+- * fsl_mc_portal_allocate - Allocates an MC portal
+- *
+- * @mc_dev: MC device for which the MC portal is to be allocated
+- * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+- * MC portal.
+- * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+- * that wraps the allocated MC portal is to be returned
+- *
+- * This function allocates an MC portal from the device's parent DPRC,
+- * from the corresponding MC bus' pool of MC portals and wraps
+- * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+- * portal is allocated from its own MC bus.
+- */
+-int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+- u16 mc_io_flags,
+- struct fsl_mc_io **new_mc_io)
+-{
+- struct fsl_mc_device *mc_bus_dev;
+- struct fsl_mc_bus *mc_bus;
+- phys_addr_t mc_portal_phys_addr;
+- size_t mc_portal_size;
+- struct fsl_mc_device *dpmcp_dev;
+- int error = -EINVAL;
+- struct fsl_mc_resource *resource = NULL;
+- struct fsl_mc_io *mc_io = NULL;
+-
+- if (mc_dev->flags & FSL_MC_IS_DPRC) {
+- mc_bus_dev = mc_dev;
+- } else {
+- if (WARN_ON(!dev_is_fsl_mc(mc_dev->dev.parent)))
+- return error;
+-
+- mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+- }
+-
+- mc_bus = to_fsl_mc_bus(mc_bus_dev);
+- *new_mc_io = NULL;
+- error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+- if (error < 0)
+- return error;
+-
+- error = -EINVAL;
+- dpmcp_dev = resource->data;
+- if (WARN_ON(!dpmcp_dev))
+- goto error_cleanup_resource;
+-
+- if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+- (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+- dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+- dev_err(&dpmcp_dev->dev,
+- "ERROR: Version %d.%d of DPMCP not supported.\n",
+- dpmcp_dev->obj_desc.ver_major,
+- dpmcp_dev->obj_desc.ver_minor);
+- error = -ENOTSUPP;
+- goto error_cleanup_resource;
+- }
+-
+- if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
+- goto error_cleanup_resource;
+-
+- mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+- mc_portal_size = dpmcp_dev->regions[0].end -
+- dpmcp_dev->regions[0].start + 1;
+-
+- if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size))
+- goto error_cleanup_resource;
+-
+- error = fsl_create_mc_io(&mc_bus_dev->dev,
+- mc_portal_phys_addr,
+- mc_portal_size, dpmcp_dev,
+- mc_io_flags, &mc_io);
+- if (error < 0)
+- goto error_cleanup_resource;
+-
+- *new_mc_io = mc_io;
+- return 0;
+-
+-error_cleanup_resource:
+- fsl_mc_resource_free(resource);
+- return error;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
+-
+-/**
+- * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+- * of a given MC bus
+- *
+- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+- */
+-void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+-{
+- struct fsl_mc_device *dpmcp_dev;
+- struct fsl_mc_resource *resource;
+-
+- /*
+- * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+- * to have a DPMCP object associated with.
+- */
+- dpmcp_dev = mc_io->dpmcp_dev;
+- if (WARN_ON(!dpmcp_dev))
+- return;
+-
+- resource = dpmcp_dev->resource;
+- if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP))
+- return;
+-
+- if (WARN_ON(resource->data != dpmcp_dev))
+- return;
+-
+- fsl_destroy_mc_io(mc_io);
+- fsl_mc_resource_free(resource);
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+-
+-/**
+- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
+- *
+- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+- */
+-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+-{
+- int error;
+- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+-
+- if (WARN_ON(!dpmcp_dev))
+- return -EINVAL;
+-
+- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+- if (error < 0) {
+- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+- return error;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/mc-io.c
+@@ -0,0 +1,268 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ */
+
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp)
-+{
-+ u32 *p = ATTR32(d);
-+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p);
-+}
++#include <linux/io.h>
++#include <linux/fsl/mc.h>
+
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid)
-+{
-+ u32 *p = ATTR32(d);
-+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p);
-+}
++#include "fsl-mc-private.h"
+
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+ u32 *cscn_vcgid)
++static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
++ struct fsl_mc_device *dpmcp_dev)
+{
-+ u32 *p = ATTR32(d);
-+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p);
-+}
++ int error;
+
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
-+ int *pl)
-+{
-+ u32 *p = ATTR32(d);
-+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p);
-+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p);
-+}
++ if (mc_io->dpmcp_dev)
++ return -EINVAL;
+
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+ u64 *cg_wr_addr)
-+{
-+ u32 *p = ATTR32(d);
-+ *cg_wr_addr = ((u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi,
-+ p) << 32) |
-+ (u64)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo,
-+ p);
-+}
++ if (dpmcp_dev->mc_io)
++ return -EINVAL;
+
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx)
-+{
-+ u32 *p = ATTR32(d);
-+ *cscn_ctx = ((u64)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p)
-+ << 32) |
-+ (u64)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p);
-+}
++ error = dpmcp_open(mc_io,
++ 0,
++ dpmcp_dev->obj_desc.id,
++ &dpmcp_dev->mc_handle);
++ if (error < 0)
++ return error;
+
-+#define WRED_EDP_WORD(n) (18 + (n) / 4)
-+#define WRED_EDP_OFFSET(n) (8 * ((n) % 4))
-+#define WRED_PARM_DP_WORD(n) ((n) + 20)
-+#define WRED_WE_EDP(n) (16 + (n) * 2)
-+#define WRED_WE_PARM_DP(n) (17 + (n) * 2)
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
-+ int *edp)
-+{
-+ u32 *p = ATTR32(d);
-+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx),
-+ WRED_EDP_OFFSET(idx), 8);
-+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p);
++ mc_io->dpmcp_dev = dpmcp_dev;
++ dpmcp_dev->mc_io = mc_io;
++ return 0;
+}
+
-+void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
-+ u64 *maxth, u8 *maxp)
++static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
-+ u8 ma, mn, step_i, step_s, pn;
-+
-+ ma = (u8)(dp >> 24);
-+ mn = (u8)(dp >> 19) & 0x1f;
-+ step_i = (u8)(dp >> 11);
-+ step_s = (u8)(dp >> 6) & 0x1f;
-+ pn = (u8)dp & 0x3f;
-+
-+ *maxp = ((pn << 2) * 100) / 256;
++ int error;
++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
-+ if (mn == 0)
-+ *maxth = ma;
-+ else
-+ *maxth = ((ma + 256) * (1 << (mn - 1)));
++ error = dpmcp_close(mc_io,
++ 0,
++ dpmcp_dev->mc_handle);
++ if (error < 0) {
++ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
++ error);
++ }
+
-+ if (step_s == 0)
-+ *minth = *maxth - step_i;
-+ else
-+ *minth = *maxth - (256 + step_i) * (1 << (step_s - 1));
++ mc_io->dpmcp_dev = NULL;
++ dpmcp_dev->mc_io = NULL;
+}
+
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
-+ u32 *dp)
++/**
++ * Creates an MC I/O object
++ *
++ * @dev: device to be associated with the MC I/O object
++ * @mc_portal_phys_addr: physical address of the MC portal to use
++ * @mc_portal_size: size in bytes of the MC portal
++ * @dpmcp-dev: Pointer to the DPMCP object associated with this MC I/O
++ * object or NULL if none.
++ * @flags: flags for the new MC I/O object
++ * @new_mc_io: Area to return pointer to newly created MC I/O object
++ *
++ * Returns '0' on Success; Error code otherwise.
++ */
++int __must_check fsl_create_mc_io(struct device *dev,
++ phys_addr_t mc_portal_phys_addr,
++ u32 mc_portal_size,
++ struct fsl_mc_device *dpmcp_dev,
++ u32 flags, struct fsl_mc_io **new_mc_io)
+{
-+ u32 *p = ATTR32(d);
-+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx),
-+ 0, 8);
-+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p);
-+}
++ int error;
++ struct fsl_mc_io *mc_io;
++ void __iomem *mc_portal_virt_addr;
++ struct resource *res;
+
-+/* Query CGR/CCGR/CQ statistics */
-+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32);
-+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32);
-+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16);
-+static int qbman_cgr_statistics_query(struct qbman_swp *s, u32 cgid,
-+ int clear, u32 command_type,
-+ u64 *frame_cnt, u64 *byte_cnt)
-+{
-+ u32 *p;
-+ u32 verb, rslt;
-+ u32 query_verb;
-+ u32 hi, lo;
++ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
++ if (!mc_io)
++ return -ENOMEM;
+
-+ p = qbman_swp_mc_start(s);
-+ if (!p)
++ mc_io->dev = dev;
++ mc_io->flags = flags;
++ mc_io->portal_phys_addr = mc_portal_phys_addr;
++ mc_io->portal_size = mc_portal_size;
++ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
++ spin_lock_init(&mc_io->spinlock);
++ else
++ mutex_init(&mc_io->mutex);
++
++ res = devm_request_mem_region(dev,
++ mc_portal_phys_addr,
++ mc_portal_size,
++ "mc_portal");
++ if (!res) {
++ dev_err(dev,
++ "devm_request_mem_region failed for MC portal %pa\n",
++ &mc_portal_phys_addr);
+ return -EBUSY;
-+
-+ qb_attr_code_encode(&code_cgr_cgid, p, cgid);
-+ if (command_type < 2)
-+ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type);
-+ query_verb = clear ?
-+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY;
-+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb);
-+
-+ /* Decode the outcome */
-+ verb = qb_attr_code_decode(&code_generic_verb, p);
-+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
-+ WARN_ON(verb != query_verb);
-+
-+ /* Determine success or failure */
-+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
-+ pr_err("Query statistics of CGID 0x%x failed,", cgid);
-+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt);
-+ return -EIO;
+ }
+
-+ if (*frame_cnt) {
-+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p);
-+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p);
-+ *frame_cnt = ((u64)hi << 32) | (u64)lo;
++ mc_portal_virt_addr = devm_ioremap_nocache(dev,
++ mc_portal_phys_addr,
++ mc_portal_size);
++ if (!mc_portal_virt_addr) {
++ dev_err(dev,
++ "devm_ioremap_nocache failed for MC portal %pa\n",
++ &mc_portal_phys_addr);
++ return -ENXIO;
+ }
-+ if (*byte_cnt) {
-+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p);
-+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p);
-+ *byte_cnt = ((u64)hi << 32) | (u64)lo;
++
++ mc_io->portal_virt_addr = mc_portal_virt_addr;
++ if (dpmcp_dev) {
++ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
++ if (error < 0)
++ goto error_destroy_mc_io;
+ }
+
++ *new_mc_io = mc_io;
+ return 0;
-+}
+
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt)
-+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff,
-+ frame_cnt, byte_cnt);
++error_destroy_mc_io:
++ fsl_destroy_mc_io(mc_io);
++ return error;
+}
+
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt)
++/**
++ * Destroys an MC I/O object
++ *
++ * @mc_io: MC I/O object to destroy
++ */
++void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 1,
-+ frame_cnt, byte_cnt);
-+}
++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt)
-+{
-+ return qbman_cgr_statistics_query(s, cgid, clear, 0,
-+ frame_cnt, byte_cnt);
++ if (dpmcp_dev)
++ fsl_mc_io_unset_dpmcp(mc_io);
++
++ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
++ devm_release_mem_region(mc_io->dev,
++ mc_io->portal_phys_addr,
++ mc_io->portal_size);
++
++ mc_io->portal_virt_addr = NULL;
++ devm_kfree(mc_io->dev, mc_io);
+}
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h
-@@ -0,0 +1,136 @@
-+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
++
++/**
++ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * @mc_dev: MC device for which the MC portal is to be allocated
++ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
++ * MC portal.
++ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
++ * that wraps the allocated MC portal is to be returned
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * This function allocates an MC portal from the device's parent DPRC,
++ * from the corresponding MC bus' pool of MC portals and wraps
++ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
++ * portal is allocated from its own MC bus.
+ */
++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
++ u16 mc_io_flags,
++ struct fsl_mc_io **new_mc_io)
++{
++ struct fsl_mc_device *mc_bus_dev;
++ struct fsl_mc_bus *mc_bus;
++ phys_addr_t mc_portal_phys_addr;
++ size_t mc_portal_size;
++ struct fsl_mc_device *dpmcp_dev;
++ int error = -EINVAL;
++ struct fsl_mc_resource *resource = NULL;
++ struct fsl_mc_io *mc_io = NULL;
++
++ if (mc_dev->flags & FSL_MC_IS_DPRC) {
++ mc_bus_dev = mc_dev;
++ } else {
++ if (!dev_is_fsl_mc(mc_dev->dev.parent))
++ return error;
+
-+struct qbman_attr {
-+ u32 dont_manipulate_directly[40];
-+};
++ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
++ }
+
-+/* Buffer pool query commands */
-+int qbman_bp_query(struct qbman_swp *s, u32 bpid,
-+ struct qbman_attr *a);
-+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae);
-+void qbman_bp_attr_get_swdet(struct qbman_attr *a, u32 *swdet);
-+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, u32 *swdxt);
-+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, u32 *hwdet);
-+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, u32 *hwdxt);
-+void qbman_bp_attr_get_swset(struct qbman_attr *a, u32 *swset);
-+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, u32 *swsxt);
-+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, u32 *vbpid);
-+void qbman_bp_attr_get_icid(struct qbman_attr *a, u32 *icid, int *pl);
-+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, u64 *bpscn_addr);
-+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, u64 *bpscn_ctx);
-+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, u32 *hw_targ);
-+int qbman_bp_info_has_free_bufs(struct qbman_attr *a);
-+int qbman_bp_info_is_depleted(struct qbman_attr *a);
-+int qbman_bp_info_is_surplus(struct qbman_attr *a);
-+u32 qbman_bp_info_num_free_bufs(struct qbman_attr *a);
-+u32 qbman_bp_info_hdptr(struct qbman_attr *a);
-+u32 qbman_bp_info_sdcnt(struct qbman_attr *a);
-+u32 qbman_bp_info_hdcnt(struct qbman_attr *a);
-+u32 qbman_bp_info_sscnt(struct qbman_attr *a);
-+
-+/* FQ query function for programmable fields */
-+int qbman_fq_query(struct qbman_swp *s, u32 fqid,
-+ struct qbman_attr *desc);
-+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, u32 *fqctrl);
-+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, u32 *cgrid);
-+void qbman_fq_attr_get_destwq(struct qbman_attr *d, u32 *destwq);
-+void qbman_fq_attr_get_icscred(struct qbman_attr *d, u32 *icscred);
-+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, u32 *tdthresh);
-+void qbman_fq_attr_get_oa(struct qbman_attr *d,
-+ int *oa_ics, int *oa_cgr, int32_t *oa_len);
-+void qbman_fq_attr_get_mctl(struct qbman_attr *d,
-+ int *bdi, int *ff, int *va, int *ps);
-+void qbman_fq_attr_get_ctx(struct qbman_attr *d, u32 *hi, u32 *lo);
-+void qbman_fq_attr_get_icid(struct qbman_attr *d, u32 *icid, int *pl);
-+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, u32 *vfqid);
-+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, u32 *erfqid);
-+
-+/* FQ query command for non-programmable fields*/
-+enum qbman_fq_schedstate_e {
-+ qbman_fq_schedstate_oos = 0,
-+ qbman_fq_schedstate_retired,
-+ qbman_fq_schedstate_tentatively_scheduled,
-+ qbman_fq_schedstate_truly_scheduled,
-+ qbman_fq_schedstate_parked,
-+ qbman_fq_schedstate_held_active,
-+};
++ mc_bus = to_fsl_mc_bus(mc_bus_dev);
++ *new_mc_io = NULL;
++ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
++ if (error < 0)
++ return error;
+
-+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
-+ struct qbman_attr *state);
-+u32 qbman_fq_state_schedstate(const struct qbman_attr *state);
-+int qbman_fq_state_force_eligible(const struct qbman_attr *state);
-+int qbman_fq_state_xoff(const struct qbman_attr *state);
-+int qbman_fq_state_retirement_pending(const struct qbman_attr *state);
-+int qbman_fq_state_overflow_error(const struct qbman_attr *state);
-+u32 qbman_fq_state_frame_count(const struct qbman_attr *state);
-+u32 qbman_fq_state_byte_count(const struct qbman_attr *state);
-+
-+/* CGR query */
-+int qbman_cgr_query(struct qbman_swp *s, u32 cgid,
-+ struct qbman_attr *attr);
-+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter,
-+ int *cscn_wq_en_exit, int *cscn_wq_icd);
-+void qbman_cgr_attr_get_mode(struct qbman_attr *d, u32 *mode,
-+ int *rej_cnt_mode, int *cscn_bdi);
-+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter,
-+ int *cscn_wr_en_exit, int *cg_wr_ae,
-+ int *cscn_dcp_en, int *cg_wr_va);
-+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en,
-+ u32 *i_cnt_wr_bnd);
-+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en);
-+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, u32 *cs_thres);
-+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d,
-+ u32 *cs_thres_x);
-+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, u32 *td_thres);
-+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, u32 *cscn_tdcp);
-+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, u32 *cscn_wqid);
-+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d,
-+ u32 *cscn_vcgid);
-+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, u32 *icid,
-+ int *pl);
-+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d,
-+ u64 *cg_wr_addr);
-+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, u64 *cscn_ctx);
-+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, u32 idx,
-+ int *edp);
-+void qbman_cgr_attr_wred_dp_decompose(u32 dp, u64 *minth,
-+ u64 *maxth, u8 *maxp);
-+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, u32 idx,
-+ u32 *dp);
-+
-+/* CGR/CCGR/CQ statistics query */
-+int qbman_cgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt);
-+int qbman_ccgr_reject_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt);
-+int qbman_cq_dequeue_statistics(struct qbman_swp *s, u32 cgid, int clear,
-+ u64 *frame_cnt, u64 *byte_cnt);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h
-@@ -0,0 +1,171 @@
-+/* Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ error = -EINVAL;
++ dpmcp_dev = resource->data;
++
++ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
++ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
++ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
++ dev_err(&dpmcp_dev->dev,
++ "ERROR: Version %d.%d of DPMCP not supported.\n",
++ dpmcp_dev->obj_desc.ver_major,
++ dpmcp_dev->obj_desc.ver_minor);
++ error = -ENOTSUPP;
++ goto error_cleanup_resource;
++ }
+
-+/* Perform extra checking */
-+#define QBMAN_CHECKING
++ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
++ mc_portal_size = resource_size(dpmcp_dev->regions);
+
-+/* To maximise the amount of logic that is common between the Linux driver and
-+ * other targets (such as the embedded MC firmware), we pivot here between the
-+ * inclusion of two platform-specific headers.
-+ *
-+ * The first, qbman_sys_decl.h, includes any and all required system headers as
-+ * well as providing any definitions for the purposes of compatibility. The
-+ * second, qbman_sys.h, is where platform-specific routines go.
++ error = fsl_create_mc_io(&mc_bus_dev->dev,
++ mc_portal_phys_addr,
++ mc_portal_size, dpmcp_dev,
++ mc_io_flags, &mc_io);
++ if (error < 0)
++ goto error_cleanup_resource;
++
++ *new_mc_io = mc_io;
++ return 0;
++
++error_cleanup_resource:
++ fsl_mc_resource_free(resource);
++ return error;
++}
++EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
++
++/**
++ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
++ * of a given MC bus
+ *
-+ * The point of the split is that the platform-independent code (including this
-+ * header) may depend on platform-specific declarations, yet other
-+ * platform-specific routines may depend on platform-independent definitions.
++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
++void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
++{
++ struct fsl_mc_device *dpmcp_dev;
++ struct fsl_mc_resource *resource;
+
-+#define QMAN_REV_4000 0x04000000
-+#define QMAN_REV_4100 0x04010000
-+#define QMAN_REV_4101 0x04010001
++ /*
++ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
++ * to have a DPMCP object associated with.
++ */
++ dpmcp_dev = mc_io->dpmcp_dev;
+
-+/* When things go wrong, it is a convenient trick to insert a few FOO()
-+ * statements in the code to trace progress. TODO: remove this once we are
-+ * hacking the code less actively.
-+ */
-+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
-+
-+/* Any time there is a register interface which we poll on, this provides a
-+ * "break after x iterations" scheme for it. It's handy for debugging, eg.
-+ * where you don't want millions of lines of log output from a polling loop
-+ * that won't, because such things tend to drown out the earlier log output
-+ * that might explain what caused the problem. (NB: put ";" after each macro!)
-+ * TODO: we should probably remove this once we're done sanitising the
-+ * simulator...
-+ */
-+#define DBG_POLL_START(loopvar) (loopvar = 1000)
-+#define DBG_POLL_CHECK(loopvar) \
-+ do {if (!((loopvar)--)) WARN_ON(1); } while (0)
-+
-+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
-+ * and widths, these macro-generated encode/decode/isolate/remove inlines can
-+ * be used.
-+ *
-+ * Eg. to "d"ecode a 14-bit field out of a register (into a "u16" type),
-+ * where the field is located 3 bits "up" from the least-significant bit of the
-+ * register (ie. the field location within the 32-bit register corresponds to a
-+ * mask of 0x0001fff8), you would do;
-+ * u16 field = d32_u16(3, 14, reg_value);
-+ *
-+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
-+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
-+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
-+ * LS bit), do;
-+ * reg_value |= e32_int(19, 1, !!field);
-+ *
-+ * If you wish to read-modify-write a register, such that you leave the 14-bit
-+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
-+ * value using;
-+ * reg_value = i32_u16(3, 14, reg_value);
-+ *
-+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
-+ * zero) but leaving all other fields as-is;
-+ * reg_val = r32_int(19, 1, reg_value);
-+ *
-+ */
-+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
-+ (u32)((1 << width) - 1))
-+#define DECLARE_CODEC32(t) \
-+static inline u32 e32_##t(u32 lsoffset, u32 width, t val) \
-+{ \
-+ WARN_ON(width > (sizeof(t) * 8)); \
-+ return ((u32)val & MAKE_MASK32(width)) << lsoffset; \
-+} \
-+static inline t d32_##t(u32 lsoffset, u32 width, u32 val) \
-+{ \
-+ WARN_ON(width > (sizeof(t) * 8)); \
-+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
-+} \
-+static inline u32 i32_##t(u32 lsoffset, u32 width, \
-+ u32 val) \
-+{ \
-+ WARN_ON(width > (sizeof(t) * 8)); \
-+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
-+} \
-+static inline u32 r32_##t(u32 lsoffset, u32 width, \
-+ u32 val) \
-+{ \
-+ WARN_ON(width > (sizeof(t) * 8)); \
-+ return ~(MAKE_MASK32(width) << lsoffset) & val; \
-+}
-+DECLARE_CODEC32(u32)
-+DECLARE_CODEC32(u16)
-+DECLARE_CODEC32(u8)
-+DECLARE_CODEC32(int)
-+
-+ /*********************/
-+ /* Debugging assists */
-+ /*********************/
-+
-+static inline void __hexdump(unsigned long start, unsigned long end,
-+ unsigned long p, size_t sz,
-+ const unsigned char *c)
-+{
-+ while (start < end) {
-+ unsigned int pos = 0;
-+ char buf[64];
-+ int nl = 0;
-+
-+ pos += sprintf(buf + pos, "%08lx: ", start);
-+ do {
-+ if ((start < p) || (start >= (p + sz)))
-+ pos += sprintf(buf + pos, "..");
-+ else
-+ pos += sprintf(buf + pos, "%02x", *(c++));
-+ if (!(++start & 15)) {
-+ buf[pos++] = '\n';
-+ nl = 1;
-+ } else {
-+ nl = 0;
-+ if (!(start & 1))
-+ buf[pos++] = ' ';
-+ if (!(start & 3))
-+ buf[pos++] = ' ';
-+ }
-+ } while (start & 15);
-+ if (!nl)
-+ buf[pos++] = '\n';
-+ buf[pos] = '\0';
-+ pr_info("%s", buf);
-+ }
++ resource = dpmcp_dev->resource;
++ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
++ return;
++
++ if (resource->data != dpmcp_dev)
++ return;
++
++ fsl_destroy_mc_io(mc_io);
++ fsl_mc_resource_free(resource);
+}
++EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
-+static inline void hexdump(const void *ptr, size_t sz)
++/**
++ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
++ *
++ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
++ */
++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
-+ unsigned long p = (unsigned long)ptr;
-+ unsigned long start = p & ~15ul;
-+ unsigned long end = (p + sz + 15) & ~15ul;
-+ const unsigned char *c = ptr;
++ int error;
++ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
-+ __hexdump(start, end, p, sz, c);
++ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
++ if (error < 0) {
++ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
++ return error;
++ }
++
++ return 0;
+}
---- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -33,108 +33,24 @@
- #define _FSL_DPMCP_CMD_H
-
- /* Minimal supported DPMCP Version */
--#define DPMCP_MIN_VER_MAJOR 3
--#define DPMCP_MIN_VER_MINOR 0
--
--/* Command IDs */
--#define DPMCP_CMDID_CLOSE 0x800
--#define DPMCP_CMDID_OPEN 0x80b
--#define DPMCP_CMDID_CREATE 0x90b
--#define DPMCP_CMDID_DESTROY 0x900
--
--#define DPMCP_CMDID_GET_ATTR 0x004
--#define DPMCP_CMDID_RESET 0x005
--
--#define DPMCP_CMDID_SET_IRQ 0x010
--#define DPMCP_CMDID_GET_IRQ 0x011
--#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
--#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
--#define DPMCP_CMDID_SET_IRQ_MASK 0x014
--#define DPMCP_CMDID_GET_IRQ_MASK 0x015
--#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
--
--struct dpmcp_cmd_open {
-- __le32 dpmcp_id;
--};
++EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
+--- a/drivers/staging/fsl-mc/bus/mc-sys.c
++++ /dev/null
+@@ -1,317 +0,0 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+- *
+- * I/O services to send MC commands to the MC hardware
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
-
--struct dpmcp_cmd_create {
-- __le32 portal_id;
--};
+-#include <linux/delay.h>
+-#include <linux/slab.h>
+-#include <linux/ioport.h>
+-#include <linux/device.h>
+-#include <linux/io.h>
+-#include "../include/mc-sys.h"
+-#include "../include/mc-cmd.h"
+-#include "../include/mc.h"
-
--struct dpmcp_cmd_set_irq {
-- /* cmd word 0 */
-- u8 irq_index;
-- u8 pad[3];
-- __le32 irq_val;
-- /* cmd word 1 */
-- __le64 irq_addr;
-- /* cmd word 2 */
-- __le32 irq_num;
--};
+-#include "dpmcp.h"
-
--struct dpmcp_cmd_get_irq {
-- __le32 pad;
-- u8 irq_index;
--};
+-/**
+- * Timeout in milliseconds to wait for the completion of an MC command
+- */
+-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
-
--struct dpmcp_rsp_get_irq {
-- /* cmd word 0 */
-- __le32 irq_val;
-- __le32 pad;
-- /* cmd word 1 */
-- __le64 irq_paddr;
-- /* cmd word 2 */
-- __le32 irq_num;
-- __le32 type;
--};
-+#define DPMCP_MIN_VER_MAJOR 3
-+#define DPMCP_MIN_VER_MINOR 0
-
--#define DPMCP_ENABLE 0x1
-+/* Command versioning */
-+#define DPMCP_CMD_BASE_VERSION 1
-+#define DPMCP_CMD_ID_OFFSET 4
-
--struct dpmcp_cmd_set_irq_enable {
-- u8 enable;
-- u8 pad[3];
-- u8 irq_index;
--};
-+#define DPMCP_CMD(id) ((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
-
--struct dpmcp_cmd_get_irq_enable {
-- __le32 pad;
-- u8 irq_index;
--};
+-/*
+- * usleep_range() min and max values used to throttle down polling
+- * iterations while waiting for MC command completion
+- */
+-#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
+-#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-
--struct dpmcp_rsp_get_irq_enable {
-- u8 enabled;
--};
+-static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+-{
+- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-
--struct dpmcp_cmd_set_irq_mask {
-- __le32 mask;
-- u8 irq_index;
--};
+- return (enum mc_cmd_status)hdr->status;
+-}
-
--struct dpmcp_cmd_get_irq_mask {
-- __le32 pad;
-- u8 irq_index;
--};
+-static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+-{
+- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+- u16 cmd_id = le16_to_cpu(hdr->cmd_id);
-
--struct dpmcp_rsp_get_irq_mask {
-- __le32 mask;
--};
-+/* Command IDs */
-+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
-+#define DPMCP_CMDID_GET_API_VERSION DPMCP_CMD(0xa0b)
-
--struct dpmcp_cmd_get_irq_status {
-- __le32 status;
-- u8 irq_index;
--};
-+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
-
--struct dpmcp_rsp_get_irq_status {
-- __le32 status;
--};
+- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+-}
-
--struct dpmcp_rsp_get_attributes {
-- /* response word 0 */
-- __le32 pad;
-- __le32 id;
-- /* response word 1 */
-- __le16 version_major;
-- __le16 version_minor;
-+struct dpmcp_cmd_open {
-+ __le32 dpmcp_id;
- };
-
- #endif /* _FSL_DPMCP_CMD_H */
---- a/drivers/staging/fsl-mc/bus/dpmcp.c
-+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -104,76 +104,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
- }
-
- /**
-- * dpmcp_create() - Create the DPMCP object.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @cfg: Configuration structure
-- * @token: Returned token; use in subsequent API calls
+-static int mc_status_to_error(enum mc_cmd_status status)
+-{
+- static const int mc_status_to_error_map[] = {
+- [MC_CMD_STATUS_OK] = 0,
+- [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
+- [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
+- [MC_CMD_STATUS_DMA_ERR] = -EIO,
+- [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
+- [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
+- [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
+- [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
+- [MC_CMD_STATUS_BUSY] = -EBUSY,
+- [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
+- [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
+- };
+-
+- if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map)))
+- return -EINVAL;
+-
+- return mc_status_to_error_map[status];
+-}
+-
+-static const char *mc_status_to_string(enum mc_cmd_status status)
+-{
+- static const char *const status_strings[] = {
+- [MC_CMD_STATUS_OK] = "Command completed successfully",
+- [MC_CMD_STATUS_READY] = "Command ready to be processed",
+- [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
+- [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
+- [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
+- [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
+- [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
+- [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
+- [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
+- [MC_CMD_STATUS_BUSY] = "Device is busy",
+- [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
+- [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
+- };
+-
+- if ((unsigned int)status >= ARRAY_SIZE(status_strings))
+- return "Unknown MC error";
+-
+- return status_strings[status];
+-}
+-
+-/**
+- * mc_write_command - writes a command to a Management Complex (MC) portal
- *
-- * Create the DPMCP object, allocate required resources and
-- * perform required initialization.
+- * @portal: pointer to an MC portal
+- * @cmd: pointer to a filled command
+- */
+-static inline void mc_write_command(struct mc_command __iomem *portal,
+- struct mc_command *cmd)
+-{
+- int i;
+-
+- /* copy command parameters into the portal */
+- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+- __raw_writeq(cmd->params[i], &portal->params[i]);
+- __iowmb();
+-
+- /* submit the command by writing the header */
+- __raw_writeq(cmd->header, &portal->header);
+-}
+-
+-/**
+- * mc_read_response - reads the response for the last MC command from a
+- * Management Complex (MC) portal
- *
-- * The object can be created either by declaring it in the
-- * DPL file, or by calling this function.
-- * This function returns a unique authentication token,
-- * associated with the specific object ID and the specific MC
-- * portal; this token must be used in all subsequent calls to
-- * this specific object. For objects that are created using the
-- * DPL file, call dpmcp_open function to get an authentication
-- * token first.
+- * @portal: pointer to an MC portal
+- * @resp: pointer to command response buffer
- *
-- * Return: '0' on Success; Error code otherwise.
+- * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
- */
--int dpmcp_create(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- const struct dpmcp_cfg *cfg,
-- u16 *token)
+-static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
+- portal,
+- struct mc_command *resp)
-{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_create *cmd_params;
--
-- int err;
+- int i;
+- enum mc_cmd_status status;
+-
+- /* Copy command response header from MC portal: */
+- __iormb();
+- resp->header = __raw_readq(&portal->header);
+- __iormb();
+- status = mc_cmd_hdr_read_status(resp);
+- if (status != MC_CMD_STATUS_OK)
+- return status;
+-
+- /* Copy command response data from MC portal: */
+- for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+- resp->params[i] = __raw_readq(&portal->params[i]);
+- __iormb();
+-
+- return status;
+-}
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
-- cmd_flags, 0);
-- cmd_params = (struct dpmcp_cmd_create *)cmd.params;
-- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+-/**
+- * Waits for the completion of an MC command doing preemptible polling.
+- * uslepp_range() is called between polling iterations.
+- *
+- * @mc_io: MC I/O object to be used
+- * @cmd: command buffer to receive MC response
+- * @mc_status: MC command completion status
+- */
+-static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+- struct mc_command *cmd,
+- enum mc_cmd_status *mc_status)
+-{
+- enum mc_cmd_status status;
+- unsigned long jiffies_until_timeout =
+- jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
-
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
+- /*
+- * Wait for response from the MC hardware:
+- */
+- for (;;) {
+- status = mc_read_response(mc_io->portal_virt_addr, cmd);
+- if (status != MC_CMD_STATUS_READY)
+- break;
+-
+- /*
+- * TODO: When MC command completion interrupts are supported
+- * call wait function here instead of usleep_range()
+- */
+- usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+- MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+-
+- if (time_after_eq(jiffies, jiffies_until_timeout)) {
+- dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+- mc_io->portal_phys_addr,
+- (unsigned int)mc_cmd_hdr_read_token(cmd),
+- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
-
-- /* retrieve response parameters */
-- *token = mc_cmd_hdr_read_token(&cmd);
+- return -ETIMEDOUT;
+- }
+- }
-
+- *mc_status = status;
- return 0;
-}
-
-/**
-- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
+- * Waits for the completion of an MC command doing atomic polling.
+- * udelay() is called between polling iterations.
- *
-- * Return: '0' on Success; error code otherwise.
+- * @mc_io: MC I/O object to be used
+- * @cmd: command buffer to receive MC response
+- * @mc_status: MC command completion status
- */
--int dpmcp_destroy(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token)
+-static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+- struct mc_command *cmd,
+- enum mc_cmd_status *mc_status)
-{
-- struct mc_command cmd = { 0 };
+- enum mc_cmd_status status;
+- unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
-- cmd_flags, token);
+- BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
+- MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
+- for (;;) {
+- status = mc_read_response(mc_io->portal_virt_addr, cmd);
+- if (status != MC_CMD_STATUS_READY)
+- break;
+-
+- udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+- timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+- if (timeout_usecs == 0) {
+- dev_dbg(mc_io->dev,
+- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+- mc_io->portal_phys_addr,
+- (unsigned int)mc_cmd_hdr_read_token(cmd),
+- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+-
+- return -ETIMEDOUT;
+- }
+- }
+-
+- *mc_status = status;
+- return 0;
-}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -196,309 +126,33 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
- }
-
- /**
-- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: Identifies the interrupt index to configure
-- * @irq_cfg: IRQ configuration
+- * Sends a command to the MC device using the given MC I/O object
- *
-- * Return: '0' on Success; Error code otherwise.
+- * @mc_io: MC I/O object to be used
+- * @cmd: command to be sent
+- *
+- * Returns '0' on Success; Error code otherwise.
- */
--int dpmcp_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dpmcp_irq_cfg *irq_cfg)
+-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
-{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_set_irq *cmd_params;
+- int error;
+- enum mc_cmd_status status;
+- unsigned long irq_flags = 0;
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
-- cmd_params->irq_index = irq_index;
-- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
-- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+- if (WARN_ON(in_irq() &&
+- !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))
+- return -EINVAL;
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
+- if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+- else
+- mutex_lock(&mc_io->mutex);
-
--/**
-- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
-- * @mc_io: Pointer to MC portal's I/O object
-+ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
-+ * @mc_io: Pointer to Mc portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @type: Interrupt type: 0 represents message interrupt
-- * type (both irq_addr and irq_val are valid)
-- * @irq_cfg: IRQ attributes
-+ * @major_ver: Major version of Data Path Management Command Portal API
-+ * @minor_ver: Minor version of Data Path Management Command Portal API
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dpmcp_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dpmcp_irq_cfg *irq_cfg)
-+int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver)
- {
- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_get_irq *cmd_params;
-- struct dpmcp_rsp_get_irq *rsp_params;
- int err;
+- /*
+- * Send command to the MC hardware:
+- */
+- mc_write_command(mc_io->portal_virt_addr, cmd);
+-
+- /*
+- * Wait for response from the MC hardware:
+- */
+- if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+- error = mc_polling_wait_preemptible(mc_io, cmd, &status);
+- else
+- error = mc_polling_wait_atomic(mc_io, cmd, &status);
+-
+- if (error < 0)
+- goto common_exit;
+-
+- if (status != MC_CMD_STATUS_OK) {
+- dev_dbg(mc_io->dev,
+- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+- mc_io->portal_phys_addr,
+- (unsigned int)mc_cmd_hdr_read_token(cmd),
+- (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+- mc_status_to_string(status),
+- (unsigned int)status);
+-
+- error = mc_status_to_error(status);
+- goto common_exit;
+- }
+-
+- error = 0;
+-common_exit:
+- if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+- else
+- mutex_unlock(&mc_io->mutex);
+-
+- return error;
+-}
+-EXPORT_SYMBOL(mc_send_command);
+--- /dev/null
++++ b/drivers/bus/fsl-mc/mc-sys.c
+@@ -0,0 +1,296 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ *
++ * I/O services to send MC commands to the MC hardware
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/ioport.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/io-64-nonatomic-hi-lo.h>
++#include <linux/fsl/mc.h>
++
++#include "fsl-mc-private.h"
++
++/**
++ * Timeout in milliseconds to wait for the completion of an MC command
++ */
++#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
++
++/*
++ * usleep_range() min and max values used to throttle down polling
++ * iterations while waiting for MC command completion
++ */
++#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
++#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
++
++static enum mc_cmd_status mc_cmd_hdr_read_status(struct fsl_mc_command *cmd)
++{
++ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
++
++ return (enum mc_cmd_status)hdr->status;
++}
++
++static u16 mc_cmd_hdr_read_cmdid(struct fsl_mc_command *cmd)
++{
++ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
++ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
++
++ return cmd_id;
++}
++
++static int mc_status_to_error(enum mc_cmd_status status)
++{
++ static const int mc_status_to_error_map[] = {
++ [MC_CMD_STATUS_OK] = 0,
++ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
++ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
++ [MC_CMD_STATUS_DMA_ERR] = -EIO,
++ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
++ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
++ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
++ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
++ [MC_CMD_STATUS_BUSY] = -EBUSY,
++ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
++ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
++ };
++
++ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
++ return -EINVAL;
++
++ return mc_status_to_error_map[status];
++}
++
++static const char *mc_status_to_string(enum mc_cmd_status status)
++{
++ static const char *const status_strings[] = {
++ [MC_CMD_STATUS_OK] = "Command completed successfully",
++ [MC_CMD_STATUS_READY] = "Command ready to be processed",
++ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
++ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
++ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
++ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
++ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
++ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
++ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
++ [MC_CMD_STATUS_BUSY] = "Device is busy",
++ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
++ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
++ };
++
++ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
++ return "Unknown MC error";
++
++ return status_strings[status];
++}
++
++/**
++ * mc_write_command - writes a command to a Management Complex (MC) portal
++ *
++ * @portal: pointer to an MC portal
++ * @cmd: pointer to a filled command
++ */
++static inline void mc_write_command(struct fsl_mc_command __iomem *portal,
++ struct fsl_mc_command *cmd)
++{
++ int i;
++
++ /* copy command parameters into the portal */
++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
++ /*
++ * Data is already in the expected LE byte-order. Do an
++ * extra LE -> CPU conversion so that the CPU -> LE done in
++ * the device io write api puts it back in the right order.
++ */
++ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
++
++ /* submit the command by writing the header */
++ writeq(le64_to_cpu(cmd->header), &portal->header);
++}
++
++/**
++ * mc_read_response - reads the response for the last MC command from a
++ * Management Complex (MC) portal
++ *
++ * @portal: pointer to an MC portal
++ * @resp: pointer to command response buffer
++ *
++ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
++ */
++static inline enum mc_cmd_status mc_read_response(struct fsl_mc_command __iomem
++ *portal,
++ struct fsl_mc_command *resp)
++{
++ int i;
++ enum mc_cmd_status status;
++
++ /* Copy command response header from MC portal: */
++ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
++ status = mc_cmd_hdr_read_status(resp);
++ if (status != MC_CMD_STATUS_OK)
++ return status;
++
++ /* Copy command response data from MC portal: */
++ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
++ /*
++ * Data is expected to be in LE byte-order. Do an
++ * extra CPU -> LE to revert the LE -> CPU done in
++ * the device io read api.
++ */
++ resp->params[i] =
++ cpu_to_le64(readq_relaxed(&portal->params[i]));
++
++ return status;
++}
++
++/**
++ * Waits for the completion of an MC command doing preemptible polling.
++ * usleep_range() is called between polling iterations.
++ *
++ * @mc_io: MC I/O object to be used
++ * @cmd: command buffer to receive MC response
++ * @mc_status: MC command completion status
++ */
++static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
++ struct fsl_mc_command *cmd,
++ enum mc_cmd_status *mc_status)
++{
++ enum mc_cmd_status status;
++ unsigned long jiffies_until_timeout =
++ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
++
++ /*
++ * Wait for response from the MC hardware:
++ */
++ for (;;) {
++ status = mc_read_response(mc_io->portal_virt_addr, cmd);
++ if (status != MC_CMD_STATUS_READY)
++ break;
++
++ /*
++ * TODO: When MC command completion interrupts are supported
++ * call wait function here instead of usleep_range()
++ */
++ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
++
++ if (time_after_eq(jiffies, jiffies_until_timeout)) {
++ dev_dbg(mc_io->dev,
++ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
++ &mc_io->portal_phys_addr,
++ (unsigned int)mc_cmd_hdr_read_token(cmd),
++ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
++
++ return -ETIMEDOUT;
++ }
++ }
++
++ *mc_status = status;
++ return 0;
++}
++
++/**
++ * Waits for the completion of an MC command doing atomic polling.
++ * udelay() is called between polling iterations.
++ *
++ * @mc_io: MC I/O object to be used
++ * @cmd: command buffer to receive MC response
++ * @mc_status: MC command completion status
++ */
++static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
++ struct fsl_mc_command *cmd,
++ enum mc_cmd_status *mc_status)
++{
++ enum mc_cmd_status status;
++ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
++
++ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
++ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
++
++ for (;;) {
++ status = mc_read_response(mc_io->portal_virt_addr, cmd);
++ if (status != MC_CMD_STATUS_READY)
++ break;
++
++ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
++ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
++ if (timeout_usecs == 0) {
++ dev_dbg(mc_io->dev,
++ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
++ &mc_io->portal_phys_addr,
++ (unsigned int)mc_cmd_hdr_read_token(cmd),
++ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
++
++ return -ETIMEDOUT;
++ }
++ }
++
++ *mc_status = status;
++ return 0;
++}
++
++/**
++ * Sends a command to the MC device using the given MC I/O object
++ *
++ * @mc_io: MC I/O object to be used
++ * @cmd: command to be sent
++ *
++ * Returns '0' on Success; Error code otherwise.
++ */
++int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
++{
++ int error;
++ enum mc_cmd_status status;
++ unsigned long irq_flags = 0;
++
++ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
++ return -EINVAL;
++
++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
++ spin_lock_irqsave(&mc_io->spinlock, irq_flags);
++ else
++ mutex_lock(&mc_io->mutex);
++
++ /*
++ * Send command to the MC hardware:
++ */
++ mc_write_command(mc_io->portal_virt_addr, cmd);
++
++ /*
++ * Wait for response from the MC hardware:
++ */
++ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
++ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
++ else
++ error = mc_polling_wait_atomic(mc_io, cmd, &status);
++
++ if (error < 0)
++ goto common_exit;
++
++ if (status != MC_CMD_STATUS_OK) {
++ dev_dbg(mc_io->dev,
++ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
++ &mc_io->portal_phys_addr,
++ (unsigned int)mc_cmd_hdr_read_token(cmd),
++ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
++ mc_status_to_string(status),
++ (unsigned int)status);
++
++ error = mc_status_to_error(status);
++ goto common_exit;
++ }
++
++ error = 0;
++common_exit:
++ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
++ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
++ else
++ mutex_unlock(&mc_io->mutex);
++
++ return error;
++}
++EXPORT_SYMBOL_GPL(mc_send_command);
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -41,6 +41,12 @@ config ARM_GIC_V3_ITS
+ depends on PCI_MSI
+ select ACPI_IORT if ACPI
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
-- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
-- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-- *type = le32_to_cpu(rsp_params->type);
-- return 0;
--}
--
--/**
-- * dpmcp_set_irq_enable() - Set overall interrupt state.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @en: Interrupt state - enable = 1, disable = 0
++config ARM_GIC_V3_ITS_FSL_MC
++ bool
++ depends on ARM_GIC_V3_ITS
++ depends on FSL_MC_BUS
++ default ARM_GIC_V3_ITS
++
+ config ARM_NVIC
+ bool
+ select IRQ_DOMAIN
+--- a/drivers/irqchip/Makefile
++++ b/drivers/irqchip/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-
+ obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
+ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
+ obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
++obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
+ obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
+ obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
+ obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ /dev/null
+@@ -1,126 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus driver MSI support
- *
-- * Allows GPP software to control when interrupts are generated.
-- * Each interrupt can have up to 32 causes. The enable/disable control's the
-- * overall interrupt state. if the interrupt is disabled no causes will cause
-- * an interrupt.
+- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
- *
-- * Return: '0' on Success; Error code otherwise.
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
- */
--int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en)
+-
+-#include <linux/of_device.h>
+-#include <linux/of_address.h>
+-#include <linux/irqchip/arm-gic-v3.h>
+-#include <linux/irq.h>
+-#include <linux/msi.h>
+-#include <linux/of.h>
+-#include <linux/of_irq.h>
+-#include "../include/mc-bus.h"
+-#include "fsl-mc-private.h"
+-
+-static struct irq_chip its_msi_irq_chip = {
+- .name = "fsl-mc-bus-msi",
+- .irq_mask = irq_chip_mask_parent,
+- .irq_unmask = irq_chip_unmask_parent,
+- .irq_eoi = irq_chip_eoi_parent,
+- .irq_set_affinity = msi_domain_set_affinity
+-};
+-
+-static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+- struct device *dev,
+- int nvec, msi_alloc_info_t *info)
-{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_set_irq_enable *cmd_params;
+- struct fsl_mc_device *mc_bus_dev;
+- struct msi_domain_info *msi_info;
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
-- cmd_params->enable = en & DPMCP_ENABLE;
-- cmd_params->irq_index = irq_index;
+- if (WARN_ON(!dev_is_fsl_mc(dev)))
+- return -EINVAL;
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
+- mc_bus_dev = to_fsl_mc_device(dev);
+- if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC)))
+- return -EINVAL;
+-
+- /*
+- * Set the device Id to be passed to the GIC-ITS:
+- *
+- * NOTE: This device id corresponds to the IOMMU stream ID
+- * associated with the DPRC object (ICID).
+- */
+- info->scratchpad[0].ul = mc_bus_dev->icid;
+- msi_info = msi_get_domain_info(msi_domain->parent);
+- return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
-}
-
--/**
-- * dpmcp_get_irq_enable() - Get overall interrupt state
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @en: Returned interrupt state - enable = 1, disable = 0
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en)
+-static struct msi_domain_ops its_fsl_mc_msi_ops = {
+- .msi_prepare = its_fsl_mc_msi_prepare,
+-};
+-
+-static struct msi_domain_info its_fsl_mc_msi_domain_info = {
+- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+- .ops = &its_fsl_mc_msi_ops,
+- .chip = &its_msi_irq_chip,
+-};
+-
+-static const struct of_device_id its_device_id[] = {
+- { .compatible = "arm,gic-v3-its", },
+- {},
+-};
+-
+-int __init its_fsl_mc_msi_init(void)
-{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_get_irq_enable *cmd_params;
-- struct dpmcp_rsp_get_irq_enable *rsp_params;
-- int err;
+- struct device_node *np;
+- struct irq_domain *parent;
+- struct irq_domain *mc_msi_domain;
+-
+- for (np = of_find_matching_node(NULL, its_device_id); np;
+- np = of_find_matching_node(np, its_device_id)) {
+- if (!of_device_is_available(np))
+- continue;
+- if (!of_property_read_bool(np, "msi-controller"))
+- continue;
+-
+- parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+- if (!parent || !msi_get_domain_info(parent)) {
+- pr_err("%s: unable to locate ITS domain\n",
+- np->full_name);
+- continue;
+- }
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
-- cmd_params->irq_index = irq_index;
+- mc_msi_domain = fsl_mc_msi_create_irq_domain(
+- of_node_to_fwnode(np),
+- &its_fsl_mc_msi_domain_info,
+- parent);
+- if (!mc_msi_domain) {
+- pr_err("%s: unable to create fsl-mc domain\n",
+- np->full_name);
+- continue;
+- }
-
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
+- WARN_ON(mc_msi_domain->
+- host_data != &its_fsl_mc_msi_domain_info);
+-
+- pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+- }
-
-- /* retrieve response parameters */
-- rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
-- *en = rsp_params->enabled & DPMCP_ENABLE;
- return 0;
-}
-
--/**
-- * dpmcp_set_irq_mask() - Set interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @mask: Event mask to trigger interrupt;
-- * each bit:
-- * 0 = ignore event
-- * 1 = consider event for asserting IRQ
-- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask)
+-void its_fsl_mc_msi_cleanup(void)
-{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_set_irq_mask *cmd_params;
+- struct device_node *np;
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
-- cmd_params->mask = cpu_to_le32(mask);
-- cmd_params->irq_index = irq_index;
+- for (np = of_find_matching_node(NULL, its_device_id); np;
+- np = of_find_matching_node(np, its_device_id)) {
+- struct irq_domain *mc_msi_domain = irq_find_matching_host(
+- np,
+- DOMAIN_BUS_FSL_MC_MSI);
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
+- if (!of_property_read_bool(np, "msi-controller"))
+- continue;
+-
+- if (mc_msi_domain &&
+- mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
+- irq_domain_remove(mc_msi_domain);
+- }
-}
+--- /dev/null
++++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+@@ -0,0 +1,98 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Freescale Management Complex (MC) bus driver MSI support
++ *
++ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera@freescale.com>
++ *
++ */
++
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/irq.h>
++#include <linux/msi.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include <linux/fsl/mc.h>
++
++static struct irq_chip its_msi_irq_chip = {
++ .name = "ITS-fMSI",
++ .irq_mask = irq_chip_mask_parent,
++ .irq_unmask = irq_chip_unmask_parent,
++ .irq_eoi = irq_chip_eoi_parent,
++ .irq_set_affinity = msi_domain_set_affinity
++};
++
++static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
++ struct device *dev,
++ int nvec, msi_alloc_info_t *info)
++{
++ struct fsl_mc_device *mc_bus_dev;
++ struct msi_domain_info *msi_info;
++
++ if (!dev_is_fsl_mc(dev))
++ return -EINVAL;
++
++ mc_bus_dev = to_fsl_mc_device(dev);
++ if (!(mc_bus_dev->flags & FSL_MC_IS_DPRC))
++ return -EINVAL;
++
++ /*
++ * Set the device Id to be passed to the GIC-ITS:
++ *
++ * NOTE: This device id corresponds to the IOMMU stream ID
++ * associated with the DPRC object (ICID).
++ */
++ info->scratchpad[0].ul = mc_bus_dev->icid;
++ msi_info = msi_get_domain_info(msi_domain->parent);
++ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
++}
++
++static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
++ .msi_prepare = its_fsl_mc_msi_prepare,
++};
++
++static struct msi_domain_info its_fsl_mc_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
++ .ops = &its_fsl_mc_msi_ops,
++ .chip = &its_msi_irq_chip,
++};
++
++static const struct of_device_id its_device_id[] = {
++ { .compatible = "arm,gic-v3-its", },
++ {},
++};
++
++static int __init its_fsl_mc_msi_init(void)
++{
++ struct device_node *np;
++ struct irq_domain *parent;
++ struct irq_domain *mc_msi_domain;
++
++ for (np = of_find_matching_node(NULL, its_device_id); np;
++ np = of_find_matching_node(np, its_device_id)) {
++ if (!of_property_read_bool(np, "msi-controller"))
++ continue;
++
++ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
++ if (!parent || !msi_get_domain_info(parent)) {
++ pr_err("%pOF: unable to locate ITS domain\n", np);
++ continue;
++ }
++
++ mc_msi_domain = fsl_mc_msi_create_irq_domain(
++ of_node_to_fwnode(np),
++ &its_fsl_mc_msi_domain_info,
++ parent);
++ if (!mc_msi_domain) {
++ pr_err("%pOF: unable to create fsl-mc domain\n", np);
++ continue;
++ }
++
++ pr_info("fsl-mc MSI: %pOF domain created\n", np);
++ }
++
++ return 0;
++}
++
++early_initcall(its_fsl_mc_msi_init);
+--- a/drivers/staging/fsl-mc/Kconfig
++++ b/drivers/staging/fsl-mc/Kconfig
+@@ -1 +1,2 @@
++# SPDX-License-Identifier: GPL-2.0
+ source "drivers/staging/fsl-mc/bus/Kconfig"
+--- a/drivers/staging/fsl-mc/Makefile
++++ b/drivers/staging/fsl-mc/Makefile
+@@ -1,2 +1,3 @@
++# SPDX-License-Identifier: GPL-2.0
+ # Freescale Management Complex (MC) bus drivers
+ obj-$(CONFIG_FSL_MC_BUS) += bus/
+--- a/drivers/staging/fsl-mc/TODO
++++ /dev/null
+@@ -1,18 +0,0 @@
+-* Add at least one device driver for a DPAA2 object (child device of the
+- fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
+- driver support, which depends on drivers for several objects: DPNI,
+- DPIO, DPMAC. Other pre-requisites include:
-
--/**
-- * dpmcp_get_irq_mask() - Get interrupt mask.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @mask: Returned event mask to trigger interrupt
+- * MC firmware uprev. The MC firmware upon which the fsl-mc
+- bus driver and DPAA2 object drivers are based is continuing
+- to evolve, so minor updates are needed to keep in sync with binary
+- interface changes to the MC.
+-
+-* Cleanup
+-
+-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+-german.rivera@freescale.com, devel@driverdev.osuosl.org,
+-linux-kernel@vger.kernel.org
+-
+-[1] https://lkml.org/lkml/2015/7/9/93
+-[2] https://lkml.org/lkml/2015/7/7/712
+--- a/drivers/staging/fsl-mc/bus/Kconfig
++++ b/drivers/staging/fsl-mc/bus/Kconfig
+@@ -1,25 +1,22 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+-# Freescale Management Complex (MC) bus drivers
++# DPAA2 fsl-mc bus
+ #
+-# Copyright (C) 2014 Freescale Semiconductor, Inc.
++# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ #
+-# This file is released under the GPLv2
+-#
+-
+-config FSL_MC_BUS
+- bool "Freescale Management Complex (MC) bus driver"
+- depends on OF && ARM64
+- select GENERIC_MSI_IRQ_DOMAIN
+- help
+- Driver to enable the bus infrastructure for the Freescale
+- QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
+- module of the QorIQ LS2 SoCs, that does resource management
+- for hardware building-blocks in the SoC that can be used
+- to dynamically create networking hardware objects such as
+- network interfaces (NICs), crypto accelerator instances,
+- or L2 switches.
+-
+- Only enable this option when building the kernel for
+- Freescale QorQIQ LS2xxxx SoCs.
+
++config FSL_MC_DPIO
++ tristate "QorIQ DPAA2 DPIO driver"
++ depends on FSL_MC_BUS
++ help
++ Driver for the DPAA2 DPIO object. A DPIO provides queue and
++ buffer management facilities for software to interact with
++ other DPAA2 objects. This driver does not expose the DPIO
++ objects individually, but groups them under a service layer
++ API.
+
++config FSL_QBMAN_DEBUG
++ tristate "Freescale QBMAN Debug APIs"
++ depends on FSL_MC_DPIO
++ help
++ QBMan debug assistant APIs.
+--- a/drivers/staging/fsl-mc/bus/Makefile
++++ b/drivers/staging/fsl-mc/bus/Makefile
+@@ -1,20 +1,9 @@
++# SPDX-License-Identifier: GPL-2.0
+ #
+ # Freescale Management Complex (MC) bus drivers
+ #
+ # Copyright (C) 2014 Freescale Semiconductor, Inc.
+ #
+-# This file is released under the GPLv2
+-#
+-obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+-mc-bus-driver-objs := fsl-mc-bus.o \
+- mc-sys.o \
+- mc-io.o \
+- dprc.o \
+- dpmng.o \
+- dprc-driver.o \
+- fsl-mc-allocator.o \
+- fsl-mc-msi.o \
+- irq-gic-v3-its-fsl-mc-msi.o \
+- dpmcp.o \
+- dpbp.o
++# MC DPIO driver
++obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+--- a/drivers/staging/fsl-mc/bus/dpbp.c
++++ /dev/null
+@@ -1,691 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
- *
-- * Every interrupt can have up to 32 causes and the interrupt model supports
-- * masking/unmasking each cause independently
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
- *
-- * Return: '0' on Success; Error code otherwise.
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
- */
--int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask)
--{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_get_irq_mask *cmd_params;
-- struct dpmcp_rsp_get_irq_mask *rsp_params;
--
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
-- cmd_params->irq_index = irq_index;
--
-- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
--
-- /* retrieve response parameters */
-- rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
-- *mask = le32_to_cpu(rsp_params->mask);
--
-- return 0;
--}
+-#include "../include/mc-sys.h"
+-#include "../include/mc-cmd.h"
+-#include "../include/dpbp.h"
+-#include "../include/dpbp-cmd.h"
-
-/**
-- * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
-- *
+- * dpbp_open() - Open a control session for the specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @irq_index: The interrupt index to configure
-- * @status: Returned interrupts status - one bit per cause:
-- * 0 = no interrupt pending
-- * 1 = interrupt pending
+- * @dpbp_id: DPBP unique ID
+- * @token: Returned token; use in subsequent API calls
+- *
+- * This function can be used to open a control session for an
+- * already created object; an object may have been declared in
+- * the DPL or by calling the dpbp_create function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent commands for
+- * this specific object
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status)
+-int dpbp_open(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int dpbp_id,
+- u16 *token)
-{
- struct mc_command cmd = { 0 };
-- struct dpmcp_cmd_get_irq_status *cmd_params;
-- struct dpmcp_rsp_get_irq_status *rsp_params;
+- struct dpbp_cmd_open *cmd_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
-- cmd_flags, token);
-- cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
-- cmd_params->status = cpu_to_le32(*status);
-- cmd_params->irq_index = irq_index;
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+- cmd_flags, 0);
+- cmd_params = (struct dpbp_cmd_open *)cmd.params;
+- cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
-- *status = le32_to_cpu(rsp_params->status);
+- *token = mc_cmd_hdr_read_token(&cmd);
-
-- return 0;
+- return err;
-}
+-EXPORT_SYMBOL(dpbp_open);
-
-/**
-- * dpmcp_get_attributes - Retrieve DPMCP attributes.
-- *
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPMCP object
-- * @attr: Returned object's attributes
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpmcp_attr *attr)
--{
-- struct mc_command cmd = { 0 };
-- struct dpmcp_rsp_get_attributes *rsp_params;
-- int err;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
-- cmd_flags, token);
-+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-
-- /* send command to mc*/
-+ /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
-- attr->id = le32_to_cpu(rsp_params->id);
-- attr->version.major = le16_to_cpu(rsp_params->version_major);
-- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
-+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
- return 0;
- }
---- a/drivers/staging/fsl-mc/bus/dpmcp.h
-+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -32,128 +32,29 @@
- #ifndef __FSL_DPMCP_H
- #define __FSL_DPMCP_H
-
--/* Data Path Management Command Portal API
-+/*
-+ * Data Path Management Command Portal API
- * Contains initialization APIs and runtime control APIs for DPMCP
- */
-
- struct fsl_mc_io;
-
- int dpmcp_open(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-+ u32 cmd_flags,
- int dpmcp_id,
-- uint16_t *token);
--
--/* Get portal ID from pool */
--#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
-+ u16 *token);
-
- int dpmcp_close(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token);
-+ u32 cmd_flags,
-+ u16 token);
-
--/**
-- * struct dpmcp_cfg - Structure representing DPMCP configuration
-- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
-- * from pool
-- */
--struct dpmcp_cfg {
-- int portal_id;
--};
--
--int dpmcp_create(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- const struct dpmcp_cfg *cfg,
-- uint16_t *token);
--
--int dpmcp_destroy(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token);
-+int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-
- int dpmcp_reset(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token);
--
--/* IRQ */
--/* IRQ Index */
--#define DPMCP_IRQ_INDEX 0
--/* irq event - Indicates that the link state changed */
--#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
--
--/**
-- * struct dpmcp_irq_cfg - IRQ configuration
-- * @paddr: Address that must be written to signal a message-based interrupt
-- * @val: Value to write into irq_addr address
-- * @irq_num: A user defined number associated with this IRQ
-- */
--struct dpmcp_irq_cfg {
-- uint64_t paddr;
-- uint32_t val;
-- int irq_num;
--};
--
--int dpmcp_set_irq(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- struct dpmcp_irq_cfg *irq_cfg);
--
--int dpmcp_get_irq(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- int *type,
-- struct dpmcp_irq_cfg *irq_cfg);
--
--int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- uint8_t en);
--
--int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- uint8_t *en);
--
--int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- uint32_t mask);
--
--int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- uint32_t *mask);
--
--int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- uint8_t irq_index,
-- uint32_t *status);
--
--/**
-- * struct dpmcp_attr - Structure representing DPMCP attributes
-- * @id: DPMCP object ID
-- * @version: DPMCP version
-- */
--struct dpmcp_attr {
-- int id;
-- /**
-- * struct version - Structure representing DPMCP version
-- * @major: DPMCP major version
-- * @minor: DPMCP minor version
-- */
-- struct {
-- uint16_t major;
-- uint16_t minor;
-- } version;
--};
--
--int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
-- uint32_t cmd_flags,
-- uint16_t token,
-- struct dpmcp_attr *attr);
-+ u32 cmd_flags,
-+ u16 token);
-
- #endif /* __FSL_DPMCP_H */
---- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
-@@ -12,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -41,13 +40,14 @@
- #ifndef __FSL_DPMNG_CMD_H
- #define __FSL_DPMNG_CMD_H
-
--/* Command IDs */
--#define DPMNG_CMDID_GET_CONT_ID 0x830
--#define DPMNG_CMDID_GET_VERSION 0x831
-+/* Command versioning */
-+#define DPMNG_CMD_BASE_VERSION 1
-+#define DPMNG_CMD_ID_OFFSET 4
-
--struct dpmng_rsp_get_container_id {
-- __le32 container_id;
--};
-+#define DPMNG_CMD(id) ((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
-
- struct dpmng_rsp_get_version {
- __le32 revision;
---- a/drivers/staging/fsl-mc/bus/dpmng.c
-+++ b/drivers/staging/fsl-mc/bus/dpmng.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_
- }
- EXPORT_SYMBOL(mc_get_version);
-
--/**
-- * dpmng_get_container_id() - Get container ID associated with a given portal.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @container_id: Requested container ID
+- * dpbp_close() - Close the control session of the object
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- *
+- * After this function is called, no further operations are
+- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dpmng_get_container_id(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int *container_id)
+-int dpbp_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+- token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dpbp_close);
+-
+-/**
+- * dpbp_create() - Create the DPBP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPBP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpbp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token)
-{
- struct mc_command cmd = { 0 };
-- struct dpmng_rsp_get_container_id *rsp_params;
- int err;
-
+- (void)(cfg); /* unused */
+-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
-- cmd_flags,
-- 0);
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+- cmd_flags, 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
-- *container_id = le32_to_cpu(rsp_params->container_id);
+- *token = mc_cmd_hdr_read_token(&cmd);
-
- return 0;
-}
-
---- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
-+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
-@@ -12,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -42,48 +41,39 @@
- #define _FSL_DPRC_CMD_H
-
- /* Minimal supported DPRC Version */
--#define DPRC_MIN_VER_MAJOR 5
-+#define DPRC_MIN_VER_MAJOR 6
- #define DPRC_MIN_VER_MINOR 0
-
--/* Command IDs */
--#define DPRC_CMDID_CLOSE 0x800
--#define DPRC_CMDID_OPEN 0x805
--#define DPRC_CMDID_CREATE 0x905
+-/**
+- * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
-
--#define DPRC_CMDID_GET_ATTR 0x004
--#define DPRC_CMDID_RESET_CONT 0x005
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+- cmd_flags, token);
-
--#define DPRC_CMDID_SET_IRQ 0x010
--#define DPRC_CMDID_GET_IRQ 0x011
--#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
--#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
--#define DPRC_CMDID_SET_IRQ_MASK 0x014
--#define DPRC_CMDID_GET_IRQ_MASK 0x015
--#define DPRC_CMDID_GET_IRQ_STATUS 0x016
--#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
-
--#define DPRC_CMDID_CREATE_CONT 0x151
--#define DPRC_CMDID_DESTROY_CONT 0x152
--#define DPRC_CMDID_SET_RES_QUOTA 0x155
--#define DPRC_CMDID_GET_RES_QUOTA 0x156
--#define DPRC_CMDID_ASSIGN 0x157
--#define DPRC_CMDID_UNASSIGN 0x158
--#define DPRC_CMDID_GET_OBJ_COUNT 0x159
--#define DPRC_CMDID_GET_OBJ 0x15A
--#define DPRC_CMDID_GET_RES_COUNT 0x15B
--#define DPRC_CMDID_GET_RES_IDS 0x15C
--#define DPRC_CMDID_GET_OBJ_REG 0x15E
--#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
--#define DPRC_CMDID_GET_OBJ_IRQ 0x160
--#define DPRC_CMDID_SET_OBJ_LABEL 0x161
--#define DPRC_CMDID_GET_OBJ_DESC 0x162
+-/**
+- * dpbp_enable() - Enable the DPBP.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
-
--#define DPRC_CMDID_CONNECT 0x167
--#define DPRC_CMDID_DISCONNECT 0x168
--#define DPRC_CMDID_GET_POOL 0x169
--#define DPRC_CMDID_GET_POOL_COUNT 0x16A
-+/* Command versioning */
-+#define DPRC_CMD_BASE_VERSION 1
-+#define DPRC_CMD_ID_OFFSET 4
-
--#define DPRC_CMDID_GET_CONNECTION 0x16C
-+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
-+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
-+
-+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
-+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
-+
-+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
-+#define DPRC_CMDID_GET_IRQ DPRC_CMD(0x011)
-+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
-+#define DPRC_CMDID_GET_IRQ_ENABLE DPRC_CMD(0x013)
-+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
-+#define DPRC_CMDID_GET_IRQ_MASK DPRC_CMD(0x015)
-+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
-+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
-+
-+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
-+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
-+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
-+#define DPRC_CMDID_GET_RES_COUNT DPRC_CMD(0x15B)
-+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
-+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
-+#define DPRC_CMDID_GET_OBJ_IRQ DPRC_CMD(0x160)
-
- struct dprc_cmd_open {
- __le32 container_id;
-@@ -199,9 +189,6 @@ struct dprc_rsp_get_attributes {
- /* response word 1 */
- __le32 options;
- __le32 portal_id;
-- /* response word 2 */
-- __le16 version_major;
-- __le16 version_minor;
- };
-
- struct dprc_cmd_set_res_quota {
-@@ -367,11 +354,16 @@ struct dprc_cmd_get_obj_region {
-
- struct dprc_rsp_get_obj_region {
- /* response word 0 */
-- __le64 pad;
-+ __le64 pad0;
- /* response word 1 */
-- __le64 base_addr;
-+ __le32 base_addr;
-+ __le32 pad1;
- /* response word 2 */
- __le32 size;
-+ u8 type;
-+ u8 pad2[3];
-+ /* response word 3 */
-+ __le32 flags;
- };
-
- struct dprc_cmd_set_obj_label {
---- a/drivers/staging/fsl-mc/bus/dprc-driver.c
-+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
-@@ -1,7 +1,7 @@
- /*
- * Freescale data path resource container (DPRC) driver
- *
-- * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
-@@ -160,6 +160,8 @@ static void check_plugged_state_change(s
- * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ * @driver_override: driver override to apply to new objects found in the
-+ * DPRC, or NULL, if none.
- * @obj_desc_array: array of device descriptors for child devices currently
- * present in the physical DPRC.
- * @num_child_objects_in_mc: number of entries in obj_desc_array
-@@ -169,6 +171,7 @@ static void check_plugged_state_change(s
- * in the physical DPRC.
- */
- static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
- struct dprc_obj_desc *obj_desc_array,
- int num_child_objects_in_mc)
- {
-@@ -188,11 +191,12 @@ static void dprc_add_new_devices(struct
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
-+ put_device(&child_dev->dev);
- continue;
- }
-
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
-- &child_dev);
-+ driver_override, &child_dev);
- if (error < 0)
- continue;
- }
-@@ -202,6 +206,8 @@ static void dprc_add_new_devices(struct
- * dprc_scan_objects - Discover objects in a DPRC
- *
- * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
-+ * @driver_override: driver override to apply to new objects found in the
-+ * DPRC, or NULL, if none.
- * @total_irq_count: total number of IRQs needed by objects in the DPRC.
- *
- * Detects objects added and removed from a DPRC and synchronizes the
-@@ -217,6 +223,7 @@ static void dprc_add_new_devices(struct
- * of the device drivers for the non-allocatable devices.
- */
- int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
- unsigned int *total_irq_count)
- {
- int num_child_objects;
-@@ -297,7 +304,7 @@ int dprc_scan_objects(struct fsl_mc_devi
- dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
- num_child_objects);
-
-- dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
-+ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array,
- num_child_objects);
-
- if (child_obj_desc_array)
-@@ -328,7 +335,7 @@ int dprc_scan_container(struct fsl_mc_de
- * Discover objects in the DPRC:
- */
- mutex_lock(&mc_bus->scan_mutex);
-- error = dprc_scan_objects(mc_bus_dev, &irq_count);
-+ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count);
- mutex_unlock(&mc_bus->scan_mutex);
- if (error < 0)
- goto error;
-@@ -415,7 +422,7 @@ static irqreturn_t dprc_irq0_handler_thr
- DPRC_IRQ_EVENT_OBJ_CREATED)) {
- unsigned int irq_count;
-
-- error = dprc_scan_objects(mc_dev, &irq_count);
-+ error = dprc_scan_objects(mc_dev, NULL, &irq_count);
- if (error < 0) {
- /*
- * If the error is -ENXIO, we ignore it, as it indicates
-@@ -505,7 +512,7 @@ static int register_dprc_irq_handler(str
- dprc_irq0_handler,
- dprc_irq0_handler_thread,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
-- "FSL MC DPRC irq0",
-+ dev_name(&mc_dev->dev),
- &mc_dev->dev);
- if (error < 0) {
- dev_err(&mc_dev->dev,
-@@ -597,6 +604,7 @@ static int dprc_probe(struct fsl_mc_devi
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
- bool mc_io_created = false;
- bool msi_domain_set = false;
-+ u16 major_ver, minor_ver;
-
- if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
- return -EINVAL;
-@@ -669,13 +677,21 @@ static int dprc_probe(struct fsl_mc_devi
- goto error_cleanup_open;
- }
-
-- if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
-- (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
-- mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
-+ error = dprc_get_api_version(mc_dev->mc_io, 0,
-+ &major_ver,
-+ &minor_ver);
-+ if (error < 0) {
-+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
-+ error);
-+ goto error_cleanup_open;
-+ }
-+
-+ if (major_ver < DPRC_MIN_VER_MAJOR ||
-+ (major_ver == DPRC_MIN_VER_MAJOR &&
-+ minor_ver < DPRC_MIN_VER_MINOR)) {
- dev_err(&mc_dev->dev,
- "ERROR: DPRC version %d.%d not supported\n",
-- mc_bus->dprc_attr.version.major,
-- mc_bus->dprc_attr.version.minor);
-+ major_ver, minor_ver);
- error = -ENOTSUPP;
- goto error_cleanup_open;
- }
---- a/drivers/staging/fsl-mc/bus/dprc.c
-+++ b/drivers/staging/fsl-mc/bus/dprc.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -100,93 +100,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
- EXPORT_SYMBOL(dprc_close);
-
- /**
-- * dprc_create_container() - Create child container
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+- token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dpbp_enable);
+-
+-/**
+- * dpbp_disable() - Disable the DPBP.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @cfg: Child container configuration
-- * @child_container_id: Returned child container ID
-- * @child_portal_offset: Returned child portal offset from MC portal base
+- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_create_container(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dprc_cfg *cfg,
-- int *child_container_id,
-- u64 *child_portal_offset)
+-int dpbp_disable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_create_container *cmd_params;
-- struct dprc_rsp_create_container *rsp_params;
-- int err;
-
- /* prepare command */
-- cmd_params = (struct dprc_cmd_create_container *)cmd.params;
-- cmd_params->options = cpu_to_le32(cfg->options);
-- cmd_params->icid = cpu_to_le16(cfg->icid);
-- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
-- strncpy(cmd_params->label, cfg->label, 16);
-- cmd_params->label[15] = '\0';
--
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
-- cmd_flags, token);
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dpbp_disable);
+-
+-/**
+- * dpbp_is_enabled() - Check if the DPBP is enabled.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @en: Returns '1' if object is enabled; '0' otherwise
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_rsp_is_enabled *rsp_params;
+- int err;
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
+- token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_create_container *)cmd.params;
-- *child_container_id = le32_to_cpu(rsp_params->child_container_id);
-- *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
+- rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+- *en = rsp_params->enabled & DPBP_ENABLE;
-
- return 0;
-}
-
-/**
-- * dprc_destroy_container() - Destroy child container.
+- * dpbp_reset() - Reset the DPBP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @child_container_id: ID of the container to destroy
-- *
-- * This function terminates the child container, so following this call the
-- * child container ID becomes invalid.
-- *
-- * Notes:
-- * - All resources and objects of the destroyed container are returned to the
-- * parent container or destroyed if were created be the destroyed container.
-- * - This function destroy all the child containers of the specified
-- * container prior to destroying the container itself.
-- *
-- * warning: Only the parent container is allowed to destroy a child policy
-- * Container 0 can't be destroyed
+- * @token: Token of DPBP object
- *
- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_reset(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
- *
+- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_destroy_container(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id)
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_destroy_container *cmd_params;
+- struct dpbp_cmd_set_irq *cmd_params;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
-- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprc_reset_container - Reset child container.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -565,279 +478,6 @@ int dprc_get_attributes(struct fsl_mc_io
- attr->icid = le16_to_cpu(rsp_params->icid);
- attr->options = le32_to_cpu(rsp_params->options);
- attr->portal_id = le32_to_cpu(rsp_params->portal_id);
-- attr->version.major = le16_to_cpu(rsp_params->version_major);
-- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+- * dpbp_get_irq() - Get IRQ information from the DPBP.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_cmd_get_irq *cmd_params;
+- struct dpbp_rsp_get_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
-
- return 0;
-}
-
-/**
-- * dprc_set_res_quota() - Set allocation policy for a specific resource/object
-- * type in a child container
+- * dpbp_set_irq_enable() - Set overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @child_container_id: ID of the child container
-- * @type: Resource/object type
-- * @quota: Sets the maximum number of resources of the selected type
-- * that the child container is allowed to allocate from its parent;
-- * when quota is set to -1, the policy is the same as container's
-- * general policy.
-- *
-- * Allocation policy determines whether or not a container may allocate
-- * resources from its parent. Each container has a 'global' allocation policy
-- * that is set when the container is created.
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
- *
-- * This function sets allocation policy for a specific resource type.
-- * The default policy for all resource types matches the container's 'global'
-- * allocation policy.
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
- *
- * Return: '0' on Success; Error code otherwise.
-- *
-- * @warning Only the parent container is allowed to change a child policy.
- */
--int dprc_set_res_quota(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- char *type,
-- u16 quota)
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_res_quota *cmd_params;
+- struct dpbp_cmd_set_irq_enable *cmd_params;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
-- cmd_params->child_container_id = cpu_to_le32(child_container_id);
-- cmd_params->quota = cpu_to_le16(quota);
-- strncpy(cmd_params->type, type, 16);
-- cmd_params->type[15] = '\0';
+- cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPBP_ENABLE;
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
-- * dprc_get_res_quota() - Gets the allocation policy of a specific
-- * resource/object type in a child container
+- * dpbp_get_irq_enable() - Get overall interrupt state
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @child_container_id; ID of the child container
-- * @type: resource/object type
-- * @quota: Returnes the maximum number of resources of the selected type
-- * that the child container is allowed to allocate from the parent;
-- * when quota is set to -1, the policy is the same as container's
-- * general policy.
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_get_res_quota(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- char *type,
-- u16 *quota)
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_res_quota *cmd_params;
-- struct dprc_rsp_get_res_quota *rsp_params;
+- struct dpbp_cmd_get_irq_enable *cmd_params;
+- struct dpbp_rsp_get_irq_enable *rsp_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
-- cmd_params->child_container_id = cpu_to_le32(child_container_id);
-- strncpy(cmd_params->type, type, 16);
-- cmd_params->type[15] = '\0';
+- cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
-- *quota = le16_to_cpu(rsp_params->quota);
--
+- rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPBP_ENABLE;
- return 0;
-}
-
-/**
-- * dprc_assign() - Assigns objects or resource to a child container.
+- * dpbp_set_irq_mask() - Set interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @container_id: ID of the child container
-- * @res_req: Describes the type and amount of resources to
-- * assign to the given container
-- *
-- * Assignment is usually done by a parent (this DPRC) to one of its child
-- * containers.
-- *
-- * According to the DPRC allocation policy, the assigned resources may be taken
-- * (allocated) from the container's ancestors, if not enough resources are
-- * available in the container itself.
-- *
-- * The type of assignment depends on the dprc_res_req options, as follows:
-- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
-- * the explicit base ID specified at the id_base_align field of res_req.
-- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
-- * aligned to the value given at id_base_align field of res_req.
-- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
-- * and indicates that the object must be set to the plugged state.
-- *
-- * A container may use this function with its own ID in order to change a
-- * object state to plugged or unplugged.
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
- *
-- * If IRQ information has been set in the child DPRC, it will signal an
-- * interrupt following every change in its object assignment.
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_assign(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int container_id,
-- struct dprc_res_req *res_req)
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_assign *cmd_params;
+- struct dpbp_cmd_set_irq_mask *cmd_params;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
-- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_assign *)cmd.params;
-- cmd_params->container_id = cpu_to_le32(container_id);
-- cmd_params->options = cpu_to_le32(res_req->options);
-- cmd_params->num = cpu_to_le32(res_req->num);
-- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
-- strncpy(cmd_params->type, res_req->type, 16);
-- cmd_params->type[15] = '\0';
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
-- * dprc_unassign() - Un-assigns objects or resources from a child container
-- * and moves them into this (parent) DPRC.
+- * dpbp_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @child_container_id: ID of the child container
-- * @res_req: Describes the type and amount of resources to un-assign from
-- * the child container
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
- *
-- * Un-assignment of objects can succeed only if the object is not in the
-- * plugged or opened state.
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_unassign(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- struct dprc_res_req *res_req)
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_unassign *cmd_params;
+- struct dpbp_cmd_get_irq_mask *cmd_params;
+- struct dpbp_rsp_get_irq_mask *rsp_params;
+- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_unassign *)cmd.params;
-- cmd_params->child_container_id = cpu_to_le32(child_container_id);
-- cmd_params->options = cpu_to_le32(res_req->options);
-- cmd_params->num = cpu_to_le32(res_req->num);
-- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
-- strncpy(cmd_params->type, res_req->type, 16);
-- cmd_params->type[15] = '\0';
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
-}
-
-/**
-- * dprc_get_pool_count() - Get the number of dprc's pools
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * dpbp_get_irq_status() - Get the current status of any pending interrupts.
+- *
- * @mc_io: Pointer to MC portal's I/O object
-- * @token: Token of DPRC object
-- * @pool_count: Returned number of resource pools in the dprc
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
-- int *pool_count)
+- u8 irq_index,
+- u32 *status)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_rsp_get_pool_count *rsp_params;
+- struct dpbp_cmd_get_irq_status *cmd_params;
+- struct dpbp_rsp_get_irq_status *rsp_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
-- *pool_count = le32_to_cpu(rsp_params->pool_count);
+- rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
-
- return 0;
-}
-
-/**
-- * dprc_get_pool() - Get the type (string) of a certain dprc's pool
+- * dpbp_clear_irq_status() - Clear a pending interrupt's status
+- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @pool_index; Index of the pool to be queried (< pool_count)
-- * @type: The type of the pool
-- *
-- * The pool types retrieved one by one by incrementing
-- * pool_index up to (not including) the value of pool_count returned
-- * from dprc_get_pool_count(). dprc_get_pool_count() must
-- * be called prior to dprc_get_pool().
+- * @token: Token of DPBP object
+- * @irq_index: The interrupt index to configure
+- * @status: Bits to clear (W1C) - one bit per cause:
+- * 0 = don't change
+- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_get_pool(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int pool_index,
-- char *type)
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_pool *cmd_params;
-- struct dprc_rsp_get_pool *rsp_params;
-- int err;
+- struct dpbp_cmd_clear_irq_status *cmd_params;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
-- cmd_params->pool_index = cpu_to_le32(pool_index);
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(status);
+- cmd_params->irq_index = irq_index;
-
- /* send command to mc*/
-- err = mc_send_command(mc_io, &cmd);
-- if (err)
-- return err;
+- return mc_send_command(mc_io, &cmd);
+-}
-
-- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
-- strncpy(type, rsp_params->type, 16);
-- type[15] = '\0';
-
- return 0;
- }
-@@ -934,64 +574,6 @@ int dprc_get_obj(struct fsl_mc_io *mc_io
- EXPORT_SYMBOL(dprc_get_obj);
-
- /**
-- * dprc_get_obj_desc() - Get object descriptor.
+-/**
+- * dpbp_get_attributes - Retrieve DPBP attributes.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_type: The type of the object to get its descriptor.
-- * @obj_id: The id of the object to get its descriptor
-- * @obj_desc: The returned descriptor to fill and return to the user
+- * @token: Token of DPBP object
+- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
-- *
- */
--int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- struct dprc_obj_desc *obj_desc)
+-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_attr *attr)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_obj_desc *cmd_params;
-- struct dprc_rsp_get_obj_desc *rsp_params;
+- struct dpbp_rsp_get_attributes *rsp_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
-- cmd_params->obj_id = cpu_to_le32(obj_id);
-- strncpy(cmd_params->type, obj_type, 16);
-- cmd_params->type[15] = '\0';
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+- cmd_flags, token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
-- obj_desc->id = le32_to_cpu(rsp_params->id);
-- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
-- obj_desc->irq_count = rsp_params->irq_count;
-- obj_desc->region_count = rsp_params->region_count;
-- obj_desc->state = le32_to_cpu(rsp_params->state);
-- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
-- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
-- obj_desc->flags = le16_to_cpu(rsp_params->flags);
-- strncpy(obj_desc->type, rsp_params->type, 16);
-- obj_desc->type[15] = '\0';
-- strncpy(obj_desc->label, rsp_params->label, 16);
-- obj_desc->label[15] = '\0';
+- rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+- attr->bpid = le16_to_cpu(rsp_params->bpid);
+- attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
-
- return 0;
-}
--EXPORT_SYMBOL(dprc_get_obj_desc);
+-EXPORT_SYMBOL(dpbp_get_attributes);
-
-/**
- * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1130,52 +712,6 @@ int dprc_get_res_count(struct fsl_mc_io
- EXPORT_SYMBOL(dprc_get_res_count);
-
- /**
-- * dprc_get_res_ids() - Obtains IDs of free resources in the container
+- * dpbp_set_notifications() - Set notifications towards software
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @type: pool type
-- * @range_desc: range descriptor
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_get_res_ids(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *type,
-- struct dprc_res_ids_range_desc *range_desc)
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
-{
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_res_ids *cmd_params;
-- struct dprc_rsp_get_res_ids *rsp_params;
-- int err;
+- struct dpbp_cmd_set_notifications *cmd_params;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
- cmd_flags, token);
-- cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
-- cmd_params->iter_status = range_desc->iter_status;
-- cmd_params->base_id = cpu_to_le32(range_desc->base_id);
-- cmd_params->last_id = cpu_to_le32(range_desc->last_id);
-- strncpy(cmd_params->type, type, 16);
-- cmd_params->type[15] = '\0';
+- cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+- cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+- cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+- cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+- cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+- cmd_params->options = cpu_to_le16(cfg->options);
+- cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+- cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpbp_get_notifications() - Get the notifications configuration
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPBP object
+- * @cfg: notifications configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpbp_rsp_get_notifications *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+- cmd_flags,
+- token);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
-- range_desc->iter_status = rsp_params->iter_status;
-- range_desc->base_id = le32_to_cpu(rsp_params->base_id);
-- range_desc->last_id = le32_to_cpu(rsp_params->last_id);
+- rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+- cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+- cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+- cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+- cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+- cfg->options = le16_to_cpu(rsp_params->options);
+- cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+- cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-
- return 0;
-}
--EXPORT_SYMBOL(dprc_get_res_ids);
--
--/**
- * dprc_get_obj_region() - Get region information for a specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-@@ -1216,160 +752,66 @@ int dprc_get_obj_region(struct fsl_mc_io
-
- /* retrieve response parameters */
- rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
-- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
-+ region_desc->base_offset = le32_to_cpu(rsp_params->base_addr);
- region_desc->size = le32_to_cpu(rsp_params->size);
-+ region_desc->type = rsp_params->type;
-+ region_desc->flags = le32_to_cpu(rsp_params->flags);
-
- return 0;
- }
- EXPORT_SYMBOL(dprc_get_obj_region);
-
- /**
-- * dprc_set_obj_label() - Set object label.
-- * @mc_io: Pointer to MC portal's I/O object
-+ * dprc_get_api_version - Get Data Path Resource Container API version
-+ * @mc_io: Pointer to Mc portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @obj_type: Object's type
-- * @obj_id: Object's ID
-- * @label: The required label. The maximum length is 16 chars.
-+ * @major_ver: Major version of Data Path Resource Container API
-+ * @minor_ver: Minor version of Data Path Resource Container API
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_set_obj_label(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- char *label)
-+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
+@@ -0,0 +1,8 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# QorIQ DPAA2 DPIO driver
++#
++
++obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
++
++fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
+@@ -0,0 +1,50 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ */
++#ifndef _FSL_DPIO_CMD_H
++#define _FSL_DPIO_CMD_H
++
++/* DPIO Version */
++#define DPIO_VER_MAJOR 4
++#define DPIO_VER_MINOR 2
++
++/* Command Versioning */
++
++#define DPIO_CMD_ID_OFFSET 4
++#define DPIO_CMD_BASE_VERSION 1
++
++#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
++
++/* Command IDs */
++#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
++#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
++#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
++#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
++#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
++#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
++#define DPIO_CMDID_RESET DPIO_CMD(0x005)
++
++struct dpio_cmd_open {
++ __le32 dpio_id;
++};
++
++#define DPIO_CHANNEL_MODE_MASK 0x3
++
++struct dpio_rsp_get_attr {
++ /* cmd word 0 */
++ __le32 id;
++ __le16 qbman_portal_id;
++ u8 num_priorities;
++ u8 channel_mode;
++ /* cmd word 1 */
++ __le64 qbman_portal_ce_addr;
++ /* cmd word 2 */
++ __le64 qbman_portal_ci_addr;
++ /* cmd word 3 */
++ __le32 qbman_version;
++};
++
++#endif /* _FSL_DPIO_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
+@@ -0,0 +1,278 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/dma-mapping.h>
++#include <linux/delay.h>
++
++#include <linux/fsl/mc.h>
++#include "../../include/dpaa2-io.h"
++
++#include "qbman-portal.h"
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("DPIO Driver");
++
++struct dpio_priv {
++ struct dpaa2_io *io;
++};
++
++static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
++{
++ struct device *dev = (struct device *)arg;
++ struct dpio_priv *priv = dev_get_drvdata(dev);
++
++ return dpaa2_io_irq(priv->io);
++}
++
++static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
++{
++ struct fsl_mc_device_irq *irq;
++
++ irq = dpio_dev->irqs[0];
++
++ /* clear the affinity hint */
++ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
++}
++
++static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
++{
++ struct dpio_priv *priv;
++ int error;
++ struct fsl_mc_device_irq *irq;
++ cpumask_t mask;
++
++ priv = dev_get_drvdata(&dpio_dev->dev);
++
++ irq = dpio_dev->irqs[0];
++ error = devm_request_irq(&dpio_dev->dev,
++ irq->msi_desc->irq,
++ dpio_irq_handler,
++ 0,
++ dev_name(&dpio_dev->dev),
++ &dpio_dev->dev);
++ if (error < 0) {
++ dev_err(&dpio_dev->dev,
++ "devm_request_irq() failed: %d\n",
++ error);
++ return error;
++ }
++
++ /* set the affinity hint */
++ cpumask_clear(&mask);
++ cpumask_set_cpu(cpu, &mask);
++ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
++ dev_err(&dpio_dev->dev,
++ "irq_set_affinity failed irq %d cpu %d\n",
++ irq->msi_desc->irq, cpu);
++
++ return 0;
++}
++
++static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
++{
++ struct dpio_attr dpio_attrs;
++ struct dpaa2_io_desc desc;
++ struct dpio_priv *priv;
++ int err = -ENOMEM;
++ struct device *dev = &dpio_dev->dev;
++ static int next_cpu = -1;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ goto err_priv_alloc;
++
++ dev_set_drvdata(dev, priv);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_dbg(dev, "MC portal allocation failed\n");
++ err = -EPROBE_DEFER;
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_reset() failed\n");
++ goto err_reset;
++ }
++
++ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
++ &dpio_attrs);
++ if (err) {
++ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
++ goto err_get_attr;
++ }
++ desc.qman_version = dpio_attrs.qbman_version;
++
++ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_enable() failed %d\n", err);
++ goto err_get_attr;
++ }
++
++ /* initialize DPIO descriptor */
++ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
++ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
++ desc.dpio_id = dpio_dev->obj_desc.id;
++
++ /* get the cpu to use for the affinity hint */
++ if (next_cpu == -1)
++ next_cpu = cpumask_first(cpu_online_mask);
++ else
++ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
++
++ if (!cpu_possible(next_cpu)) {
++ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
++ err = -ERANGE;
++ goto err_allocate_irqs;
++ }
++ desc.cpu = next_cpu;
++
++ /*
++ * Set the CENA regs to be the cache enabled area of the portal to
++ * achieve the best performance.
++ */
++ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
++ resource_size(&dpio_dev->regions[0]));
++ desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
++ resource_size(&dpio_dev->regions[1]));
++
++ err = fsl_mc_allocate_irqs(dpio_dev);
++ if (err) {
++ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
++ goto err_allocate_irqs;
++ }
++
++ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
++ if (err)
++ goto err_register_dpio_irq;
++
++ priv->io = dpaa2_io_create(&desc);
++ if (!priv->io) {
++ dev_err(dev, "dpaa2_io_create failed\n");
++ goto err_dpaa2_io_create;
++ }
++
++ dev_info(dev, "probed\n");
++ dev_dbg(dev, " receives_notifications = %d\n",
++ desc.receives_notifications);
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ return 0;
++
++err_dpaa2_io_create:
++ unregister_dpio_irq_handlers(dpio_dev);
++err_register_dpio_irq:
++ fsl_mc_free_irqs(dpio_dev);
++err_allocate_irqs:
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_get_attr:
++err_reset:
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ dev_set_drvdata(dev, NULL);
++err_priv_alloc:
++ return err;
++}
++
++/* Tear down interrupts for a given DPIO object */
++static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
++{
++ unregister_dpio_irq_handlers(dpio_dev);
++ fsl_mc_free_irqs(dpio_dev);
++}
++
++static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
++{
++ struct device *dev;
++ struct dpio_priv *priv;
++ int err;
++
++ dev = &dpio_dev->dev;
++ priv = dev_get_drvdata(dev);
++
++ dpaa2_io_down(priv->io);
++
++ dpio_teardown_irqs(dpio_dev);
++
++ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
++ if (err) {
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_mcportal;
++ }
++
++ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
++ &dpio_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpio_open() failed\n");
++ goto err_open;
++ }
++
++ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
++
++ fsl_mc_portal_free(dpio_dev->mc_io);
++
++ dev_set_drvdata(dev, NULL);
++
++ return 0;
++
++err_open:
++ fsl_mc_portal_free(dpio_dev->mc_io);
++err_mcportal:
++ return err;
++}
++
++static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpio",
++ },
++ { .vendor = 0x0 }
++};
++
++static struct fsl_mc_driver dpaa2_dpio_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_dpio_probe,
++ .remove = dpaa2_dpio_remove,
++ .match_id_table = dpaa2_dpio_match_id_table
++};
++
++static int dpio_driver_init(void)
++{
++ return fsl_mc_driver_register(&dpaa2_dpio_driver);
++}
++
++static void dpio_driver_exit(void)
++{
++ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
++}
++module_init(dpio_driver_init);
++module_exit(dpio_driver_exit);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+@@ -0,0 +1,780 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ */
++#include <linux/types.h>
++#include <linux/fsl/mc.h>
++#include "../../include/dpaa2-io.h"
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "dpio.h"
++#include "qbman-portal.h"
++
++struct dpaa2_io {
++ struct dpaa2_io_desc dpio_desc;
++ struct qbman_swp_desc swp_desc;
++ struct qbman_swp *swp;
++ struct list_head node;
++ /* protect against multiple management commands */
++ spinlock_t lock_mgmt_cmd;
++ /* protect notifications list */
++ spinlock_t lock_notifications;
++ struct list_head notifications;
++};
++
++struct dpaa2_io_store {
++ unsigned int max;
++ dma_addr_t paddr;
++ struct dpaa2_dq *vaddr;
++ void *alloced_addr; /* unaligned value from kmalloc() */
++ unsigned int idx; /* position of the next-to-be-returned entry */
++ struct qbman_swp *swp; /* portal used to issue VDQCR */
++ struct device *dev; /* device used for DMA mapping */
++};
++
++/* keep a per cpu array of DPIOs for fast access */
++static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
++static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
++static DEFINE_SPINLOCK(dpio_list_lock);
++
++static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
++ int cpu)
++{
++ if (d)
++ return d;
++
++ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
++ return NULL;
++
++ /*
++ * If cpu == -1, choose the current cpu, with no guarantees about
++ * potentially being migrated away.
++ */
++ if (cpu < 0)
++ cpu = smp_processor_id();
++
++ /* If a specific cpu was requested, pick it up immediately */
++ return dpio_by_cpu[cpu];
++}
++
++static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
++{
++ if (d)
++ return d;
++
++ d = service_select_by_cpu(d, -1);
++ if (d)
++ return d;
++
++ spin_lock(&dpio_list_lock);
++ d = list_entry(dpio_list.next, struct dpaa2_io, node);
++ list_del(&d->node);
++ list_add_tail(&d->node, &dpio_list);
++ spin_unlock(&dpio_list_lock);
++
++ return d;
++}
++
++/**
++ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
++ * @cpu: the cpu id
++ *
++ * Return the affine dpaa2_io service, or NULL if there is no service affined
++ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
++ * service.
++ */
++struct dpaa2_io *dpaa2_io_service_select(int cpu)
++{
++ if (cpu == DPAA2_IO_ANY_CPU)
++ return service_select(NULL);
++
++ return service_select_by_cpu(NULL, cpu);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
++
++/**
++ * dpaa2_io_create() - create a dpaa2_io object.
++ * @desc: the dpaa2_io descriptor
++ *
++ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
++ * DPIO object.
++ *
++ * Return a valid dpaa2_io object for success, or NULL for failure.
++ */
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
++{
++ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
++
++ if (!obj)
++ return NULL;
++
++ /* check if CPU is out of range (-1 means any cpu) */
++ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
++ kfree(obj);
++ return NULL;
++ }
++
++ obj->dpio_desc = *desc;
++ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
++ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
++ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
++ obj->swp = qbman_swp_init(&obj->swp_desc);
++
++ if (!obj->swp) {
++ kfree(obj);
++ return NULL;
++ }
++
++ INIT_LIST_HEAD(&obj->node);
++ spin_lock_init(&obj->lock_mgmt_cmd);
++ spin_lock_init(&obj->lock_notifications);
++ INIT_LIST_HEAD(&obj->notifications);
++
++ /* For now only enable DQRR interrupts */
++ qbman_swp_interrupt_set_trigger(obj->swp,
++ QBMAN_SWP_INTERRUPT_DQRI);
++ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
++ if (obj->dpio_desc.receives_notifications)
++ qbman_swp_push_set(obj->swp, 0, 1);
++
++ spin_lock(&dpio_list_lock);
++ list_add_tail(&obj->node, &dpio_list);
++ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
++ dpio_by_cpu[desc->cpu] = obj;
++ spin_unlock(&dpio_list_lock);
++
++ return obj;
++}
++
++/**
++ * dpaa2_io_down() - release the dpaa2_io object.
++ * @d: the dpaa2_io object to be released.
++ *
++ * The "struct dpaa2_io" type can represent an individual DPIO object (as
++ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
++ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
++ * each handle obtained should be released using this function.
++ */
++void dpaa2_io_down(struct dpaa2_io *d)
++{
++ kfree(d);
++}
++
++#define DPAA_POLL_MAX 32
++
++/**
++ * dpaa2_io_irq() - ISR for DPIO interrupts
++ *
++ * @obj: the given DPIO object.
++ *
++ * Return IRQ_HANDLED for success or IRQ_NONE if there
++ * were no pending interrupts.
++ */
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
++{
++ const struct dpaa2_dq *dq;
++ int max = 0;
++ struct qbman_swp *swp;
++ u32 status;
++
++ swp = obj->swp;
++ status = qbman_swp_interrupt_read_status(swp);
++ if (!status)
++ return IRQ_NONE;
++
++ dq = qbman_swp_dqrr_next(swp);
++ while (dq) {
++ if (qbman_result_is_SCN(dq)) {
++ struct dpaa2_io_notification_ctx *ctx;
++ u64 q64;
++
++ q64 = qbman_result_SCN_ctx(dq);
++ ctx = (void *)(uintptr_t)q64;
++ ctx->cb(ctx);
++ } else {
++ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
++ }
++ qbman_swp_dqrr_consume(swp, dq);
++ ++max;
++ if (max > DPAA_POLL_MAX)
++ goto done;
++ dq = qbman_swp_dqrr_next(swp);
++ }
++done:
++ qbman_swp_interrupt_clear_status(swp, status);
++ qbman_swp_interrupt_set_inhibit(swp, 0);
++ return IRQ_HANDLED;
++}
++
++/**
++ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
++ * notifications on the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * The caller should make the MC command to attach a DPAA2 object to
++ * a DPIO after this function completes successfully. In that way:
++ * (a) The DPIO service is "ready" to handle a notification arrival
++ * (which might happen before the "attach" command to MC has
++ * returned control of execution back to the caller)
++ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
++ * 'qman64' parameters that it should pass along in the MC command
++ * in order for the object to be configured to produce the right
++ * notification fields to the DPIO service.
++ *
++ * Return 0 for success, or -ENODEV for failure.
++ */
++int dpaa2_io_service_register(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++ if (!d)
++ return -ENODEV;
++
++ ctx->dpio_id = d->dpio_desc.dpio_id;
++ ctx->qman64 = (u64)(uintptr_t)ctx;
++ ctx->dpio_private = d;
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_add(&ctx->node, &d->notifications);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++
++ /* Enable the generation of CDAN notifications */
++ if (ctx->is_cdan)
++ return qbman_swp_CDAN_set_context_enable(d->swp,
++ (u16)ctx->id,
++ ctx->qman64);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
++
++/**
++ * dpaa2_io_service_deregister - The opposite of 'register'.
++ * @service: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * This function should be called only after sending the MC command to
++ * to detach the notification-producing device from the DPIO.
++ */
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_io *d = ctx->dpio_private;
++ unsigned long irqflags;
++
++ if (ctx->is_cdan)
++ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
++
++ spin_lock_irqsave(&d->lock_notifications, irqflags);
++ list_del(&ctx->node);
++ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
++
++/**
++ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
++ * @d: the given DPIO service.
++ * @ctx: the notification context.
++ *
++ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
++ * considered "disarmed". Ie. the user can issue pull dequeue operations on that
++ * traffic source for as long as it likes. Eventually it may wish to "rearm"
++ * that source to allow it to produce another FQDAN/CDAN, that's what this
++ * function achieves.
++ *
++ * Return 0 for success.
++ */
++int dpaa2_io_service_rearm(struct dpaa2_io *d,
++ struct dpaa2_io_notification_ctx *ctx)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select_by_cpu(d, ctx->desired_cpu);
++ if (!unlikely(d))
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ if (ctx->is_cdan)
++ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
++ else
++ err = qbman_swp_fq_schedule(d->swp, ctx->id);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
++
++/**
++ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_fq(&pd, fqid);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
++
++/**
++ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
++ * @d: the given DPIO service.
++ * @channelid: the given channel id.
++ * @s: the dpaa2_io_store object for the result.
++ *
++ * Return 0 for success, or error code for failure.
++ */
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s)
++{
++ struct qbman_pull_desc pd;
++ int err;
++
++ qbman_pull_desc_clear(&pd);
++ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
++ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
++ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ s->swp = d->swp;
++ err = qbman_swp_pull(d->swp, &pd);
++ if (err)
++ s->swp = NULL;
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
++
++/**
++ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
++ u32 fqid,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_fq(&ed, fqid);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
++
++/**
++ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
++ * @d: the given DPIO service.
++ * @qdid: the given queuing destination id.
++ * @prio: the given queuing priority.
++ * @qdbin: the given queuing destination bin.
++ * @fd: the frame descriptor which is enqueued.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
++ u32 qdid, u8 prio, u16 qdbin,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_no_orp(&ed, 0);
++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
++
++/**
++ * dpaa2_io_service_release() - Release buffers to a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffers to be released.
++ * @num_buffers: the number of the buffers to be released.
++ *
++ * Return 0 for success, and negative error code for failure.
++ */
++int dpaa2_io_service_release(struct dpaa2_io *d,
++ u32 bpid,
++ const u64 *buffers,
++ unsigned int num_buffers)
++{
++ struct qbman_release_desc rd;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ qbman_release_desc_clear(&rd);
++ qbman_release_desc_set_bpid(&rd, bpid);
++
++ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
++
++/**
++ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the buffer pool id.
++ * @buffers: the buffer addresses for acquired buffers.
++ * @num_buffers: the expected number of the buffers to acquire.
++ *
++ * Return a negative error code if the command failed, otherwise it returns
++ * the number of buffers acquired, which may be less than the number requested.
++ * Eg. if the buffer pool is empty, this will return zero.
++ */
++int dpaa2_io_service_acquire(struct dpaa2_io *d,
++ u32 bpid,
++ u64 *buffers,
++ unsigned int num_buffers)
++{
++ unsigned long irqflags;
++ int err;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
++
++/*
++ * 'Stores' are reusable memory blocks for holding dequeue results, and to
++ * assist with parsing those results.
++ */
++
++/**
++ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
++ * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
++ * @dev: the device to allow mapping/unmapping the DMAable region.
++ *
++ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
++ * The 'dpaa2_io_store' returned is a DPIO service managed object.
++ *
++ * Return pointer to dpaa2_io_store struct for successfully created storage
++ * memory, or NULL on error.
++ */
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev)
++{
++ struct dpaa2_io_store *ret;
++ size_t size;
++
++ if (!max_frames || (max_frames > 16))
++ return NULL;
++
++ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
++ if (!ret)
++ return NULL;
++
++ ret->max = max_frames;
++ size = max_frames * sizeof(struct dpaa2_dq) + 64;
++ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
++ if (!ret->alloced_addr) {
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
++ ret->paddr = dma_map_single(dev, ret->vaddr,
++ sizeof(struct dpaa2_dq) * max_frames,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, ret->paddr)) {
++ kfree(ret->alloced_addr);
++ kfree(ret);
++ return NULL;
++ }
++
++ ret->idx = 0;
++ ret->dev = dev;
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
++
++/**
++ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
++ * result.
++ * @s: the storage memory to be destroyed.
++ */
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
++{
++ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
++ DMA_FROM_DEVICE);
++ kfree(s->alloced_addr);
++ kfree(s);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
++
++/**
++ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
++ * @s: the dpaa2_io_store object.
++ * @is_last: indicate whether this is the last frame in the pull command.
++ *
++ * When an object driver performs dequeues to a dpaa2_io_store, this function
++ * can be used to determine when the next frame result is available. Once
++ * this function returns non-NULL, a subsequent call to it will try to find
++ * the next dequeue result.
++ *
++ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
++ * was empty, then this function will also return NULL (rather than expecting
++ * the caller to always check for this). As such, "is_last" can be used to
++ * differentiate between "end-of-empty-dequeue" and "still-waiting".
++ *
++ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
++ */
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
++{
++ int match;
++ struct dpaa2_dq *ret = &s->vaddr[s->idx];
++
++ match = qbman_result_has_new_result(s->swp, ret);
++ if (!match) {
++ *is_last = 0;
++ return NULL;
++ }
++
++ s->idx++;
++
++ if (dpaa2_dq_is_pull_complete(ret)) {
++ *is_last = 1;
++ s->idx = 0;
++ /*
++ * If we get an empty dequeue result to terminate a zero-results
++ * vdqcr, return NULL to the caller rather than expecting him to
++ * check non-NULL results every time.
++ */
++ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
++ ret = NULL;
++ } else {
++ *is_last = 0;
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
++
++/**
++ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
++ * @d: the given DPIO object.
++ * @fqid: the id of frame queue to be queried.
++ * @fcnt: the queried frame count.
++ * @bcnt: the queried byte count.
++ *
++ * Knowing the FQ count at run-time can be useful in debugging situations.
++ * The instantaneous frame- and byte-count are hereby returned.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
++ u32 *fcnt, u32 *bcnt)
++{
++ struct qbman_fq_query_np_rslt state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_fq_query_state(swp, fqid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *fcnt = qbman_fq_state_frame_count(&state);
++ *bcnt = qbman_fq_state_byte_count(&state);
++
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_fq_count);
++
++/**
++ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
++ * buffer pool.
++ * @d: the given DPIO object.
++ * @bpid: the index of buffer pool to be queried.
++ * @num: the queried number of buffers in the buffer pool.
++ *
++ * Return 0 for a successful query, and negative error code if query fails.
++ */
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, u32 bpid, u32 *num)
++{
++ struct qbman_bp_query_rslt state;
++ struct qbman_swp *swp;
++ unsigned long irqflags;
++ int ret;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++
++ swp = d->swp;
++ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
++ ret = qbman_bp_query(swp, bpid, &state);
++ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
++ if (ret)
++ return ret;
++ *num = qbman_bp_info_num_free_bufs(&state);
++ return 0;
++}
++EXPORT_SYMBOL(dpaa2_io_query_bp_count);
++
++/**
++ * dpaa2_io_service_enqueue_orp_fq() - Enqueue a frame to a frame queue with
++ * order restoration
++ * @d: the given DPIO service.
++ * @fqid: the given frame queue id.
++ * @fd: the frame descriptor which is enqueued.
++ * @orpid: the order restoration point ID
++ * @seqnum: the order sequence number
++ * @last: must be set for the final frame if seqnum is shared (split frame)
++ *
++ * Performs an enqueue to a frame queue using the specified order restoration
++ * point. The QMan device will ensure the order of frames placed on the
++ * queue will be ordered as per the sequence number.
++ *
++ * In the case a frame is split it is possible to enqueue using the same
++ * sequence number more than once. The final frame in a shared sequence number
++ * must be indicated by setting last = 1. For non shared sequence numbers
++ * last = 1 must always be set.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_orp_fq(struct dpaa2_io *d, u32 fqid,
++ const struct dpaa2_fd *fd, u16 orpid,
++ u16 seqnum, int last)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
++ qbman_eq_desc_set_fq(&ed, fqid);
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL(dpaa2_io_service_enqueue_orp_fq);
++
++/**
++ * dpaa2_io_service_enqueue_orp_qd() - Enqueue a frame to a queueing destination
++ * with order restoration
++ * @d: the given DPIO service.
++ * @qdid: the given queuing destination id.
++ * @fd: the frame descriptor which is enqueued.
++ * @orpid: the order restoration point ID
++ * @seqnum: the order sequence number
++ * @last: must be set for the final frame if seqnum is shared (split frame)
++ *
++ * Performs an enqueue to a frame queue using the specified order restoration
++ * point. The QMan device will ensure the order of frames placed on the
++ * queue will be ordered as per the sequence number.
++ *
++ * In the case a frame is split it is possible to enqueue using the same
++ * sequence number more than once. The final frame in a shared sequence number
++ * must be indicated by setting last = 1. For non shared sequence numbers
++ * last = 1 must always be set.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_enqueue_orp_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
++ u16 qdbin, const struct dpaa2_fd *fd,
++ u16 orpid, u16 seqnum, int last)
++{
++ struct qbman_eq_desc ed;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
++ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
++ return qbman_swp_enqueue(d->swp, &ed, fd);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_orp_qd);
++
++/**
++ * dpaa2_io_service_orp_seqnum_drop() - Remove a sequence number from
++ * an order restoration list
++ * @d: the given DPIO service.
++ * @orpid: Order restoration point to remove a sequence number from
++ * @seqnum: Sequence number to remove
++ *
++ * Removes a frame's sequence number from an order restoration point without
++ * enqueuing the frame. Used to indicate that the order restoration hardware
++ * should not expect to see this sequence number. Typically used to indicate
++ * a frame was terminated or dropped from a flow.
++ *
++ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
++ * or -ENODEV if there is no dpio service.
++ */
++int dpaa2_io_service_orp_seqnum_drop(struct dpaa2_io *d, u16 orpid, u16 seqnum)
++{
++ struct qbman_eq_desc ed;
++ struct dpaa2_fd fd;
++
++ d = service_select(d);
++ if (!d)
++ return -ENODEV;
++ qbman_eq_desc_clear(&ed);
++ qbman_eq_desc_set_orp_hole(&ed, orpid, seqnum);
++ return qbman_swp_enqueue(d->swp, &ed, &fd);
++}
++EXPORT_SYMBOL_GPL(dpaa2_io_service_orp_seqnum_drop);
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
+@@ -0,0 +1,221 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/fsl/mc.h>
++
++#include "dpio.h"
++#include "dpio-cmd.h"
++
++/*
++ * Data Path I/O Portal API
++ * Contains initialization APIs and runtime control APIs for DPIO
++ */
++
++/**
++ * dpio_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpio_id: DPIO unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpio_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpio_cmd_open *dpio_cmd;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
++ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
++
++ return 0;
++}
++
++/**
++ * dpio_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
++ cmd_flags,
++ token);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpio_get_attributes() - Retrieve DPIO attributes
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ * @attr: Returned object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpio_rsp_get_attr *dpio_rsp;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
++ attr->id = le32_to_cpu(dpio_rsp->id);
++ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
++ attr->num_priorities = dpio_rsp->num_priorities;
++ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
++ attr->qbman_portal_ce_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
++ attr->qbman_portal_ci_offset =
++ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
++ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
++
++ return 0;
++}
++
++/**
++ * dpio_get_api_version - Get Data Path I/O API version
++ * @mc_io: Pointer to MC portal's DPIO object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of DPIO API
++ * @minor_ver: Minor version of DPIO API
++ *
++ * Return: '0' on Success; Error code otherwise
++ */
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
- {
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_set_obj_label *cmd_params;
++{
++ struct fsl_mc_command cmd = { 0 };
+ int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
-- cmd_params->obj_id = cpu_to_le32(obj_id);
-- strncpy(cmd_params->label, label, 16);
-- cmd_params->label[15] = '\0';
-- strncpy(cmd_params->obj_type, obj_type, 16);
-- cmd_params->obj_type[15] = '\0';
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
-+ cmd_flags, 0);
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--EXPORT_SYMBOL(dprc_set_obj_label);
--
--/**
-- * dprc_connect() - Connect two endpoints to create a network link between them
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @endpoint1: Endpoint 1 configuration parameters
-- * @endpoint2: Endpoint 2 configuration parameters
-- * @cfg: Connection configuration. The connection configuration is ignored for
-- * connections made to DPMAC objects, where rate is retrieved from the
-- * MAC configuration.
-- *
-- * Return: '0' on Success; Error code otherwise.
-- */
--int dprc_connect(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint1,
-- const struct dprc_endpoint *endpoint2,
-- const struct dprc_connection_cfg *cfg)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_connect *cmd_params;
-+ /* send command to mc */
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
++
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
-
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_connect *)cmd.params;
-- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
-- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
-- cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
-- cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
-- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
-- cmd_params->ep1_type[15] = '\0';
-- cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
-- cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
-- strncpy(cmd_params->ep2_type, endpoint2->type, 16);
-- cmd_params->ep2_type[15] = '\0';
++
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
-
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
++
++ return 0;
++}
++
++/**
++ * dpio_reset() - Reset the DPIO, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPIO object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpio_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
+@@ -0,0 +1,87 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
++/*
++ * Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ */
++#ifndef __FSL_DPIO_H
++#define __FSL_DPIO_H
++
++struct fsl_mc_io;
++
++int dpio_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpio_id,
++ u16 *token);
++
++int dpio_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * enum dpio_channel_mode - DPIO notification channel mode
++ * @DPIO_NO_CHANNEL: No support for notification channel
++ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
++ * dedicated channel in the DPIO; user should point the queue's
++ * destination in the relevant interface to this DPIO
++ */
++enum dpio_channel_mode {
++ DPIO_NO_CHANNEL = 0,
++ DPIO_LOCAL_CHANNEL = 1,
++};
++
++/**
++ * struct dpio_cfg - Structure representing DPIO configuration
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ */
++struct dpio_cfg {
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++};
++
++int dpio_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpio_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpio_attr - Structure representing DPIO attributes
++ * @id: DPIO object ID
++ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
++ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
++ * @qbman_portal_id: Software portal ID
++ * @channel_mode: Notification channel mode
++ * @num_priorities: Number of priorities for the notification channel (1-8);
++ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
++ * @qbman_version: QBMAN version
++ */
++struct dpio_attr {
++ int id;
++ u64 qbman_portal_ce_offset;
++ u64 qbman_portal_ci_offset;
++ u16 qbman_portal_id;
++ enum dpio_channel_mode channel_mode;
++ u8 num_priorities;
++ u32 qbman_version;
++};
++
++int dpio_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpio_attr *attr);
++
++int dpio_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
++
++int dpio_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++#endif /* __FSL_DPIO_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
+@@ -0,0 +1,1164 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
++ *
++ */
++
++#include <asm/cacheflush.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include "../../include/dpaa2-global.h"
++
++#include "qbman-portal.h"
++
++#define QMAN_REV_4000 0x04000000
++#define QMAN_REV_4100 0x04010000
++#define QMAN_REV_4101 0x04010001
++#define QMAN_REV_MASK 0xffff0000
++
++/* All QBMan command and result structures use this "valid bit" encoding */
++#define QB_VALID_BIT ((u32)0x80)
++
++/* QBMan portal management command codes */
++#define QBMAN_MC_ACQUIRE 0x30
++#define QBMAN_WQCHAN_CONFIGURE 0x46
++
++/* CINH register offsets */
++#define QBMAN_CINH_SWP_EQAR 0x8c0
++#define QBMAN_CINH_SWP_DQPI 0xa00
++#define QBMAN_CINH_SWP_DCAP 0xac0
++#define QBMAN_CINH_SWP_SDQCR 0xb00
++#define QBMAN_CINH_SWP_RAR 0xcc0
++#define QBMAN_CINH_SWP_ISR 0xe00
++#define QBMAN_CINH_SWP_IER 0xe40
++#define QBMAN_CINH_SWP_ISDR 0xe80
++#define QBMAN_CINH_SWP_IIR 0xec0
++
++/* CENA register offsets */
++#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
++#define QBMAN_CENA_SWP_CR 0x600
++#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
++#define QBMAN_CENA_SWP_VDQCR 0x780
++
++/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
++#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
++
++/* Define token used to determine if response written to memory is valid */
++#define QMAN_DQ_TOKEN_VALID 1
++
++/* SDQCR attribute codes */
++#define QB_SDQCR_FC_SHIFT 29
++#define QB_SDQCR_FC_MASK 0x1
++#define QB_SDQCR_DCT_SHIFT 24
++#define QB_SDQCR_DCT_MASK 0x3
++#define QB_SDQCR_TOK_SHIFT 16
++#define QB_SDQCR_TOK_MASK 0xff
++#define QB_SDQCR_SRC_SHIFT 0
++#define QB_SDQCR_SRC_MASK 0xffff
++
++/* opaque token for static dequeues */
++#define QMAN_SDQCR_TOKEN 0xbb
++
++enum qbman_sdqcr_dct {
++ qbman_sdqcr_dct_null = 0,
++ qbman_sdqcr_dct_prio_ics,
++ qbman_sdqcr_dct_active_ics,
++ qbman_sdqcr_dct_active
++};
++
++enum qbman_sdqcr_fc {
++ qbman_sdqcr_fc_one = 0,
++ qbman_sdqcr_fc_up_to_3 = 1
++};
++
++#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
++#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
++static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
++{
++ dcivac(p->addr_cena + offset);
++ prefetch(p->addr_cena + offset);
++}
++
++/* Portal Access */
++
++static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
++{
++ return readl_relaxed(p->addr_cinh + offset);
++}
++
++static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
++ u32 value)
++{
++ writel_relaxed(value, p->addr_cinh + offset);
++}
++
++static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
++{
++ return p->addr_cena + offset;
++}
++
++#define QBMAN_CINH_SWP_CFG 0xd00
++
++#define SWP_CFG_DQRR_MF_SHIFT 20
++#define SWP_CFG_EST_SHIFT 16
++#define SWP_CFG_WN_SHIFT 14
++#define SWP_CFG_RPM_SHIFT 12
++#define SWP_CFG_DCM_SHIFT 10
++#define SWP_CFG_EPM_SHIFT 8
++#define SWP_CFG_SD_SHIFT 5
++#define SWP_CFG_SP_SHIFT 4
++#define SWP_CFG_SE_SHIFT 3
++#define SWP_CFG_DP_SHIFT 2
++#define SWP_CFG_DE_SHIFT 1
++#define SWP_CFG_EP_SHIFT 0
++
++static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
++ u8 epm, int sd, int sp, int se,
++ int dp, int de, int ep)
++{
++ return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
++ est << SWP_CFG_EST_SHIFT |
++ wn << SWP_CFG_WN_SHIFT |
++ rpm << SWP_CFG_RPM_SHIFT |
++ dcm << SWP_CFG_DCM_SHIFT |
++ epm << SWP_CFG_EPM_SHIFT |
++ sd << SWP_CFG_SD_SHIFT |
++ sp << SWP_CFG_SP_SHIFT |
++ se << SWP_CFG_SE_SHIFT |
++ dp << SWP_CFG_DP_SHIFT |
++ de << SWP_CFG_DE_SHIFT |
++ ep << SWP_CFG_EP_SHIFT);
++}
++
++/**
++ * qbman_swp_init() - Create a functional object representing the given
++ * QBMan portal descriptor.
++ * @d: the given qbman swp descriptor
++ *
++ * Return qbman_swp portal for success, NULL if the object cannot
++ * be created.
++ */
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
++{
++ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
++ u32 reg;
++
++ if (!p)
++ return NULL;
++ p->desc = d;
++ p->mc.valid_bit = QB_VALID_BIT;
++ p->sdq = 0;
++ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
++ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
++ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
++
++ atomic_set(&p->vdq.available, 1);
++ p->vdq.valid_bit = QB_VALID_BIT;
++ p->dqrr.next_idx = 0;
++ p->dqrr.valid_bit = QB_VALID_BIT;
++
++ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
++ p->dqrr.dqrr_size = 4;
++ p->dqrr.reset_bug = 1;
++ } else {
++ p->dqrr.dqrr_size = 8;
++ p->dqrr.reset_bug = 0;
++ }
++
++ p->addr_cena = d->cena_bar;
++ p->addr_cinh = d->cinh_bar;
++
++ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
++ 0, /* Writes cacheable */
++ 0, /* EQCR_CI stashing threshold */
++ 3, /* RPM: Valid bit mode, RCR in array mode */
++ 2, /* DCM: Discrete consumption ack mode */
++ 3, /* EPM: Valid bit mode, EQCR in array mode */
++ 0, /* mem stashing drop enable == FALSE */
++ 1, /* mem stashing priority == TRUE */
++ 0, /* mem stashing enable == FALSE */
++ 1, /* dequeue stashing priority == TRUE */
++ 0, /* dequeue stashing enable == FALSE */
++ 0); /* EQCR_CI stashing priority == FALSE */
++
++ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
++ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
++ if (!reg) {
++ pr_err("qbman: the portal is not enabled!\n");
++ return NULL;
++ }
++
++ /*
++ * SDQCR needs to be initialized to 0 when no channels are
++ * being dequeued from or else the QMan HW will indicate an
++ * error. The values that were calculated above will be
++ * applied when dequeues from a specific channel are enabled.
++ */
++ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
++ return p;
++}
++
++/**
++ * qbman_swp_finish() - Create and destroy a functional object representing
++ * the given QBMan portal descriptor.
++ * @p: the qbman_swp object to be destroyed
++ */
++void qbman_swp_finish(struct qbman_swp *p)
++{
++ kfree(p);
++}
++
++/**
++ * qbman_swp_interrupt_read_status()
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_ISR register.
++ */
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
++}
++
++/**
++ * qbman_swp_interrupt_clear_status()
++ * @p: the given software portal
++ * @mask: The mask to clear in SWP_ISR register
++ */
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
++ * @p: the given software portal
++ *
++ * Return the value in the SWP_IER register.
++ */
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
++}
++
++/**
++ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
++ * @p: the given software portal
++ * @mask: The mask of bits to enable in SWP_IER
++ */
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
++}
++
++/**
++ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
++ * @p: the given software portal object
++ *
++ * Return the value in the SWP_IIR register.
++ */
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
++{
++ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
++}
++
++/**
++ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
++ * @p: the given software portal object
++ * @mask: The mask to set in SWP_IIR register
++ */
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
++{
++ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
++}
++
++/*
++ * Different management commands all use this common base layer of code to issue
++ * commands and poll for results.
++ */
++
++/*
++ * Returns a pointer to where the caller should fill in their management command
++ * (caller should ignore the verb byte)
++ */
++void *qbman_swp_mc_start(struct qbman_swp *p)
++{
++ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
++}
++
++/*
++ * Commits merges in the caller-supplied command verb (which should not include
++ * the valid-bit) and submits the command to hardware
++ */
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
++{
++ u8 *v = cmd;
++
++ dma_wmb();
++ *v = cmd_verb | p->mc.valid_bit;
++ dccvac(cmd);
++}
++
++/*
++ * Checks for a completed response (returns non-NULL only if the response
++ * is complete).
++ */
++void *qbman_swp_mc_result(struct qbman_swp *p)
++{
++ u32 *ret, verb;
++
++ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
++
++ /* Remove the valid-bit - command completed if the rest is non-zero */
++ verb = ret[0] & ~QB_VALID_BIT;
++ if (!verb)
++ return NULL;
++ p->mc.valid_bit ^= QB_VALID_BIT;
++ return ret;
++}
++
++#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
++enum qb_enqueue_commands {
++ enqueue_empty = 0,
++ enqueue_response_always = 1,
++ enqueue_rejects_to_fq = 2
++};
++
++#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
++#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
++#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
++
++/**
++ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_eq_desc_clear(struct qbman_eq_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ */
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
++ if (respond_success)
++ d->verb |= enqueue_response_always;
++ else
++ d->verb |= enqueue_rejects_to_fq;
++}
++
++/**
++ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
++ * @d: the enqueue descriptor.
++ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
++ * rejections returned on a FQ.
++ * @oprid: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ * @incomplete: indicates whether this is the last fragments using the same
++ * sequence number.
++ */
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ u16 oprid, u16 seqnum, int incomplete)
++{
++ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
++ if (respond_success)
++ d->verb |= enqueue_response_always;
++ else
++ d->verb |= enqueue_rejects_to_fq;
++ d->orpid = cpu_to_le16(oprid);
++ d->seqnum = cpu_to_le16((!!incomplete << 14) | seqnum);
++}
++
++/**
++ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
++ * without any enqueue
++ * @d: the enqueue descriptor.
++ * @oprid: the order point record id.
++ * @seqnum: the order restoration sequence number.
++ */
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid,
++ u16 seqnum)
++{
++ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT) | enqueue_empty;
++ d->orpid = cpu_to_le16(oprid);
++ d->seqnum = cpu_to_le16(seqnum);
++}
++
++/*
++ * Exactly one of the following descriptor "targets" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * -enqueue to a frame queue
++ * -enqueue to a queuing destination
++ */
++
++/**
++ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
++ * @d: the enqueue descriptor
++ * @fqid: the id of the frame queue to be enqueued
++ */
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
++{
++ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
++ d->tgtid = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
++ * @d: the enqueue descriptor
++ * @qdid: the id of the queuing destination to be enqueued
++ * @qd_bin: the queuing destination bin
++ * @qd_prio: the queuing destination priority
++ */
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio)
++{
++ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
++ d->tgtid = cpu_to_le32(qdid);
++ d->qdbin = cpu_to_le16(qd_bin);
++ d->qpri = qd_prio;
++}
++
++#define EQAR_IDX(eqar) ((eqar) & 0x7)
++#define EQAR_VB(eqar) ((eqar) & 0x80)
++#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
++
++/**
++ * qbman_swp_enqueue() - Issue an enqueue command
++ * @s: the software portal used for enqueue
++ * @d: the enqueue descriptor
++ * @fd: the frame descriptor to be enqueued
++ *
++ * Please note that 'fd' should only be NULL if the "action" of the
++ * descriptor is "orp_hole" or "orp_nesn".
++ *
++ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
++ */
++int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd)
++{
++ struct qbman_eq_desc *p;
++ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
++
++ if (!EQAR_SUCCESS(eqar))
++ return -EBUSY;
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
++ /* This is mapped as DEVICE type memory, writes are
++ * with address alignment:
++ * desc.dca address alignment = 1
++ * desc.seqnum address alignment = 2
++ * desc.orpid address alignment = 4
++ * desc.tgtid address alignment = 8
++ */
++ p->dca = d->dca;
++ p->seqnum = d->seqnum;
++ p->orpid = d->orpid;
++ memcpy(&p->tgtid, &d->tgtid, 24);
++ memcpy(&p->fd, fd, sizeof(*fd));
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ dma_wmb();
++ p->verb = d->verb | EQAR_VB(eqar);
++ dccvac(p);
++
+ return 0;
- }
-
- /**
-- * dprc_disconnect() - Disconnect one endpoint to remove its network connection
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @endpoint: Endpoint configuration parameters
-+ * dprc_get_container_id - Get container ID associated with a given portal.
-+ * @mc_io: Pointer to Mc portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @container_id: Requested container id
- *
- * Return: '0' on Success; Error code otherwise.
- */
--int dprc_disconnect(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint)
--{
-- struct mc_command cmd = { 0 };
-- struct dprc_cmd_disconnect *cmd_params;
--
-- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
-- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
-- cmd_params->id = cpu_to_le32(endpoint->id);
-- cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
-- strncpy(cmd_params->type, endpoint->type, 16);
-- cmd_params->type[15] = '\0';
--
-- /* send command to mc*/
-- return mc_send_command(mc_io, &cmd);
--}
--
--/**
-- * dprc_get_connection() - Get connected endpoint and link status if connection
-- * exists.
-- * @mc_io: Pointer to MC portal's I/O object
-- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-- * @token: Token of DPRC object
-- * @endpoint1: Endpoint 1 configuration parameters
-- * @endpoint2: Returned endpoint 2 configuration parameters
-- * @state: Returned link state:
-- * 1 - link is up;
-- * 0 - link is down;
-- * -1 - no connection (endpoint2 information is irrelevant)
-- *
-- * Return: '0' on Success; -ENAVAIL if connection does not exist.
-- */
--int dprc_get_connection(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint1,
-- struct dprc_endpoint *endpoint2,
-- int *state)
-+int dprc_get_container_id(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int *container_id)
- {
- struct mc_command cmd = { 0 };
-- struct dprc_cmd_get_connection *cmd_params;
-- struct dprc_rsp_get_connection *rsp_params;
- int err;
-
- /* prepare command */
-- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
-+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
- cmd_flags,
-- token);
-- cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
-- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
-- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
-- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
-- cmd_params->ep1_type[15] = '\0';
-+ 0);
-
- /* send command to mc*/
- err = mc_send_command(mc_io, &cmd);
-@@ -1377,12 +819,7 @@ int dprc_get_connection(struct fsl_mc_io
- return err;
-
- /* retrieve response parameters */
-- rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
-- endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
-- endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
-- strncpy(endpoint2->type, rsp_params->ep2_type, 16);
-- endpoint2->type[15] = '\0';
-- *state = le32_to_cpu(rsp_params->state);
-+ *container_id = (int)mc_cmd_read_object_id(&cmd);
-
- return 0;
- }
---- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
-@@ -1,7 +1,7 @@
- /*
-- * Freescale MC object device allocator driver
-+ * fsl-mc object allocator driver
- *
-- * Copyright (C) 2013 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
-@@ -12,9 +12,9 @@
- #include <linux/msi.h>
- #include "../include/mc-bus.h"
- #include "../include/mc-sys.h"
--#include "../include/dpbp-cmd.h"
--#include "../include/dpcon-cmd.h"
-
-+#include "dpbp-cmd.h"
-+#include "dpcon-cmd.h"
- #include "fsl-mc-private.h"
-
- #define FSL_MC_IS_ALLOCATABLE(_obj_type) \
-@@ -23,15 +23,12 @@
- strcmp(_obj_type, "dpcon") == 0)
-
- /**
-- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
-- * pool of a given MC bus
-+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
-+ * pool of a given fsl-mc bus
- *
-- * @mc_bus: pointer to the MC bus
-- * @pool_type: MC bus pool type
-- * @mc_dev: Pointer to allocatable MC object device
-- *
-- * It adds an allocatable MC object device to a container's resource pool of
-- * the given resource type
-+ * @mc_bus: pointer to the fsl-mc bus
-+ * @pool_type: pool type
-+ * @mc_dev: pointer to allocatable fsl-mc device
- */
- static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
- *mc_bus,
-@@ -95,10 +92,10 @@ out:
- * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
- * resource pool
- *
-- * @mc_dev: Pointer to allocatable MC object device
-+ * @mc_dev: pointer to allocatable fsl-mc device
- *
-- * It permanently removes an allocatable MC object device from the resource
-- * pool, the device is currently in, as long as it is in the pool's free list.
-+ * It permanently removes an allocatable fsl-mc device from the resource
-+ * pool. It's an error if the device is in use.
- */
- static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
- *mc_dev)
-@@ -255,17 +252,18 @@ out_unlock:
- EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
-
- /**
-- * fsl_mc_object_allocate - Allocates a MC object device of the given
-- * pool type from a given MC bus
-+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
-+ * pool type from a given fsl-mc bus instance
- *
-- * @mc_dev: MC device for which the MC object device is to be allocated
-- * @pool_type: MC bus resource pool type
-- * @new_mc_dev: Pointer to area where the pointer to the allocated
-- * MC object device is to be returned
-+ * @mc_dev: fsl-mc device which is used in conjunction with the
-+ * allocated object
-+ * @pool_type: pool type
-+ * @new_mc_dev: pointer to area where the pointer to the allocated device
-+ * is to be returned
- *
-- * This function allocates a MC object device from the device's parent DPRC,
-- * from the corresponding MC bus' pool of allocatable MC object devices of
-- * the given resource type. mc_dev cannot be a DPRC itself.
-+ * Allocatable objects are always used in conjunction with some functional
-+ * device. This function allocates an object of the specified type from
-+ * the DPRC containing the functional device.
- *
- * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
- * portals are allocated using fsl_mc_portal_allocate(), instead of
-@@ -312,10 +310,9 @@ error:
- EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
-
- /**
-- * fsl_mc_object_free - Returns an allocatable MC object device to the
-- * corresponding resource pool of a given MC bus.
-- *
-- * @mc_adev: Pointer to the MC object device
-+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
-+ * pool where it came from.
-+ * @mc_adev: Pointer to the fsl-mc device
- */
- void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
- {
-@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_de
- EXPORT_SYMBOL_GPL(fsl_mc_object_free);
-
- /*
-- * Initialize the interrupt pool associated with a MC bus.
-- * It allocates a block of IRQs from the GIC-ITS
-+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
-+ * ID. A block of IRQs is pre-allocated and maintained in a pool
-+ * from which devices can allocate them when needed.
++}
++
++/* Static (push) dequeue */
++
++/**
++ * qbman_swp_push_get() - Get the push dequeue setup
++ * @p: the software portal object
++ * @channel_idx: the channel index to query
++ * @enabled: returned boolean to show whether the push dequeue is enabled
++ * for the given channel
++ */
++void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
++{
++ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++
++ WARN_ON(channel_idx > 15);
++ *enabled = src | (1 << channel_idx);
++}
++
++/**
++ * qbman_swp_push_set() - Enable or disable push dequeue
++ * @p: the software portal object
++ * @channel_idx: the channel index (0 to 15)
++ * @enable: enable or disable push dequeue
++ */
++void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
++{
++ u16 dqsrc;
++
++ WARN_ON(channel_idx > 15);
++ if (enable)
++ s->sdq |= 1 << channel_idx;
++ else
++ s->sdq &= ~(1 << channel_idx);
++
++ /* Read make the complete src map. If no channels are enabled
++ * the SDQCR must be 0 or else QMan will assert errors
++ */
++ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
++ if (dqsrc != 0)
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
++ else
++ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
++}
++
++#define QB_VDQCR_VERB_DCT_SHIFT 0
++#define QB_VDQCR_VERB_DT_SHIFT 2
++#define QB_VDQCR_VERB_RLS_SHIFT 4
++#define QB_VDQCR_VERB_WAE_SHIFT 5
++
++enum qb_pull_dt_e {
++ qb_pull_dt_channel,
++ qb_pull_dt_workqueue,
++ qb_pull_dt_framequeue
++};
++
++/**
++ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state
++ * @d: the pull dequeue descriptor to be cleared
++ */
++void qbman_pull_desc_clear(struct qbman_pull_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++}
++
++/**
++ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
++ * @d: the pull dequeue descriptor to be set
++ * @storage: the pointer of the memory to store the dequeue result
++ * @storage_phys: the physical address of the storage memory
++ * @stash: to indicate whether write allocate is enabled
++ *
++ * If not called, or if called with 'storage' as NULL, the result pull dequeues
++ * will produce results to DQRR. If 'storage' is non-NULL, then results are
++ * produced to the given memory location (using the DMA address which
++ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
++ * those writes to main-memory express a cache-warming attribute.
++ */
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash)
++{
++ /* save the virtual address */
++ d->rsp_addr_virt = (u64)(uintptr_t)storage;
++
++ if (!storage) {
++ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
++ return;
++ }
++ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
++ if (stash)
++ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
++ else
++ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
++
++ d->rsp_addr = cpu_to_le64(storage_phys);
++}
++
++/**
++ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
++ * @d: the pull dequeue descriptor to be set
++ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
++{
++ d->numf = numframes - 1;
++}
++
++void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
++{
++ d->tok = token;
++}
+
+/*
-+ * Initialize the interrupt pool associated with an fsl-mc bus.
-+ * It allocates a block of IRQs from the GIC-ITS.
- */
- int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
- unsigned int irq_count)
-@@ -395,7 +398,7 @@ cleanup_msi_irqs:
- EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
-
- /**
-- * Teardown the interrupt pool associated with an MC bus.
-+ * Teardown the interrupt pool associated with an fsl-mc bus.
- * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
- */
- void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
-@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_
- EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
-
- /**
-- * It allocates the IRQs required by a given MC object device. The
-- * IRQs are allocated from the interrupt pool associated with the
-- * MC bus that contains the device, if the device is not a DPRC device.
-- * Otherwise, the IRQs are allocated from the interrupt pool associated
-- * with the MC bus that represents the DPRC device itself.
-+ * Allocate the IRQs required by a given fsl-mc device.
- */
- int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
- {
-@@ -495,8 +494,7 @@ error_resource_alloc:
- EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
-
- /*
-- * It frees the IRQs that were allocated for a MC object device, by
-- * returning them to the corresponding interrupt pool.
-+ * Frees the IRQs that were allocated for an fsl-mc device.
- */
- void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
- {
-@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct
- return error;
-
- dev_dbg(&mc_dev->dev,
-- "Allocatable MC object device bound to fsl_mc_allocator driver");
-+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
- return 0;
- }
-
-@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struc
- }
-
- dev_dbg(&mc_dev->dev,
-- "Allocatable MC object device unbound from fsl_mc_allocator driver");
-+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
- return 0;
- }
-
---- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
-@@ -1,7 +1,7 @@
- /*
- * Freescale Management Complex (MC) bus driver
- *
-- * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
-@@ -9,6 +9,8 @@
- * warranty of any kind, whether express or implied.
- */
-
-+#define pr_fmt(fmt) "fsl-mc: " fmt
++ * Exactly one of the following descriptor "actions" should be set. (Calling any
++ * one of these will replace the effect of any prior call to one of these.)
++ * - pull dequeue from the given frame queue (FQ)
++ * - pull dequeue from any FQ in the given work queue (WQ)
++ * - pull dequeue from any FQ in any WQ in the given channel
++ */
+
- #include <linux/module.h>
- #include <linux/of_device.h>
- #include <linux/of_address.h>
-@@ -25,8 +27,6 @@
- #include "fsl-mc-private.h"
- #include "dprc-cmd.h"
-
--static struct kmem_cache *mc_dev_cache;
--
- /**
- * Default DMA mask for devices on a fsl-mc bus
- */
-@@ -34,7 +34,7 @@ static struct kmem_cache *mc_dev_cache;
-
- /**
- * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
-- * @root_mc_bus_dev: MC object device representing the root DPRC
-+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
- * @num_translation_ranges: number of entries in addr_translation_ranges
- * @translation_ranges: array of bus to system address translation ranges
- */
-@@ -62,8 +62,8 @@ struct fsl_mc_addr_translation_range {
-
- /**
- * fsl_mc_bus_match - device to driver matching callback
-- * @dev: the MC object device structure to match against
-- * @drv: the device driver to search for matching MC object device id
-+ * @dev: the fsl-mc device to match against
-+ * @drv: the device driver to search for matching fsl-mc object type
- * structures
- *
- * Returns 1 on success, 0 otherwise.
-@@ -75,8 +75,11 @@ static int fsl_mc_bus_match(struct devic
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
- bool found = false;
-
-- if (WARN_ON(!fsl_mc_bus_exists()))
-+ /* When driver_override is set, only bind to the matching driver */
-+ if (mc_dev->driver_override) {
-+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
- goto out;
++/**
++ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
++ * @fqid: the frame queue index of the given FQ
++ */
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
++{
++ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(fqid);
++}
++
++/**
++ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
++ * @wqid: composed of channel id and wqid within the channel
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(wqid);
++}
++
++/**
++ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
++ * dequeues
++ * @chid: the channel id to be dequeued
++ * @dct: the dequeue command type
++ */
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct)
++{
++ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
++ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
++ d->dq_src = cpu_to_le32(chid);
++}
++
++/**
++ * qbman_swp_pull() - Issue the pull dequeue command
++ * @s: the software portal object
++ * @d: the software portal descriptor which has been configured with
++ * the set of qbman_pull_desc_set_*() calls
++ *
++ * Return 0 for success, and -EBUSY if the software portal is not ready
++ * to do pull dequeue.
++ */
++int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
++{
++ struct qbman_pull_desc *p;
++
++ if (!atomic_dec_and_test(&s->vdq.available)) {
++ atomic_inc(&s->vdq.available);
++ return -EBUSY;
+ }
-
- if (!mc_drv->match_id_table)
- goto out;
-@@ -91,7 +94,7 @@ static int fsl_mc_bus_match(struct devic
-
- /*
- * Traverse the match_id table of the given driver, trying to find
-- * a matching for the given MC object device.
-+ * a matching for the given device.
- */
- for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
- if (id->vendor == mc_dev->obj_desc.vendor &&
-@@ -132,23 +135,141 @@ static ssize_t modalias_show(struct devi
- }
- static DEVICE_ATTR_RO(modalias);
-
-+static ssize_t rescan_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
++ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
++ p->numf = d->numf;
++ p->tok = QMAN_DQ_TOKEN_VALID;
++ p->dq_src = d->dq_src;
++ p->rsp_addr = d->rsp_addr;
++ p->rsp_addr_virt = d->rsp_addr_virt;
++ dma_wmb();
++
++ /* Set the verb byte, have to substitute in the valid-bit */
++ p->verb = d->verb | s->vdq.valid_bit;
++ s->vdq.valid_bit ^= QB_VALID_BIT;
++ dccvac(p);
++
++ return 0;
++}
++
++#define QMAN_DQRR_PI_MASK 0xf
++
++/**
++ * qbman_swp_dqrr_next() - Get an valid DQRR entry
++ * @s: the software portal object
++ *
++ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
++ * only once, so repeated calls can return a sequence of DQRR entries, without
++ * requiring they be consumed immediately or in any particular order.
++ */
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
-+ unsigned long val;
-+ unsigned int irq_count;
-+ struct fsl_mc_device *root_mc_dev;
-+ struct fsl_mc_bus *root_mc_bus;
++ u32 verb;
++ u32 response_verb;
++ u32 flags;
++ struct dpaa2_dq *p;
+
-+ if (!fsl_mc_is_root_dprc(dev))
-+ return -EINVAL;
++ /* Before using valid-bit to detect if something is there, we have to
++ * handle the case of the DQRR reset bug...
++ */
++ if (unlikely(s->dqrr.reset_bug)) {
++ /*
++ * We pick up new entries by cache-inhibited producer index,
++ * which means that a non-coherent mapping would require us to
++ * invalidate and read *only* once that PI has indicated that
++ * there's an entry here. The first trip around the DQRR ring
++ * will be much less efficient than all subsequent trips around
++ * it...
++ */
++ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
++ QMAN_DQRR_PI_MASK;
+
-+ root_mc_dev = to_fsl_mc_device(dev);
-+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
++ /* there are new entries if pi != next_idx */
++ if (pi == s->dqrr.next_idx)
++ return NULL;
+
-+ if (kstrtoul(buf, 0, &val) < 0)
-+ return -EINVAL;
++ /*
++ * if next_idx is/was the last ring index, and 'pi' is
++ * different, we can disable the workaround as all the ring
++ * entries have now been DMA'd to so valid-bit checking is
++ * repaired. Note: this logic needs to be based on next_idx
++ * (which increments one at a time), rather than on pi (which
++ * can burst and wrap-around between our snapshots of it).
++ */
++ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
++ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
++ s->dqrr.next_idx, pi);
++ s->dqrr.reset_bug = 0;
++ }
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ }
++
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ verb = p->dq.verb;
++
++ /*
++ * If the valid-bit isn't of the expected polarity, nothing there. Note,
++ * in the DQRR reset bug workaround, we shouldn't need to skip these
++ * check, because we've already determined that a new entry is available
++ * and we've invalidated the cacheline before reading it, so the
++ * valid-bit behaviour is repaired and should tell us what we already
++ * knew from reading PI.
++ */
++ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
++ return NULL;
++ }
++ /*
++ * There's something there. Move "next_idx" attention to the next ring
++ * entry (and prefetch it) before returning what we found.
++ */
++ s->dqrr.next_idx++;
++ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
++ if (!s->dqrr.next_idx)
++ s->dqrr.valid_bit ^= QB_VALID_BIT;
++
++ /*
++ * If this is the final response to a volatile dequeue command
++ * indicate that the vdq is available
++ */
++ flags = p->dq.stat;
++ response_verb = verb & QBMAN_RESULT_MASK;
++ if ((response_verb == QBMAN_RESULT_DQ) &&
++ (flags & DPAA2_DQ_STAT_VOLATILE) &&
++ (flags & DPAA2_DQ_STAT_EXPIRED))
++ atomic_inc(&s->vdq.available);
+
-+ if (val) {
-+ mutex_lock(&root_mc_bus->scan_mutex);
-+ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
-+ mutex_unlock(&root_mc_bus->scan_mutex);
-+ }
++ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+
-+ return count;
++ return p;
+}
-+static DEVICE_ATTR_WO(rescan);
+
-+static ssize_t driver_override_store(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf, size_t count)
++/**
++ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
++ * qbman_swp_dqrr_next().
++ * @s: the software portal object
++ * @dq: the DQRR entry to be consumed
++ */
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ const char *driver_override, *old = mc_dev->driver_override;
-+ char *cp;
-+
-+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
-+ return -EINVAL;
-+
-+ if (count >= (PAGE_SIZE - 1))
-+ return -EINVAL;
++ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
++}
+
-+ driver_override = kstrndup(buf, count, GFP_KERNEL);
-+ if (!driver_override)
-+ return -ENOMEM;
++/**
++ * qbman_result_has_new_result() - Check and get the dequeue response from the
++ * dq storage memory set in pull dequeue command
++ * @s: the software portal object
++ * @dq: the dequeue result read from the memory
++ *
++ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
++ * dequeue result.
++ *
++ * Only used for user-provided storage of dequeue results, not DQRR. For
++ * efficiency purposes, the driver will perform any required endianness
++ * conversion to ensure that the user's dequeue result storage is in host-endian
++ * format. As such, once the user has called qbman_result_has_new_result() and
++ * been returned a valid dequeue result, they should not call it again on
++ * the same memory location (except of course if another dequeue command has
++ * been executed to produce a new result to that location).
++ */
++int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
++{
++ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
++ return 0;
+
-+ cp = strchr(driver_override, '\n');
-+ if (cp)
-+ *cp = '\0';
++ /*
++ * Set token to be 0 so we will detect change back to 1
++ * next time the looping is traversed. Const is cast away here
++ * as we want users to treat the dequeue responses as read only.
++ */
++ ((struct dpaa2_dq *)dq)->dq.tok = 0;
+
-+ if (strlen(driver_override)) {
-+ mc_dev->driver_override = driver_override;
-+ } else {
-+ kfree(driver_override);
-+ mc_dev->driver_override = NULL;
++ /*
++ * Determine whether VDQCR is available based on whether the
++ * current result is sitting in the first storage location of
++ * the busy command.
++ */
++ if (s->vdq.storage == dq) {
++ s->vdq.storage = NULL;
++ atomic_inc(&s->vdq.available);
+ }
+
-+ kfree(old);
++ return 1;
++}
+
-+ return count;
++/**
++ * qbman_release_desc_clear() - Clear the contents of a descriptor to
++ * default/starting state.
++ */
++void qbman_release_desc_clear(struct qbman_release_desc *d)
++{
++ memset(d, 0, sizeof(*d));
++ d->verb = 1 << 5; /* Release Command Valid */
+}
+
-+static ssize_t driver_override_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
++/**
++ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
++ */
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
++ d->bpid = cpu_to_le16(bpid);
++}
+
-+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
++/**
++ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
++ * interrupt source should be asserted after the release command is completed.
++ */
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
++{
++ if (enable)
++ d->verb |= 1 << 6;
++ else
++ d->verb &= ~(1 << 6);
+}
-+static DEVICE_ATTR_RW(driver_override);
+
- static struct attribute *fsl_mc_dev_attrs[] = {
- &dev_attr_modalias.attr,
-+ &dev_attr_rescan.attr,
-+ &dev_attr_driver_override.attr,
- NULL,
- };
-
- ATTRIBUTE_GROUPS(fsl_mc_dev);
-
-+static int scan_fsl_mc_bus(struct device *dev, void *data)
++#define RAR_IDX(rar) ((rar) & 0x7)
++#define RAR_VB(rar) ((rar) & 0x80)
++#define RAR_SUCCESS(rar) ((rar) & 0x100)
++
++/**
++ * qbman_swp_release() - Issue a buffer release command
++ * @s: the software portal object
++ * @d: the release descriptor
++ * @buffers: a pointer pointing to the buffer address to be released
++ * @num_buffers: number of buffers to be released, must be less than 8
++ *
++ * Return 0 for success, -EBUSY if the release command ring is not ready.
++ */
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers)
+{
-+ unsigned int irq_count;
-+ struct fsl_mc_device *root_mc_dev;
-+ struct fsl_mc_bus *root_mc_bus;
++ int i;
++ struct qbman_release_desc *p;
++ u32 rar;
+
-+ if (fsl_mc_is_root_dprc(dev)) {
-+ root_mc_dev = to_fsl_mc_device(dev);
-+ root_mc_bus = to_fsl_mc_bus(root_mc_dev);
-+ mutex_lock(&root_mc_bus->scan_mutex);
-+ dprc_scan_objects(root_mc_dev, NULL, &irq_count);
-+ mutex_unlock(&root_mc_bus->scan_mutex);
-+ }
++ if (!num_buffers || (num_buffers > 7))
++ return -EINVAL;
++
++ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
++ if (!RAR_SUCCESS(rar))
++ return -EBUSY;
++
++ /* Start the release command */
++ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
++ /* Copy the caller's buffer pointers to the command */
++ for (i = 0; i < num_buffers; i++)
++ p->buf[i] = cpu_to_le64(buffers[i]);
++ p->bpid = d->bpid;
++
++ /*
++ * Set the verb byte, have to substitute in the valid-bit and the number
++ * of buffers.
++ */
++ dma_wmb();
++ p->verb = d->verb | RAR_VB(rar) | num_buffers;
++ dccvac(p);
+
+ return 0;
+}
+
-+static ssize_t bus_rescan_store(struct bus_type *bus,
-+ const char *buf, size_t count)
++struct qbman_acquire_desc {
++ u8 verb;
++ u8 reserved;
++ __le16 bpid;
++ u8 num;
++ u8 reserved2[59];
++};
++
++struct qbman_acquire_rslt {
++ u8 verb;
++ u8 rslt;
++ __le16 reserved;
++ u8 num;
++ u8 reserved2[3];
++ __le64 buf[7];
++};
++
++/**
++ * qbman_swp_acquire() - Issue a buffer acquire command
++ * @s: the software portal object
++ * @bpid: the buffer pool index
++ * @buffers: a pointer pointing to the acquired buffer addresses
++ * @num_buffers: number of buffers to be acquired, must be less than 8
++ *
++ * Return 0 for success, or negative error code if the acquire command
++ * fails.
++ */
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers)
+{
-+ unsigned long val;
++ struct qbman_acquire_desc *p;
++ struct qbman_acquire_rslt *r;
++ int i;
+
-+ if (kstrtoul(buf, 0, &val) < 0)
++ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
-+ if (val)
-+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus);
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
+
-+ return count;
-+}
-+static BUS_ATTR(rescan, (S_IWUSR | S_IWGRP), NULL, bus_rescan_store);
++ if (!p)
++ return -EBUSY;
+
-+static struct attribute *fsl_mc_bus_attrs[] = {
-+ &bus_attr_rescan.attr,
-+ NULL,
-+};
++ /* Encode the caller-provided attributes */
++ p->bpid = cpu_to_le16(bpid);
++ p->num = num_buffers;
+
-+static const struct attribute_group fsl_mc_bus_group = {
-+ .attrs = fsl_mc_bus_attrs,
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
++ if (unlikely(!r)) {
++ pr_err("qbman: acquire from BPID %d failed, no response\n",
++ bpid);
++ return -EIO;
++ }
++
++ /* Decode the outcome */
++ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
++
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
++ bpid, r->rslt);
++ return -EIO;
++ }
++
++ WARN_ON(r->num > num_buffers);
++
++ /* Copy the acquired buffers to the caller's array */
++ for (i = 0; i < r->num; i++)
++ buffers[i] = le64_to_cpu(r->buf[i]);
++
++ return (int)r->num;
++}
++
++struct qbman_alt_fq_state_desc {
++ u8 verb;
++ u8 reserved[3];
++ __le32 fqid;
++ u8 reserved2[56];
+};
+
-+static const struct attribute_group *fsl_mc_bus_groups[] = {
-+ &fsl_mc_bus_group,
-+ NULL,
++struct qbman_alt_fq_state_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 reserved[62];
+};
+
- struct bus_type fsl_mc_bus_type = {
- .name = "fsl-mc",
- .match = fsl_mc_bus_match,
- .uevent = fsl_mc_bus_uevent,
- .dev_groups = fsl_mc_dev_groups,
-+ .bus_groups = fsl_mc_bus_groups,
- };
- EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-
--static atomic_t root_dprc_count = ATOMIC_INIT(0);
--
- static int fsl_mc_driver_probe(struct device *dev)
- {
- struct fsl_mc_driver *mc_drv;
-@@ -164,8 +285,7 @@ static int fsl_mc_driver_probe(struct de
-
- error = mc_drv->probe(mc_dev);
- if (error < 0) {
-- dev_err(dev, "MC object device probe callback failed: %d\n",
-- error);
-+ dev_err(dev, "%s failed: %d\n", __func__, error);
- return error;
- }
-
-@@ -183,9 +303,7 @@ static int fsl_mc_driver_remove(struct d
-
- error = mc_drv->remove(mc_dev);
- if (error < 0) {
-- dev_err(dev,
-- "MC object device remove callback failed: %d\n",
-- error);
-+ dev_err(dev, "%s failed: %d\n", __func__, error);
- return error;
- }
-
-@@ -232,8 +350,6 @@ int __fsl_mc_driver_register(struct fsl_
- return error;
- }
-
-- pr_info("MC object device driver %s registered\n",
-- mc_driver->driver.name);
- return 0;
- }
- EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
-@@ -249,15 +365,6 @@ void fsl_mc_driver_unregister(struct fsl
- EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
-
- /**
-- * fsl_mc_bus_exists - check if a root dprc exists
-- */
--bool fsl_mc_bus_exists(void)
--{
-- return atomic_read(&root_dprc_count) > 0;
--}
--EXPORT_SYMBOL_GPL(fsl_mc_bus_exists);
--
--/**
- * fsl_mc_get_root_dprc - function to traverse to the root dprc
- */
- void fsl_mc_get_root_dprc(struct device *dev,
-@@ -315,21 +422,6 @@ static int get_dprc_icid(struct fsl_mc_i
- return error;
- }
-
--static int get_dprc_version(struct fsl_mc_io *mc_io,
-- int container_id, u16 *major, u16 *minor)
--{
-- struct dprc_attributes attr;
-- int error;
--
-- error = get_dprc_attr(mc_io, container_id, &attr);
-- if (error == 0) {
-- *major = attr.version.major;
-- *minor = attr.version.minor;
-- }
--
-- return error;
--}
--
- static int translate_mc_addr(struct fsl_mc_device *mc_dev,
- enum dprc_region_type mc_region_type,
- u64 mc_offset, phys_addr_t *phys_addr)
-@@ -451,18 +543,37 @@ bool fsl_mc_is_root_dprc(struct device *
- return dev == root_dprc_dev;
- }
-
-+static void fsl_mc_device_release(struct device *dev)
++#define ALT_FQ_FQID_MASK 0x00FFFFFF
++
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb)
+{
-+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-+ struct fsl_mc_bus *mc_bus = NULL;
++ struct qbman_alt_fq_state_desc *p;
++ struct qbman_alt_fq_state_rslt *r;
+
-+ kfree(mc_dev->regions);
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
+
-+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-+ mc_bus = to_fsl_mc_bus(mc_dev);
++ p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
-+ if (mc_bus)
-+ kfree(mc_bus);
-+ else
-+ kfree(mc_dev);
-+}
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
++ if (unlikely(!r)) {
++ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
++ alt_fq_verb);
++ return -EIO;
++ }
+
- /**
-- * Add a newly discovered MC object device to be visible in Linux
-+ * Add a newly discovered fsl-mc device to be visible in Linux
- */
- int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
- struct fsl_mc_io *mc_io,
- struct device *parent_dev,
-+ const char *driver_override,
- struct fsl_mc_device **new_mc_dev)
- {
- int error;
- struct fsl_mc_device *mc_dev = NULL;
- struct fsl_mc_bus *mc_bus = NULL;
- struct fsl_mc_device *parent_mc_dev;
-+ struct device *fsl_mc_platform_dev;
-+ struct device_node *fsl_mc_platform_node;
-
- if (dev_is_fsl_mc(parent_dev))
- parent_mc_dev = to_fsl_mc_device(parent_dev);
-@@ -473,7 +584,7 @@ int fsl_mc_device_add(struct dprc_obj_de
- /*
- * Allocate an MC bus device object:
- */
-- mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL);
-+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
- if (!mc_bus)
- return -ENOMEM;
-
-@@ -482,16 +593,30 @@ int fsl_mc_device_add(struct dprc_obj_de
- /*
- * Allocate a regular fsl_mc_device object:
- */
-- mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL);
-+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
- if (!mc_dev)
- return -ENOMEM;
- }
-
- mc_dev->obj_desc = *obj_desc;
- mc_dev->mc_io = mc_io;
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
-+ if (driver_override) {
-+ /*
-+ * We trust driver_override, so we don't need to use
-+ * kstrndup() here
-+ */
-+ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL);
-+ if (!mc_dev->driver_override) {
-+ error = -ENOMEM;
-+ goto error_cleanup_dev;
-+ }
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
++ fqid, r->verb, r->rslt);
++ return -EIO;
+ }
+
- device_initialize(&mc_dev->dev);
- mc_dev->dev.parent = parent_dev;
- mc_dev->dev.bus = &fsl_mc_bus_type;
-+ mc_dev->dev.release = fsl_mc_device_release;
- dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
-
- if (strcmp(obj_desc->type, "dprc") == 0) {
-@@ -524,8 +649,6 @@ int fsl_mc_device_add(struct dprc_obj_de
- }
-
- mc_io2 = mc_io;
--
-- atomic_inc(&root_dprc_count);
- }
-
- error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
-@@ -533,8 +656,8 @@ int fsl_mc_device_add(struct dprc_obj_de
- goto error_cleanup_dev;
- } else {
- /*
-- * A non-DPRC MC object device has to be a child of another
-- * MC object (specifically a DPRC object)
-+ * A non-DPRC object has to be a child of a DPRC, use the
-+ * parent's ICID and interrupt domain.
- */
- mc_dev->icid = parent_mc_dev->icid;
- mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
-@@ -556,9 +679,14 @@ int fsl_mc_device_add(struct dprc_obj_de
- goto error_cleanup_dev;
- }
-
-- /* Objects are coherent, unless 'no shareability' flag set. */
-- if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
-- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
-+ fsl_mc_platform_dev = &mc_dev->dev;
-+ while (dev_is_fsl_mc(fsl_mc_platform_dev))
-+ fsl_mc_platform_dev = fsl_mc_platform_dev->parent;
-+ fsl_mc_platform_node = fsl_mc_platform_dev->of_node;
++ return 0;
++}
+
-+ /* Set up the iommu configuration for the devices. */
-+ fsl_mc_dma_configure(mc_dev, fsl_mc_platform_node,
-+ !(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY));
-
- /*
- * The device-specific probe callback will get invoked by device_add()
-@@ -571,9 +699,7 @@ int fsl_mc_device_add(struct dprc_obj_de
- goto error_cleanup_dev;
- }
-
-- (void)get_device(&mc_dev->dev);
-- dev_dbg(parent_dev, "Added MC object device %s\n",
-- dev_name(&mc_dev->dev));
-+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
-
- *new_mc_dev = mc_dev;
- return 0;
-@@ -581,47 +707,34 @@ int fsl_mc_device_add(struct dprc_obj_de
- error_cleanup_dev:
- kfree(mc_dev->regions);
- if (mc_bus)
-- devm_kfree(parent_dev, mc_bus);
-+ kfree(mc_bus);
- else
-- kmem_cache_free(mc_dev_cache, mc_dev);
-+ kfree(mc_dev);
-
- return error;
- }
- EXPORT_SYMBOL_GPL(fsl_mc_device_add);
-
- /**
-- * fsl_mc_device_remove - Remove a MC object device from being visible to
-+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
- * Linux
- *
-- * @mc_dev: Pointer to a MC object device object
-+ * @mc_dev: Pointer to an fsl-mc device
- */
- void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
- {
-- struct fsl_mc_bus *mc_bus = NULL;
--
-- kfree(mc_dev->regions);
-+ kfree(mc_dev->driver_override);
-+ mc_dev->driver_override = NULL;
-
- /*
- * The device-specific remove callback will get invoked by device_del()
- */
- device_del(&mc_dev->dev);
-- put_device(&mc_dev->dev);
-
-- if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
-- mc_bus = to_fsl_mc_bus(mc_dev);
--
-- if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
-- if (atomic_read(&root_dprc_count) > 0)
-- atomic_dec(&root_dprc_count);
-- else
-- WARN_ON(1);
-- }
-- }
-+ if (strcmp(mc_dev->obj_desc.type, "dprc") != 0)
-+ mc_dev->dev.iommu_fwspec = NULL;
-
-- if (mc_bus)
-- devm_kfree(mc_dev->dev.parent, mc_bus);
-- else
-- kmem_cache_free(mc_dev_cache, mc_dev);
-+ put_device(&mc_dev->dev);
- }
- EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
-
-@@ -629,8 +742,7 @@ static int parse_mc_ranges(struct device
- int *paddr_cells,
- int *mc_addr_cells,
- int *mc_size_cells,
-- const __be32 **ranges_start,
-- u8 *num_ranges)
-+ const __be32 **ranges_start)
- {
- const __be32 *prop;
- int range_tuple_cell_count;
-@@ -643,8 +755,6 @@ static int parse_mc_ranges(struct device
- dev_warn(dev,
- "missing or empty ranges property for device tree node '%s'\n",
- mc_node->name);
--
-- *num_ranges = 0;
- return 0;
- }
-
-@@ -671,8 +781,7 @@ static int parse_mc_ranges(struct device
- return -EINVAL;
- }
-
-- *num_ranges = ranges_len / tuple_len;
-- return 0;
-+ return ranges_len / tuple_len;
- }
-
- static int get_mc_addr_translation_ranges(struct device *dev,
-@@ -680,7 +789,7 @@ static int get_mc_addr_translation_range
- **ranges,
- u8 *num_ranges)
- {
-- int error;
-+ int ret;
- int paddr_cells;
- int mc_addr_cells;
- int mc_size_cells;
-@@ -688,16 +797,16 @@ static int get_mc_addr_translation_range
- const __be32 *ranges_start;
- const __be32 *cell;
-
-- error = parse_mc_ranges(dev,
-+ ret = parse_mc_ranges(dev,
- &paddr_cells,
- &mc_addr_cells,
- &mc_size_cells,
-- &ranges_start,
-- num_ranges);
-- if (error < 0)
-- return error;
-+ &ranges_start);
-+ if (ret < 0)
-+ return ret;
-
-- if (!(*num_ranges)) {
-+ *num_ranges = ret;
-+ if (!ret) {
- /*
- * Missing or empty ranges property ("ranges;") for the
- * 'fsl,qoriq-mc' node. In this case, identity mapping
-@@ -749,8 +858,6 @@ static int fsl_mc_bus_probe(struct platf
- struct mc_version mc_version;
- struct resource res;
-
-- dev_info(&pdev->dev, "Root MC bus device probed");
--
- mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
- if (!mc)
- return -ENOMEM;
-@@ -783,8 +890,7 @@ static int fsl_mc_bus_probe(struct platf
- goto error_cleanup_mc_io;
- }
-
-- dev_info(&pdev->dev,
-- "Freescale Management Complex Firmware version: %u.%u.%u\n",
-+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
- mc_version.major, mc_version.minor, mc_version.revision);
-
- error = get_mc_addr_translation_ranges(&pdev->dev,
-@@ -793,16 +899,17 @@ static int fsl_mc_bus_probe(struct platf
- if (error < 0)
- goto error_cleanup_mc_io;
-
-- error = dpmng_get_container_id(mc_io, 0, &container_id);
-+ error = dprc_get_container_id(mc_io, 0, &container_id);
- if (error < 0) {
- dev_err(&pdev->dev,
-- "dpmng_get_container_id() failed: %d\n", error);
-+ "dprc_get_container_id() failed: %d\n", error);
- goto error_cleanup_mc_io;
- }
-
- memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
-- error = get_dprc_version(mc_io, container_id,
-- &obj_desc.ver_major, &obj_desc.ver_minor);
-+ error = dprc_get_api_version(mc_io, 0,
-+ &obj_desc.ver_major,
-+ &obj_desc.ver_minor);
- if (error < 0)
- goto error_cleanup_mc_io;
-
-@@ -812,7 +919,8 @@ static int fsl_mc_bus_probe(struct platf
- obj_desc.irq_count = 1;
- obj_desc.region_count = 0;
-
-- error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
-+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL,
-+ &mc_bus_dev);
- if (error < 0)
- goto error_cleanup_mc_io;
-
-@@ -840,7 +948,6 @@ static int fsl_mc_bus_remove(struct plat
- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
- mc->root_mc_bus_dev->mc_io = NULL;
-
-- dev_info(&pdev->dev, "Root MC bus device removed");
- return 0;
- }
-
-@@ -865,22 +972,12 @@ static int __init fsl_mc_bus_driver_init
- {
- int error;
-
-- mc_dev_cache = kmem_cache_create("fsl_mc_device",
-- sizeof(struct fsl_mc_device), 0, 0,
-- NULL);
-- if (!mc_dev_cache) {
-- pr_err("Could not create fsl_mc_device cache\n");
-- return -ENOMEM;
-- }
--
- error = bus_register(&fsl_mc_bus_type);
- if (error < 0) {
-- pr_err("fsl-mc bus type registration failed: %d\n", error);
-+ pr_err("bus type registration failed: %d\n", error);
- goto error_cleanup_cache;
- }
-
-- pr_info("fsl-mc bus type registered\n");
--
- error = platform_driver_register(&fsl_mc_bus_driver);
- if (error < 0) {
- pr_err("platform_driver_register() failed: %d\n", error);
-@@ -914,7 +1011,6 @@ error_cleanup_bus:
- bus_unregister(&fsl_mc_bus_type);
-
- error_cleanup_cache:
-- kmem_cache_destroy(mc_dev_cache);
- return error;
- }
- postcore_initcall(fsl_mc_bus_driver_init);
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-iommu.c
-@@ -0,0 +1,104 @@
-+/*
-+ * Copyright 2016-17 NXP
-+ * Author: Nipun Gupta <nipun.gupta@nxp.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
++struct qbman_cdan_ctrl_desc {
++ u8 verb;
++ u8 reserved;
++ __le16 ch;
++ u8 we;
++ u8 ctrl;
++ __le16 reserved2;
++ __le64 cdan_ctx;
++ u8 reserved3[48];
+
-+#include <linux/iommu.h>
-+#include <linux/of.h>
-+#include <linux/of_iommu.h>
-+#include "../include/mc.h"
++};
++
++struct qbman_cdan_ctrl_rslt {
++ u8 verb;
++ u8 rslt;
++ __le16 ch;
++ u8 reserved[60];
++};
+
-+/* Setup the IOMMU for the DPRC container */
-+static const struct iommu_ops
-+*fsl_mc_iommu_configure(struct fsl_mc_device *mc_dev,
-+ struct device_node *fsl_mc_platform_node)
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx)
+{
-+ struct of_phandle_args iommu_spec;
-+ const struct iommu_ops *ops;
-+ u32 iommu_phandle;
-+ struct device_node *iommu_node;
-+ const __be32 *map = NULL;
-+ int iommu_cells, map_len, ret;
-+
-+ map = of_get_property(fsl_mc_platform_node, "iommu-map", &map_len);
-+ if (!map)
-+ return NULL;
++ struct qbman_cdan_ctrl_desc *p = NULL;
++ struct qbman_cdan_ctrl_rslt *r = NULL;
+
-+ ops = mc_dev->dev.bus->iommu_ops;
-+ if (!ops || !ops->of_xlate)
-+ return NULL;
++ /* Start the management command */
++ p = qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
+
-+ iommu_phandle = be32_to_cpup(map + 1);
-+ iommu_node = of_find_node_by_phandle(iommu_phandle);
++ /* Encode the caller-provided attributes */
++ p->ch = cpu_to_le16(channelid);
++ p->we = we_mask;
++ if (cdan_en)
++ p->ctrl = 1;
++ else
++ p->ctrl = 0;
++ p->cdan_ctx = cpu_to_le64(ctx);
+
-+ if (of_property_read_u32(iommu_node, "#iommu-cells", &iommu_cells)) {
-+ pr_err("%s: missing #iommu-cells property\n", iommu_node->name);
-+ return NULL;
++ /* Complete the management command */
++ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
++ if (unlikely(!r)) {
++ pr_err("qbman: wqchan config failed, no response\n");
++ return -EIO;
+ }
+
-+ /* Initialize the fwspec */
-+ ret = iommu_fwspec_init(&mc_dev->dev, &iommu_node->fwnode, ops);
-+ if (ret)
-+ return NULL;
++ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
-+ /*
-+ * Fill in the required stream-id before calling the iommu's
-+ * ops->xlate callback.
-+ */
-+ iommu_spec.np = iommu_node;
-+ iommu_spec.args[0] = mc_dev->icid;
-+ iommu_spec.args_count = 1;
++ /* Determine success or failure */
++ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
++ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
++ channelid, r->rslt);
++ return -EIO;
++ }
+
-+ ret = ops->of_xlate(&mc_dev->dev, &iommu_spec);
-+ if (ret)
-+ return NULL;
++ return 0;
++}
+
-+ of_node_put(iommu_spec.np);
++#define QBMAN_RESPONSE_VERB_MASK 0x7f
++#define QBMAN_FQ_QUERY_NP 0x45
++#define QBMAN_BP_QUERY 0x32
+
-+ return ops;
-+}
++struct qbman_fq_query_desc {
++ u8 verb;
++ u8 reserved[3];
++ u32 fqid;
++ u8 reserved2[56];
++};
+
-+/* Set up DMA configuration for fsl-mc devices */
-+void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
-+ struct device_node *fsl_mc_platform_node, int coherent)
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_fq_query_np_rslt *r)
+{
-+ const struct iommu_ops *ops;
++ struct qbman_fq_query_desc *p;
++ void *resp;
+
-+ ops = fsl_mc_iommu_configure(mc_dev, fsl_mc_platform_node);
++ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
+
-+ mc_dev->dev.coherent_dma_mask = DMA_BIT_MASK(48);
-+ mc_dev->dev.dma_mask = &mc_dev->dev.coherent_dma_mask;
-+ arch_setup_dma_ops(&mc_dev->dev, 0,
-+ mc_dev->dev.coherent_dma_mask + 1, ops, coherent);
++ /* FQID is a 24 bit value */
++ p->fqid = cpu_to_le32(fqid) & 0x00FFFFFF;
++ resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
++ if (!resp) {
++ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
++ fqid);
++ return -EIO;
++ }
++ *r = *(struct qbman_fq_query_np_rslt *)resp;
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
++
++ /* Determine success or failure */
++ if (r->rslt != QBMAN_MC_RSLT_OK) {
++ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
++ p->fqid, r->rslt);
++ return -EIO;
++ }
++
++ return 0;
+}
+
-+/* Macro to get the container device of a MC device */
-+#define fsl_mc_cont_dev(_dev) ((to_fsl_mc_device(_dev)->flags & \
-+ FSL_MC_IS_DPRC) ? (_dev) : ((_dev)->parent))
++u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
++{
++ return (r->frm_cnt & 0x00FFFFFF);
++}
+
-+/* Macro to check if a device is a container device */
-+#define is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & FSL_MC_IS_DPRC)
++u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
++{
++ return r->byte_cnt;
++}
++
++struct qbman_bp_query_desc {
++ u8 verb;
++ u8 reserved;
++ u16 bpid;
++ u8 reserved2[60];
++};
+
-+/* Get the IOMMU group for device on fsl-mc bus */
-+struct iommu_group *fsl_mc_device_group(struct device *dev)
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_bp_query_rslt *r)
+{
-+ struct device *cont_dev = fsl_mc_cont_dev(dev);
-+ struct iommu_group *group;
++ struct qbman_bp_query_desc *p;
++ void *resp;
+
-+ /* Container device is responsible for creating the iommu group */
-+ if (is_cont_dev(dev)) {
-+ group = iommu_group_alloc();
-+ if (IS_ERR(group))
-+ return NULL;
-+ } else {
-+ get_device(cont_dev);
-+ group = iommu_group_get(cont_dev);
-+ put_device(cont_dev);
++ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
++ if (!p)
++ return -EBUSY;
++
++ p->bpid = bpid;
++ resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
++ if (!resp) {
++ pr_err("qbman: Query BPID %d fields failed, no response\n",
++ bpid);
++ return -EIO;
++ }
++ *r = *(struct qbman_bp_query_rslt *)resp;
++ /* Decode the outcome */
++ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
++
++ /* Determine success or failure */
++ if (r->rslt != QBMAN_MC_RSLT_OK) {
++ pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
++ bpid, r->rslt);
++ return -EIO;
+ }
+
-+ return group;
++ return 0;
+}
---- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
-@@ -1,7 +1,7 @@
- /*
- * Freescale Management Complex (MC) bus driver MSI support
- *
-- * Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
---- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
-+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
-@@ -10,13 +10,15 @@
- #ifndef _FSL_MC_PRIVATE_H_
- #define _FSL_MC_PRIVATE_H_
-
-+#include "../include/mc.h"
-+#include "../include/mc-bus.h"
+
- int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
- struct fsl_mc_io *mc_io,
- struct device *parent_dev,
-+ const char *driver_override,
- struct fsl_mc_device **new_mc_dev);
-
--void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
--
- int __init dprc_driver_init(void);
-
- void dprc_driver_exit(void);
---- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
-@@ -1,7 +1,7 @@
- /*
- * Freescale Management Complex (MC) bus driver MSI support
- *
-- * Copyright (C) 2015 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
-@@ -20,7 +20,7 @@
- #include "fsl-mc-private.h"
-
- static struct irq_chip its_msi_irq_chip = {
-- .name = "fsl-mc-bus-msi",
-+ .name = "ITS-fMSI",
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_eoi = irq_chip_eoi_parent,
-@@ -52,7 +52,7 @@ static int its_fsl_mc_msi_prepare(struct
- return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
- }
-
--static struct msi_domain_ops its_fsl_mc_msi_ops = {
-+static struct msi_domain_ops its_fsl_mc_msi_ops __ro_after_init = {
- .msi_prepare = its_fsl_mc_msi_prepare,
- };
-
-@@ -97,8 +97,8 @@ int __init its_fsl_mc_msi_init(void)
- continue;
- }
-
-- WARN_ON(mc_msi_domain->
-- host_data != &its_fsl_mc_msi_domain_info);
-+ WARN_ON(mc_msi_domain->host_data !=
-+ &its_fsl_mc_msi_domain_info);
-
- pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
- }
---- a/drivers/staging/fsl-mc/bus/mc-io.c
-+++ b/drivers/staging/fsl-mc/bus/mc-io.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -11,7 +12,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
++u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
++{
++ return a->fill;
++}
--- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h
-@@ -0,0 +1,22 @@
++++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
+@@ -0,0 +1,505 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
-+ * Freescale Management Complex (MC) ioclt interface
-+ *
-+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Author: Lijun Pan <Lijun.Pan@freescale.com>
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Copyright 2016 NXP
+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
+ */
-+#ifndef _FSL_MC_IOCTL_H_
-+#define _FSL_MC_IOCTL_H_
++#ifndef __FSL_QBMAN_PORTAL_H
++#define __FSL_QBMAN_PORTAL_H
+
-+#include <linux/ioctl.h>
-+#include "../include/mc-sys.h"
++#include "../../include/dpaa2-fd.h"
+
-+#define RESTOOL_IOCTL_TYPE 'R'
++struct dpaa2_dq;
++struct qbman_swp;
+
-+#define RESTOOL_SEND_MC_COMMAND \
-+ _IOWR(RESTOOL_IOCTL_TYPE, 0xE0, struct mc_command)
++/* qbman software portal descriptor structure */
++struct qbman_swp_desc {
++ void *cena_bar; /* Cache-enabled portal base address */
++ void *cinh_bar; /* Cache-inhibited portal base address */
++ u32 qman_version;
++};
+
-+#endif /* _FSL_MC_IOCTL_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/bus/mc-restool.c
-@@ -0,0 +1,405 @@
-+/*
-+ * Freescale Management Complex (MC) restool driver
-+ *
-+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Author: Lijun Pan <Lijun.Pan@freescale.com>
-+ *
-+ * This file is licensed under the terms of the GNU General Public
-+ * License version 2. This program is licensed "as is" without any
-+ * warranty of any kind, whether express or implied.
-+ */
++#define QBMAN_SWP_INTERRUPT_EQRI 0x01
++#define QBMAN_SWP_INTERRUPT_EQDI 0x02
++#define QBMAN_SWP_INTERRUPT_DQRI 0x04
++#define QBMAN_SWP_INTERRUPT_RCRI 0x08
++#define QBMAN_SWP_INTERRUPT_RCDI 0x10
++#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
-+#include "../include/mc.h"
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/miscdevice.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/mutex.h>
-+#include <linux/platform_device.h>
-+#include "mc-ioctl.h"
-+#include "../include/mc-sys.h"
-+#include "../include/mc-bus.h"
-+#include "../include/mc-cmd.h"
-+#include "../include/dpmng.h"
++/* the structure for pull dequeue descriptor */
++struct qbman_pull_desc {
++ u8 verb;
++ u8 numf;
++ u8 tok;
++ u8 reserved;
++ __le32 dq_src;
++ __le64 rsp_addr;
++ u64 rsp_addr_virt;
++ u8 padding[40];
++};
+
-+/**
-+ * Maximum number of DPRCs that can be opened at the same time
-+ */
-+#define MAX_DPRC_HANDLES 64
++enum qbman_pull_type_e {
++ /* dequeue with priority precedence, respect intra-class scheduling */
++ qbman_pull_type_prio = 1,
++ /* dequeue with active FQ precedence, respect ICS */
++ qbman_pull_type_active,
++ /* dequeue with active FQ precedence, no ICS */
++ qbman_pull_type_active_noics
++};
++
++/* Definitions for parsing dequeue entries */
++#define QBMAN_RESULT_MASK 0x7f
++#define QBMAN_RESULT_DQ 0x60
++#define QBMAN_RESULT_FQRN 0x21
++#define QBMAN_RESULT_FQRNI 0x22
++#define QBMAN_RESULT_FQPN 0x24
++#define QBMAN_RESULT_FQDAN 0x25
++#define QBMAN_RESULT_CDAN 0x26
++#define QBMAN_RESULT_CSCN_MEM 0x27
++#define QBMAN_RESULT_CGCU 0x28
++#define QBMAN_RESULT_BPSCN 0x29
++#define QBMAN_RESULT_CSCN_WQ 0x2a
++
++/* QBMan FQ management command codes */
++#define QBMAN_FQ_SCHEDULE 0x48
++#define QBMAN_FQ_FORCE 0x49
++#define QBMAN_FQ_XON 0x4d
++#define QBMAN_FQ_XOFF 0x4e
++
++/* structure of enqueue descriptor */
++struct qbman_eq_desc {
++ u8 verb;
++ u8 dca;
++ __le16 seqnum;
++ __le16 orpid;
++ __le16 reserved1;
++ __le32 tgtid;
++ __le32 tag;
++ __le16 qdbin;
++ u8 qpri;
++ u8 reserved[3];
++ u8 wae;
++ u8 rspid;
++ __le64 rsp_addr;
++ u8 fd[32];
++};
++
++/* buffer release descriptor */
++struct qbman_release_desc {
++ u8 verb;
++ u8 reserved;
++ __le16 bpid;
++ __le32 reserved2;
++ __le64 buf[7];
++};
++
++/* Management command result codes */
++#define QBMAN_MC_RSLT_OK 0xf0
++
++#define CODE_CDAN_WE_EN 0x1
++#define CODE_CDAN_WE_CTX 0x4
+
-+/**
-+ * restool_misc - information associated with the newly added miscdevice
-+ * @misc: newly created miscdevice associated with root dprc
-+ * @miscdevt: device id of this miscdevice
-+ * @list: a linked list node representing this miscdevcie
-+ * @static_mc_io: pointer to the static MC I/O object used by the restool
-+ * @dynamic_instance_count: number of dynamically created instances
-+ * @static_instance_in_use: static instance is in use or not
-+ * @mutex: mutex lock to serialze the open/release operations
-+ * @dev: root dprc associated with this miscdevice
-+ */
-+struct restool_misc {
-+ struct miscdevice misc;
-+ dev_t miscdevt;
-+ struct list_head list;
-+ struct fsl_mc_io *static_mc_io;
-+ u32 dynamic_instance_count;
-+ bool static_instance_in_use;
-+ struct mutex mutex; /* serialze the open/release operations */
-+ struct device *dev;
-+};
++/* portal data structure */
++struct qbman_swp {
++ const struct qbman_swp_desc *desc;
++ void __iomem *addr_cena;
++ void __iomem *addr_cinh;
+
-+/**
-+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
-+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
-+ * @num_translation_ranges: number of entries in addr_translation_ranges
-+ * @translation_ranges: array of bus to system address translation ranges
-+ */
-+struct fsl_mc {
-+ struct fsl_mc_device *root_mc_bus_dev;
-+ u8 num_translation_ranges;
-+ struct fsl_mc_addr_translation_range *translation_ranges;
-+};
++ /* Management commands */
++ struct {
++ u32 valid_bit; /* 0x00 or 0x80 */
++ } mc;
+
-+/*
-+ * initialize a global list to link all
-+ * the miscdevice nodes (struct restool_misc)
-+ */
-+static LIST_HEAD(misc_list);
-+static DEFINE_MUTEX(misc_list_mutex);
++ /* Push dequeues */
++ u32 sdq;
+
-+static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep)
-+{
-+ struct fsl_mc_device *root_mc_dev;
-+ int error;
-+ struct fsl_mc_io *dynamic_mc_io = NULL;
-+ struct restool_misc *restool_misc = NULL;
-+ struct restool_misc *restool_misc_cursor;
++ /* Volatile dequeues */
++ struct {
++ atomic_t available; /* indicates if a command can be sent */
++ u32 valid_bit; /* 0x00 or 0x80 */
++ struct dpaa2_dq *storage; /* NULL if DQRR */
++ } vdq;
+
-+ mutex_lock(&misc_list_mutex);
++ /* DQRR */
++ struct {
++ u32 next_idx;
++ u32 valid_bit;
++ u8 dqrr_size;
++ int reset_bug; /* indicates dqrr reset workaround is needed */
++ } dqrr;
++};
+
-+ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
-+ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
-+ restool_misc = restool_misc_cursor;
-+ break;
-+ }
-+ }
++struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
++void qbman_swp_finish(struct qbman_swp *p);
++u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
++void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
++u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
++void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
++int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
++void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
-+ mutex_unlock(&misc_list_mutex);
++void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
++void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
-+ if (!restool_misc)
-+ return -EINVAL;
++void qbman_pull_desc_clear(struct qbman_pull_desc *d);
++void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
++ struct dpaa2_dq *storage,
++ dma_addr_t storage_phys,
++ int stash);
++void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
++void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
++void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
++ enum qbman_pull_type_e dct);
++void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
++ enum qbman_pull_type_e dct);
+
-+ if (WARN_ON(!restool_misc->dev))
-+ return -EINVAL;
++int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
+
-+ mutex_lock(&restool_misc->mutex);
++const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
++void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
-+ if (!restool_misc->static_instance_in_use) {
-+ restool_misc->static_instance_in_use = true;
-+ filep->private_data = restool_misc->static_mc_io;
-+ } else {
-+ dynamic_mc_io = kzalloc(sizeof(*dynamic_mc_io), GFP_KERNEL);
-+ if (!dynamic_mc_io) {
-+ error = -ENOMEM;
-+ goto err_unlock;
-+ }
++int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
-+ root_mc_dev = to_fsl_mc_device(restool_misc->dev);
-+ error = fsl_mc_portal_allocate(root_mc_dev, 0, &dynamic_mc_io);
-+ if (error < 0) {
-+ pr_err("Not able to allocate MC portal\n");
-+ goto free_dynamic_mc_io;
-+ }
-+ ++restool_misc->dynamic_instance_count;
-+ filep->private_data = dynamic_mc_io;
-+ }
++void qbman_eq_desc_clear(struct qbman_eq_desc *d);
++void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
++void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
++ u16 oprid, u16 seqnum, int incomplete);
++void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid, u16 seqnum);
++void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
++void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
++void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
++ u32 qd_bin, u32 qd_prio);
+
-+ mutex_unlock(&restool_misc->mutex);
++int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
++ const struct dpaa2_fd *fd);
+
-+ return 0;
++void qbman_release_desc_clear(struct qbman_release_desc *d);
++void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
++void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
-+free_dynamic_mc_io:
-+ kfree(dynamic_mc_io);
-+err_unlock:
-+ mutex_unlock(&restool_misc->mutex);
++int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
++ const u64 *buffers, unsigned int num_buffers);
++int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
++ unsigned int num_buffers);
++int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
++ u8 alt_fq_verb);
++int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
++ u8 we_mask, u8 cdan_en,
++ u64 ctx);
+
-+ return error;
++void *qbman_swp_mc_start(struct qbman_swp *p);
++void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
++void *qbman_swp_mc_result(struct qbman_swp *p);
++
++/**
++ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
++ * @dq: the dequeue result to be checked
++ *
++ * DQRR entries may contain non-dequeue results, ie. notifications
++ */
++static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
-+static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep)
++/**
++ * qbman_result_is_SCN() - Check the dequeue result is notification or not
++ * @dq: the dequeue result to be checked
++ *
++ */
++static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
-+ struct fsl_mc_io *local_mc_io = filep->private_data;
-+ struct restool_misc *restool_misc = NULL;
-+ struct restool_misc *restool_misc_cursor;
++ return !qbman_result_is_DQ(dq);
++}
+
-+ if (WARN_ON(!filep->private_data))
-+ return -EINVAL;
++/* FQ Data Availability */
++static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
++}
+
-+ mutex_lock(&misc_list_mutex);
++/* Channel Data Availability */
++static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
++}
+
-+ list_for_each_entry(restool_misc_cursor, &misc_list, list) {
-+ if (restool_misc_cursor->miscdevt == inode->i_rdev) {
-+ restool_misc = restool_misc_cursor;
-+ break;
-+ }
-+ }
++/* Congestion State Change */
++static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
++}
+
-+ mutex_unlock(&misc_list_mutex);
++/* Buffer Pool State Change */
++static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
++}
+
-+ if (!restool_misc)
-+ return -EINVAL;
++/* Congestion Group Count Update */
++static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
++}
+
-+ mutex_lock(&restool_misc->mutex);
++/* Retirement */
++static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
++}
+
-+ if (WARN_ON(restool_misc->dynamic_instance_count == 0 &&
-+ !restool_misc->static_instance_in_use)) {
-+ mutex_unlock(&restool_misc->mutex);
-+ return -EINVAL;
-+ }
++/* Retirement Immediate */
++static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
++}
+
-+ /* Globally clean up opened/untracked handles */
-+ fsl_mc_portal_reset(local_mc_io);
++/* Park */
++static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
++{
++ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
++}
+
-+ /*
-+ * must check
-+ * whether local_mc_io is dynamic or static instance
-+ * Otherwise it will free up the reserved portal by accident
-+ * or even not free up the dynamic allocated portal
-+ * if 2 or more instances running concurrently
-+ */
-+ if (local_mc_io == restool_misc->static_mc_io) {
-+ restool_misc->static_instance_in_use = false;
-+ } else {
-+ fsl_mc_portal_free(local_mc_io);
-+ kfree(filep->private_data);
-+ --restool_misc->dynamic_instance_count;
-+ }
++/**
++ * qbman_result_SCN_state() - Get the state field in State-change notification
++ */
++static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
++{
++ return scn->scn.state;
++}
+
-+ filep->private_data = NULL;
-+ mutex_unlock(&restool_misc->mutex);
++#define SCN_RID_MASK 0x00FFFFFF
+
-+ return 0;
++/**
++ * qbman_result_SCN_rid() - Get the resource id in State-change notification
++ */
++static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
++{
++ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
-+static int restool_send_mc_command(unsigned long arg,
-+ struct fsl_mc_io *local_mc_io)
++/**
++ * qbman_result_SCN_ctx() - Get the context data in State-change notification
++ */
++static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
-+ int error;
-+ struct mc_command mc_cmd;
-+
-+ if (copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)))
-+ return -EFAULT;
++ return le64_to_cpu(scn->scn.ctx);
++}
+
-+ /*
-+ * Send MC command to the MC:
-+ */
-+ error = mc_send_command(local_mc_io, &mc_cmd);
-+ if (error < 0)
-+ return error;
++/**
++ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be scheduled
++ *
++ * There are a couple of different ways that a FQ can end up in the parked state,
++ * This schedules it.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
++}
+
-+ if (copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)))
-+ return -EFAULT;
++/**
++ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
++ * @s: the software portal object
++ * @fqid: the index of frame queue to be forced
++ *
++ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
++ * and thus be available for selection by any channel-dequeuing behaviour (push
++ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
++ * empty at the time this happens, the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
++}
+
-+ return 0;
++/**
++ * qbman_swp_fq_xon() - sets FQ flow-control to XON
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
++{
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
-+static long
-+fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++/**
++ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
++ * @s: the software portal object
++ * @fqid: the index of frame queue
++ *
++ * This setting doesn't affect enqueues to the FQ, just dequeues.
++ * XOFF FQs will remain in the tentatively-scheduled state, even when
++ * non-empty, meaning they won't be selected for scheduled dequeuing.
++ * If a FQ is changed to XOFF after it had already become truly-scheduled
++ * to a channel, and a pull dequeue of that channel occurs that selects
++ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
++ * (qbman_result_DQ_fd() will return NULL.)
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
-+ int error;
-+
-+ switch (cmd) {
-+ case RESTOOL_SEND_MC_COMMAND:
-+ error = restool_send_mc_command(arg, file->private_data);
-+ break;
-+ default:
-+ pr_err("%s: unexpected ioctl call number\n", __func__);
-+ error = -EINVAL;
-+ }
-+
-+ return error;
++ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
-+static const struct file_operations fsl_mc_restool_dev_fops = {
-+ .owner = THIS_MODULE,
-+ .open = fsl_mc_restool_dev_open,
-+ .release = fsl_mc_restool_dev_release,
-+ .unlocked_ioctl = fsl_mc_restool_dev_ioctl,
-+};
++/* If the user has been allocated a channel object that is going to generate
++ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
++ * necessary.
++ *
++ * CDAN-enabled channels only generate a single CDAN notification, after which
++ * they need to be reenabled before they'll generate another. The idea is
++ * that pull dequeuing will occur in reaction to the CDAN, followed by a
++ * reenable step. Each function generates a distinct command to hardware, so a
++ * combination function is provided if the user wishes to modify the "context"
++ * (which shows up in each CDAN message) each time they reenable, as a single
++ * command to hardware.
++ */
+
-+static int restool_add_device_file(struct device *dev)
++/**
++ * qbman_swp_CDAN_set_context() - Set CDAN context
++ * @s: the software portal object
++ * @channelid: the channel index
++ * @ctx: the context to be set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
++ u64 ctx)
+{
-+ u32 name1 = 0;
-+ char name2[20] = {0};
-+ int error;
-+ struct fsl_mc_device *root_mc_dev;
-+ struct restool_misc *restool_misc;
-+
-+ if (dev->bus == &platform_bus_type && dev->driver_data) {
-+ if (sscanf(dev_name(dev), "%x.%s", &name1, name2) != 2)
-+ return -EINVAL;
-+
-+ if (strcmp(name2, "fsl-mc") == 0)
-+ pr_debug("platform's root dprc name is: %s\n",
-+ dev_name(&(((struct fsl_mc *)
-+ (dev->driver_data))->root_mc_bus_dev->dev)));
-+ }
-+
-+ if (!fsl_mc_is_root_dprc(dev))
-+ return 0;
-+
-+ restool_misc = kzalloc(sizeof(*restool_misc), GFP_KERNEL);
-+ if (!restool_misc)
-+ return -ENOMEM;
-+
-+ restool_misc->dev = dev;
-+ root_mc_dev = to_fsl_mc_device(dev);
-+ error = fsl_mc_portal_allocate(root_mc_dev, 0,
-+ &restool_misc->static_mc_io);
-+ if (error < 0) {
-+ pr_err("Not able to allocate MC portal\n");
-+ goto free_restool_misc;
-+ }
-+
-+ restool_misc->misc.minor = MISC_DYNAMIC_MINOR;
-+ restool_misc->misc.name = dev_name(dev);
-+ restool_misc->misc.fops = &fsl_mc_restool_dev_fops;
-+
-+ error = misc_register(&restool_misc->misc);
-+ if (error < 0) {
-+ pr_err("misc_register() failed: %d\n", error);
-+ goto free_portal;
-+ }
-+
-+ restool_misc->miscdevt = restool_misc->misc.this_device->devt;
-+ mutex_init(&restool_misc->mutex);
-+ mutex_lock(&misc_list_mutex);
-+ list_add(&restool_misc->list, &misc_list);
-+ mutex_unlock(&misc_list_mutex);
-+
-+ pr_info("/dev/%s driver registered\n", dev_name(dev));
-+
-+ return 0;
-+
-+free_portal:
-+ fsl_mc_portal_free(restool_misc->static_mc_io);
-+free_restool_misc:
-+ kfree(restool_misc);
-+
-+ return error;
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_CTX,
++ 0, ctx);
+}
+
-+static int restool_bus_notifier(struct notifier_block *nb,
-+ unsigned long action, void *data)
++/**
++ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
-+ int error;
-+ struct device *dev = data;
-+
-+ switch (action) {
-+ case BUS_NOTIFY_ADD_DEVICE:
-+ error = restool_add_device_file(dev);
-+ if (error)
-+ return error;
-+ break;
-+ case BUS_NOTIFY_DEL_DEVICE:
-+ case BUS_NOTIFY_REMOVED_DEVICE:
-+ case BUS_NOTIFY_BIND_DRIVER:
-+ case BUS_NOTIFY_BOUND_DRIVER:
-+ case BUS_NOTIFY_UNBIND_DRIVER:
-+ case BUS_NOTIFY_UNBOUND_DRIVER:
-+ break;
-+ default:
-+ pr_err("%s: unrecognized device action from %s\n", __func__,
-+ dev_name(dev));
-+ return -EINVAL;
-+ }
-+
-+ return 0;
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 1, 0);
+}
+
-+static int add_to_restool(struct device *dev, void *data)
++/**
++ * qbman_swp_CDAN_disable() - disable CDAN for the channel
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
-+ return restool_add_device_file(dev);
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN,
++ 0, 0);
+}
+
-+static int __init fsl_mc_restool_driver_init(void)
++/**
++ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
++ * @s: the software portal object
++ * @channelid: the index of the channel to generate CDAN
++ * @ctx: the context set in CDAN
++ *
++ * Return 0 for success, or negative error code for failure.
++ */
++static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
++ u16 channelid,
++ u64 ctx)
+{
-+ int error;
-+ struct notifier_block *nb;
++ return qbman_swp_CDAN_set(s, channelid,
++ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
++ 1, ctx);
++}
+
-+ nb = kzalloc(sizeof(*nb), GFP_KERNEL);
-+ if (!nb)
-+ return -ENOMEM;
++/* Wraps up submit + poll-for-result */
++static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
++ u8 cmd_verb)
++{
++ int loopvar = 1000;
+
-+ nb->notifier_call = restool_bus_notifier;
-+ error = bus_register_notifier(&fsl_mc_bus_type, nb);
-+ if (error)
-+ goto free_nb;
++ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
-+ /*
-+ * This driver runs after fsl-mc bus driver runs.
-+ * Hence, many of the root dprcs are already attached to fsl-mc bus
-+ * In order to make sure we find all the root dprcs,
-+ * we need to scan the fsl_mc_bus_type.
-+ */
-+ error = bus_for_each_dev(&fsl_mc_bus_type, NULL, NULL, add_to_restool);
-+ if (error) {
-+ bus_unregister_notifier(&fsl_mc_bus_type, nb);
-+ kfree(nb);
-+ pr_err("restool driver registration failure\n");
-+ return error;
-+ }
++ do {
++ cmd = qbman_swp_mc_result(swp);
++ } while (!cmd && loopvar--);
+
-+ return 0;
++ WARN_ON(!loopvar);
+
-+free_nb:
-+ kfree(nb);
-+ return error;
++ return cmd;
+}
+
-+module_init(fsl_mc_restool_driver_init);
-+
-+static void __exit fsl_mc_restool_driver_exit(void)
-+{
-+ struct restool_misc *restool_misc;
-+ struct restool_misc *restool_misc_tmp;
-+ char name1[20] = {0};
-+ u32 name2 = 0;
-+
-+ list_for_each_entry_safe(restool_misc, restool_misc_tmp,
-+ &misc_list, list) {
-+ if (sscanf(restool_misc->misc.name, "%4s.%u", name1, &name2)
-+ != 2)
-+ continue;
-+
-+ pr_debug("name1=%s,name2=%u\n", name1, name2);
-+ pr_debug("misc-device: %s\n", restool_misc->misc.name);
-+ if (strcmp(name1, "dprc") != 0)
-+ continue;
-+
-+ if (WARN_ON(!restool_misc->static_mc_io))
-+ return;
++/* Query APIs */
++struct qbman_fq_query_np_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 st1;
++ u8 st2;
++ u8 reserved[2];
++ u16 od1_sfdr;
++ u16 od2_sfdr;
++ u16 od3_sfdr;
++ u16 ra1_sfdr;
++ u16 ra2_sfdr;
++ u32 pfdr_hptr;
++ u32 pfdr_tptr;
++ u32 frm_cnt;
++ u32 byte_cnt;
++ u16 ics_surp;
++ u8 is;
++ u8 reserved2[29];
++};
+
-+ if (WARN_ON(restool_misc->dynamic_instance_count != 0))
-+ return;
++int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
++ struct qbman_fq_query_np_rslt *r);
++u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
++u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
+
-+ if (WARN_ON(restool_misc->static_instance_in_use))
-+ return;
++struct qbman_bp_query_rslt {
++ u8 verb;
++ u8 rslt;
++ u8 reserved[4];
++ u8 bdi;
++ u8 state;
++ u32 fill;
++ u32 hdotr;
++ u16 swdet;
++ u16 swdxt;
++ u16 hwdet;
++ u16 hwdxt;
++ u16 swset;
++ u16 swsxt;
++ u16 vbpid;
++ u16 icid;
++ u64 bpscn_addr;
++ u64 bpscn_ctx;
++ u16 hw_targ;
++ u8 dbe;
++ u8 reserved2;
++ u8 sdcnt;
++ u8 hdcnt;
++ u8 sscnt;
++ u8 reserved3[9];
++};
+
-+ misc_deregister(&restool_misc->misc);
-+ pr_info("/dev/%s driver unregistered\n",
-+ restool_misc->misc.name);
-+ fsl_mc_portal_free(restool_misc->static_mc_io);
-+ list_del(&restool_misc->list);
-+ kfree(restool_misc);
-+ }
-+}
++int qbman_bp_query(struct qbman_swp *s, u32 bpid,
++ struct qbman_bp_query_rslt *r);
+
-+module_exit(fsl_mc_restool_driver_exit);
++u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+
-+MODULE_AUTHOR("Freescale Semiconductor Inc.");
-+MODULE_DESCRIPTION("Freescale's MC restool driver");
-+MODULE_LICENSE("GPL");
---- a/drivers/staging/fsl-mc/bus/mc-sys.c
-+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2014 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * I/O services to send MC commands to the MC hardware
- *
-@@ -13,7 +14,6 @@
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
-- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
-@@ -46,7 +46,7 @@
- /**
- * Timeout in milliseconds to wait for the completion of an MC command
- */
--#define MC_CMD_COMPLETION_TIMEOUT_MS 500
-+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
-
- /*
- * usleep_range() min and max values used to throttle down polling
-@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct
- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
- u16 cmd_id = le16_to_cpu(hdr->cmd_id);
-
-- return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
-+ return cmd_id;
- }
-
- static int mc_status_to_error(enum mc_cmd_status status)
-@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(s
-
- if (time_after_eq(jiffies, jiffies_until_timeout)) {
- dev_dbg(mc_io->dev,
-- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
-+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
- mc_io->portal_phys_addr,
- (unsigned int)mc_cmd_hdr_read_token(cmd),
- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
-@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct
- timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
- if (timeout_usecs == 0) {
- dev_dbg(mc_io->dev,
-- "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
-+ "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
- mc_io->portal_phys_addr,
- (unsigned int)mc_cmd_hdr_read_token(cmd),
- (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
-@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc
-
- if (status != MC_CMD_STATUS_OK) {
- dev_dbg(mc_io->dev,
-- "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
-+ "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
- mc_io->portal_phys_addr,
- (unsigned int)mc_cmd_hdr_read_token(cmd),
- (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
++#endif /* __FSL_QBMAN_PORTAL_H */
+--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
++++ /dev/null
+@@ -1,140 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPMCP_CMD_H
+-#define _FSL_DPMCP_CMD_H
+-
+-/* Minimal supported DPMCP Version */
+-#define DPMCP_MIN_VER_MAJOR 3
+-#define DPMCP_MIN_VER_MINOR 0
+-
+-/* Command IDs */
+-#define DPMCP_CMDID_CLOSE 0x800
+-#define DPMCP_CMDID_OPEN 0x80b
+-#define DPMCP_CMDID_CREATE 0x90b
+-#define DPMCP_CMDID_DESTROY 0x900
+-
+-#define DPMCP_CMDID_GET_ATTR 0x004
+-#define DPMCP_CMDID_RESET 0x005
+-
+-#define DPMCP_CMDID_SET_IRQ 0x010
+-#define DPMCP_CMDID_GET_IRQ 0x011
+-#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPMCP_CMDID_SET_IRQ_MASK 0x014
+-#define DPMCP_CMDID_GET_IRQ_MASK 0x015
+-#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+-
+-struct dpmcp_cmd_open {
+- __le32 dpmcp_id;
+-};
+-
+-struct dpmcp_cmd_create {
+- __le32 portal_id;
+-};
+-
+-struct dpmcp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpmcp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq {
+- /* cmd word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* cmd word 1 */
+- __le64 irq_paddr;
+- /* cmd word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-#define DPMCP_ENABLE 0x1
+-
+-struct dpmcp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpmcp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpmcp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpmcp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpmcp_rsp_get_attributes {
+- /* response word 0 */
+- __le32 pad;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+-#endif /* _FSL_DPMCP_CMD_H */
+--- a/drivers/staging/fsl-mc/bus/dpmcp.c
++++ /dev/null
+@@ -1,504 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#include "../include/mc-sys.h"
+-#include "../include/mc-cmd.h"
+-
+-#include "dpmcp.h"
+-#include "dpmcp-cmd.h"
+-
+-/**
+- * dpmcp_open() - Open a control session for the specified object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @dpmcp_id: DPMCP unique ID
+- * @token: Returned token; use in subsequent API calls
+- *
+- * This function can be used to open a control session for an
+- * already created object; an object may have been declared in
+- * the DPL or by calling the dpmcp_create function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent commands for
+- * this specific object
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_open(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int dpmcp_id,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_open *cmd_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+- cmd_flags, 0);
+- cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+- cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return err;
+-}
+-
+-/**
+- * dpmcp_close() - Close the control session of the object
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- *
+- * After this function is called, no further operations are
+- * allowed on the object without opening a new control session.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_create() - Create the DPMCP object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @cfg: Configuration structure
+- * @token: Returned token; use in subsequent API calls
+- *
+- * Create the DPMCP object, allocate required resources and
+- * perform required initialization.
+- *
+- * The object can be created either by declaring it in the
+- * DPL file, or by calling this function.
+- * This function returns a unique authentication token,
+- * associated with the specific object ID and the specific MC
+- * portal; this token must be used in all subsequent calls to
+- * this specific object. For objects that are created using the
+- * DPL file, call dpmcp_open function to get an authentication
+- * token first.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_create *cmd_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
+- cmd_flags, 0);
+- cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- *
+- * Return: '0' on Success; error code otherwise.
+- */
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_reset(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpmcp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq() - Get IRQ information from the DPMCP.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq *cmd_params;
+- struct dpmcp_rsp_get_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPMCP_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_enable() - Get overall interrupt state
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_enable *cmd_params;
+- struct dpmcp_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPMCP_ENABLE;
+- return 0;
+-}
+-
+-/**
+- * dpmcp_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting IRQ
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dpmcp_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_mask *cmd_params;
+- struct dpmcp_rsp_get_irq_mask *rsp_params;
+-
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_irq_status() - Get the current status of any pending interrupts.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_cmd_get_irq_status *cmd_params;
+- struct dpmcp_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dpmcp_get_attributes - Retrieve DPMCP attributes.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPMCP object
+- * @attr: Returned object's attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpmcp_attr *attr)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmcp_rsp_get_attributes *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+- attr->id = le32_to_cpu(rsp_params->id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+-
+- return 0;
+-}
+--- a/drivers/staging/fsl-mc/bus/dpmcp.h
++++ /dev/null
+@@ -1,159 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef __FSL_DPMCP_H
+-#define __FSL_DPMCP_H
+-
+-/* Data Path Management Command Portal API
+- * Contains initialization APIs and runtime control APIs for DPMCP
+- */
+-
+-struct fsl_mc_io;
+-
+-int dpmcp_open(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- int dpmcp_id,
+- uint16_t *token);
+-
+-/* Get portal ID from pool */
+-#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
+-
+-int dpmcp_close(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/**
+- * struct dpmcp_cfg - Structure representing DPMCP configuration
+- * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID
+- * from pool
+- */
+-struct dpmcp_cfg {
+- int portal_id;
+-};
+-
+-int dpmcp_create(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- const struct dpmcp_cfg *cfg,
+- uint16_t *token);
+-
+-int dpmcp_destroy(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-int dpmcp_reset(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token);
+-
+-/* IRQ */
+-/* IRQ Index */
+-#define DPMCP_IRQ_INDEX 0
+-/* irq event - Indicates that the link state changed */
+-#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001
+-
+-/**
+- * struct dpmcp_irq_cfg - IRQ configuration
+- * @paddr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpmcp_irq_cfg {
+- uint64_t paddr;
+- uint32_t val;
+- int irq_num;
+-};
+-
+-int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- int *type,
+- struct dpmcp_irq_cfg *irq_cfg);
+-
+-int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t en);
+-
+-int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint8_t *en);
+-
+-int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t mask);
+-
+-int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *mask);
+-
+-int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- uint8_t irq_index,
+- uint32_t *status);
+-
+-/**
+- * struct dpmcp_attr - Structure representing DPMCP attributes
+- * @id: DPMCP object ID
+- * @version: DPMCP version
+- */
+-struct dpmcp_attr {
+- int id;
+- /**
+- * struct version - Structure representing DPMCP version
+- * @major: DPMCP major version
+- * @minor: DPMCP minor version
+- */
+- struct {
+- uint16_t major;
+- uint16_t minor;
+- } version;
+-};
+-
+-int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+- uint32_t cmd_flags,
+- uint16_t token,
+- struct dpmcp_attr *attr);
+-
+-#endif /* __FSL_DPMCP_H */
+--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
++++ /dev/null
+@@ -1,58 +0,0 @@
+-/*
+- * Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-/*
+- * dpmng-cmd.h
+- *
+- * defines portal commands
+- *
+- */
+-
+-#ifndef __FSL_DPMNG_CMD_H
+-#define __FSL_DPMNG_CMD_H
+-
+-/* Command IDs */
+-#define DPMNG_CMDID_GET_CONT_ID 0x830
+-#define DPMNG_CMDID_GET_VERSION 0x831
+-
+-struct dpmng_rsp_get_container_id {
+- __le32 container_id;
+-};
+-
+-struct dpmng_rsp_get_version {
+- __le32 revision;
+- __le32 version_major;
+- __le32 version_minor;
+-};
+-
+-#endif /* __FSL_DPMNG_CMD_H */
+--- a/drivers/staging/fsl-mc/bus/dpmng.c
++++ /dev/null
+@@ -1,107 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#include "../include/mc-sys.h"
+-#include "../include/mc-cmd.h"
+-#include "../include/dpmng.h"
+-
+-#include "dpmng-cmd.h"
+-
+-/**
+- * mc_get_version() - Retrieves the Management Complex firmware
+- * version information
+- * @mc_io: Pointer to opaque I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @mc_ver_info: Returned version information structure
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int mc_get_version(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- struct mc_version *mc_ver_info)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmng_rsp_get_version *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+- cmd_flags,
+- 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+- mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+- mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+- mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(mc_get_version);
+-
+-/**
+- * dpmng_get_container_id() - Get container ID associated with a given portal.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @container_id: Requested container ID
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dpmng_rsp_get_container_id *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
+- cmd_flags,
+- 0);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+- *container_id = le32_to_cpu(rsp_params->container_id);
+-
+- return 0;
+-}
+-
+--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
++++ /dev/null
+@@ -1,465 +0,0 @@
+-/*
+- * Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-/*
+- * dprc-cmd.h
+- *
+- * defines dprc portal commands
+- *
+- */
+-
+-#ifndef _FSL_DPRC_CMD_H
+-#define _FSL_DPRC_CMD_H
+-
+-/* Minimal supported DPRC Version */
+-#define DPRC_MIN_VER_MAJOR 5
+-#define DPRC_MIN_VER_MINOR 0
+-
+-/* Command IDs */
+-#define DPRC_CMDID_CLOSE 0x800
+-#define DPRC_CMDID_OPEN 0x805
+-#define DPRC_CMDID_CREATE 0x905
+-
+-#define DPRC_CMDID_GET_ATTR 0x004
+-#define DPRC_CMDID_RESET_CONT 0x005
+-
+-#define DPRC_CMDID_SET_IRQ 0x010
+-#define DPRC_CMDID_GET_IRQ 0x011
+-#define DPRC_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPRC_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPRC_CMDID_SET_IRQ_MASK 0x014
+-#define DPRC_CMDID_GET_IRQ_MASK 0x015
+-#define DPRC_CMDID_GET_IRQ_STATUS 0x016
+-#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPRC_CMDID_CREATE_CONT 0x151
+-#define DPRC_CMDID_DESTROY_CONT 0x152
+-#define DPRC_CMDID_SET_RES_QUOTA 0x155
+-#define DPRC_CMDID_GET_RES_QUOTA 0x156
+-#define DPRC_CMDID_ASSIGN 0x157
+-#define DPRC_CMDID_UNASSIGN 0x158
+-#define DPRC_CMDID_GET_OBJ_COUNT 0x159
+-#define DPRC_CMDID_GET_OBJ 0x15A
+-#define DPRC_CMDID_GET_RES_COUNT 0x15B
+-#define DPRC_CMDID_GET_RES_IDS 0x15C
+-#define DPRC_CMDID_GET_OBJ_REG 0x15E
+-#define DPRC_CMDID_SET_OBJ_IRQ 0x15F
+-#define DPRC_CMDID_GET_OBJ_IRQ 0x160
+-#define DPRC_CMDID_SET_OBJ_LABEL 0x161
+-#define DPRC_CMDID_GET_OBJ_DESC 0x162
+-
+-#define DPRC_CMDID_CONNECT 0x167
+-#define DPRC_CMDID_DISCONNECT 0x168
+-#define DPRC_CMDID_GET_POOL 0x169
+-#define DPRC_CMDID_GET_POOL_COUNT 0x16A
+-
+-#define DPRC_CMDID_GET_CONNECTION 0x16C
+-
+-struct dprc_cmd_open {
+- __le32 container_id;
+-};
+-
+-struct dprc_cmd_create_container {
+- /* cmd word 0 */
+- __le32 options;
+- __le16 icid;
+- __le16 pad0;
+- /* cmd word 1 */
+- __le32 pad1;
+- __le32 portal_id;
+- /* cmd words 2-3 */
+- u8 label[16];
+-};
+-
+-struct dprc_rsp_create_container {
+- /* response word 0 */
+- __le64 pad0;
+- /* response word 1 */
+- __le32 child_container_id;
+- __le32 pad1;
+- /* response word 2 */
+- __le64 child_portal_addr;
+-};
+-
+-struct dprc_cmd_destroy_container {
+- __le32 child_container_id;
+-};
+-
+-struct dprc_cmd_reset_container {
+- __le32 child_container_id;
+-};
+-
+-struct dprc_cmd_set_irq {
+- /* cmd word 0 */
+- __le32 irq_val;
+- u8 irq_index;
+- u8 pad[3];
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dprc_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dprc_rsp_get_irq {
+- /* response word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* response word 1 */
+- __le64 irq_addr;
+- /* response word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-#define DPRC_ENABLE 0x1
+-
+-struct dprc_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dprc_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dprc_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dprc_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dprc_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dprc_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dprc_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dprc_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dprc_cmd_clear_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dprc_rsp_get_attributes {
+- /* response word 0 */
+- __le32 container_id;
+- __le16 icid;
+- __le16 pad;
+- /* response word 1 */
+- __le32 options;
+- __le32 portal_id;
+- /* response word 2 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+-struct dprc_cmd_set_res_quota {
+- /* cmd word 0 */
+- __le32 child_container_id;
+- __le16 quota;
+- __le16 pad;
+- /* cmd words 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_cmd_get_res_quota {
+- /* cmd word 0 */
+- __le32 child_container_id;
+- __le32 pad;
+- /* cmd word 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_res_quota {
+- __le32 pad;
+- __le16 quota;
+-};
+-
+-struct dprc_cmd_assign {
+- /* cmd word 0 */
+- __le32 container_id;
+- __le32 options;
+- /* cmd word 1 */
+- __le32 num;
+- __le32 id_base_align;
+- /* cmd word 2-3 */
+- u8 type[16];
+-};
+-
+-struct dprc_cmd_unassign {
+- /* cmd word 0 */
+- __le32 child_container_id;
+- __le32 options;
+- /* cmd word 1 */
+- __le32 num;
+- __le32 id_base_align;
+- /* cmd word 2-3 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_pool_count {
+- __le32 pool_count;
+-};
+-
+-struct dprc_cmd_get_pool {
+- __le32 pool_index;
+-};
+-
+-struct dprc_rsp_get_pool {
+- /* response word 0 */
+- __le64 pad;
+- /* response word 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_obj_count {
+- __le32 pad;
+- __le32 obj_count;
+-};
+-
+-struct dprc_cmd_get_obj {
+- __le32 obj_index;
+-};
+-
+-struct dprc_rsp_get_obj {
+- /* response word 0 */
+- __le32 pad0;
+- __le32 id;
+- /* response word 1 */
+- __le16 vendor;
+- u8 irq_count;
+- u8 region_count;
+- __le32 state;
+- /* response word 2 */
+- __le16 version_major;
+- __le16 version_minor;
+- __le16 flags;
+- __le16 pad1;
+- /* response word 3-4 */
+- u8 type[16];
+- /* response word 5-6 */
+- u8 label[16];
+-};
+-
+-struct dprc_cmd_get_obj_desc {
+- /* cmd word 0 */
+- __le32 obj_id;
+- __le32 pad;
+- /* cmd word 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_obj_desc {
+- /* response word 0 */
+- __le32 pad0;
+- __le32 id;
+- /* response word 1 */
+- __le16 vendor;
+- u8 irq_count;
+- u8 region_count;
+- __le32 state;
+- /* response word 2 */
+- __le16 version_major;
+- __le16 version_minor;
+- __le16 flags;
+- __le16 pad1;
+- /* response word 3-4 */
+- u8 type[16];
+- /* response word 5-6 */
+- u8 label[16];
+-};
+-
+-struct dprc_cmd_get_res_count {
+- /* cmd word 0 */
+- __le64 pad;
+- /* cmd word 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_res_count {
+- __le32 res_count;
+-};
+-
+-struct dprc_cmd_get_res_ids {
+- /* cmd word 0 */
+- u8 pad0[5];
+- u8 iter_status;
+- __le16 pad1;
+- /* cmd word 1 */
+- __le32 base_id;
+- __le32 last_id;
+- /* cmd word 2-3 */
+- u8 type[16];
+-};
+-
+-struct dprc_rsp_get_res_ids {
+- /* response word 0 */
+- u8 pad0[5];
+- u8 iter_status;
+- __le16 pad1;
+- /* response word 1 */
+- __le32 base_id;
+- __le32 last_id;
+-};
+-
+-struct dprc_cmd_get_obj_region {
+- /* cmd word 0 */
+- __le32 obj_id;
+- __le16 pad0;
+- u8 region_index;
+- u8 pad1;
+- /* cmd word 1-2 */
+- __le64 pad2[2];
+- /* cmd word 3-4 */
+- u8 obj_type[16];
+-};
+-
+-struct dprc_rsp_get_obj_region {
+- /* response word 0 */
+- __le64 pad;
+- /* response word 1 */
+- __le64 base_addr;
+- /* response word 2 */
+- __le32 size;
+-};
+-
+-struct dprc_cmd_set_obj_label {
+- /* cmd word 0 */
+- __le32 obj_id;
+- __le32 pad;
+- /* cmd word 1-2 */
+- u8 label[16];
+- /* cmd word 3-4 */
+- u8 obj_type[16];
+-};
+-
+-struct dprc_cmd_set_obj_irq {
+- /* cmd word 0 */
+- __le32 irq_val;
+- u8 irq_index;
+- u8 pad[3];
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+- __le32 obj_id;
+- /* cmd word 3-4 */
+- u8 obj_type[16];
+-};
+-
+-struct dprc_cmd_get_obj_irq {
+- /* cmd word 0 */
+- __le32 obj_id;
+- u8 irq_index;
+- u8 pad[3];
+- /* cmd word 1-2 */
+- u8 obj_type[16];
+-};
+-
+-struct dprc_rsp_get_obj_irq {
+- /* response word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* response word 1 */
+- __le64 irq_addr;
+- /* response word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-struct dprc_cmd_connect {
+- /* cmd word 0 */
+- __le32 ep1_id;
+- __le32 ep1_interface_id;
+- /* cmd word 1 */
+- __le32 ep2_id;
+- __le32 ep2_interface_id;
+- /* cmd word 2-3 */
+- u8 ep1_type[16];
+- /* cmd word 4 */
+- __le32 max_rate;
+- __le32 committed_rate;
+- /* cmd word 5-6 */
+- u8 ep2_type[16];
+-};
+-
+-struct dprc_cmd_disconnect {
+- /* cmd word 0 */
+- __le32 id;
+- __le32 interface_id;
+- /* cmd word 1-2 */
+- u8 type[16];
+-};
+-
+-struct dprc_cmd_get_connection {
+- /* cmd word 0 */
+- __le32 ep1_id;
+- __le32 ep1_interface_id;
+- /* cmd word 1-2 */
+- u8 ep1_type[16];
+-};
+-
+-struct dprc_rsp_get_connection {
+- /* response word 0-2 */
+- __le64 pad[3];
+- /* response word 3 */
+- __le32 ep2_id;
+- __le32 ep2_interface_id;
+- /* response word 4-5 */
+- u8 ep2_type[16];
+- /* response word 6 */
+- __le32 state;
+-};
+-
+-#endif /* _FSL_DPRC_CMD_H */
+--- a/drivers/staging/fsl-mc/bus/dprc.c
++++ /dev/null
+@@ -1,1388 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#include "../include/mc-sys.h"
+-#include "../include/mc-cmd.h"
+-#include "../include/dprc.h"
+-
+-#include "dprc-cmd.h"
+-
+-/**
+- * dprc_open() - Open DPRC object for use
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @container_id: Container ID to open
+- * @token: Returned token of DPRC object
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- * @warning Required before any operation on the object.
+- */
+-int dprc_open(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int container_id,
+- u16 *token)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_open *cmd_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+- 0);
+- cmd_params = (struct dprc_cmd_open *)cmd.params;
+- cmd_params->container_id = cpu_to_le32(container_id);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- *token = mc_cmd_hdr_read_token(&cmd);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_open);
+-
+-/**
+- * dprc_close() - Close the control session of the object
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- *
+- * After this function is called, no further operations are
+- * allowed on the object without opening a new control session.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token)
+-{
+- struct mc_command cmd = { 0 };
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+- token);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dprc_close);
+-
+-/**
+- * dprc_create_container() - Create child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @cfg: Child container configuration
+- * @child_container_id: Returned child container ID
+- * @child_portal_offset: Returned child portal offset from MC portal base
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_create_container *cmd_params;
+- struct dprc_rsp_create_container *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+- cmd_params->options = cpu_to_le32(cfg->options);
+- cmd_params->icid = cpu_to_le16(cfg->icid);
+- cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+- strncpy(cmd_params->label, cfg->label, 16);
+- cmd_params->label[15] = '\0';
+-
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+- *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+- *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_destroy_container() - Destroy child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the container to destroy
+- *
+- * This function terminates the child container, so following this call the
+- * child container ID becomes invalid.
+- *
+- * Notes:
+- * - All resources and objects of the destroyed container are returned to the
+- * parent container or destroyed if were created be the destroyed container.
+- * - This function destroy all the child containers of the specified
+- * container prior to destroying the container itself.
+- *
+- * warning: Only the parent container is allowed to destroy a child policy
+- * Container 0 can't be destroyed
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_destroy_container *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_reset_container - Reset child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the container to reset
+- *
+- * In case a software context crashes or becomes non-responsive, the parent
+- * may wish to reset its resources container before the software context is
+- * restarted.
+- *
+- * This routine informs all objects assigned to the child container that the
+- * container is being reset, so they may perform any cleanup operations that are
+- * needed. All objects handles that were owned by the child container shall be
+- * closed.
+- *
+- * Note that such request may be submitted even if the child software context
+- * has not crashed, but the resulting object cleanup operations will not be
+- * aware of that.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_reset_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_reset_container *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_irq() - Get IRQ information from the DPRC.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_irq *cmd_params;
+- struct dprc_rsp_get_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: Identifies the interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_irq_enable() - Get overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @en: Returned interrupt state - enable = 1, disable = 0
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_irq_enable *cmd_params;
+- struct dprc_rsp_get_irq_enable *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
+- *en = rsp_params->enabled & DPRC_ENABLE;
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_irq_enable() - Set overall interrupt state.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @en: Interrupt state - enable = 1, disable = 0
+- *
+- * Allows GPP software to control when interrupts are generated.
+- * Each interrupt can have up to 32 causes. The enable/disable control's the
+- * overall interrupt state. if the interrupt is disabled no causes will cause
+- * an interrupt.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_irq_enable *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+- cmd_params->enable = en & DPRC_ENABLE;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_irq_mask() - Get interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @mask: Returned event mask to trigger interrupt
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_irq_mask *cmd_params;
+- struct dprc_rsp_get_irq_mask *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
+- *mask = le32_to_cpu(rsp_params->mask);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_irq_mask() - Set interrupt mask.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @mask: event mask to trigger interrupt;
+- * each bit:
+- * 0 = ignore event
+- * 1 = consider event for asserting irq
+- *
+- * Every interrupt can have up to 32 causes and the interrupt model supports
+- * masking/unmasking each cause independently
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_irq_mask *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+- cmd_params->mask = cpu_to_le32(mask);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_irq_status() - Get the current status of any pending interrupts.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @status: Returned interrupts status - one bit per cause:
+- * 0 = no interrupt pending
+- * 1 = interrupt pending
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_irq_status *cmd_params;
+- struct dprc_rsp_get_irq_status *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(*status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+- *status = le32_to_cpu(rsp_params->status);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_clear_irq_status() - Clear a pending interrupt's status
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @irq_index: The interrupt index to configure
+- * @status: bits to clear (W1C) - one bit per cause:
+- * 0 = don't change
+- * 1 = clear status bit
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_clear_irq_status *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+- cmd_params->status = cpu_to_le32(status);
+- cmd_params->irq_index = irq_index;
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_attributes() - Obtains container attributes
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @attributes Returned container attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_attributes *attr)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_rsp_get_attributes *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+- cmd_flags,
+- token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+- attr->container_id = le32_to_cpu(rsp_params->container_id);
+- attr->icid = le16_to_cpu(rsp_params->icid);
+- attr->options = le32_to_cpu(rsp_params->options);
+- attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+- attr->version.major = le16_to_cpu(rsp_params->version_major);
+- attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_set_res_quota() - Set allocation policy for a specific resource/object
+- * type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @type: Resource/object type
+- * @quota: Sets the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from its parent;
+- * when quota is set to -1, the policy is the same as container's
+- * general policy.
+- *
+- * Allocation policy determines whether or not a container may allocate
+- * resources from its parent. Each container has a 'global' allocation policy
+- * that is set when the container is created.
+- *
+- * This function sets allocation policy for a specific resource type.
+- * The default policy for all resource types matches the container's 'global'
+- * allocation policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- * @warning Only the parent container is allowed to change a child policy.
+- */
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_res_quota *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->quota = cpu_to_le16(quota);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_res_quota() - Gets the allocation policy of a specific
+- * resource/object type in a child container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id; ID of the child container
+- * @type: resource/object type
+- * @quota: Returnes the maximum number of resources of the selected type
+- * that the child container is allowed to allocate from the parent;
+- * when quota is set to -1, the policy is the same as container's
+- * general policy.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_quota *cmd_params;
+- struct dprc_rsp_get_res_quota *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+- *quota = le16_to_cpu(rsp_params->quota);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_assign() - Assigns objects or resource to a child container.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to
+- * assign to the given container
+- *
+- * Assignment is usually done by a parent (this DPRC) to one of its child
+- * containers.
+- *
+- * According to the DPRC allocation policy, the assigned resources may be taken
+- * (allocated) from the container's ancestors, if not enough resources are
+- * available in the container itself.
+- *
+- * The type of assignment depends on the dprc_res_req options, as follows:
+- * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have
+- * the explicit base ID specified at the id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be
+- * aligned to the value given at id_base_align field of res_req.
+- * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment,
+- * and indicates that the object must be set to the plugged state.
+- *
+- * A container may use this function with its own ID in order to change a
+- * object state to plugged or unplugged.
+- *
+- * If IRQ information has been set in the child DPRC, it will signal an
+- * interrupt following every change in its object assignment.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_assign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_assign *)cmd.params;
+- cmd_params->container_id = cpu_to_le32(container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_unassign() - Un-assigns objects or resources from a child container
+- * and moves them into this (parent) DPRC.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @child_container_id: ID of the child container
+- * @res_req: Describes the type and amount of resources to un-assign from
+- * the child container
+- *
+- * Un-assignment of objects can succeed only if the object is not in the
+- * plugged or opened state.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_unassign *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+- cmd_params->child_container_id = cpu_to_le32(child_container_id);
+- cmd_params->options = cpu_to_le32(res_req->options);
+- cmd_params->num = cpu_to_le32(res_req->num);
+- cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+- strncpy(cmd_params->type, res_req->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_pool_count() - Get the number of dprc's pools
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @mc_io: Pointer to MC portal's I/O object
+- * @token: Token of DPRC object
+- * @pool_count: Returned number of resource pools in the dprc
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_rsp_get_pool_count *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+- *pool_count = le32_to_cpu(rsp_params->pool_count);
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_get_pool() - Get the type (string) of a certain dprc's pool
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @pool_index; Index of the pool to be queried (< pool_count)
+- * @type: The type of the pool
+- *
+- * The pool types retrieved one by one by incrementing
+- * pool_index up to (not including) the value of pool_count returned
+- * from dprc_get_pool_count(). dprc_get_pool_count() must
+- * be called prior to dprc_get_pool().
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_pool *cmd_params;
+- struct dprc_rsp_get_pool *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+- cmd_params->pool_index = cpu_to_le32(pool_index);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+- strncpy(type, rsp_params->type, 16);
+- type[15] = '\0';
+-
+- return 0;
+-}
+-
+-/**
+- * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_count: Number of objects assigned to the DPRC
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *obj_count)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_rsp_get_obj_count *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+- cmd_flags, token);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+- *obj_count = le32_to_cpu(rsp_params->obj_count);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_count);
+-
+-/**
+- * dprc_get_obj() - Get general information on an object
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_index: Index of the object to be queried (< obj_count)
+- * @obj_desc: Returns the requested object descriptor
+- *
+- * The object descriptors are retrieved one by one by incrementing
+- * obj_index up to (not including) the value of obj_count returned
+- * from dprc_get_obj_count(). dprc_get_obj_count() must
+- * be called prior to dprc_get_obj().
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_obj(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int obj_index,
+- struct dprc_obj_desc *obj_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj *cmd_params;
+- struct dprc_rsp_get_obj *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+- cmd_params->obj_index = cpu_to_le32(obj_index);
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+- obj_desc->id = le32_to_cpu(rsp_params->id);
+- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+- obj_desc->irq_count = rsp_params->irq_count;
+- obj_desc->region_count = rsp_params->region_count;
+- obj_desc->state = le32_to_cpu(rsp_params->state);
+- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+- obj_desc->flags = le16_to_cpu(rsp_params->flags);
+- strncpy(obj_desc->type, rsp_params->type, 16);
+- obj_desc->type[15] = '\0';
+- strncpy(obj_desc->label, rsp_params->label, 16);
+- obj_desc->label[15] = '\0';
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj);
+-
+-/**
+- * dprc_get_obj_desc() - Get object descriptor.
+- *
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: The type of the object to get its descriptor.
+- * @obj_id: The id of the object to get its descriptor
+- * @obj_desc: The returned descriptor to fill and return to the user
+- *
+- * Return: '0' on Success; Error code otherwise.
+- *
+- */
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj_desc *cmd_params;
+- struct dprc_rsp_get_obj_desc *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->type, obj_type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+- obj_desc->id = le32_to_cpu(rsp_params->id);
+- obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+- obj_desc->irq_count = rsp_params->irq_count;
+- obj_desc->region_count = rsp_params->region_count;
+- obj_desc->state = le32_to_cpu(rsp_params->state);
+- obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+- obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+- obj_desc->flags = le16_to_cpu(rsp_params->flags);
+- strncpy(obj_desc->type, rsp_params->type, 16);
+- obj_desc->type[15] = '\0';
+- strncpy(obj_desc->label, rsp_params->label, 16);
+- obj_desc->label[15] = '\0';
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_desc);
+-
+-/**
+- * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: Type of the object to set its IRQ
+- * @obj_id: ID of the object to set its IRQ
+- * @irq_index: The interrupt index to configure
+- * @irq_cfg: IRQ configuration
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_obj_irq *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+- cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+- cmd_params->irq_index = irq_index;
+- cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+- cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dprc_set_obj_irq);
+-
+-/**
+- * dprc_get_obj_irq() - Get IRQ information from object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: Type od the object to get its IRQ
+- * @obj_id: ID of the object to get its IRQ
+- * @irq_index: The interrupt index to configure
+- * @type: Interrupt type: 0 represents message interrupt
+- * type (both irq_addr and irq_val are valid)
+- * @irq_cfg: The returned IRQ attributes
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj_irq *cmd_params;
+- struct dprc_rsp_get_obj_irq *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- cmd_params->irq_index = irq_index;
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
+- irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+- irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+- irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+- *type = le32_to_cpu(rsp_params->type);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_irq);
+-
+-/**
+- * dprc_get_res_count() - Obtains the number of free resources that are assigned
+- * to this container, by pool type
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @type: pool type
+- * @res_count: Returned number of free resources of the given
+- * resource type that are assigned to this DPRC
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- int *res_count)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_count *cmd_params;
+- struct dprc_rsp_get_res_count *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
+- *res_count = le32_to_cpu(rsp_params->res_count);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_res_count);
+-
+-/**
+- * dprc_get_res_ids() - Obtains IDs of free resources in the container
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @type: pool type
+- * @range_desc: range descriptor
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_res_ids *cmd_params;
+- struct dprc_rsp_get_res_ids *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+- cmd_params->iter_status = range_desc->iter_status;
+- cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+- cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+- strncpy(cmd_params->type, type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+- range_desc->iter_status = rsp_params->iter_status;
+- range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+- range_desc->last_id = le32_to_cpu(rsp_params->last_id);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_res_ids);
+-
+-/**
+- * dprc_get_obj_region() - Get region information for a specified object.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type; Object type as returned in dprc_get_obj()
+- * @obj_id: Unique object instance as returned in dprc_get_obj()
+- * @region_index: The specific region to query
+- * @region_desc: Returns the requested region descriptor
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 region_index,
+- struct dprc_region_desc *region_desc)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_obj_region *cmd_params;
+- struct dprc_rsp_get_obj_region *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+- cmd_flags, token);
+- cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- cmd_params->region_index = region_index;
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+- region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+- region_desc->size = le32_to_cpu(rsp_params->size);
+-
+- return 0;
+-}
+-EXPORT_SYMBOL(dprc_get_obj_region);
+-
+-/**
+- * dprc_set_obj_label() - Set object label.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @obj_type: Object's type
+- * @obj_id: Object's ID
+- * @label: The required label. The maximum length is 16 chars.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_set_obj_label *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+- cmd_params->obj_id = cpu_to_le32(obj_id);
+- strncpy(cmd_params->label, label, 16);
+- cmd_params->label[15] = '\0';
+- strncpy(cmd_params->obj_type, obj_type, 16);
+- cmd_params->obj_type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-EXPORT_SYMBOL(dprc_set_obj_label);
+-
+-/**
+- * dprc_connect() - Connect two endpoints to create a network link between them
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Endpoint 2 configuration parameters
+- * @cfg: Connection configuration. The connection configuration is ignored for
+- * connections made to DPMAC objects, where rate is retrieved from the
+- * MAC configuration.
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_connect *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_connect *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+- cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
+- cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+- cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+- strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+- cmd_params->ep2_type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_disconnect() - Disconnect one endpoint to remove its network connection
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint: Endpoint configuration parameters
+- *
+- * Return: '0' on Success; Error code otherwise.
+- */
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_disconnect *cmd_params;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+- cmd_params->id = cpu_to_le32(endpoint->id);
+- cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+- strncpy(cmd_params->type, endpoint->type, 16);
+- cmd_params->type[15] = '\0';
+-
+- /* send command to mc*/
+- return mc_send_command(mc_io, &cmd);
+-}
+-
+-/**
+- * dprc_get_connection() - Get connected endpoint and link status if connection
+- * exists.
+- * @mc_io: Pointer to MC portal's I/O object
+- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+- * @token: Token of DPRC object
+- * @endpoint1: Endpoint 1 configuration parameters
+- * @endpoint2: Returned endpoint 2 configuration parameters
+- * @state: Returned link state:
+- * 1 - link is up;
+- * 0 - link is down;
+- * -1 - no connection (endpoint2 information is irrelevant)
+- *
+- * Return: '0' on Success; -ENAVAIL if connection does not exist.
+- */
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state)
+-{
+- struct mc_command cmd = { 0 };
+- struct dprc_cmd_get_connection *cmd_params;
+- struct dprc_rsp_get_connection *rsp_params;
+- int err;
+-
+- /* prepare command */
+- cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+- cmd_flags,
+- token);
+- cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+- cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+- cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+- strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+- cmd_params->ep1_type[15] = '\0';
+-
+- /* send command to mc*/
+- err = mc_send_command(mc_io, &cmd);
+- if (err)
+- return err;
+-
+- /* retrieve response parameters */
+- rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+- endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+- endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+- strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+- endpoint2->type[15] = '\0';
+- *state = le32_to_cpu(rsp_params->state);
+-
+- return 0;
+-}
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
++++ /dev/null
+@@ -1,52 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus private declarations
+- *
+- * Copyright (C) 2016 Freescale Semiconductor, Inc.
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-#ifndef _FSL_MC_PRIVATE_H_
+-#define _FSL_MC_PRIVATE_H_
+-
+-int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+- struct fsl_mc_io *mc_io,
+- struct device *parent_dev,
+- struct fsl_mc_device **new_mc_dev);
+-
+-void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+-
+-int __init dprc_driver_init(void);
+-
+-void dprc_driver_exit(void);
+-
+-int __init fsl_mc_allocator_driver_init(void);
+-
+-void fsl_mc_allocator_driver_exit(void);
+-
+-int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+- enum fsl_mc_pool_type pool_type,
+- struct fsl_mc_resource
+- **new_resource);
+-
+-void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+-
+-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+- unsigned int irq_count);
+-
+-void fsl_mc_msi_domain_free_irqs(struct device *dev);
+-
+-int __init its_fsl_mc_msi_init(void);
+-
+-void its_fsl_mc_msi_cleanup(void);
+-
+-int __must_check fsl_create_mc_io(struct device *dev,
+- phys_addr_t mc_portal_phys_addr,
+- u32 mc_portal_size,
+- struct fsl_mc_device *dpmcp_dev,
+- u32 flags, struct fsl_mc_io **new_mc_io);
+-
+-void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+-
+-#endif /* _FSL_MC_PRIVATE_H_ */
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
-@@ -0,0 +1,706 @@
+@@ -0,0 +1,681 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPAA2_FD_H
+#define __FSL_DPAA2_FD_H
+ */
+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
+{
-+ return le64_to_cpu((dma_addr_t)sg->addr);
++ return (dma_addr_t)le64_to_cpu(sg->addr);
+}
+
+/**
+ */
+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
+{
-+ sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK
-+ << SG_FINAL_FLAG_SHIFT));
++ sg->format_offset &= cpu_to_le16((~(SG_FINAL_FLAG_MASK
++ << SG_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
+}
+
+#endif /* __FSL_DPAA2_FD_H */
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
-@@ -0,0 +1,202 @@
+@@ -0,0 +1,177 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPAA2_GLOBAL_H
+#define __FSL_DPAA2_GLOBAL_H
+#define DQ_FQID_MASK 0x00FFFFFF
+#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
+
-+/**
-+ * dpaa2_dq_flags() - Get the stat field of dequeue response
-+ * @dq: the dequeue result.
-+ */
-+static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
-+{
-+ return dq->dq.stat;
-+}
++/**
++ * dpaa2_dq_flags() - Get the stat field of dequeue response
++ * @dq: the dequeue result.
++ */
++static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
++{
++ return dq->dq.stat;
++}
++
++/**
++ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
++ * command.
++ * @dq: the dequeue result
++ *
++ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
++ */
++static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
++{
++ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
++}
++
++/**
++ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
++ * @dq: the dequeue result
++ *
++ * Return boolean.
++ */
++static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
++{
++ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
++}
++
++/**
++ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
++ * @dq: the dequeue result
++ *
++ * seqnum is valid only if VALIDFRAME flag is TRUE
++ *
++ * Return seqnum.
++ */
++static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.seqnum);
++}
++
++/**
++ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
++ * @dq: the dequeue result
++ *
++ * odpid is valid only if ODPVALID flag is TRUE.
++ *
++ * Return odpid.
++ */
++static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
++{
++ return le16_to_cpu(dq->dq.oprid);
++}
++
++/**
++ * dpaa2_dq_fqid() - Get the fqid in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return fqid.
++ */
++static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
++}
++
++/**
++ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the byte count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_byte_cnt);
++}
++
++/**
++ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame count remaining in the FQ.
++ */
++static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
++{
++ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
++}
++
++/**
++ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame queue context.
++ */
++static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++{
++ return le64_to_cpu(dq->dq.fqd_ctx);
++}
++
++/**
++ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
++ * @dq: the dequeue result
++ *
++ * Return the frame descriptor.
++ */
++static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
++{
++ return (const struct dpaa2_fd *)&dq->dq.fd[0];
++}
++
++#endif /* __FSL_DPAA2_GLOBAL_H */
+--- /dev/null
++++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
+@@ -0,0 +1,178 @@
++/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
++/*
++ * Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ */
++#ifndef __FSL_DPAA2_IO_H
++#define __FSL_DPAA2_IO_H
++
++#include <linux/types.h>
++#include <linux/cpumask.h>
++
++#include "dpaa2-fd.h"
++#include "dpaa2-global.h"
++
++struct dpaa2_io;
++struct dpaa2_io_store;
++struct device;
++
++/**
++ * DOC: DPIO Service
++ *
++ * The DPIO service provides APIs for users to interact with the datapath
++ * by enqueueing and dequeing frame descriptors.
++ *
++ * The following set of APIs can be used to enqueue and dequeue frames
++ * as well as producing notification callbacks when data is available
++ * for dequeue.
++ */
++
++#define DPAA2_IO_ANY_CPU -1
++
++/**
++ * struct dpaa2_io_desc - The DPIO descriptor
++ * @receives_notifications: Use notificaton mode. Non-zero if the DPIO
++ * has a channel.
++ * @has_8prio: Set to non-zero for channel with 8 priority WQs. Ignored
++ * unless receives_notification is TRUE.
++ * @cpu: The cpu index that at least interrupt handlers will
++ * execute on.
++ * @stash_affinity: The stash affinity for this portal favour 'cpu'
++ * @regs_cena: The cache enabled regs.
++ * @regs_cinh: The cache inhibited regs
++ * @dpio_id: The dpio index
++ * @qman_version: The qman version
++ *
++ * Describes the attributes and features of the DPIO object.
++ */
++struct dpaa2_io_desc {
++ int receives_notifications;
++ int has_8prio;
++ int cpu;
++ void *regs_cena;
++ void *regs_cinh;
++ int dpio_id;
++ u32 qman_version;
++};
++
++struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
++
++void dpaa2_io_down(struct dpaa2_io *d);
++
++irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
++
++struct dpaa2_io *dpaa2_io_service_select(int cpu);
++
++/**
++ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
++ * @cb: The callback to be invoked when the notification arrives
++ * @is_cdan: Zero for FQDAN, non-zero for CDAN
++ * @id: FQID or channel ID, needed for rearm
++ * @desired_cpu: The cpu on which the notifications will show up. Use
++ * DPAA2_IO_ANY_CPU if don't care
++ * @dpio_id: The dpio index
++ * @qman64: The 64-bit context value shows up in the FQDAN/CDAN.
++ * @node: The list node
++ * @dpio_private: The dpio object internal to dpio_service
++ *
++ * Used when a FQDAN/CDAN registration is made by drivers.
++ */
++struct dpaa2_io_notification_ctx {
++ void (*cb)(struct dpaa2_io_notification_ctx *ctx);
++ int is_cdan;
++ u32 id;
++ int desired_cpu;
++ int dpio_id;
++ u64 qman64;
++ struct list_head node;
++ void *dpio_private;
++};
++
++int dpaa2_io_service_register(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++void dpaa2_io_service_deregister(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++int dpaa2_io_service_rearm(struct dpaa2_io *service,
++ struct dpaa2_io_notification_ctx *ctx);
++
++int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
++ struct dpaa2_io_store *s);
++int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
++ struct dpaa2_io_store *s);
++
++int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
++ const struct dpaa2_fd *fd);
++int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
++ u16 qdbin, const struct dpaa2_fd *fd);
++int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
++ const u64 *buffers, unsigned int num_buffers);
++int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
++ u64 *buffers, unsigned int num_buffers);
++
++struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
++ struct device *dev);
++void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
++struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+
-+/**
-+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
-+ * command.
-+ * @dq: the dequeue result
-+ *
-+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
-+ */
-+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
-+{
-+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
-+}
++/* Order Restoration Support */
++int dpaa2_io_service_enqueue_orp_fq(struct dpaa2_io *d, u32 fqid,
++ const struct dpaa2_fd *fd, u16 orpid,
++ u16 seqnum, int last);
+
-+/**
-+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
-+ * @dq: the dequeue result
-+ *
-+ * Return boolean.
-+ */
-+static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
-+{
-+ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
-+}
++int dpaa2_io_service_enqueue_orp_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
++ u16 qdbin, const struct dpaa2_fd *fd,
++ u16 orpid, u16 seqnum, int last);
+
-+/**
-+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * seqnum is valid only if VALIDFRAME flag is TRUE
-+ *
-+ * Return seqnum.
-+ */
-+static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
-+{
-+ return le16_to_cpu(dq->dq.seqnum);
-+}
++int dpaa2_io_service_orp_seqnum_drop(struct dpaa2_io *d, u16 orpid,
++ u16 seqnum);
+
-+/**
-+ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * odpid is valid only if ODPVALID flag is TRUE.
-+ *
-+ * Return odpid.
-+ */
-+static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
-+{
-+ return le16_to_cpu(dq->dq.oprid);
-+}
++/***************/
++/* CSCN */
++/***************/
+
+/**
-+ * dpaa2_dq_fqid() - Get the fqid in dequeue response
-+ * @dq: the dequeue result
++ * struct dpaa2_cscn - The CSCN message format
++ * @verb: identifies the type of message (should be 0x27).
++ * @stat: status bits related to dequeuing response (not used)
++ * @state: bit 0 = 0/1 if CG is no/is congested
++ * @reserved: reserved byte
++ * @cgid: congest grp ID - the first 16 bits
++ * @ctx: context data
+ *
-+ * Return fqid.
++ * Congestion management can be implemented in software through
++ * the use of Congestion State Change Notifications (CSCN). These
++ * are messages written by DPAA2 hardware to memory whenever the
++ * instantaneous count (I_CNT field in the CG) exceeds the
++ * Congestion State (CS) entrance threshold, signifying congestion
++ * entrance, or when the instantaneous count returns below exit
++ * threshold, signifying congestion exit. The format of the message
++ * is given by the dpaa2_cscn structure. Bit 0 of the state field
++ * represents congestion state written by the hardware.
+ */
-+static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
-+{
-+ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
-+}
++struct dpaa2_cscn {
++ u8 verb;
++ u8 stat;
++ u8 state;
++ u8 reserved;
++ __le32 cgid;
++ __le64 ctx;
++};
+
-+/**
-+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * Return the byte count remaining in the FQ.
-+ */
-+static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
-+{
-+ return le32_to_cpu(dq->dq.fq_byte_cnt);
-+}
++#define DPAA2_CSCN_SIZE 64
++#define DPAA2_CSCN_ALIGN 16
+
-+/**
-+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * Return the frame count remaining in the FQ.
-+ */
-+static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
-+{
-+ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
-+}
++#define DPAA2_CSCN_STATE_MASK 0x1
++#define DPAA2_CSCN_CONGESTED 1
+
-+/**
-+ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * Return the frame queue context.
-+ */
-+static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
++static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn)
+{
-+ return le64_to_cpu(dq->dq.fqd_ctx);
++ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED);
+}
+
-+/**
-+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
-+ * @dq: the dequeue result
-+ *
-+ * Return the frame descriptor.
-+ */
-+static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
-+{
-+ return (const struct dpaa2_fd *)&dq->dq.fd[0];
-+}
++int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
++ u32 *fcnt, u32 *bcnt);
++int dpaa2_io_query_bp_count(struct dpaa2_io *d, u32 bpid,
++ u32 *num);
+
-+#endif /* __FSL_DPAA2_GLOBAL_H */
++#endif /* __FSL_DPAA2_IO_H */
+--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
++++ /dev/null
+@@ -1,185 +0,0 @@
+-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPBP_CMD_H
+-#define _FSL_DPBP_CMD_H
+-
+-/* DPBP Version */
+-#define DPBP_VER_MAJOR 2
+-#define DPBP_VER_MINOR 2
+-
+-/* Command IDs */
+-#define DPBP_CMDID_CLOSE 0x800
+-#define DPBP_CMDID_OPEN 0x804
+-#define DPBP_CMDID_CREATE 0x904
+-#define DPBP_CMDID_DESTROY 0x900
+-
+-#define DPBP_CMDID_ENABLE 0x002
+-#define DPBP_CMDID_DISABLE 0x003
+-#define DPBP_CMDID_GET_ATTR 0x004
+-#define DPBP_CMDID_RESET 0x005
+-#define DPBP_CMDID_IS_ENABLED 0x006
+-
+-#define DPBP_CMDID_SET_IRQ 0x010
+-#define DPBP_CMDID_GET_IRQ 0x011
+-#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPBP_CMDID_SET_IRQ_MASK 0x014
+-#define DPBP_CMDID_GET_IRQ_MASK 0x015
+-#define DPBP_CMDID_GET_IRQ_STATUS 0x016
+-#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+-#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+-
+-struct dpbp_cmd_open {
+- __le32 dpbp_id;
+-};
+-
+-#define DPBP_ENABLE 0x1
+-
+-struct dpbp_rsp_is_enabled {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq {
+- /* cmd word 0 */
+- u8 irq_index;
+- u8 pad[3];
+- __le32 irq_val;
+- /* cmd word 1 */
+- __le64 irq_addr;
+- /* cmd word 2 */
+- __le32 irq_num;
+-};
+-
+-struct dpbp_cmd_get_irq {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq {
+- /* response word 0 */
+- __le32 irq_val;
+- __le32 pad;
+- /* response word 1 */
+- __le64 irq_addr;
+- /* response word 2 */
+- __le32 irq_num;
+- __le32 type;
+-};
+-
+-struct dpbp_cmd_set_irq_enable {
+- u8 enable;
+- u8 pad[3];
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_enable {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_enable {
+- u8 enabled;
+-};
+-
+-struct dpbp_cmd_set_irq_mask {
+- __le32 mask;
+- u8 irq_index;
+-};
+-
+-struct dpbp_cmd_get_irq_mask {
+- __le32 pad;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_mask {
+- __le32 mask;
+-};
+-
+-struct dpbp_cmd_get_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_irq_status {
+- __le32 status;
+-};
+-
+-struct dpbp_cmd_clear_irq_status {
+- __le32 status;
+- u8 irq_index;
+-};
+-
+-struct dpbp_rsp_get_attributes {
+- /* response word 0 */
+- __le16 pad;
+- __le16 bpid;
+- __le32 id;
+- /* response word 1 */
+- __le16 version_major;
+- __le16 version_minor;
+-};
+-
+-struct dpbp_cmd_set_notifications {
+- /* cmd word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* cmd word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* cmd word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* cmd word 3 */
+- __le64 message_ctx;
+- /* cmd word 4 */
+- __le64 message_iova;
+-};
+-
+-struct dpbp_rsp_get_notifications {
+- /* response word 0 */
+- __le32 depletion_entry;
+- __le32 depletion_exit;
+- /* response word 1 */
+- __le32 surplus_entry;
+- __le32 surplus_exit;
+- /* response word 2 */
+- __le16 options;
+- __le16 pad[3];
+- /* response word 3 */
+- __le64 message_ctx;
+- /* response word 4 */
+- __le64 message_iova;
+-};
+-
+-#endif /* _FSL_DPBP_CMD_H */
+--- a/drivers/staging/fsl-mc/include/dpbp.h
++++ /dev/null
+@@ -1,220 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef __FSL_DPBP_H
+-#define __FSL_DPBP_H
+-
+-/* Data Path Buffer Pool API
+- * Contains initialization APIs and runtime control APIs for DPBP
+- */
+-
+-struct fsl_mc_io;
+-
+-int dpbp_open(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int dpbp_id,
+- u16 *token);
+-
+-int dpbp_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-/**
+- * struct dpbp_cfg - Structure representing DPBP configuration
+- * @options: place holder
+- */
+-struct dpbp_cfg {
+- u32 options;
+-};
+-
+-int dpbp_create(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- const struct dpbp_cfg *cfg,
+- u16 *token);
+-
+-int dpbp_destroy(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-int dpbp_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-int dpbp_disable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *en);
+-
+-int dpbp_reset(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-/**
+- * struct dpbp_irq_cfg - IRQ configuration
+- * @addr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dpbp_irq_cfg {
+- u64 addr;
+- u32 val;
+- int irq_num;
+-};
+-
+-int dpbp_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dpbp_irq_cfg *irq_cfg);
+-
+-int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
+-
+-/**
+- * struct dpbp_attr - Structure representing DPBP attributes
+- * @id: DPBP object ID
+- * @version: DPBP version
+- * @bpid: Hardware buffer pool ID; should be used as an argument in
+- * acquire/release operations on buffers
+- */
+-struct dpbp_attr {
+- int id;
+- /**
+- * struct version - Structure representing DPBP version
+- * @major: DPBP major version
+- * @minor: DPBP minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+- u16 bpid;
+-};
+-
+-int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_attr *attr);
+-
+-/**
+- * DPBP notifications options
+- */
+-
+-/**
+- * BPSCN write will attempt to allocate into a cache (coherent write)
+- */
+-#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+-
+-/**
+- * struct dpbp_notification_cfg - Structure representing DPBP notifications
+- * towards software
+- * @depletion_entry: below this threshold the pool is "depleted";
+- * set it to '0' to disable it
+- * @depletion_exit: greater than or equal to this threshold the pool exit its
+- * "depleted" state
+- * @surplus_entry: above this threshold the pool is in "surplus" state;
+- * set it to '0' to disable it
+- * @surplus_exit: less than or equal to this threshold the pool exit its
+- * "surplus" state
+- * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+- * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
+- * must be 16B aligned.
+- * @message_ctx: The context that will be part of the BPSCN message and will
+- * be written to 'message_iova'
+- * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+- */
+-struct dpbp_notification_cfg {
+- u32 depletion_entry;
+- u32 depletion_exit;
+- u32 surplus_entry;
+- u32 surplus_exit;
+- u64 message_iova;
+- u64 message_ctx;
+- u16 options;
+-};
+-
+-int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
+-
+-int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dpbp_notification_cfg *cfg);
+-
+-/** @} */
+-
+-#endif /* __FSL_DPBP_H */
+--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
++++ /dev/null
+@@ -1,62 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPCON_CMD_H
+-#define _FSL_DPCON_CMD_H
+-
+-/* DPCON Version */
+-#define DPCON_VER_MAJOR 2
+-#define DPCON_VER_MINOR 1
+-
+-/* Command IDs */
+-#define DPCON_CMDID_CLOSE 0x800
+-#define DPCON_CMDID_OPEN 0x808
+-#define DPCON_CMDID_CREATE 0x908
+-#define DPCON_CMDID_DESTROY 0x900
+-
+-#define DPCON_CMDID_ENABLE 0x002
+-#define DPCON_CMDID_DISABLE 0x003
+-#define DPCON_CMDID_GET_ATTR 0x004
+-#define DPCON_CMDID_RESET 0x005
+-#define DPCON_CMDID_IS_ENABLED 0x006
+-
+-#define DPCON_CMDID_SET_IRQ 0x010
+-#define DPCON_CMDID_GET_IRQ 0x011
+-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
+-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
+-#define DPCON_CMDID_SET_IRQ_MASK 0x014
+-#define DPCON_CMDID_GET_IRQ_MASK 0x015
+-#define DPCON_CMDID_GET_IRQ_STATUS 0x016
+-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
+-
+-#define DPCON_CMDID_SET_NOTIFICATION 0x100
+-
+-#endif /* _FSL_DPCON_CMD_H */
+--- a/drivers/staging/fsl-mc/include/dpmng.h
++++ /dev/null
+@@ -1,69 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef __FSL_DPMNG_H
+-#define __FSL_DPMNG_H
+-
+-/* Management Complex General API
+- * Contains general API for the Management Complex firmware
+- */
+-
+-struct fsl_mc_io;
+-
+-/**
+- * Management Complex firmware version information
+- */
+-#define MC_VER_MAJOR 8
+-#define MC_VER_MINOR 0
+-
+-/**
+- * struct mc_version
+- * @major: Major version number: incremented on API compatibility changes
+- * @minor: Minor version number: incremented on API additions (that are
+- * backward compatible); reset when major version is incremented
+- * @revision: Internal revision number: incremented on implementation changes
+- * and/or bug fixes that have no impact on API
+- */
+-struct mc_version {
+- u32 major;
+- u32 minor;
+- u32 revision;
+-};
+-
+-int mc_get_version(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- struct mc_version *mc_ver_info);
+-
+-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int *container_id);
+-
+-#endif /* __FSL_DPMNG_H */
--- /dev/null
-+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
-@@ -0,0 +1,190 @@
++++ b/drivers/staging/fsl-mc/include/dpopr.h
+@@ -0,0 +1,112 @@
+/*
-+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPAA2_IO_H
-+#define __FSL_DPAA2_IO_H
-+
-+#include <linux/types.h>
-+#include <linux/cpumask.h>
-+
-+#include "dpaa2-fd.h"
-+#include "dpaa2-global.h"
-+
-+struct dpaa2_io;
-+struct dpaa2_io_store;
-+struct device;
-+
-+/**
-+ * DOC: DPIO Service
-+ *
-+ * The DPIO service provides APIs for users to interact with the datapath
-+ * by enqueueing and dequeing frame descriptors.
-+ *
-+ * The following set of APIs can be used to enqueue and dequeue frames
-+ * as well as producing notification callbacks when data is available
-+ * for dequeue.
-+ */
-+
-+/**
-+ * struct dpaa2_io_desc - The DPIO descriptor
-+ * @receives_notifications: Use notificaton mode. Non-zero if the DPIO
-+ * has a channel.
-+ * @has_8prio: Set to non-zero for channel with 8 priority WQs. Ignored
-+ * unless receives_notification is TRUE.
-+ * @cpu: The cpu index that at least interrupt handlers will
-+ * execute on.
-+ * @stash_affinity: The stash affinity for this portal favour 'cpu'
-+ * @regs_cena: The cache enabled regs.
-+ * @regs_cinh: The cache inhibited regs
-+ * @dpio_id: The dpio index
-+ * @qman_version: The qman version
-+ *
-+ * Describes the attributes and features of the DPIO object.
-+ */
-+struct dpaa2_io_desc {
-+ int receives_notifications;
-+ int has_8prio;
-+ int cpu;
-+ void *regs_cena;
-+ void *regs_cinh;
-+ int dpio_id;
-+ u32 qman_version;
-+};
-+
-+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
-+
-+void dpaa2_io_down(struct dpaa2_io *d);
-+
-+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
-+
-+/**
-+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
-+ * @cb: The callback to be invoked when the notification arrives
-+ * @is_cdan: Zero for FQDAN, non-zero for CDAN
-+ * @id: FQID or channel ID, needed for rearm
-+ * @desired_cpu: The cpu on which the notifications will show up. -1 means
-+ * any CPU.
-+ * @dpio_id: The dpio index
-+ * @qman64: The 64-bit context value shows up in the FQDAN/CDAN.
-+ * @node: The list node
-+ * @dpio_private: The dpio object internal to dpio_service
-+ *
-+ * Used when a FQDAN/CDAN registration is made by drivers.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+struct dpaa2_io_notification_ctx {
-+ void (*cb)(struct dpaa2_io_notification_ctx *);
-+ int is_cdan;
-+ u32 id;
-+ int desired_cpu;
-+ int dpio_id;
-+ u64 qman64;
-+ struct list_head node;
-+ void *dpio_private;
-+};
-+
-+int dpaa2_io_service_register(struct dpaa2_io *service,
-+ struct dpaa2_io_notification_ctx *ctx);
-+void dpaa2_io_service_deregister(struct dpaa2_io *service,
-+ struct dpaa2_io_notification_ctx *ctx);
-+int dpaa2_io_service_rearm(struct dpaa2_io *service,
-+ struct dpaa2_io_notification_ctx *ctx);
-+
-+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
-+ struct dpaa2_io_store *s);
-+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
-+ struct dpaa2_io_store *s);
-+
-+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
-+ const struct dpaa2_fd *fd);
-+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
-+ u16 qdbin, const struct dpaa2_fd *fd);
-+int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
-+ const u64 *buffers, unsigned int num_buffers);
-+int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
-+ u64 *buffers, unsigned int num_buffers);
-+
-+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
-+ struct device *dev);
-+void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
-+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
++#ifndef __FSL_DPOPR_H_
++#define __FSL_DPOPR_H_
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid,
-+ uint32_t *fcnt, uint32_t *bcnt);
-+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid,
-+ uint32_t *num);
-+#endif
++#include <linux/types.h>
+
++/* Data Path Order Restoration API
++ * Contains initialization APIs and runtime APIs for the Order Restoration
++ */
+
-+/***************/
-+/* CSCN */
-+/***************/
++/** Order Restoration properties */
+
+/**
-+ * struct dpaa2_cscn - The CSCN message format
-+ * @verb: identifies the type of message (should be 0x27).
-+ * @stat: status bits related to dequeuing response (not used)
-+ * @state: bit 0 = 0/1 if CG is no/is congested
-+ * @reserved: reserved byte
-+ * @cgid: congest grp ID - the first 16 bits
-+ * @ctx: context data
-+ *
-+ * Congestion management can be implemented in software through
-+ * the use of Congestion State Change Notifications (CSCN). These
-+ * are messages written by DPAA2 hardware to memory whenever the
-+ * instantaneous count (I_CNT field in the CG) exceeds the
-+ * Congestion State (CS) entrance threshold, signifying congestion
-+ * entrance, or when the instantaneous count returns below exit
-+ * threshold, signifying congestion exit. The format of the message
-+ * is given by the dpaa2_cscn structure. Bit 0 of the state field
-+ * represents congestion state written by the hardware.
++ * Create a new Order Point Record option
+ */
-+struct dpaa2_cscn {
-+ u8 verb;
-+ u8 stat;
-+ u8 state;
-+ u8 reserved;
-+ __le32 cgid;
-+ __le64 ctx;
-+};
-+
-+#define DPAA2_CSCN_SIZE 64
-+#define DPAA2_CSCN_ALIGN 16
++#define OPR_OPT_CREATE 0x1
++/**
++ * Retire an existing Order Point Record option
++ */
++#define OPR_OPT_RETIRE 0x2
+
-+#define DPAA2_CSCN_STATE_MASK 0x1
-+#define DPAA2_CSCN_CONGESTED 1
++/**
++ * struct opr_cfg - Structure representing OPR configuration
++ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
++ * 0 - Window size is 32 frames.
++ * 1 - Window size is 64 frames.
++ * 2 - Window size is 128 frames.
++ * 3 - Window size is 256 frames.
++ * 4 - Window size is 512 frames.
++ * 5 - Window size is 1024 frames.
++ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
++ * @olws: OPR acceptable late arrival window size (0 to 3)
++ * 0 - Disabled. Late arrivals are always rejected.
++ * 1 - Window size is 32 frames.
++ * 2 - Window size is the same as the OPR restoration
++ * window size configured in the OPRRWS field.
++ * 3 - Window size is 8192 frames. Late arrivals are
++ * always accepted.
++ * @oeane: Order restoration list (ORL) resource exhaustion
++ * advance NESN enable (0 disabled, 1 enabled)
++ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
++ */
++struct opr_cfg {
++ u8 oprrws;
++ u8 oa;
++ u8 olws;
++ u8 oeane;
++ u8 oloe;
++};
+
-+static inline bool dpaa2_cscn_state_congested(struct dpaa2_cscn *cscn)
-+{
-+ return ((cscn->state & DPAA2_CSCN_STATE_MASK) == DPAA2_CSCN_CONGESTED);
-+}
++/**
++ * struct opr_qry - Structure representing OPR configuration
++ * @enable: Enabled state
++ * @rip: Retirement In Progress
++ * @ndsn: Next dispensed sequence number
++ * @nesn: Next expected sequence number
++ * @ea_hseq: Early arrival head sequence number
++ * @hseq_nlis: HSEQ not last in sequence
++ * @ea_tseq: Early arrival tail sequence number
++ * @tseq_nlis: TSEQ not last in sequence
++ * @ea_tptr: Early arrival tail pointer
++ * @ea_hptr: Early arrival head pointer
++ * @opr_id: Order Point Record ID
++ * @opr_vid: Order Point Record Virtual ID
++ */
++struct opr_qry {
++ char enable;
++ char rip;
++ u16 ndsn;
++ u16 nesn;
++ u16 ea_hseq;
++ char hseq_nlis;
++ u16 ea_tseq;
++ char tseq_nlis;
++ u16 ea_tptr;
++ u16 ea_hptr;
++ u16 opr_id;
++ u16 opr_vid;
++};
+
-+#endif /* __FSL_DPAA2_IO_H */
---- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
++#endif /* __FSL_DPOPR_H_ */
+--- a/drivers/staging/fsl-mc/include/dprc.h
++++ /dev/null
+@@ -1,544 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
+-#ifndef _FSL_DPRC_H
+-#define _FSL_DPRC_H
+-
+-#include "mc-cmd.h"
+-
+-/* Data Path Resource Container API
+- * Contains DPRC API for managing and querying DPAA resources
+- */
+-
+-struct fsl_mc_io;
+-
+-/**
+- * Set this value as the icid value in dprc_cfg structure when creating a
+- * container, in case the ICID is not selected by the user and should be
+- * allocated by the DPRC from the pool of ICIDs.
+- */
+-#define DPRC_GET_ICID_FROM_POOL (u16)(~(0))
+-
+-/**
+- * Set this value as the portal_id value in dprc_cfg structure when creating a
+- * container, in case the portal ID is not specifically selected by the
+- * user and should be allocated by the DPRC from the pool of portal ids.
+- */
+-#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
+-
+-int dprc_open(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- int container_id,
+- u16 *token);
+-
+-int dprc_close(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token);
+-
+-/**
+- * Container general options
+- *
+- * These options may be selected at container creation by the container creator
+- * and can be retrieved using dprc_get_attributes()
+- */
+-
+-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+- * to spawn and have its own child containers.
+- */
+-#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
+-
+-/* General Container allocation policy - Indicates that the new container is
+- * allowed to allocate requested resources from its parent container; if not
+- * set, the container is only allowed to use resources in its own pools; Note
+- * that this is a container's global policy, but the parent container may
+- * override it and set specific quota per resource type.
+- */
+-#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
+-
+-/* Object initialization allowed - software context associated with this
+- * container is allowed to invoke object initialization operations.
+- */
+-#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
+-
+-/* Topology change allowed - software context associated with this
+- * container is allowed to invoke topology operations, such as attach/detach
+- * of network objects.
+- */
+-#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
+-
+-/* AIOP - Indicates that container belongs to AIOP. */
+-#define DPRC_CFG_OPT_AIOP 0x00000020
+-
+-/* IRQ Config - Indicates that the container allowed to configure its IRQs. */
+-#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
+-
+-/**
+- * struct dprc_cfg - Container configuration options
+- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
+- * ICID value is allocated by the DPRC
+- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
+- * portal ID is allocated by the DPRC
+- * @options: Combination of 'DPRC_CFG_OPT_<X>' options
+- * @label: Object's label
+- */
+-struct dprc_cfg {
+- u16 icid;
+- int portal_id;
+- u64 options;
+- char label[16];
+-};
+-
+-int dprc_create_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_cfg *cfg,
+- int *child_container_id,
+- u64 *child_portal_offset);
+-
+-int dprc_destroy_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+-
+-int dprc_reset_container(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id);
+-
+-/* IRQ */
+-
+-/* IRQ index */
+-#define DPRC_IRQ_INDEX 0
+-
+-/* Number of dprc's IRQs */
+-#define DPRC_NUM_OF_IRQS 1
+-
+-/* DPRC IRQ events */
+-
+-/* IRQ event - Indicates that a new object added to the container */
+-#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+-/* IRQ event - Indicates that an object was removed from the container */
+-#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+-/* IRQ event - Indicates that resources added to the container */
+-#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
+-/* IRQ event - Indicates that resources removed from the container */
+-#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
+-/* IRQ event - Indicates that one of the descendant containers that opened by
+- * this container is destroyed
+- */
+-#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+-
+-/* IRQ event - Indicates that on one of the container's opened object is
+- * destroyed
+- */
+-#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+-
+-/* Irq event - Indicates that object is created at the container */
+-#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+-
+-/**
+- * struct dprc_irq_cfg - IRQ configuration
+- * @paddr: Address that must be written to signal a message-based interrupt
+- * @val: Value to write into irq_addr address
+- * @irq_num: A user defined number associated with this IRQ
+- */
+-struct dprc_irq_cfg {
+- phys_addr_t paddr;
+- u32 val;
+- int irq_num;
+-};
+-
+-int dprc_set_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 en);
+-
+-int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u8 *en);
+-
+-int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 mask);
+-
+-int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *mask);
+-
+-int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 *status);
+-
+-int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- u8 irq_index,
+- u32 status);
+-
+-/**
+- * struct dprc_attributes - Container attributes
+- * @container_id: Container's ID
+- * @icid: Container's ICID
+- * @portal_id: Container's portal ID
+- * @options: Container's options as set at container's creation
+- * @version: DPRC version
+- */
+-struct dprc_attributes {
+- int container_id;
+- u16 icid;
+- int portal_id;
+- u64 options;
+- /**
+- * struct version - DPRC version
+- * @major: DPRC major version
+- * @minor: DPRC minor version
+- */
+- struct {
+- u16 major;
+- u16 minor;
+- } version;
+-};
+-
+-int dprc_get_attributes(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- struct dprc_attributes *attributes);
+-
+-int dprc_set_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 quota);
+-
+-int dprc_get_res_quota(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- char *type,
+- u16 *quota);
+-
+-/* Resource request options */
+-
+-/* Explicit resource ID request - The requested objects/resources
+- * are explicit and sequential (in case of resources).
+- * The base ID is given at res_req at base_align field
+- */
+-#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
+-
+-/* Aligned resources request - Relevant only for resources
+- * request (and not objects). Indicates that resources base ID should be
+- * sequential and aligned to the value given at dprc_res_req base_align field
+- */
+-#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
+-
+-/* Plugged Flag - Relevant only for object assignment request.
+- * Indicates that after all objects assigned. An interrupt will be invoked at
+- * the relevant GPP. The assigned object will be marked as plugged.
+- * plugged objects can't be assigned from their container
+- */
+-#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
+-
+-/**
+- * struct dprc_res_req - Resource request descriptor, to be used in assignment
+- * or un-assignment of resources and objects.
+- * @type: Resource/object type: Represent as a NULL terminated string.
+- * This string may received by using dprc_get_pool() to get resource
+- * type and dprc_get_obj() to get object type;
+- * Note: it is not possible to assign/un-assign DPRC objects
+- * @num: Number of resources
+- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
+- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
+- * is set at option), this field represents the required base ID
+- * for resource allocation; In case of aligned assignment
+- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
+- * indicates the required alignment for the resource ID(s) -
+- * use 0 if there is no alignment or explicit ID requirements
+- */
+-struct dprc_res_req {
+- char type[16];
+- u32 num;
+- u32 options;
+- int id_base_align;
+-};
+-
+-int dprc_assign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_unassign(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int child_container_id,
+- struct dprc_res_req *res_req);
+-
+-int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *pool_count);
+-
+-int dprc_get_pool(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int pool_index,
+- char *type);
+-
+-int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int *obj_count);
+-
+-/* Objects Attributes Flags */
+-
+-/* Opened state - Indicates that an object is open by at least one owner */
+-#define DPRC_OBJ_STATE_OPEN 0x00000001
+-/* Plugged state - Indicates that the object is plugged */
+-#define DPRC_OBJ_STATE_PLUGGED 0x00000002
+-
+-/**
+- * Shareability flag - Object flag indicating no memory shareability.
+- * the object generates memory accesses that are non coherent with other
+- * masters;
+- * user is responsible for proper memory handling through IOMMU configuration.
+- */
+-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+-
+-/**
+- * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
+- * @type: Type of object: NULL terminated string
+- * @id: ID of logical object resource
+- * @vendor: Object vendor identifier
+- * @ver_major: Major version number
+- * @ver_minor: Minor version number
+- * @irq_count: Number of interrupts supported by the object
+- * @region_count: Number of mappable regions supported by the object
+- * @state: Object state: combination of DPRC_OBJ_STATE_ states
+- * @label: Object label
+- * @flags: Object's flags
+- */
+-struct dprc_obj_desc {
+- char type[16];
+- int id;
+- u16 vendor;
+- u16 ver_major;
+- u16 ver_minor;
+- u8 irq_count;
+- u8 region_count;
+- u32 state;
+- char label[16];
+- u16 flags;
+-};
+-
+-int dprc_get_obj(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- int obj_index,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- struct dprc_obj_desc *obj_desc);
+-
+-int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 irq_index,
+- int *type,
+- struct dprc_irq_cfg *irq_cfg);
+-
+-int dprc_get_res_count(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- int *res_count);
+-
+-/**
+- * enum dprc_iter_status - Iteration status
+- * @DPRC_ITER_STATUS_FIRST: Perform first iteration
+- * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed
+- * @DPRC_ITER_STATUS_LAST: Indicates last iteration
+- */
+-enum dprc_iter_status {
+- DPRC_ITER_STATUS_FIRST = 0,
+- DPRC_ITER_STATUS_MORE = 1,
+- DPRC_ITER_STATUS_LAST = 2
+-};
+-
+-/**
+- * struct dprc_res_ids_range_desc - Resource ID range descriptor
+- * @base_id: Base resource ID of this range
+- * @last_id: Last resource ID of this range
+- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
+- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
+- * additional iterations are needed, until the returned marker is
+- * DPRC_ITER_STATUS_LAST
+- */
+-struct dprc_res_ids_range_desc {
+- int base_id;
+- int last_id;
+- enum dprc_iter_status iter_status;
+-};
+-
+-int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *type,
+- struct dprc_res_ids_range_desc *range_desc);
+-
+-/* Region flags */
+-/* Cacheable - Indicates that region should be mapped as cacheable */
+-#define DPRC_REGION_CACHEABLE 0x00000001
+-
+-/**
+- * enum dprc_region_type - Region type
+- * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+- * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+- */
+-enum dprc_region_type {
+- DPRC_REGION_TYPE_MC_PORTAL,
+- DPRC_REGION_TYPE_QBMAN_PORTAL
+-};
+-
+-/**
+- * struct dprc_region_desc - Mappable region descriptor
+- * @base_offset: Region offset from region's base address.
+- * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+- * base address; For DPIO, region base is offset from SoC QMan portals
+- * base address
+- * @size: Region size (in bytes)
+- * @flags: Region attributes
+- * @type: Portal region type
+- */
+-struct dprc_region_desc {
+- u32 base_offset;
+- u32 size;
+- u32 flags;
+- enum dprc_region_type type;
+-};
+-
+-int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- u8 region_index,
+- struct dprc_region_desc *region_desc);
+-
+-int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- char *obj_type,
+- int obj_id,
+- char *label);
+-
+-/**
+- * struct dprc_endpoint - Endpoint description for link connect/disconnect
+- * operations
+- * @type: Endpoint object type: NULL terminated string
+- * @id: Endpoint object ID
+- * @if_id: Interface ID; should be set for endpoints with multiple
+- * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+- */
+-struct dprc_endpoint {
+- char type[16];
+- int id;
+- int if_id;
+-};
+-
+-/**
+- * struct dprc_connection_cfg - Connection configuration.
+- * Used for virtual connections only
+- * @committed_rate: Committed rate (Mbits/s)
+- * @max_rate: Maximum rate (Mbits/s)
+- */
+-struct dprc_connection_cfg {
+- u32 committed_rate;
+- u32 max_rate;
+-};
+-
+-int dprc_connect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- const struct dprc_endpoint *endpoint2,
+- const struct dprc_connection_cfg *cfg);
+-
+-int dprc_disconnect(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint);
+-
+-int dprc_get_connection(struct fsl_mc_io *mc_io,
+- u32 cmd_flags,
+- u16 token,
+- const struct dprc_endpoint *endpoint1,
+- struct dprc_endpoint *endpoint2,
+- int *state);
+-
+-#endif /* _FSL_DPRC_H */
+-
+--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ /dev/null
-@@ -1,185 +0,0 @@
--/* Copyright 2013-2016 Freescale Semiconductor Inc.
+@@ -1,111 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus declarations
+- *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-#ifndef _FSL_MC_MCBUS_H_
+-#define _FSL_MC_MCBUS_H_
+-
+-#include "../include/mc.h"
+-#include <linux/mutex.h>
+-
+-struct irq_domain;
+-struct msi_domain_info;
+-
+-/**
+- * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+- * IRQ pool
+- */
+-#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+-
+-#ifdef CONFIG_FSL_MC_BUS
+-#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+-#else
+-/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */
+-#define dev_is_fsl_mc(_dev) (0)
+-#endif
+-
+-/**
+- * struct fsl_mc_resource_pool - Pool of MC resources of a given
+- * type
+- * @type: type of resources in the pool
+- * @max_count: maximum number of resources in the pool
+- * @free_count: number of free resources in the pool
+- * @mutex: mutex to serialize access to the pool's free list
+- * @free_list: anchor node of list of free resources in the pool
+- * @mc_bus: pointer to the MC bus that owns this resource pool
+- */
+-struct fsl_mc_resource_pool {
+- enum fsl_mc_pool_type type;
+- int16_t max_count;
+- int16_t free_count;
+- struct mutex mutex; /* serializes access to free_list */
+- struct list_head free_list;
+- struct fsl_mc_bus *mc_bus;
+-};
+-
+-/**
+- * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+- * @mc_dev: fsl-mc device for the bus device itself.
+- * @resource_pools: array of resource pools (one pool per resource type)
+- * for this MC bus. These resources represent allocatable entities
+- * from the physical DPRC.
+- * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+- * @scan_mutex: Serializes bus scanning
+- * @dprc_attr: DPRC attributes
+- */
+-struct fsl_mc_bus {
+- struct fsl_mc_device mc_dev;
+- struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+- struct fsl_mc_device_irq *irq_resources;
+- struct mutex scan_mutex; /* serializes bus scanning */
+- struct dprc_attributes dprc_attr;
+-};
+-
+-#define to_fsl_mc_bus(_mc_dev) \
+- container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+-
+-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
+-
+-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+- unsigned int *total_irq_count);
+-
+-int __init dprc_driver_init(void);
+-
+-void dprc_driver_exit(void);
+-
+-int __init fsl_mc_allocator_driver_init(void);
+-
+-void fsl_mc_allocator_driver_exit(void);
+-
+-struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+- struct msi_domain_info *info,
+- struct irq_domain *parent);
+-
+-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+- struct irq_domain **mc_msi_domain);
+-
+-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+- unsigned int irq_count);
+-
+-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+-
+-void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+-
+-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+-
+-bool fsl_mc_bus_exists(void);
+-
+-void fsl_mc_get_root_dprc(struct device *dev,
+- struct device **root_dprc_dev);
+-
+-bool fsl_mc_is_root_dprc(struct device *dev);
+-
+-extern struct bus_type fsl_mc_bus_type;
+-
+-#endif /* _FSL_MC_MCBUS_H_ */
+--- a/drivers/staging/fsl-mc/include/mc-cmd.h
++++ /dev/null
+@@ -1,108 +0,0 @@
+-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
--#ifndef _FSL_DPBP_CMD_H
--#define _FSL_DPBP_CMD_H
--
--/* DPBP Version */
--#define DPBP_VER_MAJOR 2
--#define DPBP_VER_MINOR 2
--
--/* Command IDs */
--#define DPBP_CMDID_CLOSE 0x800
--#define DPBP_CMDID_OPEN 0x804
--#define DPBP_CMDID_CREATE 0x904
--#define DPBP_CMDID_DESTROY 0x900
--
--#define DPBP_CMDID_ENABLE 0x002
--#define DPBP_CMDID_DISABLE 0x003
--#define DPBP_CMDID_GET_ATTR 0x004
--#define DPBP_CMDID_RESET 0x005
--#define DPBP_CMDID_IS_ENABLED 0x006
--
--#define DPBP_CMDID_SET_IRQ 0x010
--#define DPBP_CMDID_GET_IRQ 0x011
--#define DPBP_CMDID_SET_IRQ_ENABLE 0x012
--#define DPBP_CMDID_GET_IRQ_ENABLE 0x013
--#define DPBP_CMDID_SET_IRQ_MASK 0x014
--#define DPBP_CMDID_GET_IRQ_MASK 0x015
--#define DPBP_CMDID_GET_IRQ_STATUS 0x016
--#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
--
--#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
--#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
--
--struct dpbp_cmd_open {
-- __le32 dpbp_id;
--};
--
--#define DPBP_ENABLE 0x1
--
--struct dpbp_rsp_is_enabled {
-- u8 enabled;
+-#ifndef __FSL_MC_CMD_H
+-#define __FSL_MC_CMD_H
+-
+-#define MC_CMD_NUM_OF_PARAMS 7
+-
+-struct mc_cmd_header {
+- u8 src_id;
+- u8 flags_hw;
+- u8 status;
+- u8 flags_sw;
+- __le16 token;
+- __le16 cmd_id;
-};
-
--struct dpbp_cmd_set_irq {
-- /* cmd word 0 */
-- u8 irq_index;
-- u8 pad[3];
-- __le32 irq_val;
-- /* cmd word 1 */
-- __le64 irq_addr;
-- /* cmd word 2 */
-- __le32 irq_num;
--};
--
--struct dpbp_cmd_get_irq {
-- __le32 pad;
-- u8 irq_index;
--};
--
--struct dpbp_rsp_get_irq {
-- /* response word 0 */
-- __le32 irq_val;
-- __le32 pad;
-- /* response word 1 */
-- __le64 irq_addr;
-- /* response word 2 */
-- __le32 irq_num;
-- __le32 type;
--};
--
--struct dpbp_cmd_set_irq_enable {
-- u8 enable;
-- u8 pad[3];
-- u8 irq_index;
--};
--
--struct dpbp_cmd_get_irq_enable {
-- __le32 pad;
-- u8 irq_index;
+-struct mc_command {
+- u64 header;
+- u64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
--struct dpbp_rsp_get_irq_enable {
-- u8 enabled;
+-enum mc_cmd_status {
+- MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+- MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+- MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+- MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+- MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+- MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+- MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+- MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+- MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+- MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+- MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+- MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
-};
-
--struct dpbp_cmd_set_irq_mask {
-- __le32 mask;
-- u8 irq_index;
--};
+-/*
+- * MC command flags
+- */
-
--struct dpbp_cmd_get_irq_mask {
-- __le32 pad;
-- u8 irq_index;
--};
+-/* High priority flag */
+-#define MC_CMD_FLAG_PRI 0x80
+-/* Command completion flag */
+-#define MC_CMD_FLAG_INTR_DIS 0x01
-
--struct dpbp_rsp_get_irq_mask {
-- __le32 mask;
--};
+-#define MC_CMD_HDR_CMDID_MASK 0xFFF0
+-#define MC_CMD_HDR_CMDID_SHIFT 4
+-#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
+-#define MC_CMD_HDR_TOKEN_SHIFT 6
-
--struct dpbp_cmd_get_irq_status {
-- __le32 status;
-- u8 irq_index;
--};
+-static inline u64 mc_encode_cmd_header(u16 cmd_id,
+- u32 cmd_flags,
+- u16 token)
+-{
+- u64 header = 0;
+- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
-
--struct dpbp_rsp_get_irq_status {
-- __le32 status;
--};
+- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+- MC_CMD_HDR_CMDID_MASK);
+- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+- MC_CMD_HDR_TOKEN_MASK);
+- hdr->status = MC_CMD_STATUS_READY;
+- if (cmd_flags & MC_CMD_FLAG_PRI)
+- hdr->flags_hw = MC_CMD_FLAG_PRI;
+- if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+- hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
-
--struct dpbp_cmd_clear_irq_status {
-- __le32 status;
-- u8 irq_index;
--};
+- return header;
+-}
-
--struct dpbp_rsp_get_attributes {
-- /* response word 0 */
-- __le16 pad;
-- __le16 bpid;
-- __le32 id;
-- /* response word 1 */
-- __le16 version_major;
-- __le16 version_minor;
--};
+-static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+-{
+- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+- u16 token = le16_to_cpu(hdr->token);
-
--struct dpbp_cmd_set_notifications {
-- /* cmd word 0 */
-- __le32 depletion_entry;
-- __le32 depletion_exit;
-- /* cmd word 1 */
-- __le32 surplus_entry;
-- __le32 surplus_exit;
-- /* cmd word 2 */
-- __le16 options;
-- __le16 pad[3];
-- /* cmd word 3 */
-- __le64 message_ctx;
-- /* cmd word 4 */
-- __le64 message_iova;
--};
+- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
+-}
-
--struct dpbp_rsp_get_notifications {
-- /* response word 0 */
-- __le32 depletion_entry;
-- __le32 depletion_exit;
-- /* response word 1 */
-- __le32 surplus_entry;
-- __le32 surplus_exit;
-- /* response word 2 */
-- __le16 options;
-- __le16 pad[3];
-- /* response word 3 */
-- __le64 message_ctx;
-- /* response word 4 */
-- __le64 message_iova;
--};
+-#endif /* __FSL_MC_CMD_H */
+--- a/drivers/staging/fsl-mc/include/mc-sys.h
++++ /dev/null
+@@ -1,98 +0,0 @@
+-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+- *
+- * Interface of the I/O services to send MC commands to the MC hardware
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the above-listed copyright holders nor the
+- * names of any contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+- * POSSIBILITY OF SUCH DAMAGE.
+- */
-
--#endif /* _FSL_DPBP_CMD_H */
---- a/drivers/staging/fsl-mc/include/dpbp.h
-+++ b/drivers/staging/fsl-mc/include/dpbp.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -32,7 +33,8 @@
- #ifndef __FSL_DPBP_H
- #define __FSL_DPBP_H
-
--/* Data Path Buffer Pool API
-+/*
-+ * Data Path Buffer Pool API
- * Contains initialization APIs and runtime control APIs for DPBP
- */
-
-@@ -44,25 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
- u16 *token);
-
- int dpbp_close(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token);
+-#ifndef _FSL_MC_SYS_H
+-#define _FSL_MC_SYS_H
+-
+-#include <linux/types.h>
+-#include <linux/errno.h>
+-#include <linux/mutex.h>
+-#include <linux/spinlock.h>
-
-/**
-- * struct dpbp_cfg - Structure representing DPBP configuration
-- * @options: place holder
+- * Bit masks for a MC I/O object (struct fsl_mc_io) flags
- */
--struct dpbp_cfg {
-- u32 options;
--};
+-#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
-
--int dpbp_create(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- const struct dpbp_cfg *cfg,
-- u16 *token);
+-struct fsl_mc_resource;
+-struct mc_command;
-
--int dpbp_destroy(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token);
-+ u32 cmd_flags,
-+ u16 token);
-
- int dpbp_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
-@@ -82,139 +67,24 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
- u16 token);
-
- /**
-- * struct dpbp_irq_cfg - IRQ configuration
-- * @addr: Address that must be written to signal a message-based interrupt
-- * @val: Value to write into irq_addr address
-- * @irq_num: A user defined number associated with this IRQ
+-/**
+- * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
+- * @dev: device associated with this Mc I/O object
+- * @flags: flags for mc_send_command()
+- * @portal_size: MC command portal size in bytes
+- * @portal_phys_addr: MC command portal physical address
+- * @portal_virt_addr: MC command portal virtual address
+- * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
+- *
+- * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
+- * set:
+- * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
+- * portal, if the fsl_mc_io object was created with the
+- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
+- * fsl_mc_io object must be made only from non-atomic context.
+- *
+- * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
+- * set:
+- * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
+- * portal, if the fsl_mc_io object was created with the
+- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
+- * fsl_mc_io object can be made from atomic or non-atomic context.
- */
--struct dpbp_irq_cfg {
-- u64 addr;
-- u32 val;
-- int irq_num;
+-struct fsl_mc_io {
+- struct device *dev;
+- u16 flags;
+- u16 portal_size;
+- phys_addr_t portal_phys_addr;
+- void __iomem *portal_virt_addr;
+- struct fsl_mc_device *dpmcp_dev;
+- union {
+- /*
+- * This field is only meaningful if the
+- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+- */
+- struct mutex mutex; /* serializes mc_send_command() */
+-
+- /*
+- * This field is only meaningful if the
+- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+- */
+- spinlock_t spinlock; /* serializes mc_send_command() */
+- };
-};
-
--int dpbp_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dpbp_irq_cfg *irq_cfg);
--
--int dpbp_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dpbp_irq_cfg *irq_cfg);
+-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
-
--int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en);
+-#endif /* _FSL_MC_SYS_H */
+--- a/drivers/staging/fsl-mc/include/mc.h
++++ /dev/null
+@@ -1,201 +0,0 @@
+-/*
+- * Freescale Management Complex (MC) bus public interface
+- *
+- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+- * Author: German Rivera <German.Rivera@freescale.com>
+- *
+- * This file is licensed under the terms of the GNU General Public
+- * License version 2. This program is licensed "as is" without any
+- * warranty of any kind, whether express or implied.
+- */
+-#ifndef _FSL_MC_H_
+-#define _FSL_MC_H_
-
--int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en);
+-#include <linux/device.h>
+-#include <linux/mod_devicetable.h>
+-#include <linux/interrupt.h>
+-#include "../include/dprc.h"
-
--int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask);
+-#define FSL_MC_VENDOR_FREESCALE 0x1957
-
--int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask);
+-struct fsl_mc_device;
+-struct fsl_mc_io;
-
--int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status);
+-/**
+- * struct fsl_mc_driver - MC object device driver object
+- * @driver: Generic device driver
+- * @match_id_table: table of supported device matching Ids
+- * @probe: Function called when a device is added
+- * @remove: Function called when a device is removed
+- * @shutdown: Function called at shutdown time to quiesce the device
+- * @suspend: Function called when a device is stopped
+- * @resume: Function called when a device is resumed
+- *
+- * Generic DPAA device driver object for device drivers that are registered
+- * with a DPRC bus. This structure is to be embedded in each device-specific
+- * driver structure.
+- */
+-struct fsl_mc_driver {
+- struct device_driver driver;
+- const struct fsl_mc_device_id *match_id_table;
+- int (*probe)(struct fsl_mc_device *dev);
+- int (*remove)(struct fsl_mc_device *dev);
+- void (*shutdown)(struct fsl_mc_device *dev);
+- int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
+- int (*resume)(struct fsl_mc_device *dev);
+-};
-
--int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 status);
+-#define to_fsl_mc_driver(_drv) \
+- container_of(_drv, struct fsl_mc_driver, driver)
-
-/**
- * struct dpbp_attr - Structure representing DPBP attributes
- * @id: DPBP object ID
-- * @version: DPBP version
- * @bpid: Hardware buffer pool ID; should be used as an argument in
- * acquire/release operations on buffers
- */
- struct dpbp_attr {
- int id;
-- /**
-- * struct version - Structure representing DPBP version
-- * @major: DPBP major version
-- * @minor: DPBP minor version
+- * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+- *
+- * Entries in these enum are used as indices in the array of resource
+- * pools of an fsl_mc_bus object.
+- */
+-enum fsl_mc_pool_type {
+- FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
+- FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
+- FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
+- FSL_MC_POOL_IRQ,
+-
+- /*
+- * NOTE: New resource pool types must be added before this entry
- */
-- struct {
-- u16 major;
-- u16 minor;
-- } version;
- u16 bpid;
- };
-
--int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_attr *attr);
+- FSL_MC_NUM_POOL_TYPES
+-};
-
-/**
-- * DPBP notifications options
+- * struct fsl_mc_resource - MC generic resource
+- * @type: type of resource
+- * @id: unique MC resource Id within the resources of the same type
+- * @data: pointer to resource-specific data if the resource is currently
+- * allocated, or NULL if the resource is not currently allocated.
+- * @parent_pool: pointer to the parent resource pool from which this
+- * resource is allocated from.
+- * @node: Node in the free list of the corresponding resource pool
+- *
+- * NOTE: This structure is to be embedded as a field of specific
+- * MC resource structures.
- */
+-struct fsl_mc_resource {
+- enum fsl_mc_pool_type type;
+- int32_t id;
+- void *data;
+- struct fsl_mc_resource_pool *parent_pool;
+- struct list_head node;
+-};
-
-/**
-- * BPSCN write will attempt to allocate into a cache (coherent write)
+- * struct fsl_mc_device_irq - MC object device message-based interrupt
+- * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+- * @mc_dev: MC object device that owns this interrupt
+- * @dev_irq_index: device-relative IRQ index
+- * @resource: MC generic resource associated with the interrupt
- */
--#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+-struct fsl_mc_device_irq {
+- struct msi_desc *msi_desc;
+- struct fsl_mc_device *mc_dev;
+- u8 dev_irq_index;
+- struct fsl_mc_resource resource;
+-};
+-
+-#define to_fsl_mc_irq(_mc_resource) \
+- container_of(_mc_resource, struct fsl_mc_device_irq, resource)
-
-/**
-- * struct dpbp_notification_cfg - Structure representing DPBP notifications
-- * towards software
-- * @depletion_entry: below this threshold the pool is "depleted";
-- * set it to '0' to disable it
-- * @depletion_exit: greater than or equal to this threshold the pool exit its
-- * "depleted" state
-- * @surplus_entry: above this threshold the pool is in "surplus" state;
-- * set it to '0' to disable it
-- * @surplus_exit: less than or equal to this threshold the pool exit its
-- * "surplus" state
-- * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
-- * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
-- * must be 16B aligned.
-- * @message_ctx: The context that will be part of the BPSCN message and will
-- * be written to 'message_iova'
-- * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+- * Bit masks for a MC object device (struct fsl_mc_device) flags
- */
--struct dpbp_notification_cfg {
-- u32 depletion_entry;
-- u32 depletion_exit;
-- u32 surplus_entry;
-- u32 surplus_exit;
-- u64 message_iova;
-- u64 message_ctx;
-- u16 options;
+-#define FSL_MC_IS_DPRC 0x0001
+-
+-/**
+- * struct fsl_mc_device - MC object device object
+- * @dev: Linux driver model device object
+- * @dma_mask: Default DMA mask
+- * @flags: MC object device flags
+- * @icid: Isolation context ID for the device
+- * @mc_handle: MC handle for the corresponding MC object opened
+- * @mc_io: Pointer to MC IO object assigned to this device or
+- * NULL if none.
+- * @obj_desc: MC description of the DPAA device
+- * @regions: pointer to array of MMIO region entries
+- * @irqs: pointer to array of pointers to interrupts allocated to this device
+- * @resource: generic resource associated with this MC object device, if any.
+- *
+- * Generic device object for MC object devices that are "attached" to a
+- * MC bus.
+- *
+- * NOTES:
+- * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
+- * - The SMMU notifier callback gets invoked after device_add() has been
+- * called for an MC object device, but before the device-specific probe
+- * callback gets called.
+- * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
+- * portals. For all other MC objects, their device drivers are responsible for
+- * allocating MC portals for them by calling fsl_mc_portal_allocate().
+- * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
+- * treated as resources that can be allocated/deallocated from the
+- * corresponding resource pool in the object's parent DPRC, using the
+- * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
+- * are known as "allocatable" objects. For them, the corresponding
+- * fsl_mc_device's 'resource' points to the associated resource object.
+- * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
+- * 'resource' is NULL.
+- */
+-struct fsl_mc_device {
+- struct device dev;
+- u64 dma_mask;
+- u16 flags;
+- u16 icid;
+- u16 mc_handle;
+- struct fsl_mc_io *mc_io;
+- struct dprc_obj_desc obj_desc;
+- struct resource *regions;
+- struct fsl_mc_device_irq **irqs;
+- struct fsl_mc_resource *resource;
-};
-
--int dpbp_set_notifications(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_notification_cfg *cfg);
+-#define to_fsl_mc_device(_dev) \
+- container_of(_dev, struct fsl_mc_device, dev)
-
--int dpbp_get_notifications(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dpbp_notification_cfg *cfg);
+-/*
+- * module_fsl_mc_driver() - Helper macro for drivers that don't do
+- * anything special in module init/exit. This eliminates a lot of
+- * boilerplate. Each module may only use this macro once, and
+- * calling it replaces module_init() and module_exit()
+- */
+-#define module_fsl_mc_driver(__fsl_mc_driver) \
+- module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
+- fsl_mc_driver_unregister)
-
--/** @} */
-+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpbp_attr *attr);
-+
-+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
-
- #endif /* __FSL_DPBP_H */
+-/*
+- * Macro to avoid include chaining to get THIS_MODULE
+- */
+-#define fsl_mc_driver_register(drv) \
+- __fsl_mc_driver_register(drv, THIS_MODULE)
+-
+-int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+- struct module *owner);
+-
+-void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+-
+-int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+- u16 mc_io_flags,
+- struct fsl_mc_io **new_mc_io);
+-
+-void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
+-
+-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
+-
+-int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+- enum fsl_mc_pool_type pool_type,
+- struct fsl_mc_device **new_mc_adev);
+-
+-void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+-
+-int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+-
+-void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+-
+-#endif /* _FSL_MC_H_ */
--- /dev/null
-+++ b/drivers/staging/fsl-mc/include/dpcon.h
-@@ -0,0 +1,115 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
++++ b/include/linux/fsl/mc.h
+@@ -0,0 +1,1025 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Freescale Management Complex (MC) bus public interface
+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
++ * Author: German Rivera <German.Rivera@freescale.com>
+ *
++ */
++#ifndef _FSL_MC_H_
++#define _FSL_MC_H_
++
++#include <linux/device.h>
++#include <linux/mod_devicetable.h>
++#include <linux/interrupt.h>
++#include <linux/cdev.h>
++#include <uapi/linux/fsl_mc.h>
++
++#define FSL_MC_VENDOR_FREESCALE 0x1957
++
++struct irq_domain;
++struct msi_domain_info;
++
++struct fsl_mc_device;
++struct fsl_mc_io;
++
++/**
++ * struct fsl_mc_driver - MC object device driver object
++ * @driver: Generic device driver
++ * @match_id_table: table of supported device matching Ids
++ * @probe: Function called when a device is added
++ * @remove: Function called when a device is removed
++ * @shutdown: Function called at shutdown time to quiesce the device
++ * @suspend: Function called when a device is stopped
++ * @resume: Function called when a device is resumed
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++ * Generic DPAA device driver object for device drivers that are registered
++ * with a DPRC bus. This structure is to be embedded in each device-specific
++ * driver structure.
++ */
++struct fsl_mc_driver {
++ struct device_driver driver;
++ const struct fsl_mc_device_id *match_id_table;
++ int (*probe)(struct fsl_mc_device *dev);
++ int (*remove)(struct fsl_mc_device *dev);
++ void (*shutdown)(struct fsl_mc_device *dev);
++ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
++ int (*resume)(struct fsl_mc_device *dev);
++};
++
++#define to_fsl_mc_driver(_drv) \
++ container_of(_drv, struct fsl_mc_driver, driver)
++
++#define to_fsl_mc_bus(_mc_dev) \
++ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
++
++/**
++ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * Entries in these enum are used as indices in the array of resource
++ * pools of an fsl_mc_bus object.
++ */
++enum fsl_mc_pool_type {
++ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
++ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
++ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
++ FSL_MC_POOL_IRQ,
++
++ /*
++ * NOTE: New resource pool types must be added before this entry
++ */
++ FSL_MC_NUM_POOL_TYPES
++};
++
++/**
++ * struct fsl_mc_resource - MC generic resource
++ * @type: type of resource
++ * @id: unique MC resource Id within the resources of the same type
++ * @data: pointer to resource-specific data if the resource is currently
++ * allocated, or NULL if the resource is not currently allocated.
++ * @parent_pool: pointer to the parent resource pool from which this
++ * resource is allocated from.
++ * @node: Node in the free list of the corresponding resource pool
++ *
++ * NOTE: This structure is to be embedded as a field of specific
++ * MC resource structures.
++ */
++struct fsl_mc_resource {
++ enum fsl_mc_pool_type type;
++ s32 id;
++ void *data;
++ struct fsl_mc_resource_pool *parent_pool;
++ struct list_head node;
++};
++
++/**
++ * struct fsl_mc_device_irq - MC object device message-based interrupt
++ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
++ * @mc_dev: MC object device that owns this interrupt
++ * @dev_irq_index: device-relative IRQ index
++ * @resource: MC generic resource associated with the interrupt
++ */
++struct fsl_mc_device_irq {
++ struct msi_desc *msi_desc;
++ struct fsl_mc_device *mc_dev;
++ u8 dev_irq_index;
++ struct fsl_mc_resource resource;
++};
++
++#define to_fsl_mc_irq(_mc_resource) \
++ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
++
++/* Opened state - Indicates that an object is open by at least one owner */
++#define FSL_MC_OBJ_STATE_OPEN 0x00000001
++/* Plugged state - Indicates that the object is plugged */
++#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002
++
++/**
++ * Shareability flag - Object flag indicating no memory shareability.
++ * the object generates memory accesses that are non coherent with other
++ * masters;
++ * user is responsible for proper memory handling through IOMMU configuration.
++ */
++#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++
++/**
++ * struct fsl_mc_obj_desc - Object descriptor
++ * @type: Type of object: NULL terminated string
++ * @id: ID of logical object resource
++ * @vendor: Object vendor identifier
++ * @ver_major: Major version number
++ * @ver_minor: Minor version number
++ * @irq_count: Number of interrupts supported by the object
++ * @region_count: Number of mappable regions supported by the object
++ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
++ * @label: Object label: NULL terminated string
++ * @flags: Object's flags
++ */
++struct fsl_mc_obj_desc {
++ char type[16];
++ int id;
++ u16 vendor;
++ u16 ver_major;
++ u16 ver_minor;
++ u8 irq_count;
++ u8 region_count;
++ u32 state;
++ char label[16];
++ u16 flags;
++};
++
++/**
++ * Bit masks for a MC object device (struct fsl_mc_device) flags
+ */
-+#ifndef __FSL_DPCON_H
-+#define __FSL_DPCON_H
++#define FSL_MC_IS_DPRC 0x0001
+
-+/* Data Path Concentrator API
-+ * Contains initialization APIs and runtime control APIs for DPCON
++/**
++ * struct fsl_mc_device - MC object device object
++ * @dev: Linux driver model device object
++ * @dma_mask: Default DMA mask
++ * @flags: MC object device flags
++ * @icid: Isolation context ID for the device
++ * @mc_handle: MC handle for the corresponding MC object opened
++ * @mc_io: Pointer to MC IO object assigned to this device or
++ * NULL if none.
++ * @obj_desc: MC description of the DPAA device
++ * @regions: pointer to array of MMIO region entries
++ * @irqs: pointer to array of pointers to interrupts allocated to this device
++ * @resource: generic resource associated with this MC object device, if any.
++ * @driver_override: Driver name to force a match
++ *
++ * Generic device object for MC object devices that are "attached" to a
++ * MC bus.
++ *
++ * NOTES:
++ * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
++ * - The SMMU notifier callback gets invoked after device_add() has been
++ * called for an MC object device, but before the device-specific probe
++ * callback gets called.
++ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
++ * portals. For all other MC objects, their device drivers are responsible for
++ * allocating MC portals for them by calling fsl_mc_portal_allocate().
++ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
++ * treated as resources that can be allocated/deallocated from the
++ * corresponding resource pool in the object's parent DPRC, using the
++ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
++ * are known as "allocatable" objects. For them, the corresponding
++ * fsl_mc_device's 'resource' points to the associated resource object.
++ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
++ * 'resource' is NULL.
+ */
++struct fsl_mc_device {
++ struct device dev;
++ u64 dma_mask;
++ u16 flags;
++ u32 icid;
++ u16 mc_handle;
++ struct fsl_mc_io *mc_io;
++ struct fsl_mc_obj_desc obj_desc;
++ struct resource *regions;
++ struct fsl_mc_device_irq **irqs;
++ struct fsl_mc_resource *resource;
++ const char *driver_override;
++};
+
-+struct fsl_mc_io;
++#define to_fsl_mc_device(_dev) \
++ container_of(_dev, struct fsl_mc_device, dev)
+
-+/** General DPCON macros */
++struct mc_cmd_header {
++ u8 src_id;
++ u8 flags_hw;
++ u8 status;
++ u8 flags_sw;
++ __le16 token;
++ __le16 cmd_id;
++};
+
-+/**
-+ * Use it to disable notifications; see dpcon_set_notification()
++enum mc_cmd_status {
++ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
++ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
++ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
++ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
++ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
++ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
++ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
++ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
++ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
++ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
++ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
++ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
++};
++
++/*
++ * MC command flags
+ */
-+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
-+int dpcon_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpcon_id,
-+ u16 *token);
++/* High priority flag */
++#define MC_CMD_FLAG_PRI 0x80
++/* Command completion flag */
++#define MC_CMD_FLAG_INTR_DIS 0x01
+
-+int dpcon_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++static inline u64 mc_encode_cmd_header(u16 cmd_id,
++ u32 cmd_flags,
++ u16 token)
++{
++ u64 header = 0;
++ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
-+int dpcon_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ hdr->cmd_id = cpu_to_le16(cmd_id);
++ hdr->token = cpu_to_le16(token);
++ hdr->status = MC_CMD_STATUS_READY;
++ if (cmd_flags & MC_CMD_FLAG_PRI)
++ hdr->flags_hw = MC_CMD_FLAG_PRI;
++ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
++ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
-+int dpcon_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ return header;
++}
+
-+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
++{
++ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
++ u16 token = le16_to_cpu(hdr->token);
+
-+int dpcon_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ return token;
++}
+
-+/**
-+ * struct dpcon_attr - Structure representing DPCON attributes
-+ * @id: DPCON object ID
-+ * @qbman_ch_id: Channel ID to be used by dequeue operation
-+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
-+ */
-+struct dpcon_attr {
-+ int id;
-+ u16 qbman_ch_id;
-+ u8 num_priorities;
++struct mc_rsp_create {
++ __le32 object_id;
+};
+
-+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_attr *attr);
++struct mc_rsp_api_ver {
++ __le16 major_ver;
++ __le16 minor_ver;
++};
++
++static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd)
++{
++ struct mc_rsp_create *rsp_params;
++
++ rsp_params = (struct mc_rsp_create *)cmd->params;
++ return le32_to_cpu(rsp_params->object_id);
++}
++
++static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct mc_rsp_api_ver *rsp_params;
++
++ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
++ *major_ver = le16_to_cpu(rsp_params->major_ver);
++ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
++}
+
+/**
-+ * struct dpcon_notification_cfg - Structure representing notification params
-+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
-+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
-+ * @priority: Priority selection within the DPIO channel; valid values
-+ * are 0-7, depending on the number of priorities in that channel
-+ * @user_ctx: User context value provided with each CDAN message
++ * Bit masks for a MC I/O object (struct fsl_mc_io) flags
+ */
-+struct dpcon_notification_cfg {
-+ int dpio_id;
-+ u8 priority;
-+ u64 user_ctx;
++#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
++
++/**
++ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
++ * @dev: device associated with this Mc I/O object
++ * @flags: flags for mc_send_command()
++ * @portal_size: MC command portal size in bytes
++ * @portal_phys_addr: MC command portal physical address
++ * @portal_virt_addr: MC command portal virtual address
++ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
++ *
++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
++ * set:
++ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
++ * portal, if the fsl_mc_io object was created with the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
++ * fsl_mc_io object must be made only from non-atomic context.
++ *
++ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
++ * set:
++ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
++ * portal, if the fsl_mc_io object was created with the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
++ * fsl_mc_io object can be made from atomic or non-atomic context.
++ */
++struct fsl_mc_io {
++ struct device *dev;
++ u16 flags;
++ u32 portal_size;
++ phys_addr_t portal_phys_addr;
++ void __iomem *portal_virt_addr;
++ struct fsl_mc_device *dpmcp_dev;
++ union {
++ /*
++ * This field is only meaningful if the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
++ */
++ struct mutex mutex; /* serializes mc_send_command() */
++
++ /*
++ * This field is only meaningful if the
++ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
++ */
++ spinlock_t spinlock; /* serializes mc_send_command() */
++ };
+};
+
-+int dpcon_set_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpcon_notification_cfg *cfg);
++int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
+
-+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 *major_ver,
-+ u16 *minor_ver);
++#ifdef CONFIG_FSL_MC_BUS
++#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
++#else
++/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */
++#define dev_is_fsl_mc(_dev) (0)
++#endif
++
++/* Macro to check if a device is a container device */
++#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
++ FSL_MC_IS_DPRC)
++
++/* Macro to get the container device of a MC device */
++#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
++ (_dev) : (_dev)->parent)
++
++#define fsl_mc_is_dev_coherent(_dev) \
++ (!((to_fsl_mc_device(_dev))->obj_desc.flags & \
++ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+
-+#endif /* __FSL_DPCON_H */
---- a/drivers/staging/fsl-mc/include/dpmng.h
-+++ b/drivers/staging/fsl-mc/include/dpmng.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -32,7 +33,8 @@
- #ifndef __FSL_DPMNG_H
- #define __FSL_DPMNG_H
-
--/* Management Complex General API
++ * module_fsl_mc_driver() - Helper macro for drivers that don't do
++ * anything special in module init/exit. This eliminates a lot of
++ * boilerplate. Each module may only use this macro once, and
++ * calling it replaces module_init() and module_exit()
++ */
++#define module_fsl_mc_driver(__fsl_mc_driver) \
++ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
++ fsl_mc_driver_unregister)
++
++void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
++
+/*
-+ * Management Complex General API
- * Contains general API for the Management Complex firmware
- */
-
-@@ -58,12 +60,8 @@ struct mc_version {
- u32 revision;
- };
-
--int mc_get_version(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- struct mc_version *mc_ver_info);
--
--int dpmng_get_container_id(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- int *container_id);
-+int mc_get_version(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ struct mc_version *mc_ver_info);
-
- #endif /* __FSL_DPMNG_H */
---- /dev/null
-+++ b/drivers/staging/fsl-mc/include/dpopr.h
-@@ -0,0 +1,110 @@
++ * Macro to avoid include chaining to get THIS_MODULE
++ */
++#define fsl_mc_driver_register(drv) \
++ __fsl_mc_driver_register(drv, THIS_MODULE)
++
++int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
++ struct module *owner);
++
++void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
++
++int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
++ u16 mc_io_flags,
++ struct fsl_mc_io **new_mc_io);
++
++void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
++
++int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
++
++int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
++ enum fsl_mc_pool_type pool_type,
++ struct fsl_mc_device **new_mc_adev);
++
++void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
++
++struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
++ struct msi_domain_info *info,
++ struct irq_domain *parent);
++
++int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
++
++void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
++
++void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
++ struct device_node *fsl_mc_platform_node, int coherent);
++
++extern struct bus_type fsl_mc_bus_type;
++
++extern struct device_type fsl_mc_bus_dprc_type;
++extern struct device_type fsl_mc_bus_dpni_type;
++extern struct device_type fsl_mc_bus_dpio_type;
++extern struct device_type fsl_mc_bus_dpsw_type;
++extern struct device_type fsl_mc_bus_dpdmux_type;
++extern struct device_type fsl_mc_bus_dpbp_type;
++extern struct device_type fsl_mc_bus_dpcon_type;
++extern struct device_type fsl_mc_bus_dpmcp_type;
++extern struct device_type fsl_mc_bus_dpmac_type;
++extern struct device_type fsl_mc_bus_dprtc_type;
++extern struct device_type fsl_mc_bus_dpseci_type;
++extern struct device_type fsl_mc_bus_dpdcei_type;
++extern struct device_type fsl_mc_bus_dpaiop_type;
++extern struct device_type fsl_mc_bus_dpci_type;
++extern struct device_type fsl_mc_bus_dpdmai_type;
++
++static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
++}
++
++static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
++}
++
++static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
++}
++
++static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
++}
++
++static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
++}
++
++static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
++}
++
++static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
++}
++
++static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
++}
++
++static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
++}
++
++static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
++}
++
++static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
++}
++
++static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
++}
++
++static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
++}
++
++static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
++}
++
++static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
++{
++ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
++}
++
+/*
-+ * Copyright 2017 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * Data Path Resource Container (DPRC) API
+ */
-+#ifndef __FSL_DPOPR_H_
-+#define __FSL_DPOPR_H_
+
-+/* Data Path Order Restoration API
-+ * Contains initialization APIs and runtime APIs for the Order Restoration
-+ */
++/* Minimal supported DPRC Version */
++#define DPRC_MIN_VER_MAJOR 6
++#define DPRC_MIN_VER_MINOR 0
++
++/* DPRC command versioning */
++#define DPRC_CMD_BASE_VERSION 1
++#define DPRC_CMD_ID_OFFSET 4
++
++#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
++
++/* DPRC command IDs */
++#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
++#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
++#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
++
++#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
++#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
++
++#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
++#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
++#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
++#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
++#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
++
++#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
++#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
++#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
++#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
++#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
++
++struct dprc_cmd_open {
++ __le32 container_id;
++};
++
++struct dprc_cmd_reset_container {
++ __le32 child_container_id;
++};
+
-+/** Order Restoration properties */
++struct dprc_cmd_set_irq {
++ /* cmd word 0 */
++ __le32 irq_val;
++ u8 irq_index;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le64 irq_addr;
++ /* cmd word 2 */
++ __le32 irq_num;
++};
+
-+/**
-+ * Create a new Order Point Record option
-+ */
-+#define OPR_OPT_CREATE 0x1
-+/**
-+ * Retire an existing Order Point Record option
-+ */
-+#define OPR_OPT_RETIRE 0x2
++#define DPRC_ENABLE 0x1
+
-+/**
-+ * struct opr_cfg - Structure representing OPR configuration
-+ * @oprrws: Order point record (OPR) restoration window size (0 to 5)
-+ * 0 - Window size is 32 frames.
-+ * 1 - Window size is 64 frames.
-+ * 2 - Window size is 128 frames.
-+ * 3 - Window size is 256 frames.
-+ * 4 - Window size is 512 frames.
-+ * 5 - Window size is 1024 frames.
-+ * @oa: OPR auto advance NESN window size (0 disabled, 1 enabled)
-+ * @olws: OPR acceptable late arrival window size (0 to 3)
-+ * 0 - Disabled. Late arrivals are always rejected.
-+ * 1 - Window size is 32 frames.
-+ * 2 - Window size is the same as the OPR restoration
-+ * window size configured in the OPRRWS field.
-+ * 3 - Window size is 8192 frames. Late arrivals are
-+ * always accepted.
-+ * @oeane: Order restoration list (ORL) resource exhaustion
-+ * advance NESN enable (0 disabled, 1 enabled)
-+ * @oloe: OPR loose ordering enable (0 disabled, 1 enabled)
-+ */
-+struct opr_cfg {
-+ u8 oprrws;
-+ u8 oa;
-+ u8 olws;
-+ u8 oeane;
-+ u8 oloe;
++struct dprc_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
+};
+
-+/**
-+ * struct opr_qry - Structure representing OPR configuration
-+ * @enable: Enabled state
-+ * @rip: Retirement In Progress
-+ * @ndsn: Next dispensed sequence number
-+ * @nesn: Next expected sequence number
-+ * @ea_hseq: Early arrival head sequence number
-+ * @hseq_nlis: HSEQ not last in sequence
-+ * @ea_tseq: Early arrival tail sequence number
-+ * @tseq_nlis: TSEQ not last in sequence
-+ * @ea_tptr: Early arrival tail pointer
-+ * @ea_hptr: Early arrival head pointer
-+ * @opr_id: Order Point Record ID
-+ * @opr_vid: Order Point Record Virtual ID
-+ */
-+struct opr_qry {
-+ char enable;
-+ char rip;
-+ u16 ndsn;
-+ u16 nesn;
-+ u16 ea_hseq;
-+ char hseq_nlis;
-+ u16 ea_tseq;
-+ char tseq_nlis;
-+ u16 ea_tptr;
-+ u16 ea_hptr;
-+ u16 opr_id;
-+ u16 opr_vid;
++struct dprc_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
+};
+
-+#endif /* __FSL_DPOPR_H_ */
---- a/drivers/staging/fsl-mc/include/dprc.h
-+++ b/drivers/staging/fsl-mc/include/dprc.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -34,26 +35,13 @@
-
- #include "mc-cmd.h"
-
--/* Data Path Resource Container API
-+/*
-+ * Data Path Resource Container API
- * Contains DPRC API for managing and querying DPAA resources
- */
-
- struct fsl_mc_io;
-
--/**
-- * Set this value as the icid value in dprc_cfg structure when creating a
-- * container, in case the ICID is not selected by the user and should be
-- * allocated by the DPRC from the pool of ICIDs.
-- */
--#define DPRC_GET_ICID_FROM_POOL (u16)(~(0))
--
--/**
-- * Set this value as the portal_id value in dprc_cfg structure when creating a
-- * container, in case the portal ID is not specifically selected by the
-- * user and should be allocated by the DPRC from the pool of portal ids.
-- */
--#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0))
--
- int dprc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int container_id,
-@@ -63,75 +51,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
--/**
-- * Container general options
-- *
-- * These options may be selected at container creation by the container creator
-- * and can be retrieved using dprc_get_attributes()
-- */
--
--/* Spawn Policy Option allowed - Indicates that the new container is allowed
-- * to spawn and have its own child containers.
-- */
--#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001
--
--/* General Container allocation policy - Indicates that the new container is
-- * allowed to allocate requested resources from its parent container; if not
-- * set, the container is only allowed to use resources in its own pools; Note
-- * that this is a container's global policy, but the parent container may
-- * override it and set specific quota per resource type.
-- */
--#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002
--
--/* Object initialization allowed - software context associated with this
-- * container is allowed to invoke object initialization operations.
-- */
--#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004
--
--/* Topology change allowed - software context associated with this
-- * container is allowed to invoke topology operations, such as attach/detach
-- * of network objects.
-- */
--#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
--
--/* AIOP - Indicates that container belongs to AIOP. */
--#define DPRC_CFG_OPT_AIOP 0x00000020
--
--/* IRQ Config - Indicates that the container allowed to configure its IRQs. */
--#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040
--
--/**
-- * struct dprc_cfg - Container configuration options
-- * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free
-- * ICID value is allocated by the DPRC
-- * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free
-- * portal ID is allocated by the DPRC
-- * @options: Combination of 'DPRC_CFG_OPT_<X>' options
-- * @label: Object's label
-- */
--struct dprc_cfg {
-- u16 icid;
-- int portal_id;
-- u64 options;
-- char label[16];
--};
--
--int dprc_create_container(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dprc_cfg *cfg,
-- int *child_container_id,
-- u64 *child_portal_offset);
--
--int dprc_destroy_container(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id);
--
--int dprc_reset_container(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id);
-
- /* IRQ */
-
-@@ -139,7 +58,7 @@ int dprc_reset_container(struct fsl_mc_i
- #define DPRC_IRQ_INDEX 0
-
- /* Number of dprc's IRQs */
--#define DPRC_NUM_OF_IRQS 1
-+#define DPRC_NUM_OF_IRQS 1
-
- /* DPRC IRQ events */
-
-@@ -151,12 +70,14 @@ int dprc_reset_container(struct fsl_mc_i
- #define DPRC_IRQ_EVENT_RES_ADDED 0x00000004
- /* IRQ event - Indicates that resources removed from the container */
- #define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008
--/* IRQ event - Indicates that one of the descendant containers that opened by
-+/*
-+ * IRQ event - Indicates that one of the descendant containers that opened by
- * this container is destroyed
- */
- #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
-
--/* IRQ event - Indicates that on one of the container's opened object is
-+/*
-+ * IRQ event - Indicates that on one of the container's opened object is
- * destroyed
- */
- #define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
-@@ -171,59 +92,59 @@ int dprc_reset_container(struct fsl_mc_i
- * @irq_num: A user defined number associated with this IRQ
- */
- struct dprc_irq_cfg {
-- phys_addr_t paddr;
-- u32 val;
-- int irq_num;
--};
--
--int dprc_set_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 en);
--
--int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u8 *en);
--
--int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 mask);
--
--int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *mask);
--
--int dprc_get_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 *status);
--
--int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- u8 irq_index,
-- u32 status);
++struct dprc_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dprc_rsp_get_irq_status {
++ __le32 status;
++};
++
++struct dprc_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
++
++struct dprc_rsp_get_attributes {
++ /* response word 0 */
++ __le32 container_id;
++ __le32 icid;
++ /* response word 1 */
++ __le32 options;
++ __le32 portal_id;
++};
++
++struct dprc_rsp_get_obj_count {
++ __le32 pad;
++ __le32 obj_count;
++};
++
++struct dprc_cmd_get_obj {
++ __le32 obj_index;
++};
++
++struct dprc_rsp_get_obj {
++ /* response word 0 */
++ __le32 pad0;
++ __le32 id;
++ /* response word 1 */
++ __le16 vendor;
++ u8 irq_count;
++ u8 region_count;
++ __le32 state;
++ /* response word 2 */
++ __le16 version_major;
++ __le16 version_minor;
++ __le16 flags;
++ __le16 pad1;
++ /* response word 3-4 */
++ u8 type[16];
++ /* response word 5-6 */
++ u8 label[16];
++};
++
++struct dprc_cmd_get_obj_region {
++ /* cmd word 0 */
++ __le32 obj_id;
++ __le16 pad0;
++ u8 region_index;
++ u8 pad1;
++ /* cmd word 1-2 */
++ __le64 pad2[2];
++ /* cmd word 3-4 */
++ u8 obj_type[16];
++};
++
++struct dprc_rsp_get_obj_region {
++ /* response word 0 */
++ __le64 pad0;
++ /* response word 1 */
++ __le32 base_addr;
++ __le32 pad1;
++ /* response word 2 */
++ __le32 size;
++ u8 type;
++ u8 pad2[3];
++ /* response word 3 */
++ __le32 flags;
++};
++
++struct dprc_cmd_set_obj_irq {
++ /* cmd word 0 */
++ __le32 irq_val;
++ u8 irq_index;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le64 irq_addr;
++ /* cmd word 2 */
++ __le32 irq_num;
++ __le32 obj_id;
++ /* cmd word 3-4 */
++ u8 obj_type[16];
++};
++
++/*
++ * DPRC API for managing and querying DPAA resources
++ */
++int dprc_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int container_id,
++ u16 *token);
++
++int dprc_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/* DPRC IRQ events */
++
++/* IRQ event - Indicates that a new object added to the container */
++#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
++/* IRQ event - Indicates that an object was removed from the container */
++#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
++/*
++ * IRQ event - Indicates that one of the descendant containers that opened by
++ * this container is destroyed
++ */
++#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
++
++/*
++ * IRQ event - Indicates that on one of the container's opened object is
++ * destroyed
++ */
++#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
++
++/* Irq event - Indicates that object is created at the container */
++#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
++
++/**
++ * struct dprc_irq_cfg - IRQ configuration
++ * @paddr: Address that must be written to signal a message-based interrupt
++ * @val: Value to write into irq_addr address
++ * @irq_num: A user defined number associated with this IRQ
++ */
++struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
-+int dprc_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg);
-+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
-+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
-+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 token,
+ u8 irq_index,
+ u32 status);
-
- /**
- * struct dprc_attributes - Container attributes
-@@ -231,114 +152,23 @@ int dprc_clear_irq_status(struct fsl_mc_
- * @icid: Container's ICID
- * @portal_id: Container's portal ID
- * @options: Container's options as set at container's creation
-- * @version: DPRC version
- */
- struct dprc_attributes {
- int container_id;
- u16 icid;
- int portal_id;
- u64 options;
-- /**
-- * struct version - DPRC version
-- * @major: DPRC major version
-- * @minor: DPRC minor version
-- */
-- struct {
-- u16 major;
-- u16 minor;
-- } version;
- };
-
--int dprc_get_attributes(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- struct dprc_attributes *attributes);
--
--int dprc_set_res_quota(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- char *type,
-- u16 quota);
--
--int dprc_get_res_quota(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- char *type,
-- u16 *quota);
--
--/* Resource request options */
--
--/* Explicit resource ID request - The requested objects/resources
-- * are explicit and sequential (in case of resources).
-- * The base ID is given at res_req at base_align field
-- */
--#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001
--
--/* Aligned resources request - Relevant only for resources
-- * request (and not objects). Indicates that resources base ID should be
-- * sequential and aligned to the value given at dprc_res_req base_align field
-- */
--#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002
--
--/* Plugged Flag - Relevant only for object assignment request.
-- * Indicates that after all objects assigned. An interrupt will be invoked at
-- * the relevant GPP. The assigned object will be marked as plugged.
-- * plugged objects can't be assigned from their container
-- */
--#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004
--
--/**
-- * struct dprc_res_req - Resource request descriptor, to be used in assignment
-- * or un-assignment of resources and objects.
-- * @type: Resource/object type: Represent as a NULL terminated string.
-- * This string may received by using dprc_get_pool() to get resource
-- * type and dprc_get_obj() to get object type;
-- * Note: it is not possible to assign/un-assign DPRC objects
-- * @num: Number of resources
-- * @options: Request options: combination of DPRC_RES_REQ_OPT_ options
-- * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT
-- * is set at option), this field represents the required base ID
-- * for resource allocation; In case of aligned assignment
-- * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field
-- * indicates the required alignment for the resource ID(s) -
-- * use 0 if there is no alignment or explicit ID requirements
-- */
--struct dprc_res_req {
-- char type[16];
-- u32 num;
-- u32 options;
-- int id_base_align;
--};
--
--int dprc_assign(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int container_id,
-- struct dprc_res_req *res_req);
--
--int dprc_unassign(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int child_container_id,
-- struct dprc_res_req *res_req);
--
--int dprc_get_pool_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *pool_count);
--
--int dprc_get_pool(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int pool_index,
-- char *type);
++
++/**
++ * struct dprc_attributes - Container attributes
++ * @container_id: Container's ID
++ * @icid: Container's ICID
++ * @portal_id: Container's portal ID
++ * @options: Container's options as set at container's creation
++ */
++struct dprc_attributes {
++ int container_id;
++ u32 icid;
++ int portal_id;
++ u64 options;
++};
++
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
-
- int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int *obj_count);
++
++int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
-
- /* Objects Attributes Flags */
-
-@@ -353,7 +183,7 @@ int dprc_get_obj_count(struct fsl_mc_io
- * masters;
- * user is responsible for proper memory handling through IOMMU configuration.
- */
--#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
-+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
-
- /**
- * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
-@@ -381,41 +211,41 @@ struct dprc_obj_desc {
- u16 flags;
- };
-
--int dprc_get_obj(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- int obj_index,
-- struct dprc_obj_desc *obj_desc);
--
--int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- struct dprc_obj_desc *obj_desc);
--
--int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 irq_index,
-- int *type,
-- struct dprc_irq_cfg *irq_cfg);
--
--int dprc_get_res_count(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *type,
-- int *res_count);
++
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
-+ struct dprc_obj_desc *obj_desc);
-+
-+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ struct dprc_obj_desc *obj_desc);
++ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 irq_index,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *obj_type,
-+ int obj_id,
-+ u8 irq_index,
-+ int *type,
-+ struct dprc_irq_cfg *irq_cfg);
-+
-+int dprc_get_res_count(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ char *type,
-+ int *res_count);
-
- /**
- * enum dprc_iter_status - Iteration status
-@@ -429,27 +259,6 @@ enum dprc_iter_status {
- DPRC_ITER_STATUS_LAST = 2
- };
-
--/**
-- * struct dprc_res_ids_range_desc - Resource ID range descriptor
-- * @base_id: Base resource ID of this range
-- * @last_id: Last resource ID of this range
-- * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at
-- * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE,
-- * additional iterations are needed, until the returned marker is
-- * DPRC_ITER_STATUS_LAST
-- */
--struct dprc_res_ids_range_desc {
-- int base_id;
-- int last_id;
-- enum dprc_iter_status iter_status;
--};
--
--int dprc_get_res_ids(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *type,
-- struct dprc_res_ids_range_desc *range_desc);
--
- /* Region flags */
- /* Cacheable - Indicates that region should be mapped as cacheable */
- #define DPRC_REGION_CACHEABLE 0x00000001
-@@ -481,64 +290,27 @@ struct dprc_region_desc {
- enum dprc_region_type type;
- };
-
--int dprc_get_obj_region(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- u8 region_index,
-- struct dprc_region_desc *region_desc);
--
--int dprc_set_obj_label(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- char *obj_type,
-- int obj_id,
-- char *label);
++ char *obj_type,
++ int obj_id,
++ u8 irq_index,
++ struct dprc_irq_cfg *irq_cfg);
++
++/* Region flags */
++/* Cacheable - Indicates that region should be mapped as cacheable */
++#define DPRC_REGION_CACHEABLE 0x00000001
++
++/**
++ * enum dprc_region_type - Region type
++ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
++ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
++ */
++enum dprc_region_type {
++ DPRC_REGION_TYPE_MC_PORTAL,
++ DPRC_REGION_TYPE_QBMAN_PORTAL
++};
++
++#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
++
++/**
++ * struct dprc_region_desc - Mappable region descriptor
++ * @base_offset: Region offset from region's base address.
++ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
++ * base address; For DPIO, region base is offset from SoC QMan portals
++ * base address
++ * @size: Region size (in bytes)
++ * @flags: Region attributes
++ * @type: Portal region type
++ */
++struct dprc_region_desc {
++ u32 base_offset;
++ u32 size;
++ u32 flags;
++ enum dprc_region_type type;
++};
++
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
-
--/**
-- * struct dprc_endpoint - Endpoint description for link connect/disconnect
-- * operations
-- * @type: Endpoint object type: NULL terminated string
-- * @id: Endpoint object ID
-- * @if_id: Interface ID; should be set for endpoints with multiple
-- * interfaces ("dpsw", "dpdmux"); for others, always set to 0
-- */
--struct dprc_endpoint {
-- char type[16];
-- int id;
-- int if_id;
--};
++
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
-
--/**
-- * struct dprc_connection_cfg - Connection configuration.
-- * Used for virtual connections only
-- * @committed_rate: Committed rate (Mbits/s)
-- * @max_rate: Maximum rate (Mbits/s)
-- */
--struct dprc_connection_cfg {
-- u32 committed_rate;
-- u32 max_rate;
--};
++
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
-
--int dprc_connect(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint1,
-- const struct dprc_endpoint *endpoint2,
-- const struct dprc_connection_cfg *cfg);
--
--int dprc_disconnect(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint);
--
--int dprc_get_connection(struct fsl_mc_io *mc_io,
-- u32 cmd_flags,
-- u16 token,
-- const struct dprc_endpoint *endpoint1,
-- struct dprc_endpoint *endpoint2,
-- int *state);
++
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id);
-
- #endif /* _FSL_DPRC_H */
-
---- a/drivers/staging/fsl-mc/include/mc-bus.h
-+++ b/drivers/staging/fsl-mc/include/mc-bus.h
-@@ -1,7 +1,7 @@
- /*
- * Freescale Management Complex (MC) bus declarations
- *
-- * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
-@@ -42,8 +42,8 @@ struct msi_domain_info;
- */
- struct fsl_mc_resource_pool {
- enum fsl_mc_pool_type type;
-- int16_t max_count;
-- int16_t free_count;
++
++/*
++ * Data Path Buffer Pool (DPBP) API
++ * Contains initialization APIs and runtime control APIs for DPBP
++ */
++
++int dpbp_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpbp_id,
++ u16 *token);
++
++int dpbp_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpbp_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpbp_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpbp_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpbp_attr - Structure representing DPBP attributes
++ * @id: DPBP object ID
++ * @bpid: Hardware buffer pool ID; should be used as an argument in
++ * acquire/release operations on buffers
++ */
++struct dpbp_attr {
++ int id;
++ u16 bpid;
++};
++
++int dpbp_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpbp_attr *attr);
++
++/* Data Path Concentrator (DPCON) API
++ * Contains initialization APIs and runtime control APIs for DPCON
++ */
++
++/**
++ * Use it to disable notifications; see dpcon_set_notification()
++ */
++#define DPCON_INVALID_DPIO_ID (int)(-1)
++
++int dpcon_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpcon_id,
++ u16 *token);
++
++int dpcon_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++int dpcon_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
++
++/**
++ * struct dpcon_attr - Structure representing DPCON attributes
++ * @id: DPCON object ID
++ * @qbman_ch_id: Channel ID to be used by dequeue operation
++ * @num_priorities: Number of priorities for the DPCON channel (1-8)
++ */
++struct dpcon_attr {
++ int id;
++ u16 qbman_ch_id;
++ u8 num_priorities;
++};
++
++int dpcon_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_attr *attr);
++
++/**
++ * struct dpcon_notification_cfg - Structure representing notification params
++ * @dpio_id: DPIO object ID; must be configured with a notification channel;
++ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
++ * @priority: Priority selection within the DPIO channel; valid values
++ * are 0-7, depending on the number of priorities in that channel
++ * @user_ctx: User context value provided with each CDAN message
++ */
++struct dpcon_notification_cfg {
++ int dpio_id;
++ u8 priority;
++ u64 user_ctx;
++};
++
++int dpcon_set_notification(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpcon_notification_cfg *cfg);
++
++struct irq_domain;
++struct msi_domain_info;
++
++/**
++ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
++ * IRQ pool
++ */
++#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
++
++/**
++ * struct fsl_mc_resource_pool - Pool of MC resources of a given
++ * type
++ * @type: type of resources in the pool
++ * @max_count: maximum number of resources in the pool
++ * @free_count: number of free resources in the pool
++ * @mutex: mutex to serialize access to the pool's free list
++ * @free_list: anchor node of list of free resources in the pool
++ * @mc_bus: pointer to the MC bus that owns this resource pool
++ */
++struct fsl_mc_resource_pool {
++ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
- struct mutex mutex; /* serializes access to free_list */
- struct list_head free_list;
- struct fsl_mc_bus *mc_bus;
-@@ -73,6 +73,7 @@ struct fsl_mc_bus {
- int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
-
- int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-+ const char *driver_override,
- unsigned int *total_irq_count);
-
- int __init dprc_driver_init(void);
---- a/drivers/staging/fsl-mc/include/mc-cmd.h
-+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
-@@ -48,6 +49,15 @@ struct mc_command {
- u64 params[MC_CMD_NUM_OF_PARAMS];
- };
-
-+struct mc_rsp_create {
-+ __le32 object_id;
++ struct mutex mutex; /* serializes access to free_list */
++ struct list_head free_list;
++ struct fsl_mc_bus *mc_bus;
+};
+
-+struct mc_rsp_api_ver {
-+ __le16 major_ver;
-+ __le16 minor_ver;
++/**
++ * struct fsl_mc_restool - information associated with a restool device file
++ * @cdev: struct char device linked to the root dprc
++ * @dev: dev_t for the char device to be added
++ * @device: newly created device in /dev
++ * @mutex: mutex lock to serialize the open/release operations
++ * @local_instance_in_use: local MC I/O instance in use or not
++ * @dynamic_instance_count: number of dynamically created MC I/O instances
++ */
++struct fsl_mc_restool {
++ struct cdev cdev;
++ dev_t dev;
++ struct device *device;
++ struct mutex mutex; /* serialize open/release operations */
++ bool local_instance_in_use;
++ u32 dynamic_instance_count;
+};
+
- enum mc_cmd_status {
- MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
- MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
-@@ -72,11 +82,6 @@ enum mc_cmd_status {
- /* Command completion flag */
- #define MC_CMD_FLAG_INTR_DIS 0x01
-
--#define MC_CMD_HDR_CMDID_MASK 0xFFF0
--#define MC_CMD_HDR_CMDID_SHIFT 4
--#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
--#define MC_CMD_HDR_TOKEN_SHIFT 6
--
- static inline u64 mc_encode_cmd_header(u16 cmd_id,
- u32 cmd_flags,
- u16 token)
-@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u
- u64 header = 0;
- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
-
-- hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
-- MC_CMD_HDR_CMDID_MASK);
-- hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
-- MC_CMD_HDR_TOKEN_MASK);
-+ hdr->cmd_id = cpu_to_le16(cmd_id);
-+ hdr->token = cpu_to_le16(token);
- hdr->status = MC_CMD_STATUS_READY;
- if (cmd_flags & MC_CMD_FLAG_PRI)
- hdr->flags_hw = MC_CMD_FLAG_PRI;
-@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(
- struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
- u16 token = le16_to_cpu(hdr->token);
-
-- return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
-+ return token;
-+}
++/**
++ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
++ * @mc_dev: fsl-mc device for the bus device itself.
++ * @resource_pools: array of resource pools (one pool per resource type)
++ * for this MC bus. These resources represent allocatable entities
++ * from the physical DPRC.
++ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
++ * @scan_mutex: Serializes bus scanning
++ * @dprc_attr: DPRC attributes
++ * @restool_misc: struct that abstracts the interaction with userspace restool
++ */
++struct fsl_mc_bus {
++ struct fsl_mc_device mc_dev;
++ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
++ struct fsl_mc_device_irq *irq_resources;
++ struct mutex scan_mutex; /* serializes bus scanning */
++ struct dprc_attributes dprc_attr;
++ struct fsl_mc_restool restool_misc;
++};
+
-+static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
-+{
-+ struct mc_rsp_create *rsp_params;
++int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
++ const char *driver_override,
++ unsigned int *total_irq_count);
+
-+ rsp_params = (struct mc_rsp_create *)cmd->params;
-+ return le32_to_cpu(rsp_params->object_id);
-+}
++int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
++ struct irq_domain **mc_msi_domain);
+
-+static inline void mc_cmd_read_api_version(struct mc_command *cmd,
-+ u16 *major_ver,
-+ u16 *minor_ver)
-+{
-+ struct mc_rsp_api_ver *rsp_params;
++int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
++ unsigned int irq_count);
+
-+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
-+ *major_ver = le16_to_cpu(rsp_params->major_ver);
-+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
- }
-
- #endif /* __FSL_MC_CMD_H */
---- a/drivers/staging/fsl-mc/include/mc-sys.h
-+++ b/drivers/staging/fsl-mc/include/mc-sys.h
-@@ -1,4 +1,5 @@
--/* Copyright 2013-2014 Freescale Semiconductor Inc.
++void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
++
++void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
++
++void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
++
++void fsl_mc_get_root_dprc(struct device *dev, struct device **root_dprc_dev);
++
++#endif /* _FSL_MC_H_ */
+--- /dev/null
++++ b/include/uapi/linux/fsl_mc.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
-+ * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Interface of the I/O services to send MC commands to the MC hardware
- *
---- a/drivers/staging/fsl-mc/include/mc.h
-+++ b/drivers/staging/fsl-mc/include/mc.h
-@@ -1,7 +1,7 @@
- /*
- * Freescale Management Complex (MC) bus public interface
- *
-- * Copyright (C) 2014 Freescale Semiconductor, Inc.
-+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
-@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
- */
- struct fsl_mc_resource {
- enum fsl_mc_pool_type type;
-- int32_t id;
-+ s32 id;
- void *data;
- struct fsl_mc_resource_pool *parent_pool;
- struct list_head node;
-@@ -122,6 +122,7 @@ struct fsl_mc_device_irq {
- * @regions: pointer to array of MMIO region entries
- * @irqs: pointer to array of pointers to interrupts allocated to this device
- * @resource: generic resource associated with this MC object device, if any.
-+ * @driver_override: Driver name to force a match
- *
- * Generic device object for MC object devices that are "attached" to a
- * MC bus.
-@@ -154,6 +155,7 @@ struct fsl_mc_device {
- struct resource *regions;
- struct fsl_mc_device_irq **irqs;
- struct fsl_mc_resource *resource;
-+ const char *driver_override;
- };
-
- #define to_fsl_mc_device(_dev) \
-@@ -175,6 +177,8 @@ struct fsl_mc_device {
- #define fsl_mc_driver_register(drv) \
- __fsl_mc_driver_register(drv, THIS_MODULE)
-
-+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
++ * Management Complex (MC) userspace public interface
++ *
++ * Copyright 2018 NXP
++ *
++ */
++#ifndef _UAPI_FSL_MC_H_
++#define _UAPI_FSL_MC_H_
+
- int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
- struct module *owner);
-
-@@ -198,4 +202,13 @@ int __must_check fsl_mc_allocate_irqs(st
-
- void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-
-+void fsl_mc_dma_configure(struct fsl_mc_device *mc_dev,
-+ struct device_node *fsl_mc_platform_node, int coherent);
++#define MC_CMD_NUM_OF_PARAMS 7
+
-+#ifdef CONFIG_FSL_MC_BUS
-+struct iommu_group *fsl_mc_device_group(struct device *dev);
-+#else
-+#define fsl_mc_device_group(__dev) NULL
-+#endif
++/**
++ * struct fsl_mc_command - Management Complex (MC) command structure
++ * @header: MC command header
++ * @params: MC command parameters
++ *
++ * Used by RESTOOL_SEND_MC_COMMAND
++ */
++struct fsl_mc_command {
++ __u64 header;
++ __u64 params[MC_CMD_NUM_OF_PARAMS];
++};
++
++#define RESTOOL_IOCTL_TYPE 'R'
++#define RESTOOL_IOCTL_SEQ 0xE0
++
++#define RESTOOL_SEND_MC_COMMAND \
++ _IOWR(RESTOOL_IOCTL_TYPE, RESTOOL_IOCTL_SEQ, struct fsl_mc_command)
+
- #endif /* _FSL_MC_H_ */
++#endif /* _UAPI_FSL_MC_H_ */
-From e729e648e4259940473e256dd4f9c8df99e774b0 Mon Sep 17 00:00:00 2001
+From 77cc39e936f87463f92f7fddaaf0de51eec3972f Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:12:58 +0800
+Date: Fri, 6 Jul 2018 15:30:21 +0800
Subject: [PATCH] dpaa2: support layerscape
This is an integrated patch for layerscape dpaa2 support.
Signed-off-by: Mathew McBride <matt@traverse.com.au>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/soc/fsl/ls2-console/Kconfig | 4 +
- drivers/soc/fsl/ls2-console/Makefile | 1 +
- drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
- drivers/staging/fsl-dpaa2/ethernet/Makefile | 11 +
- drivers/staging/fsl-dpaa2/ethernet/README | 186 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 352 ++
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
- .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 184 +
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3516 ++++++++++++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 499 +++
- drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 864 +++++
- drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
- drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 658 ++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1903 +++++++++++
- drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1053 ++++++
- drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
- drivers/staging/fsl-dpaa2/ethsw/Kconfig | 6 +
- drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
- drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 851 +++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 2762 +++++++++++++++
- drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 1269 +++++++
- drivers/staging/fsl-dpaa2/ethsw/switch.c | 1857 +++++++++++
- drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
- drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
- drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
- drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1112 +++++++
- drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 +++
- drivers/staging/fsl-dpaa2/evb/evb.c | 1350 ++++++++
- drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
- drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
- drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
- drivers/staging/fsl-dpaa2/mac/dpmac.c | 620 ++++
- drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
- drivers/staging/fsl-dpaa2/mac/mac.c | 670 ++++
- drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
- drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
- drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 +++++
- drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
- drivers/staging/fsl-dpaa2/rtc/rtc.c | 243 ++
- 39 files changed, 23365 insertions(+)
+ drivers/soc/fsl/ls2-console/Kconfig | 4 +
+ drivers/soc/fsl/ls2-console/Makefile | 1 +
+ drivers/soc/fsl/ls2-console/ls2-console.c | 284 ++
+ drivers/staging/fsl-dpaa2/ethernet/Makefile | 12 +
+ drivers/staging/fsl-dpaa2/ethernet/README | 186 +
+ drivers/staging/fsl-dpaa2/ethernet/TODO | 18 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1253 ++++++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 182 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 357 ++
+ .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
+ .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 +
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 3734 +++++++++++++++++
+ .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 601 +++
+ .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 878 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 176 +
+ drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 719 ++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.c | 2112 ++++++++++
+ drivers/staging/fsl-dpaa2/ethernet/dpni.h | 1172 ++++++
+ drivers/staging/fsl-dpaa2/ethernet/net.h | 480 +++
+ drivers/staging/fsl-dpaa2/ethsw/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/ethsw/README | 106 +
+ drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
+ drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 359 ++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1165 +++++
+ drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 592 +++
+ .../staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 206 +
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1438 +++++++
+ drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 90 +
+ drivers/staging/fsl-dpaa2/evb/Kconfig | 7 +
+ drivers/staging/fsl-dpaa2/evb/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279 ++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111 +++++
+ drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453 ++
+ drivers/staging/fsl-dpaa2/evb/evb.c | 1354 ++++++
+ drivers/staging/fsl-dpaa2/mac/Kconfig | 23 +
+ drivers/staging/fsl-dpaa2/mac/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 172 +
+ drivers/staging/fsl-dpaa2/mac/dpmac.c | 619 +++
+ drivers/staging/fsl-dpaa2/mac/dpmac.h | 342 ++
+ drivers/staging/fsl-dpaa2/mac/mac.c | 673 +++
+ drivers/staging/fsl-dpaa2/rtc/Makefile | 10 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 160 +
+ drivers/staging/fsl-dpaa2/rtc/dprtc.c | 746 ++++
+ drivers/staging/fsl-dpaa2/rtc/dprtc.h | 172 +
+ drivers/staging/fsl-dpaa2/rtc/rtc.c | 242 ++
+ include/linux/filter.h | 3 +
+ 46 files changed, 22780 insertions(+)
create mode 100644 drivers/soc/fsl/ls2-console/Kconfig
create mode 100644 drivers/soc/fsl/ls2-console/Makefile
create mode 100644 drivers/soc/fsl/ls2-console/ls2-console.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/TODO
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/net.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
- create mode 100644 drivers/staging/fsl-dpaa2/ethsw/switch.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+ create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
create mode 100644 drivers/staging/fsl-dpaa2/evb/Kconfig
create mode 100644 drivers/staging/fsl-dpaa2/evb/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
+MODULE_DESCRIPTION("Freescale LS2 console driver");
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
-@@ -0,0 +1,11 @@
+@@ -0,0 +1,12 @@
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
++fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
+Hardware specific statistics for the network interface as well as some
+non-standard driver stats can be consulted through ethtool -S option.
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
-@@ -0,0 +1,352 @@
-+
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
++++ b/drivers/staging/fsl-dpaa2/ethernet/TODO
+@@ -0,0 +1,18 @@
++* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
++ the DPMAC objects and their link to DPNIs are handled by MC internally
++ and all PHYs are seen as fixed-link
++* add more debug support: decide how to expose detailed debug statistics,
++ add ingress error queue support
++* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
++ be kept in sync with binary interface changes in MC
++* refine README file
++* cleanup
++
++NOTE: None of the above is must-have before getting the DPAA2 Ethernet driver
++out of staging. The main requirement for that is to have the drivers it
++depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
++respectively.
++
++ Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
++ ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
++ linux-kernel@vger.kernel.org
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
+@@ -0,0 +1,1253 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <linux/init.h>
+#include <linux/module.h>
-+#include <linux/debugfs.h>
++
++#include "dpaa2-eth-ceetm.h"
+#include "dpaa2-eth.h"
-+#include "dpaa2-eth-debugfs.h"
+
-+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
++#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
++/* Conversion formula from userspace passed Bps to expected Mbit */
++#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
+
-+static struct dentry *dpaa2_dbg_root;
++static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
++ [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
++ [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
++};
+
-+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct rtnl_link_stats64 *stats;
-+ struct dpaa2_eth_drv_stats *extras;
-+ int i;
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
+
-+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n",
-+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
-+ "Tx SG", "Enq busy");
++static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg,
++ int coupled, int ch_id)
++{
++ int err = 0;
+
-+ for_each_online_cpu(i) {
-+ stats = per_cpu_ptr(priv->percpu_stats, i);
-+ extras = per_cpu_ptr(priv->percpu_extras, i);
-+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
-+ i,
-+ stats->rx_packets,
-+ stats->rx_errors,
-+ extras->rx_sg_frames,
-+ stats->tx_packets,
-+ stats->tx_errors,
-+ extras->tx_conf_frames,
-+ extras->tx_sg_frames,
-+ extras->tx_portal_busy);
-+ }
++ netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
++ ch_id, scfg->rate_limit);
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
++ ecfg, coupled);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
+
-+ return 0;
++ return err;
+}
+
-+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
++static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
++ int ch_id)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_cpu_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
++ struct dpni_tx_shaping_cfg cfg = { 0 };
+
-+ return err;
++ return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
+}
+
-+static const struct file_operations dpaa2_dbg_cpu_ops = {
-+ .open = dpaa2_dbg_cpu_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
-+
-+static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++static inline int
++dpaa2_eth_update_shaping_cfg(struct net_device *dev,
++ struct dpaa2_ceetm_shaping_cfg cfg,
++ struct dpni_tx_shaping_cfg *scfg,
++ struct dpni_tx_shaping_cfg *ecfg)
+{
-+ switch (fq->type) {
-+ case DPAA2_RX_FQ:
-+ return "Rx";
-+ case DPAA2_TX_CONF_FQ:
-+ return "Tx conf";
-+ case DPAA2_RX_ERR_FQ:
-+ return "Rx err";
-+ default:
-+ return "N/A";
++ scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
++ ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
++
++ if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Committed burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
+ }
-+}
+
-+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_fq *fq;
-+ u32 fcnt, bcnt;
-+ int i, err;
++ scfg->max_burst_size = cfg.cbs;
+
-+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
-+ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
-+ "Pending frames", "Congestion");
++ if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
++ netdev_err(dev, "Excess burst size must be under %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
-+ if (err)
-+ fcnt = 0;
++ ecfg->max_burst_size = cfg.ebs;
+
-+ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
-+ fq->fqid,
-+ fq->target_cpu,
-+ fq->tc,
-+ fq_type_to_str(fq),
-+ fq->stats.frames,
-+ fcnt,
-+ fq->stats.congestion_entry);
++ if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
++ netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
++ return -EINVAL;
+ }
+
+ return 0;
+}
+
-+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
++enum update_tx_prio {
++ DPAA2_ETH_ADD_CQ,
++ DPAA2_ETH_DEL_CQ,
++};
++
++/* Normalize weights based on max passed value */
++static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_ceetm_class *cl;
++ u32 qpri;
++ u16 weight_max = 0, increment;
++ int i;
+
-+ err = single_open(file, dpaa2_dbg_fqs_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
++ /* Check the boundaries of the provided values */
++ for (i = 0; i < priv->clhash.hashsize; i++)
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ weight_max = (weight_max == 0 ? cl->prio.weight :
++ (weight_max < cl->prio.weight ?
++ cl->prio.weight : weight_max));
+
-+ return err;
-+}
++ /* If there are no elements, there's nothing to do */
++ if (weight_max == 0)
++ return 0;
+
-+static const struct file_operations dpaa2_dbg_fq_ops = {
-+ .open = dpaa2_dbg_fqs_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
++ increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
++ weight_max;
+
-+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
-+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (cl->prio.mode == STRICT_PRIORITY)
++ continue;
+
-+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
-+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
-+ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
-+ "Avg frm/CDAN", "Buf count");
++ qpri = cl->prio.qpri;
++ sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
-+ ch->ch_id,
-+ ch->nctx.desired_cpu,
-+ ch->stats.dequeue_portal_busy,
-+ ch->stats.frames,
-+ ch->stats.cdan,
-+ ch->stats.frames / ch->stats.cdan,
-+ ch->buf_count);
++ sched_cfg->delta_bandwidth =
++ DPAA2_CEETM_MIN_WEIGHT +
++ (cl->prio.weight * increment);
++
++ pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
++ __func__, qpri, sched_cfg->delta_bandwidth);
++ }
+ }
+
+ return 0;
+}
+
-+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
++static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
++ struct dpaa2_ceetm_class *cl,
++ enum update_tx_prio type)
+{
-+ int err;
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-+
-+ err = single_open(file, dpaa2_dbg_ch_show, priv);
-+ if (err < 0)
-+ netdev_err(priv->net_dev, "single_open() failed\n");
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpni_taildrop td = {0};
++ u8 ch_id = 0, tc_id = 0;
++ u32 qpri = 0;
++ int err = 0;
+
-+ return err;
-+}
++ qpri = cl->prio.qpri;
++ tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
+
-+static const struct file_operations dpaa2_dbg_ch_ops = {
-+ .open = dpaa2_dbg_ch_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = single_release,
-+};
++ switch (type) {
++ case DPAA2_ETH_ADD_CQ:
++ /* Disable congestion notifications */
++ notif_cfg.threshold_entry = 0;
++ notif_cfg.threshold_exit = 0;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ ¬if_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ /* Enable taildrop */
++ td.enable = 1;
++ td.units = DPNI_CONGESTION_UNIT_FRAMES;
++ td.threshold = DPAA2_CEETM_TD_THRESHOLD;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ break;
++ case DPAA2_ETH_DEL_CQ:
++ /* Disable taildrop */
++ td.enable = 0;
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
++ 0, &td);
++ if (err) {
++ netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
++ err);
++ return err;
++ }
++ /* Enable congestion notifications */
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ notif_cfg.message_ctx = (u64)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_TX, tc_id,
++ ¬if_cfg);
++ if (err) {
++ netdev_err(priv->net_dev, "Error enabling congestion notifications %d\n",
++ err);
++ return err;
++ }
++ break;
++ }
+
-+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
-+ size_t count, loff_t *offset)
-+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_eth_fq *fq;
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ /* We can zero out the structure in the tx_prio_conf array */
++ if (type == DPAA2_ETH_DEL_CQ) {
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
++ memset(sched_cfg, 0, sizeof(*sched_cfg));
++ }
+
-+ for_each_online_cpu(i) {
-+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
-+ memset(percpu_stats, 0, sizeof(*percpu_stats));
++ /* Normalize priorities */
++ err = dpaa2_eth_normalize_tx_prio(sch);
+
-+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
-+ memset(percpu_extras, 0, sizeof(*percpu_extras));
-+ }
++ /* Debug print goes here */
++ print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
++ &sch->prio.tx_prio_cfg,
++ sizeof(sch->prio.tx_prio_cfg), 0);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ memset(&fq->stats, 0, sizeof(fq->stats));
-+ }
++ /* Call dpni_set_tx_priorities for the entire prio qdisc */
++ err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
++ &sch->prio.tx_prio_cfg);
++ if (err)
++ netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
++ err);
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ memset(&ch->stats, 0, sizeof(ch->stats));
-+ }
++ return err;
++}
+
-+ return count;
++static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = true;
+}
+
-+static const struct file_operations dpaa2_dbg_reset_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_write,
-+};
++static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
++{
++ priv->ceetm_en = false;
++}
+
-+static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
-+ const char __user *buf,
-+ size_t count, loff_t *offset)
++/* Find class in qdisc hash table using given handle */
++static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
++ struct Qdisc *sch)
+{
-+ struct dpaa2_eth_priv *priv = file->private_data;
-+ int err;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc_class_common *clc;
+
-+ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
-+ if (err)
-+ netdev_err(priv->net_dev,
-+ "dpni_reset_statistics() failed %d\n", err);
++ pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
++ __func__, handle, sch->handle);
+
-+ return count;
++ clc = qdisc_class_find(&priv->clhash, handle);
++ return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
+}
+
-+static const struct file_operations dpaa2_dbg_reset_mc_ops = {
-+ .open = simple_open,
-+ .write = dpaa2_dbg_reset_mc_write,
-+};
++/* Insert a class in the qdisc's class hash */
++static void dpaa2_ceetm_link_class(struct Qdisc *sch,
++ struct Qdisc_class_hash *clhash,
++ struct Qdisc_class_common *common)
++{
++ sch_tree_lock(sch);
++ qdisc_class_hash_insert(clhash, common);
++ sch_tree_unlock(sch);
++ qdisc_class_hash_grow(sch, clhash);
++}
+
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
++/* Destroy a ceetm class */
++static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
++ struct dpaa2_ceetm_class *cl)
+{
-+ if (!dpaa2_dbg_root)
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++
++ if (!cl)
+ return;
+
-+ /* Create a directory for the interface */
-+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
-+ dpaa2_dbg_root);
-+ if (!priv->dbg.dir) {
-+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
-+ return;
++ pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
++ __func__, cl->common.classid, sch->handle);
++
++ /* Recurse into child first */
++ if (cl->child) {
++ qdisc_destroy(cl->child);
++ cl->child = NULL;
+ }
+
-+ /* per-cpu stats file */
-+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_cpu_ops);
-+ if (!priv->dbg.cpu_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_cpu_stats;
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
++ netdev_err(dev, "Error resetting channel shaping\n");
++
++ break;
++
++ case CEETM_PRIO:
++ if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
++ netdev_err(dev, "Error resetting tx_priorities\n");
++
++ if (cl->prio.cstats)
++ free_percpu(cl->prio.cstats);
++
++ break;
+ }
+
-+ /* per-fq stats file */
-+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_fq_ops);
-+ if (!priv->dbg.fq_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_fq_stats;
++ tcf_destroy_chain(&cl->filter_list);
++ kfree(cl);
++}
++
++/* Destroy a ceetm qdisc */
++static void dpaa2_ceetm_destroy(struct Qdisc *sch)
++{
++ unsigned int i;
++ struct hlist_node *next;
++ struct dpaa2_ceetm_class *cl;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++
++ pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
++ __func__, sch->handle);
++
++ /* All filters need to be removed before destroying the classes */
++ tcf_destroy_chain(&priv->filter_list);
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
++ tcf_destroy_chain(&cl->filter_list);
+ }
+
-+ /* per-fq stats file */
-+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_ch_ops);
-+ if (!priv->dbg.fq_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_ch_stats;
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
++ common.hnode)
++ dpaa2_ceetm_cls_destroy(sch, cl);
+ }
+
-+ /* reset stats */
-+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
-+ priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_ops);
-+ if (!priv->dbg.reset_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_stats;
++ qdisc_class_hash_destroy(&priv->clhash);
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ dpaa2_eth_ceetm_disable(priv_eth);
++
++ if (priv->root.qstats)
++ free_percpu(priv->root.qstats);
++
++ if (!priv->root.qdiscs)
++ break;
++
++ /* Destroy the pfifo qdiscs in case they haven't been attached
++ * to the netdev queues yet.
++ */
++ for (i = 0; i < dev->num_tx_queues; i++)
++ if (priv->root.qdiscs[i])
++ qdisc_destroy(priv->root.qdiscs[i]);
++
++ kfree(priv->root.qdiscs);
++ break;
++
++ case CEETM_PRIO:
++ if (priv->prio.parent)
++ priv->prio.parent->child = NULL;
++ break;
+ }
++}
+
-+ /* reset MC stats */
-+ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
-+ 0222, priv->dbg.dir, priv,
-+ &dpaa2_dbg_reset_mc_ops);
-+ if (!priv->dbg.reset_mc_stats) {
-+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
-+ goto err_reset_mc_stats;
++static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
++{
++ struct Qdisc *qdisc;
++ unsigned int ntx, i;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_qopt qopt;
++ struct dpaa2_ceetm_qdisc_stats *qstats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ sch_tree_lock(sch);
++ memset(&qopt, 0, sizeof(qopt));
++ qopt.type = priv->type;
++ qopt.shaped = priv->shaped;
++
++ switch (priv->type) {
++ case CEETM_ROOT:
++ /* Gather statistics from the underlying pfifo qdiscs */
++ sch->q.qlen = 0;
++ memset(&sch->bstats, 0, sizeof(sch->bstats));
++ memset(&sch->qstats, 0, sizeof(sch->qstats));
++
++ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
++ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
++ sch->q.qlen += qdisc->q.qlen;
++ sch->bstats.bytes += qdisc->bstats.bytes;
++ sch->bstats.packets += qdisc->bstats.packets;
++ sch->qstats.qlen += qdisc->qstats.qlen;
++ sch->qstats.backlog += qdisc->qstats.backlog;
++ sch->qstats.drops += qdisc->qstats.drops;
++ sch->qstats.requeues += qdisc->qstats.requeues;
++ sch->qstats.overlimits += qdisc->qstats.overlimits;
++ }
++
++ for_each_online_cpu(i) {
++ qstats = per_cpu_ptr(priv->root.qstats, i);
++ sch->qstats.drops += qstats->drops;
++ }
++
++ break;
++
++ case CEETM_PRIO:
++ qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
++ qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
++ qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
++ break;
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ sch_tree_unlock(sch);
++ return -EINVAL;
+ }
+
-+ return;
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
+
-+err_reset_mc_stats:
-+ debugfs_remove(priv->dbg.reset_stats);
-+err_reset_stats:
-+ debugfs_remove(priv->dbg.ch_stats);
-+err_ch_stats:
-+ debugfs_remove(priv->dbg.fq_stats);
-+err_fq_stats:
-+ debugfs_remove(priv->dbg.cpu_stats);
-+err_cpu_stats:
-+ debugfs_remove(priv->dbg.dir);
++ sch_tree_unlock(sch);
++ return skb->len;
++
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
+}
+
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
++static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
+{
-+ debugfs_remove(priv->dbg.reset_mc_stats);
-+ debugfs_remove(priv->dbg.reset_stats);
-+ debugfs_remove(priv->dbg.fq_stats);
-+ debugfs_remove(priv->dbg.ch_stats);
-+ debugfs_remove(priv->dbg.cpu_stats);
-+ debugfs_remove(priv->dbg.dir);
++ /* TODO: Once LX2 support is added */
++ /* priv->shaped = parent_cl->shaped; */
++ priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
++ priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
++ priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
++
++ return 0;
+}
+
-+void dpaa2_eth_dbg_init(void)
++/* Edit a ceetm qdisc */
++static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
+{
-+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
-+ if (!dpaa2_dbg_root) {
-+ pr_err("DPAA2-ETH: debugfs create failed\n");
-+ return;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
+ }
+
-+ pr_info("DPAA2-ETH: debugfs created\n");
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
++
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
++
++ if (priv->type != qopt->type) {
++ pr_err("CEETM: qdisc %X is not of the provided type\n",
++ sch->handle);
++ return -EINVAL;
++ }
++
++ switch (priv->type) {
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ err = -EINVAL;
++ }
++
++ return err;
+}
+
-+void __exit dpaa2_eth_dbg_exit(void)
++/* Configure a root ceetm qdisc */
++static int dpaa2_ceetm_init_root(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
+{
-+ debugfs_remove(dpaa2_dbg_root);
-+}
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
-@@ -0,0 +1,60 @@
-+/* Copyright 2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct netdev_queue *dev_queue;
++ unsigned int i, parent_id;
++ struct Qdisc *qdisc;
++ int err;
+
-+#ifndef DPAA2_ETH_DEBUGFS_H
-+#define DPAA2_ETH_DEBUGFS_H
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+#include <linux/dcache.h>
++ /* Validate inputs */
++ if (sch->parent != TC_H_ROOT) {
++ pr_err("CEETM: a root ceetm qdisc can not be attached to a class\n");
++ tcf_destroy_chain(&priv->filter_list);
++ qdisc_class_hash_destroy(&priv->clhash);
++ return -EINVAL;
++ }
+
-+struct dpaa2_eth_priv;
++ /* Pre-allocate underlying pfifo qdiscs.
++ *
++ * We want to offload shaping and scheduling decisions to the hardware.
++ * The pfifo qdiscs will be attached to the netdev queues and will
++ * guide the traffic from the IP stack down to the driver with minimum
++ * interference.
++ *
++ * The CEETM qdiscs and classes will be crossed when the traffic
++ * reaches the driver.
++ */
++ priv->root.qdiscs = kcalloc(dev->num_tx_queues,
++ sizeof(priv->root.qdiscs[0]),
++ GFP_KERNEL);
++ if (!priv->root.qdiscs) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+struct dpaa2_debugfs {
-+ struct dentry *dir;
-+ struct dentry *fq_stats;
-+ struct dentry *ch_stats;
-+ struct dentry *cpu_stats;
-+ struct dentry *reset_stats;
-+ struct dentry *reset_mc_stats;
-+};
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ dev_queue = netdev_get_tx_queue(dev, i);
++ parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
++ TC_H_MIN(i + PFIFO_MIN_OFFSET));
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+void dpaa2_eth_dbg_init(void);
-+void dpaa2_eth_dbg_exit(void);
-+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
-+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
-+#else
-+static inline void dpaa2_eth_dbg_init(void) {}
-+static inline void dpaa2_eth_dbg_exit(void) {}
-+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
-+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
-+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
++ qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
++ parent_id);
++ if (!qdisc) {
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+#endif /* DPAA2_ETH_DEBUGFS_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
-@@ -0,0 +1,184 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++ priv->root.qdiscs[i] = qdisc;
++ qdisc->flags |= TCQ_F_ONETXQUEUE;
++ }
+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM dpaa2_eth
++ sch->flags |= TCQ_F_MQROOT;
+
-+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _DPAA2_ETH_TRACE_H
++ priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
++ if (!priv->root.qstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ err = -ENOMEM;
++ goto err_init_root;
++ }
+
-+#include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-+#include <linux/tracepoint.h>
++ dpaa2_eth_ceetm_enable(priv_eth);
++ return 0;
+
-+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
-+/* trace_printk format for raw buffer event class */
-+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
++err_init_root:
++ dpaa2_ceetm_destroy(sch);
++ return err;
++}
+
-+/* This is used to declare a class of events.
-+ * individual events of this type will be defined below.
-+ */
++/* Configure a prio ceetm qdisc */
++static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
++ struct dpaa2_ceetm_qdisc *priv,
++ struct dpaa2_ceetm_tc_qopt *qopt)
++{
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_class *parent_cl;
++ struct Qdisc *parent_qdisc;
++ int err;
+
-+/* Store details about a frame descriptor */
-+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, fd),
-+
-+ /* A structure containing the relevant information we want
-+ * to record. Declare name and type for each normal element,
-+ * name, type and size for arrays. Use __string for variable
-+ * length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(u64, fd_addr)
-+ __field(u32, fd_len)
-+ __field(u16, fd_offset)
-+ __string(name, netdev->name)
-+ ),
-+
-+ /* The function that assigns values to the above declared
-+ * fields
-+ */
-+ TP_fast_assign(
-+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
-+ __entry->fd_len = dpaa2_fd_get_len(fd);
-+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
-+ __assign_str(name, netdev->name);
-+ ),
++ if (sch->parent == TC_H_ROOT) {
++ pr_err("CEETM: a prio ceetm qdisc can not be root\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
+
-+ /* This is what gets printed when the trace event is
-+ * triggered.
-+ */
-+ TP_printk(TR_FMT,
-+ __get_str(name),
-+ __entry->fd_addr,
-+ __entry->fd_len,
-+ __entry->fd_offset)
-+);
++ parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
++ if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
+
-+/* Now declare events of the above type. Format is:
-+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
-+ */
++ /* Obtain the parent root ceetm_class */
++ parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
+
-+/* Tx (egress) fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ if (!parent_cl || parent_cl->type != CEETM_ROOT) {
++ pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
++ err = -EINVAL;
++ goto err_init_prio;
++ }
+
-+ TP_ARGS(netdev, fd)
-+);
++ priv->prio.parent = parent_cl;
++ parent_cl->child = sch;
+
-+/* Rx fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++ err = dpaa2_ceetm_change_prio(sch, priv, qopt);
+
-+ TP_ARGS(netdev, fd)
-+);
++ return 0;
+
-+/* Tx confirmation fd */
-+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
-+ TP_PROTO(struct net_device *netdev,
-+ const struct dpaa2_fd *fd),
++err_init_prio:
++ dpaa2_ceetm_destroy(sch);
++ return err;
++}
+
-+ TP_ARGS(netdev, fd)
-+);
++/* Configure a generic ceetm qdisc */
++static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
++ struct dpaa2_ceetm_tc_qopt *qopt;
++ int err;
+
-+/* Log data about raw buffers. Useful for tracing DPBP content. */
-+TRACE_EVENT(dpaa2_eth_buf_seed,
-+ /* Trace function prototype */
-+ TP_PROTO(struct net_device *netdev,
-+ /* virtual address and size */
-+ void *vaddr,
-+ size_t size,
-+ /* dma map address and size */
-+ dma_addr_t dma_addr,
-+ size_t map_size,
-+ /* buffer pool id, if relevant */
-+ u16 bpid),
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ /* Repeat argument list here */
-+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
++ if (!netif_is_multiqueue(dev))
++ return -EOPNOTSUPP;
+
-+ /* A structure containing the relevant information we want
-+ * to record. Declare name and type for each normal element,
-+ * name, type and size for arrays. Use __string for variable
-+ * length strings.
-+ */
-+ TP_STRUCT__entry(
-+ __field(void *, vaddr)
-+ __field(size_t, size)
-+ __field(dma_addr_t, dma_addr)
-+ __field(size_t, map_size)
-+ __field(u16, bpid)
-+ __string(name, netdev->name)
-+ ),
++ RCU_INIT_POINTER(priv->filter_list, NULL);
+
-+ /* The function that assigns values to the above declared
-+ * fields
-+ */
-+ TP_fast_assign(
-+ __entry->vaddr = vaddr;
-+ __entry->size = size;
-+ __entry->dma_addr = dma_addr;
-+ __entry->map_size = map_size;
-+ __entry->bpid = bpid;
-+ __assign_str(name, netdev->name);
-+ ),
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
++ __func__);
++ return -EINVAL;
++ }
+
-+ /* This is what gets printed when the trace event is
-+ * triggered.
-+ */
-+ TP_printk(TR_BUF_FMT,
-+ __get_str(name),
-+ __entry->vaddr,
-+ __entry->size,
-+ &__entry->dma_addr,
-+ __entry->map_size,
-+ __entry->bpid)
-+);
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return err;
++ }
+
-+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
-+ * The syntax is the same as for DECLARE_EVENT_CLASS().
-+ */
++ if (!tb[DPAA2_CEETM_TCA_QOPS]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
+
-+#endif /* _DPAA2_ETH_TRACE_H */
++ if (TC_H_MIN(sch->handle)) {
++ pr_err("CEETM: a qdisc should not have a minor\n");
++ return -EINVAL;
++ }
+
-+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
-+#undef TRACE_INCLUDE_PATH
-+#define TRACE_INCLUDE_PATH .
-+#undef TRACE_INCLUDE_FILE
-+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
-@@ -0,0 +1,3516 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/etherdevice.h>
-+#include <linux/of_net.h>
-+#include <linux/interrupt.h>
-+#include <linux/debugfs.h>
-+#include <linux/kthread.h>
-+#include <linux/msi.h>
-+#include <linux/net_tstamp.h>
-+#include <linux/iommu.h>
++ qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
+
-+#include "../../fsl-mc/include/dpbp.h"
-+#include "../../fsl-mc/include/dpcon.h"
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "dpaa2-eth.h"
-+#include "dpkg.h"
++ /* Initialize the class hash list. Each qdisc has its own class hash */
++ err = qdisc_class_hash_init(&priv->clhash);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
++ __func__);
++ return err;
++ }
+
-+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
-+ * using trace events only need to #include <trace/events/sched.h>
-+ */
-+#define CREATE_TRACE_POINTS
-+#include "dpaa2-eth-trace.h"
++ priv->type = qopt->type;
++ priv->shaped = qopt->shaped;
+
-+MODULE_LICENSE("Dual BSD/GPL");
-+MODULE_AUTHOR("Freescale Semiconductor, Inc");
-+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
++ switch (priv->type) {
++ case CEETM_ROOT:
++ err = dpaa2_ceetm_init_root(sch, priv, qopt);
++ break;
++ case CEETM_PRIO:
++ err = dpaa2_ceetm_init_prio(sch, priv, qopt);
++ break;
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
++ dpaa2_ceetm_destroy(sch);
++ err = -EINVAL;
++ }
+
-+const char dpaa2_eth_drv_version[] = "0.1";
++ return err;
++}
+
-+void *dpaa2_eth_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr)
++/* Attach the underlying pfifo qdiscs */
++static void dpaa2_ceetm_attach(struct Qdisc *sch)
+{
-+ phys_addr_t phys_addr;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct Qdisc *qdisc, *old_qdisc;
++ unsigned int i;
+
-+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
+
-+ return phys_to_virt(phys_addr);
++ for (i = 0; i < dev->num_tx_queues; i++) {
++ qdisc = priv->root.qdiscs[i];
++ old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
++ if (old_qdisc)
++ qdisc_destroy(old_qdisc);
++ }
++
++ /* Remove the references to the pfifo qdiscs since the kernel will
++ * destroy them when needed. No cleanup from our part is required from
++ * this point on.
++ */
++ kfree(priv->root.qdiscs);
++ priv->root.qdiscs = NULL;
+}
+
-+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
-+ u32 fd_status,
-+ struct sk_buff *skb)
++static unsigned long dpaa2_ceetm_cls_get(struct Qdisc *sch, u32 classid)
+{
-+ skb_checksum_none_assert(skb);
++ struct dpaa2_ceetm_class *cl;
+
-+ /* HW checksum validation is disabled, nothing to do here */
-+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
-+ return;
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, classid, sch->handle);
++ cl = dpaa2_ceetm_find(classid, sch);
+
-+ /* Read checksum validation bits */
-+ if (!((fd_status & DPAA2_FAS_L3CV) &&
-+ (fd_status & DPAA2_FAS_L4CV)))
-+ return;
++ if (cl)
++ cl->refcnt++;
+
-+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
-+ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ return (unsigned long)cl;
+}
+
-+/* Free a received FD.
-+ * Not to be used for Tx conf FDs or on any other paths.
-+ */
-+static void free_rx_fd(struct dpaa2_eth_priv *priv,
-+ const struct dpaa2_fd *fd,
-+ void *vaddr)
++static void dpaa2_ceetm_cls_put(struct Qdisc *sch, unsigned long arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ struct dpaa2_sg_entry *sgt;
-+ void *sg_vaddr;
-+ int i;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
++ cl->refcnt--;
+
-+ /* If single buffer frame, just free the data buffer */
-+ if (fd_format == dpaa2_fd_single)
-+ goto free_buf;
-+ else if (fd_format != dpaa2_fd_sg)
-+ /* we don't support any other format */
-+ return;
++ if (cl->refcnt == 0)
++ dpaa2_ceetm_cls_destroy(sch, cl);
++}
+
-+ /* For S/G frames, we first need to free all SG entries */
-+ sgt = vaddr + dpaa2_fd_get_offset(fd);
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
-+ addr = dpaa2_sg_get_addr(&sgt[i]);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
++static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
++ int err = 0;
+
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
++ cl->common.classid);
+
-+ put_page(virt_to_head_page(sg_vaddr));
++ if (!cl->shaped)
++ return 0;
+
-+ if (dpaa2_sg_is_final(&sgt[i]))
-+ break;
-+ }
++ if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
++ &scfg, &ecfg))
++ return -EINVAL;
+
-+free_buf:
-+ put_page(virt_to_head_page(vaddr));
++ err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
++ copt->shaping_cfg.coupled,
++ cl->root.ch_id);
++ if (err)
++ return err;
++
++ memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
++
++ return err;
+}
+
-+/* Build a linear skb based on a single-buffer frame descriptor */
-+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ void *fd_vaddr)
++static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
++ struct dpaa2_ceetm_tc_copt *copt,
++ struct net_device *dev)
+{
-+ struct sk_buff *skb = NULL;
-+ u16 fd_offset = dpaa2_fd_get_offset(fd);
-+ u32 fd_length = dpaa2_fd_get_len(fd);
++ struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
++ struct dpni_tx_schedule_cfg *sched_cfg;
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ int err;
+
-+ ch->buf_count--;
++ pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
++ __func__, cl->common.classid, copt->mode, copt->weight);
+
-+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
-+ if (unlikely(!skb))
-+ return NULL;
++ if (!cl->prio.cstats) {
++ cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
++ if (!cl->prio.cstats) {
++ pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
++ __func__);
++ return -ENOMEM;
++ }
++ }
+
-+ skb_reserve(skb, fd_offset);
-+ skb_put(skb, fd_length);
++ cl->prio.mode = copt->mode;
++ cl->prio.weight = copt->weight;
+
-+ return skb;
++ sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
++
++ switch (copt->mode) {
++ case STRICT_PRIORITY:
++ sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
++ break;
++ case WEIGHTED_A:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
++ break;
++ case WEIGHTED_B:
++ sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
++ break;
++ }
++
++ err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
++
++ return err;
+}
+
-+/* Build a non linear (fragmented) skb based on a S/G table */
-+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ struct dpaa2_sg_entry *sgt)
++/* Add a new ceetm class */
++static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
++ struct dpaa2_ceetm_tc_copt *copt,
++ unsigned long *arg)
+{
-+ struct sk_buff *skb = NULL;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *sg_vaddr;
-+ dma_addr_t sg_addr;
-+ u16 sg_offset;
-+ u32 sg_length;
-+ struct page *page, *head_page;
-+ int page_offset;
-+ int i;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ struct dpaa2_ceetm_class *cl;
++ int err;
+
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
-+ struct dpaa2_sg_entry *sge = &sgt[i];
++ if (copt->type == CEETM_ROOT &&
++ priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
++ pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
++ dpaa2_eth_ch_count(priv_eth),
++ dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
++ }
+
-+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
-+ * but this is the only format we may receive from HW anyway
-+ */
++ if (copt->type == CEETM_PRIO &&
++ priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
++ pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
++ dpaa2_eth_tc_count(priv_eth),
++ dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
++ return -EINVAL;
++ }
+
-+ /* Get the address and length from the S/G entry */
-+ sg_addr = dpaa2_sg_get_addr(sge);
-+ sg_vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, sg_addr);
-+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
++ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
++ if (!cl)
++ return -ENOMEM;
+
-+ sg_length = dpaa2_sg_get_len(sge);
++ RCU_INIT_POINTER(cl->filter_list, NULL);
+
-+ if (i == 0) {
-+ /* We build the skb around the first data buffer */
-+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
-+ if (unlikely(!skb))
-+ goto err_build;
++ cl->common.classid = classid;
++ cl->refcnt = 1;
++ cl->parent = sch;
++ cl->child = NULL;
+
-+ sg_offset = dpaa2_sg_get_offset(sge);
-+ skb_reserve(skb, sg_offset);
-+ skb_put(skb, sg_length);
-+ } else {
-+ /* Rest of the data buffers are stored as skb frags */
-+ page = virt_to_page(sg_vaddr);
-+ head_page = virt_to_head_page(sg_vaddr);
++ /* Add class handle in Qdisc */
++ dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
+
-+ /* Offset in page (which may be compound).
-+ * Data in subsequent SG entries is stored from the
-+ * beginning of the buffer, so we don't need to add the
-+ * sg_offset.
-+ */
-+ page_offset = ((unsigned long)sg_vaddr &
-+ (PAGE_SIZE - 1)) +
-+ (page_address(page) - page_address(head_page));
++ cl->shaped = copt->shaped;
++ cl->type = copt->type;
+
-+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
-+ sg_length, DPAA2_ETH_RX_BUF_SIZE);
-+ }
++ /* Claim a CEETM channel / tc - DPAA2. will assume transition from
++ * classid to qdid/qpri, starting from qdid / qpri 0
++ */
++ switch (copt->type) {
++ case CEETM_ROOT:
++ cl->root.ch_id = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ break;
++ case CEETM_PRIO:
++ cl->prio.qpri = classid - sch->handle - 1;
++ err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++ break;
++ default:
++ err = -EINVAL;
++ break;
++ }
+
-+ if (dpaa2_sg_is_final(sge))
-+ break;
++ if (err) {
++ pr_err("%s: Unable to set new %s class\n", __func__,
++ (copt->type == CEETM_ROOT ? "root" : "prio"));
++ goto out_free;
+ }
+
-+ /* Count all data buffers + SG table buffer */
-+ ch->buf_count -= i + 2;
++ switch (copt->type) {
++ case CEETM_ROOT:
++ pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
++ __func__, classid, cl->root.ch_id);
++ break;
++ case CEETM_PRIO:
++ pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
++ __func__, classid, cl->prio.qpri);
++ break;
++ }
+
-+ return skb;
++ *arg = (unsigned long)cl;
++ return 0;
+
-+err_build:
-+ /* We still need to subtract the buffers used by this FD from our
-+ * software counter
-+ */
-+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++)
-+ if (dpaa2_sg_is_final(&sgt[i]))
-+ break;
-+ ch->buf_count -= i + 2;
++out_free:
++ kfree(cl);
++ return err;
++}
+
-+ return NULL;
++/* Add or configure a ceetm class */
++static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
++ struct nlattr **tca, unsigned long *arg)
++{
++ struct dpaa2_ceetm_qdisc *priv;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
++ struct nlattr *opt = tca[TCA_OPTIONS];
++ struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
++ struct dpaa2_ceetm_tc_copt *copt;
++ struct net_device *dev = qdisc_dev(sch);
++ int err;
++
++ pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
++ __func__, classid, sch->handle);
++
++ if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
++ return -EINVAL;
++ }
++
++ priv = qdisc_priv(sch);
++
++ if (!opt) {
++ pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
++ dpaa2_ceetm_policy);
++ if (err < 0) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "nla_parse_nested");
++ return -EINVAL;
++ }
++
++ if (!tb[DPAA2_CEETM_TCA_COPT]) {
++ pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
++ "tb");
++ return -EINVAL;
++ }
++
++ copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
++
++ /* Configure an existing ceetm class */
++ if (cl) {
++ if (copt->type != cl->type) {
++ pr_err("CEETM: class %X is not of the provided type\n",
++ cl->common.classid);
++ return -EINVAL;
++ }
++
++ switch (copt->type) {
++ case CEETM_ROOT:
++ return dpaa2_ceetm_cls_change_root(cl, copt, dev);
++ case CEETM_PRIO:
++ return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
++
++ default:
++ pr_err(KBUILD_BASENAME " : %s : invalid class\n",
++ __func__);
++ return -EINVAL;
++ }
++ }
++
++ return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
+}
+
-+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
++static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *vaddr;
-+ int i;
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl;
++ unsigned int i;
+
-+ for (i = 0; i < count; i++) {
-+ /* Same logic as on regular Rx path */
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, buf_array[i]);
-+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+ put_page(virt_to_head_page(vaddr));
++ pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
++
++ if (arg->stop)
++ return;
++
++ for (i = 0; i < priv->clhash.hashsize; i++) {
++ hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
++ if (arg->count < arg->skip) {
++ arg->count++;
++ continue;
++ }
++ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
++ arg->stop = 1;
++ return;
++ }
++ arg->count++;
++ }
+ }
+}
+
-+/* Main Rx frame processing routine */
-+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi,
-+ u16 queue_id)
++static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
++ struct sk_buff *skb, struct tcmsg *tcm)
+{
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ void *vaddr;
-+ struct sk_buff *skb;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpaa2_fas *fas;
-+ void *buf_data;
-+ u32 status = 0;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct nlattr *nest;
++ struct dpaa2_ceetm_tc_copt copt;
+
-+ /* Tracing point */
-+ trace_dpaa2_rx_fd(priv->net_dev, fd);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ sch_tree_lock(sch);
+
-+ /* HWA - FAS, timestamp */
-+ fas = dpaa2_eth_get_fas(vaddr);
-+ prefetch(fas);
-+ /* data / SG table */
-+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
-+ prefetch(buf_data);
++ tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
++ tcm->tcm_handle = cl->common.classid;
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ memset(&copt, 0, sizeof(copt));
+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
-+ skb = build_linear_skb(priv, ch, fd, vaddr);
-+ break;
-+ case dpaa2_fd_sg:
-+ skb = build_frag_skb(priv, ch, buf_data);
-+ put_page(virt_to_head_page(vaddr));
-+ percpu_extras->rx_sg_frames++;
-+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
-+ break;
-+ default:
-+ /* We don't support any other format */
-+ goto err_frame_format;
-+ }
++ copt.shaped = cl->shaped;
++ copt.type = cl->type;
+
-+ if (unlikely(!skb))
-+ goto err_build_skb;
++ switch (cl->type) {
++ case CEETM_ROOT:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
+
-+ prefetch(skb->data);
++ memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
++ sizeof(struct dpaa2_ceetm_shaping_cfg));
+
-+ /* Get the timestamp value */
-+ if (priv->ts_rx_en) {
-+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
-+ u64 *ns = (u64 *)dpaa2_eth_get_ts(vaddr);
++ break;
+
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
-+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
-+ }
++ case CEETM_PRIO:
++ if (cl->child)
++ tcm->tcm_info = cl->child->handle;
+
-+ /* Check if we need to validate the L4 csum */
-+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
-+ status = le32_to_cpu(fas->status);
-+ validate_rx_csum(priv, status, skb);
-+ }
++ copt.mode = cl->prio.mode;
++ copt.weight = cl->prio.weight;
+
-+ skb->protocol = eth_type_trans(skb, priv->net_dev);
++ break;
++ }
+
-+ /* Record Rx queue - this will be used when picking a Tx queue to
-+ * forward the frames. We're keeping flow affinity through the
-+ * network stack.
-+ */
-+ skb_record_rx_queue(skb, queue_id);
++ nest = nla_nest_start(skb, TCA_OPTIONS);
++ if (!nest)
++ goto nla_put_failure;
++ if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
++ goto nla_put_failure;
++ nla_nest_end(skb, nest);
++ sch_tree_unlock(sch);
++ return skb->len;
+
-+ percpu_stats->rx_packets++;
-+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++nla_put_failure:
++ sch_tree_unlock(sch);
++ nla_nest_cancel(skb, nest);
++ return -EMSGSIZE;
++}
+
-+ napi_gro_receive(napi, skb);
++static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ return;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+err_build_skb:
-+ free_rx_fd(priv, fd, vaddr);
-+err_frame_format:
-+ percpu_stats->rx_dropped++;
++ sch_tree_lock(sch);
++ qdisc_class_hash_remove(&priv->clhash, &cl->common);
++ cl->refcnt--;
++ WARN_ON(cl->refcnt == 0);
++ sch_tree_unlock(sch);
++ return 0;
+}
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+/* Processing of Rx frames received on the error FQ
-+ * We check and print the error bits and then free the frame
-+ */
-+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id __always_unused)
++/* Get the class' child qdisc, if any */
++static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
-+ void *vaddr;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_fas *fas;
-+ u32 status = 0;
-+ bool check_fas_errors = false;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ vaddr = dpaa2_eth_iova_to_virt(priv->iommu_domain, addr);
-+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
++ __func__, cl->common.classid, sch->handle);
+
-+ /* check frame errors in the FD field */
-+ if (fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_RX_ERR_MASK);
++ switch (cl->type) {
++ case CEETM_ROOT:
++ case CEETM_PRIO:
++ return cl->child;
+ }
+
-+ /* check frame errors in the FAS field */
-+ if (check_fas_errors) {
-+ fas = dpaa2_eth_get_fas(vaddr);
-+ status = le32_to_cpu(fas->status);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
-+ status & DPAA2_FAS_RX_ERR_MASK);
++ return NULL;
++}
++
++static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
++ struct Qdisc *new, struct Qdisc **old)
++{
++ if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
++ pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
++ return -EOPNOTSUPP;
+ }
-+ free_rx_fd(priv, fd, vaddr);
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_stats->rx_errors++;
++ return 0;
+}
-+#endif
+
-+/* Consume all frames pull-dequeued into the store. This is the simplest way to
-+ * make sure we don't accidentally issue another volatile dequeue which would
-+ * overwrite (leak) frames already in the store.
-+ *
-+ * The number of frames is returned using the last 2 output arguments,
-+ * separately for Rx and Tx confirmations.
-+ *
-+ * Observance of NAPI budget is not our concern, leaving that to the caller.
-+ */
-+static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
-+ int *tx_conf_cleaned)
++static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
++ struct gnet_dump *d)
+{
-+ struct dpaa2_eth_priv *priv = ch->priv;
-+ struct dpaa2_eth_fq *fq = NULL;
-+ struct dpaa2_dq *dq;
-+ const struct dpaa2_fd *fd;
-+ int cleaned = 0;
-+ int is_last;
-+
-+ do {
-+ dq = dpaa2_io_store_next(ch->store, &is_last);
-+ if (unlikely(!dq)) {
-+ /* If we're here, we *must* have placed a
-+ * volatile dequeue comnmand, so keep reading through
-+ * the store until we get some sort of valid response
-+ * token (either a valid frame or an "empty dequeue")
-+ */
-+ continue;
-+ }
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
++ struct gnet_stats_basic_packed tmp_bstats;
++ struct dpaa2_ceetm_tc_xstats xstats;
++ union dpni_statistics dpni_stats;
++ struct net_device *dev = qdisc_dev(sch);
++ struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
++ u8 ch_id = 0;
++ int err;
+
-+ fd = dpaa2_dq_fd(dq);
++ memset(&xstats, 0, sizeof(xstats));
++ memset(&tmp_bstats, 0, sizeof(tmp_bstats));
+
-+ /* prefetch the frame descriptor */
-+ prefetch(fd);
++ if (cl->type == CEETM_ROOT)
++ return 0;
+
-+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
-+ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
-+ cleaned++;
-+ } while (!is_last);
++ err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
++ DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
++ &dpni_stats);
++ if (err)
++ netdev_warn(dev, "dpni_get_stats(%d) failed - %d\n", 3, err);
+
-+ if (!cleaned)
-+ return false;
++ xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
++ xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
++ xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
++ xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
+
-+ /* All frames brought in store by a volatile dequeue
-+ * come from the same queue
-+ */
-+ if (fq->type == DPAA2_TX_CONF_FQ)
-+ *tx_conf_cleaned += cleaned;
-+ else
-+ *rx_cleaned += cleaned;
++ return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
++}
+
-+ fq->stats.frames += cleaned;
-+ ch->stats.frames += cleaned;
++static struct tcf_proto __rcu **dpaa2_ceetm_tcf_chain(struct Qdisc *sch,
++ unsigned long arg)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ return true;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return cl ? &cl->filter_list : &priv->filter_list;
+}
+
-+/* Configure the egress frame annotation for timestamp update */
-+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
++static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
++ unsigned long parent,
++ u32 classid)
+{
-+ struct dpaa2_faead *faead;
-+ u32 ctrl;
-+ u32 frc;
++ struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
+
-+ /* Mark the egress frame annotation area as valid */
-+ frc = dpaa2_fd_get_frc(fd);
-+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
-+
-+ /* enable UPD (update prepanded data) bit in FAEAD field of
-+ * hardware frame annotation area
-+ */
-+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
-+ faead = dpaa2_eth_get_faead(buf_start);
-+ faead->ctrl = cpu_to_le32(ctrl);
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++ return (unsigned long)cl;
+}
+
-+/* Create a frame descriptor based on a fragmented skb */
-+static int build_sg_fd(struct dpaa2_eth_priv *priv,
-+ struct sk_buff *skb,
-+ struct dpaa2_fd *fd)
++static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ void *sgt_buf = NULL;
-+ dma_addr_t addr;
-+ int nr_frags = skb_shinfo(skb)->nr_frags;
-+ struct dpaa2_sg_entry *sgt;
-+ int i, err;
-+ int sgt_buf_size;
-+ struct scatterlist *scl, *crt_scl;
-+ int num_sg;
-+ int num_dma_bufs;
-+ struct dpaa2_fas *fas;
-+ struct dpaa2_eth_swa *swa;
++ struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
+
-+ /* Create and map scatterlist.
-+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
-+ * to go beyond nr_frags+1.
-+ * Note: We don't support chained scatterlists
-+ */
-+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
-+ return -EINVAL;
++ pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
++ cl ? cl->common.classid : 0, sch->handle);
++}
+
-+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
-+ if (unlikely(!scl))
-+ return -ENOMEM;
++const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
++ .graft = dpaa2_ceetm_cls_graft,
++ .leaf = dpaa2_ceetm_cls_leaf,
++ .get = dpaa2_ceetm_cls_get,
++ .put = dpaa2_ceetm_cls_put,
++ .change = dpaa2_ceetm_cls_change,
++ .delete = dpaa2_ceetm_cls_delete,
++ .walk = dpaa2_ceetm_cls_walk,
++ .tcf_chain = dpaa2_ceetm_tcf_chain,
++ .bind_tcf = dpaa2_ceetm_tcf_bind,
++ .unbind_tcf = dpaa2_ceetm_tcf_unbind,
++ .dump = dpaa2_ceetm_cls_dump,
++ .dump_stats = dpaa2_ceetm_cls_dump_stats,
++};
+
-+ sg_init_table(scl, nr_frags + 1);
-+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
-+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+ if (unlikely(!num_dma_bufs)) {
-+ err = -ENOMEM;
-+ goto dma_map_sg_failed;
-+ }
++struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
++ .id = "ceetm",
++ .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
++ .cl_ops = &dpaa2_ceetm_cls_ops,
++ .init = dpaa2_ceetm_init,
++ .destroy = dpaa2_ceetm_destroy,
++ .change = dpaa2_ceetm_change,
++ .dump = dpaa2_ceetm_dump,
++ .attach = dpaa2_ceetm_attach,
++ .owner = THIS_MODULE,
++};
+
-+ /* Prepare the HW SGT structure */
-+ sgt_buf_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
-+ if (unlikely(!sgt_buf)) {
-+ err = -ENOMEM;
-+ goto sgt_buf_alloc_failed;
++/* Run the filters and classifiers attached to the qdisc on the provided skb */
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
++ struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
++ struct dpaa2_ceetm_class *cl = NULL;
++ struct tcf_result res;
++ struct tcf_proto *tcf;
++ int result;
++
++ tcf = rcu_dereference_bh(priv->filter_list);
++ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
++#ifdef CONFIG_NET_CLS_ACT
++ switch (result) {
++ case TC_ACT_QUEUED:
++ case TC_ACT_STOLEN:
++ case TC_ACT_SHOT:
++ /* No valid class found due to action */
++ return -1;
++ }
++#endif
++ cl = (void *)res.class;
++ if (!cl) {
++ /* The filter leads to the qdisc */
++ if (res.classid == sch->handle)
++ return 0;
++
++ cl = dpaa2_ceetm_find(res.classid, sch);
++ /* The filter leads to an invalid class */
++ if (!cl)
++ break;
++ }
++
++ /* The class might have its own filters attached */
++ tcf = rcu_dereference_bh(cl->filter_list);
+ }
-+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
-+ */
-+ fas = dpaa2_eth_get_fas(sgt_buf);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++ /* No valid class found */
++ if (!cl)
++ return 0;
+
-+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
++ switch (cl->type) {
++ case CEETM_ROOT:
++ *qdid = cl->root.ch_id;
+
-+ /* Fill in the HW SGT structure.
-+ *
-+ * sgt_buf is zeroed out, so the following fields are implicit
-+ * in all sgt entries:
-+ * - offset is 0
-+ * - format is 'dpaa2_sg_single'
-+ */
-+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
-+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
-+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
-+ }
-+ dpaa2_sg_set_final(&sgt[i - 1], true);
++ /* The root class does not have a child prio qdisc */
++ if (!cl->child)
++ return 0;
+
-+ /* Store the skb backpointer in the SGT buffer.
-+ * Fit the scatterlist and the number of buffers alongside the
-+ * skb backpointer in the software annotation area. We'll need
-+ * all of them on Tx Conf.
-+ */
-+ swa = (struct dpaa2_eth_swa *)sgt_buf;
-+ swa->skb = skb;
-+ swa->scl = scl;
-+ swa->num_sg = num_sg;
-+ swa->num_dma_bufs = num_dma_bufs;
++ /* Run the prio qdisc classifiers */
++ return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
+
-+ /* Separately map the SGT buffer */
-+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr))) {
-+ err = -ENOMEM;
-+ goto dma_map_single_failed;
++ case CEETM_PRIO:
++ *qpri = cl->prio.qpri;
++ break;
+ }
-+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
-+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
-+ dpaa2_fd_set_addr(fd, addr);
-+ dpaa2_fd_set_len(fd, skb->len);
+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
++ return 0;
++}
++
++int __init dpaa2_ceetm_register(void)
++{
++ int err = 0;
+
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, sgt_buf);
++ pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
+
-+ return 0;
++ err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
++ if (unlikely(err))
++ pr_err(KBUILD_MODNAME
++ ": %s:%hu:%s(): register_qdisc() = %d\n",
++ KBUILD_BASENAME ".c", __LINE__, __func__, err);
+
-+dma_map_single_failed:
-+ kfree(sgt_buf);
-+sgt_buf_alloc_failed:
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+dma_map_sg_failed:
-+ kfree(scl);
+ return err;
+}
+
-+/* Create a frame descriptor based on a linear skb */
-+static int build_single_fd(struct dpaa2_eth_priv *priv,
-+ struct sk_buff *skb,
-+ struct dpaa2_fd *fd)
++void __exit dpaa2_ceetm_unregister(void)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ u8 *buffer_start;
-+ struct sk_buff **skbh;
-+ dma_addr_t addr;
-+ struct dpaa2_fas *fas;
++ pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
++ KBUILD_BASENAME ".c", __func__);
+
-+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
-+ DPAA2_ETH_TX_BUF_ALIGN,
-+ DPAA2_ETH_TX_BUF_ALIGN);
++ unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
++}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
+@@ -0,0 +1,182 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
++/*
++ * Copyright 2017 NXP
++ *
++ */
+
-+ /* PTA from egress side is passed as is to the confirmation side so
-+ * we need to clear some fields here in order to find consistent values
-+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
-+ * field from the hardware annotation area
-+ */
-+ fas = dpaa2_eth_get_fas(buffer_start);
-+ memset(fas, 0, DPAA2_FAS_SIZE);
++#ifndef __DPAA2_ETH_CEETM_H
++#define __DPAA2_ETH_CEETM_H
+
-+ /* Store a backpointer to the skb at the beginning of the buffer
-+ * (in the private data area) such that we can release it
-+ * on Tx confirm
-+ */
-+ skbh = (struct sk_buff **)buffer_start;
-+ *skbh = skb;
++#include <net/pkt_sched.h>
++#include <net/pkt_cls.h>
++#include <net/netlink.h>
+
-+ addr = dma_map_single(dev, buffer_start,
-+ skb_tail_pointer(skb) - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ return -ENOMEM;
++#include "dpaa2-eth.h"
+
-+ dpaa2_fd_set_addr(fd, addr);
-+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
-+ dpaa2_fd_set_len(fd, skb->len);
-+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
++ * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
++ * are reserved for the maximum 32 CEETM channels (majors and minors are in
++ * hex).
++ */
++#define PFIFO_MIN_OFFSET 0x21
+
-+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | FD_CTRL_PTA;
++#define DPAA2_CEETM_MIN_WEIGHT 100
++#define DPAA2_CEETM_MAX_WEIGHT 24800
+
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-+ enable_tx_tstamp(fd, buffer_start);
++#define DPAA2_CEETM_TD_THRESHOLD 1000
+
-+ return 0;
-+}
++enum wbfs_group_type {
++ WBFS_GRP_A,
++ WBFS_GRP_B,
++ WBFS_GRP_LARGE
++};
+
-+/* FD freeing routine on the Tx path
-+ *
-+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
-+ * back-pointed to is also freed.
-+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
-+ * dpaa2_eth_tx().
-+ * Optionally, return the frame annotation status word (FAS), which needs
-+ * to be checked if we're on the confirmation path.
-+ */
-+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-+ const struct dpaa2_fd *fd,
-+ u32 *status, bool in_napi)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ dma_addr_t fd_addr;
-+ struct sk_buff **skbh, *skb;
-+ unsigned char *buffer_start;
-+ int unmap_size;
-+ struct scatterlist *scl;
-+ int num_sg, num_dma_bufs;
-+ struct dpaa2_eth_swa *swa;
-+ u8 fd_format = dpaa2_fd_get_format(fd);
-+ struct dpaa2_fas *fas;
++enum {
++ DPAA2_CEETM_TCA_UNSPEC,
++ DPAA2_CEETM_TCA_COPT,
++ DPAA2_CEETM_TCA_QOPS,
++ DPAA2_CEETM_TCA_MAX,
++};
+
-+ fd_addr = dpaa2_fd_get_addr(fd);
-+ skbh = dpaa2_eth_iova_to_virt(priv->iommu_domain, fd_addr);
++/* CEETM configuration types */
++enum dpaa2_ceetm_type {
++ CEETM_ROOT = 1,
++ CEETM_PRIO,
++};
+
-+ /* HWA - FAS, timestamp (for Tx confirmation frames) */
-+ fas = dpaa2_eth_get_fas(skbh);
-+ prefetch(fas);
++enum {
++ STRICT_PRIORITY = 0,
++ WEIGHTED_A,
++ WEIGHTED_B,
++};
+
-+ switch (fd_format) {
-+ case dpaa2_fd_single:
-+ skb = *skbh;
-+ buffer_start = (unsigned char *)skbh;
-+ /* Accessing the skb buffer is safe before dma unmap, because
-+ * we didn't map the actual skb shell.
-+ */
-+ dma_unmap_single(dev, fd_addr,
-+ skb_tail_pointer(skb) - buffer_start,
-+ DMA_BIDIRECTIONAL);
-+ break;
-+ case dpaa2_fd_sg:
-+ swa = (struct dpaa2_eth_swa *)skbh;
-+ skb = swa->skb;
-+ scl = swa->scl;
-+ num_sg = swa->num_sg;
-+ num_dma_bufs = swa->num_dma_bufs;
++struct dpaa2_ceetm_shaping_cfg {
++ __u64 cir; /* committed information rate */
++ __u64 eir; /* excess information rate */
++ __u16 cbs; /* committed burst size */
++ __u16 ebs; /* excess burst size */
++ __u8 coupled; /* shaper coupling */
++};
+
-+ /* Unmap the scatterlist */
-+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
-+ kfree(scl);
++extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
+
-+ /* Unmap the SGT buffer */
-+ unmap_size = priv->tx_data_offset +
-+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
-+ break;
-+ default:
-+ /* Unsupported format, mark it as errored and give up */
-+ if (status)
-+ *status = ~0;
-+ return;
-+ }
++struct dpaa2_ceetm_class;
++struct dpaa2_ceetm_qdisc_stats;
++struct dpaa2_ceetm_class_stats;
+
-+ /* Get the timestamp value */
-+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-+ struct skb_shared_hwtstamps shhwtstamps;
-+ u64 *ns;
++/* corresponds to CEETM shaping at LNI level */
++struct dpaa2_root_q {
++ struct Qdisc **qdiscs;
++ struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
++};
+
-+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++/* corresponds to the number of priorities a channel serves */
++struct dpaa2_prio_q {
++ struct dpaa2_ceetm_class *parent;
++ struct dpni_tx_priorities_cfg tx_prio_cfg;
++};
+
-+ ns = (u64 *)dpaa2_eth_get_ts(skbh);
-+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
-+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
-+ skb_tstamp_tx(skb, &shhwtstamps);
-+ }
++struct dpaa2_ceetm_qdisc {
++ struct Qdisc_class_hash clhash;
++ struct tcf_proto *filter_list; /* qdisc attached filters */
+
-+ /* Read the status from the Frame Annotation after we unmap the first
-+ * buffer but before we free it. The caller function is responsible
-+ * for checking the status value.
-+ */
-+ if (status)
-+ *status = le32_to_cpu(fas->status);
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_q root;
++ struct dpaa2_prio_q prio;
++ };
++};
+
-+ /* Free SGT buffer kmalloc'ed on tx */
-+ if (fd_format != dpaa2_fd_single)
-+ kfree(skbh);
++/* CEETM Qdisc configuration parameters */
++struct dpaa2_ceetm_tc_qopt {
++ enum dpaa2_ceetm_type type;
++ __u16 shaped;
++ __u8 prio_group_A;
++ __u8 prio_group_B;
++ __u8 separate_groups;
++};
+
-+ /* Move on with skb release */
-+ napi_consume_skb(skb, in_napi);
-+}
++/* root class - corresponds to a channel */
++struct dpaa2_root_c {
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ u32 ch_id;
++};
+
-+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_fd fd;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ struct dpaa2_eth_fq *fq;
-+ u16 queue_mapping = skb_get_queue_mapping(skb);
-+ int err, i;
++/* prio class - corresponds to a strict priority queue (group) */
++struct dpaa2_prio_c {
++ struct dpaa2_ceetm_class_stats __percpu *cstats;
++ u32 qpri;
++ u8 mode;
++ u16 weight;
++};
+
-+ /* If we're congested, stop this tx queue; transmission of the
-+ * current skb happens regardless of congestion state
-+ */
-+ fq = &priv->fq[queue_mapping];
++struct dpaa2_ceetm_class {
++ struct Qdisc_class_common common;
++ int refcnt;
++ struct tcf_proto *filter_list; /* class attached filters */
++ struct Qdisc *parent;
++ struct Qdisc *child;
+
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
-+ netif_stop_subqueue(net_dev, queue_mapping);
-+ fq->stats.congestion_entry++;
-+ }
++ enum dpaa2_ceetm_type type; /* ROOT/PRIO */
++ bool shaped;
++ union {
++ struct dpaa2_root_c root;
++ struct dpaa2_prio_c prio;
++ };
++};
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++/* CEETM Class configuration parameters */
++struct dpaa2_ceetm_tc_copt {
++ enum dpaa2_ceetm_type type;
++ struct dpaa2_ceetm_shaping_cfg shaping_cfg;
++ __u16 shaped;
++ __u8 mode;
++ __u16 weight;
++};
+
-+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
-+ struct sk_buff *ns;
-+
-+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
-+ if (unlikely(!ns)) {
-+ percpu_stats->tx_dropped++;
-+ goto err_alloc_headroom;
-+ }
-+ dev_kfree_skb(skb);
-+ skb = ns;
-+ }
-+
-+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
-+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
-+ */
-+ skb = skb_unshare(skb, GFP_ATOMIC);
-+ if (unlikely(!skb)) {
-+ /* skb_unshare() has already freed the skb */
-+ percpu_stats->tx_dropped++;
-+ return NETDEV_TX_OK;
-+ }
-+
-+ /* Setup the FD fields */
-+ memset(&fd, 0, sizeof(fd));
-+
-+ if (skb_is_nonlinear(skb)) {
-+ err = build_sg_fd(priv, skb, &fd);
-+ percpu_extras->tx_sg_frames++;
-+ percpu_extras->tx_sg_bytes += skb->len;
-+ } else {
-+ err = build_single_fd(priv, skb, &fd);
-+ }
-+
-+ if (unlikely(err)) {
-+ percpu_stats->tx_dropped++;
-+ goto err_build_fd;
-+ }
++/* CEETM stats */
++struct dpaa2_ceetm_qdisc_stats {
++ __u32 drops;
++};
+
-+ /* Tracing point */
-+ trace_dpaa2_tx_fd(net_dev, &fd);
++struct dpaa2_ceetm_class_stats {
++ /* Software counters */
++ struct gnet_stats_basic_packed bstats;
++ __u32 ern_drop_count;
++ __u32 congested_count;
++};
+
-+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
-+ fq->tx_qdbin, &fd);
-+ /* TODO: This doesn't work. Check on simulator.
-+ * err = dpaa2_io_service_enqueue_fq(NULL,
-+ * priv->fq[0].fqid_tx, &fd);
-+ */
-+ if (err != -EBUSY)
-+ break;
-+ }
-+ percpu_extras->tx_portal_busy += i;
-+ if (unlikely(err < 0)) {
-+ percpu_stats->tx_errors++;
-+ /* Clean up everything, including freeing the skb */
-+ free_tx_fd(priv, &fd, NULL, false);
-+ } else {
-+ percpu_stats->tx_packets++;
-+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
-+ }
++struct dpaa2_ceetm_tc_xstats {
++ __u64 ceetm_dequeue_bytes;
++ __u64 ceetm_dequeue_frames;
++ __u64 ceetm_reject_bytes;
++ __u64 ceetm_reject_frames;
++};
+
-+ return NETDEV_TX_OK;
++#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
++int __init dpaa2_ceetm_register(void);
++void __exit dpaa2_ceetm_unregister(void);
++int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri);
++#else
++static inline int dpaa2_ceetm_register(void)
++{
++ return 0;
++}
+
-+err_build_fd:
-+err_alloc_headroom:
-+ dev_kfree_skb(skb);
++static inline void dpaa2_ceetm_unregister(void) {}
+
-+ return NETDEV_TX_OK;
++static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
++ int *qdid, int *qpri)
++{
++ return 0;
+}
++#endif
+
-+/* Tx confirmation frame processing routine */
-+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ const struct dpaa2_fd *fd,
-+ struct napi_struct *napi __always_unused,
-+ u16 queue_id)
++static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct rtnl_link_stats64 *percpu_stats;
-+ struct dpaa2_eth_drv_stats *percpu_extras;
-+ u32 status = 0;
-+ bool errors = !!(fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ bool check_fas_errors = false;
-+
-+ /* Tracing point */
-+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
-+
-+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
-+ percpu_extras->tx_conf_frames++;
-+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
-+
-+ /* Check congestion state and wake all queues if necessary */
-+ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
-+ dma_sync_single_for_cpu(dev, priv->cscn_dma,
-+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
-+ netif_tx_wake_all_queues(priv->net_dev);
-+ }
++ return priv->ceetm_en;
++}
+
-+ /* check frame errors in the FD field */
-+ if (unlikely(errors)) {
-+ check_fas_errors = !!(fd->simple.ctrl & FD_CTRL_FAERR) &&
-+ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FD err: %x08\n",
-+ fd->simple.ctrl & DPAA2_FD_TX_ERR_MASK);
-+ }
++#endif
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
+@@ -0,0 +1,357 @@
+
-+ free_tx_fd(priv, fd, check_fas_errors ? &status : NULL, true);
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+ /* if there are no errors, we're done */
-+ if (likely(!errors))
-+ return;
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-debugfs.h"
+
-+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
-+ /* Tx-conf logically pertains to the egress path. */
-+ percpu_stats->tx_errors++;
++#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
+
-+ if (net_ratelimit())
-+ netdev_dbg(priv->net_dev, "Tx frame FAS err: %x08\n",
-+ status & DPAA2_FAS_TX_ERR_MASK);
-+}
++static struct dentry *dpaa2_dbg_root;
+
-+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
++static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
+{
-+ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct rtnl_link_stats64 *stats;
++ struct dpaa2_eth_drv_stats *extras;
++ int i;
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_RX_L3_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
-+ return err;
-+ }
++ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
++ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
++ "Tx SG", "Tx realloc", "Enq busy");
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_RX_L4_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
-+ return err;
++ for_each_online_cpu(i) {
++ stats = per_cpu_ptr(priv->percpu_stats, i);
++ extras = per_cpu_ptr(priv->percpu_extras, i);
++ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
++ i,
++ stats->rx_packets,
++ stats->rx_errors,
++ extras->rx_sg_frames,
++ stats->tx_packets,
++ stats->tx_errors,
++ extras->tx_conf_frames,
++ extras->tx_sg_frames,
++ extras->tx_reallocs,
++ extras->tx_portal_busy);
+ }
+
+ return 0;
+}
+
-+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
++static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
+{
+ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_TX_L3_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L3_CSUM failed\n");
-+ return err;
-+ }
++ err = single_open(file, dpaa2_dbg_cpu_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
+
-+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
-+ DPNI_OFF_TX_L4_CSUM, enable);
-+ if (err) {
-+ netdev_err(priv->net_dev,
-+ "dpni_set_offload() DPNI_OFF_RX_L4_CSUM failed\n");
-+ return err;
-+ }
++ return err;
++}
+
-+ return 0;
++static const struct file_operations dpaa2_dbg_cpu_ops = {
++ .open = dpaa2_dbg_cpu_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
++{
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ return "Rx";
++ case DPAA2_TX_CONF_FQ:
++ return "Tx conf";
++ case DPAA2_RX_ERR_FQ:
++ return "Rx err";
++ default:
++ return "N/A";
++ }
+}
+
-+/* Perform a single release command to add buffers
-+ * to the specified buffer pool
-+ */
-+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
++static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-+ void *buf;
-+ dma_addr_t addr;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_fq *fq;
++ u32 fcnt, bcnt;
+ int i, err;
+
-+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
-+ /* Allocate buffer visible to WRIOP + skb shared info +
-+ * alignment padding.
-+ */
-+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE(priv));
-+ if (unlikely(!buf))
-+ goto err_alloc;
-+
-+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
++ seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
++ "VFQID", "CPU", "Traffic Class", "Type", "Frames",
++ "Pending frames", "Congestion");
+
-+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(dma_mapping_error(dev, addr)))
-+ goto err_map;
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
++ if (err)
++ fcnt = 0;
+
-+ buf_array[i] = addr;
++ /* A lot of queues, no use displaying zero traffic ones */
++ if (!fq->stats.frames && !fcnt)
++ continue;
+
-+ /* tracing point */
-+ trace_dpaa2_eth_buf_seed(priv->net_dev,
-+ buf, DPAA2_ETH_BUF_RAW_SIZE(priv),
-+ addr, DPAA2_ETH_RX_BUF_SIZE,
-+ bpid);
++ seq_printf(file, "%5d%16d%16d%16s%16llu%16u%16llu\n",
++ fq->fqid,
++ fq->target_cpu,
++ fq->tc,
++ fq_type_to_str(fq),
++ fq->stats.frames,
++ fcnt,
++ fq->stats.congestion_entry);
+ }
+
-+release_bufs:
-+ /* In case the portal is busy, retry until successful */
-+ while ((err = dpaa2_io_service_release(NULL, bpid,
-+ buf_array, i)) == -EBUSY)
-+ cpu_relax();
-+
-+ /* If release command failed, clean up and bail out; not much
-+ * else we can do about it
-+ */
-+ if (unlikely(err)) {
-+ free_bufs(priv, buf_array, i);
-+ return 0;
-+ }
-+
-+ return i;
-+
-+err_map:
-+ put_page(virt_to_head_page(buf));
-+err_alloc:
-+ /* If we managed to allocate at least some buffers, release them */
-+ if (i)
-+ goto release_bufs;
-+
+ return 0;
+}
+
-+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
++static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
+{
-+ int i, j;
-+ int new_count;
-+
-+ /* This is the lazy seeding of Rx buffer pools.
-+ * dpaa2_add_bufs() is also used on the Rx hotpath and calls
-+ * napi_alloc_frag(). The trouble with that is that it in turn ends up
-+ * calling this_cpu_ptr(), which mandates execution in atomic context.
-+ * Rather than splitting up the code, do a one-off preempt disable.
-+ */
-+ preempt_disable();
-+ for (j = 0; j < priv->num_channels; j++) {
-+ priv->channel[j]->buf_count = 0;
-+ for (i = 0; i < priv->num_bufs;
-+ i += DPAA2_ETH_BUFS_PER_CMD) {
-+ new_count = add_bufs(priv, bpid);
-+ priv->channel[j]->buf_count += new_count;
-+
-+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-+ preempt_enable();
-+ return -ENOMEM;
-+ }
-+ }
-+ }
-+ preempt_enable();
-+
-+ return 0;
-+}
++ int err;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+/**
-+ * Drain the specified number of buffers from the DPNI's private buffer pool.
-+ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
-+ */
-+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
-+{
-+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-+ int ret;
++ err = single_open(file, dpaa2_dbg_fqs_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
+
-+ do {
-+ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
-+ buf_array, count);
-+ if (ret < 0) {
-+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
-+ return;
-+ }
-+ free_bufs(priv, buf_array, ret);
-+ } while (ret);
++ return err;
+}
+
-+static void drain_pool(struct dpaa2_eth_priv *priv)
-+{
-+ preempt_disable();
-+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
-+ drain_bufs(priv, 1);
-+ preempt_enable();
-+}
++static const struct file_operations dpaa2_dbg_fq_ops = {
++ .open = dpaa2_dbg_fqs_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
+
-+/* Function is called from softirq context only, so we don't need to guard
-+ * the access to percpu count
-+ */
-+static int refill_pool(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *ch,
-+ u16 bpid)
++static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
+{
-+ int new_count;
-+
-+ if (likely(ch->buf_count >= priv->refill_thresh))
-+ return 0;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
++ struct dpaa2_eth_channel *ch;
++ int i;
+
-+ do {
-+ new_count = add_bufs(priv, bpid);
-+ if (unlikely(!new_count)) {
-+ /* Out of memory; abort for now, we'll try later on */
-+ break;
-+ }
-+ ch->buf_count += new_count;
-+ } while (ch->buf_count < priv->num_bufs);
++ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
++ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
++ "CHID", "CPU", "Deq busy", "Frames", "CDANs",
++ "Avg frm/CDAN", "Buf count");
+
-+ if (unlikely(ch->buf_count < priv->num_bufs))
-+ return -ENOMEM;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
++ ch->ch_id,
++ ch->nctx.desired_cpu,
++ ch->stats.dequeue_portal_busy,
++ ch->stats.frames,
++ ch->stats.cdan,
++			   ch->stats.cdan ? ch->stats.frames / ch->stats.cdan : 0,
++ ch->buf_count);
++ }
+
+ return 0;
+}
+
-+static int pull_channel(struct dpaa2_eth_channel *ch)
++static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
+{
+ int err;
-+ int dequeues = -1;
-+
-+ /* Retry while portal is busy */
-+ do {
-+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
-+ dequeues++;
-+ cpu_relax();
-+ } while (err == -EBUSY);
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
+
-+ ch->stats.dequeue_portal_busy += dequeues;
-+ if (unlikely(err))
-+ ch->stats.pull_err++;
++ err = single_open(file, dpaa2_dbg_ch_show, priv);
++ if (err < 0)
++ netdev_err(priv->net_dev, "single_open() failed\n");
+
+ return err;
+}
+
-+/* NAPI poll routine
-+ *
-+ * Frames are dequeued from the QMan channel associated with this NAPI context.
-+ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
-+ * confirmation frames are limited by a threshold per NAPI poll cycle.
-+ */
-+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
-+{
-+ struct dpaa2_eth_channel *ch;
-+ int rx_cleaned = 0, tx_conf_cleaned = 0;
-+ bool store_cleaned;
-+ struct dpaa2_eth_priv *priv;
-+ int err;
-+
-+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
-+ priv = ch->priv;
-+
-+ do {
-+ err = pull_channel(ch);
-+ if (unlikely(err))
-+ break;
-+
-+ /* Refill pool if appropriate */
-+ refill_pool(priv, ch, priv->bpid);
-+
-+ store_cleaned = consume_frames(ch, &rx_cleaned,
-+ &tx_conf_cleaned);
-+
-+ /* If we've either consumed the budget with Rx frames,
-+ * or reached the Tx conf threshold, we're done.
-+ */
-+ if (rx_cleaned >= budget ||
-+ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
-+ return budget;
-+ } while (store_cleaned);
-+
-+ /* We didn't consume the entire budget, finish napi and
-+ * re-enable data availability notifications.
-+ */
-+ napi_complete(napi);
-+ do {
-+ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
-+ cpu_relax();
-+ } while (err == -EBUSY);
-+
-+ return max(rx_cleaned, 1);
-+}
++static const struct file_operations dpaa2_dbg_ch_ops = {
++ .open = dpaa2_dbg_ch_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
+
-+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
++static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *offset)
+{
++ struct dpaa2_eth_priv *priv = file->private_data;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_eth_fq *fq;
+ struct dpaa2_eth_channel *ch;
+ int i;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ napi_enable(&ch->napi);
++ for_each_online_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ memset(percpu_stats, 0, sizeof(*percpu_stats));
++
++ percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
++ memset(percpu_extras, 0, sizeof(*percpu_extras));
+ }
-+}
+
-+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
-+{
-+ struct dpaa2_eth_channel *ch;
-+ int i;
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ memset(&fq->stats, 0, sizeof(fq->stats));
++ }
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
-+ napi_disable(&ch->napi);
++ memset(&ch->stats, 0, sizeof(ch->stats));
+ }
++
++ return count;
+}
+
-+static int link_state_update(struct dpaa2_eth_priv *priv)
++static const struct file_operations dpaa2_dbg_reset_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_write,
++};
++
++static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
++ const char __user *buf,
++ size_t count, loff_t *offset)
+{
-+ struct dpni_link_state state;
++ struct dpaa2_eth_priv *priv = file->private_data;
+ int err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (unlikely(err)) {
++ err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
++ if (err)
+ netdev_err(priv->net_dev,
-+ "dpni_get_link_state() failed\n");
-+ return err;
-+ }
-+
-+ /* Chech link state; speed / duplex changes are not treated yet */
-+ if (priv->link_state.up == state.up)
-+ return 0;
-+
-+ priv->link_state = state;
-+ if (state.up) {
-+ netif_carrier_on(priv->net_dev);
-+ netif_tx_start_all_queues(priv->net_dev);
-+ } else {
-+ netif_tx_stop_all_queues(priv->net_dev);
-+ netif_carrier_off(priv->net_dev);
-+ }
-+
-+ netdev_info(priv->net_dev, "Link Event: state %s",
-+ state.up ? "up" : "down");
++ "dpni_reset_statistics() failed %d\n", err);
+
-+ return 0;
++ return count;
+}
+
-+static int dpaa2_eth_open(struct net_device *net_dev)
++static const struct file_operations dpaa2_dbg_reset_mc_ops = {
++ .open = simple_open,
++ .write = dpaa2_dbg_reset_mc_write,
++};
++
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ /* We'll only start the txqs when the link is actually ready; make sure
-+ * we don't race against the link up notification, which may come
-+ * immediately after dpni_enable();
-+ */
-+ netif_tx_stop_all_queues(net_dev);
-+
-+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
-+ * return true and cause 'ip link show' to report the LOWER_UP flag,
-+ * even though the link notification wasn't even received.
-+ */
-+ netif_carrier_off(net_dev);
++ if (!dpaa2_dbg_root)
++ return;
+
-+ err = seed_pool(priv, priv->bpid);
-+ if (err) {
-+ /* Not much to do; the buffer pool, though not filled up,
-+ * may still contain some buffers which would enable us
-+ * to limp on.
-+ */
-+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-+ priv->dpbp_dev->obj_desc.id, priv->bpid);
++ /* Create a directory for the interface */
++ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
++ dpaa2_dbg_root);
++ if (!priv->dbg.dir) {
++ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
++ return;
+ }
+
-+ if (priv->tx_pause_frames)
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
-+ else
-+ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH_TD;
-+
-+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
-+ if (err < 0) {
-+ netdev_err(net_dev, "dpni_enable() failed\n");
-+ goto enable_err;
++ /* per-cpu stats file */
++ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_cpu_ops);
++ if (!priv->dbg.cpu_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_cpu_stats;
+ }
+
-+ /* If the DPMAC object has already processed the link up interrupt,
-+ * we have to learn the link state ourselves.
-+ */
-+ err = link_state_update(priv);
-+ if (err < 0) {
-+ netdev_err(net_dev, "Can't update link state\n");
-+ goto link_state_err;
++ /* per-fq stats file */
++ priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_fq_ops);
++ if (!priv->dbg.fq_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_fq_stats;
+ }
+
-+ return 0;
-+
-+link_state_err:
-+enable_err:
-+ priv->refill_thresh = 0;
-+ drain_pool(priv);
-+ return err;
-+}
-+
-+static int dpaa2_eth_stop(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int dpni_enabled;
-+ int retries = 10, i;
-+
-+ netif_tx_stop_all_queues(net_dev);
-+ netif_carrier_off(net_dev);
-+
-+ /* Loop while dpni_disable() attempts to drain the egress FQs
-+ * and confirm them back to us.
-+ */
-+ do {
-+ dpni_disable(priv->mc_io, 0, priv->mc_token);
-+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
-+ if (dpni_enabled)
-+ /* Allow the MC some slack */
-+ msleep(100);
-+ } while (dpni_enabled && --retries);
-+ if (!retries) {
-+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
-+ /* Must go on and disable NAPI nonetheless, so we don't crash at
-+ * the next "ifconfig up"
-+ */
++	/* per-channel stats file */
++ priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_ch_ops);
++	if (!priv->dbg.ch_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_ch_stats;
+ }
+
-+ priv->refill_thresh = 0;
++ /* reset stats */
++ priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
++ priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_ops);
++ if (!priv->dbg.reset_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_stats;
++ }
+
-+ /* Wait for all running napi poll routines to finish, so that no
-+ * new refill operations are started.
-+ */
-+ for (i = 0; i < priv->num_channels; i++)
-+ napi_synchronize(&priv->channel[i]->napi);
++ /* reset MC stats */
++ priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
++ 0222, priv->dbg.dir, priv,
++ &dpaa2_dbg_reset_mc_ops);
++ if (!priv->dbg.reset_mc_stats) {
++ netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
++ goto err_reset_mc_stats;
++ }
+
-+ /* Empty the buffer pool */
-+ drain_pool(priv);
++ return;
+
-+ return 0;
++err_reset_mc_stats:
++ debugfs_remove(priv->dbg.reset_stats);
++err_reset_stats:
++ debugfs_remove(priv->dbg.ch_stats);
++err_ch_stats:
++ debugfs_remove(priv->dbg.fq_stats);
++err_fq_stats:
++ debugfs_remove(priv->dbg.cpu_stats);
++err_cpu_stats:
++ debugfs_remove(priv->dbg.dir);
+}
+
-+static int dpaa2_eth_init(struct net_device *net_dev)
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
+{
-+ u64 supported = 0;
-+ u64 not_supported = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u32 options = priv->dpni_attrs.options;
-+
-+ /* Capabilities listing */
-+ supported |= IFF_LIVE_ADDR_CHANGE;
-+
-+ if (options & DPNI_OPT_NO_MAC_FILTER)
-+ not_supported |= IFF_UNICAST_FLT;
-+ else
-+ supported |= IFF_UNICAST_FLT;
-+
-+ net_dev->priv_flags |= supported;
-+ net_dev->priv_flags &= ~not_supported;
-+
-+ /* Features */
-+ net_dev->features = NETIF_F_RXCSUM |
-+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-+ NETIF_F_SG | NETIF_F_HIGHDMA |
-+ NETIF_F_LLTX;
-+ net_dev->hw_features = net_dev->features;
-+
-+ return 0;
++ debugfs_remove(priv->dbg.reset_mc_stats);
++ debugfs_remove(priv->dbg.reset_stats);
++ debugfs_remove(priv->dbg.fq_stats);
++ debugfs_remove(priv->dbg.ch_stats);
++ debugfs_remove(priv->dbg.cpu_stats);
++ debugfs_remove(priv->dbg.dir);
+}
+
-+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
++void dpaa2_eth_dbg_init(void)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ int err;
-+
-+ err = eth_mac_addr(net_dev, addr);
-+ if (err < 0) {
-+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ net_dev->dev_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
-+ return err;
++ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
++ if (!dpaa2_dbg_root) {
++ pr_err("DPAA2-ETH: debugfs create failed\n");
++ return;
+ }
+
-+ return 0;
++ pr_info("DPAA2-ETH: debugfs created\n");
+}
+
-+/** Fill in counters maintained by the GPP driver. These may be different from
-+ * the hardware counters obtained by ethtool.
-+ */
-+static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
-+ struct rtnl_link_stats64 *stats)
++void __exit dpaa2_eth_dbg_exit(void)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct rtnl_link_stats64 *percpu_stats;
-+ u64 *cpustats;
-+ u64 *netstats = (u64 *)stats;
-+ int i, j;
-+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
-+
-+ for_each_possible_cpu(i) {
-+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
-+ cpustats = (u64 *)percpu_stats;
-+ for (j = 0; j < num; j++)
-+ netstats[j] += cpustats[j];
-+ }
-+ return stats;
++ debugfs_remove(dpaa2_dbg_root);
+}
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
+@@ -0,0 +1,60 @@
++/* Copyright 2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
++#ifndef DPAA2_ETH_DEBUGFS_H
++#define DPAA2_ETH_DEBUGFS_H
+
-+ /* Set the maximum Rx frame length to match the transmit side;
-+ * account for L2 headers when computing the MFL
-+ */
-+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
-+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
-+ return err;
-+ }
++#include <linux/dcache.h>
+
-+ net_dev->mtu = mtu;
-+ return 0;
-+}
++struct dpaa2_eth_priv;
+
-+/* Copy mac unicast addresses from @net_dev to @priv.
-+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
-+ */
-+static void add_uc_hw_addr(const struct net_device *net_dev,
-+ struct dpaa2_eth_priv *priv)
-+{
-+ struct netdev_hw_addr *ha;
-+ int err;
++struct dpaa2_debugfs {
++ struct dentry *dir;
++ struct dentry *fq_stats;
++ struct dentry *ch_stats;
++ struct dentry *cpu_stats;
++ struct dentry *reset_stats;
++ struct dentry *reset_mc_stats;
++};
+
-+ netdev_for_each_uc_addr(ha, net_dev) {
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ ha->addr);
-+ if (err)
-+ netdev_warn(priv->net_dev,
-+ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
-+ ha->addr, err);
-+ }
-+}
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++void dpaa2_eth_dbg_init(void);
++void dpaa2_eth_dbg_exit(void);
++void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
++void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
++#else
++static inline void dpaa2_eth_dbg_init(void) {}
++static inline void dpaa2_eth_dbg_exit(void) {}
++static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
++static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
++#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
+
-+/* Copy mac multicast addresses from @net_dev to @priv
-+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++#endif /* DPAA2_ETH_DEBUGFS_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+@@ -0,0 +1,185 @@
++/* Copyright 2014-2015 Freescale Semiconductor Inc.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
-+static void add_mc_hw_addr(const struct net_device *net_dev,
-+ struct dpaa2_eth_priv *priv)
-+{
-+ struct netdev_hw_addr *ha;
-+ int err;
+
-+ netdev_for_each_mc_addr(ha, net_dev) {
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ ha->addr);
-+ if (err)
-+ netdev_warn(priv->net_dev,
-+ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
-+ ha->addr, err);
-+ }
-+}
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM dpaa2_eth
+
-+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int uc_count = netdev_uc_count(net_dev);
-+ int mc_count = netdev_mc_count(net_dev);
-+ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
-+ u32 options = priv->dpni_attrs.options;
-+ u16 mc_token = priv->mc_token;
-+ struct fsl_mc_io *mc_io = priv->mc_io;
-+ int err;
++#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _DPAA2_ETH_TRACE_H
+
-+ /* Basic sanity checks; these probably indicate a misconfiguration */
-+ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
-+ netdev_info(net_dev,
-+ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
-+ max_mac);
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include "dpaa2-eth.h"
++#include <linux/tracepoint.h>
+
-+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
-+ if (uc_count > max_mac) {
-+ netdev_info(net_dev,
-+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
-+ uc_count, max_mac);
-+ goto force_promisc;
-+ }
-+ if (mc_count + uc_count > max_mac) {
-+ netdev_info(net_dev,
-+ "Unicast + Multicast addr count reached %d, max allowed is %d; forcing promisc\n",
-+ uc_count + mc_count, max_mac);
-+ goto force_mc_promisc;
-+ }
++#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
++/* trace_printk format for raw buffer event class */
++#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
-+ /* Adjust promisc settings due to flag combinations */
-+ if (net_dev->flags & IFF_PROMISC)
-+ goto force_promisc;
-+ if (net_dev->flags & IFF_ALLMULTI) {
-+ /* First, rebuild unicast filtering table. This should be done
-+ * in promisc mode, in order to avoid frame loss while we
-+ * progressively add entries to the table.
-+ * We don't know whether we had been in promisc already, and
-+ * making an MC call to find out is expensive; so set uc promisc
-+ * nonetheless.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set uc promisc\n");
++/* This is used to declare a class of events.
++ * individual events of this type will be defined below.
++ */
+
-+ /* Actual uc table reconstruction. */
-+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear uc filters\n");
-+ add_uc_hw_addr(net_dev, priv);
++/* Store details about a frame descriptor */
++DECLARE_EVENT_CLASS(dpaa2_eth_fd,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+ /* Finally, clear uc promisc and set mc promisc as requested. */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear uc promisc\n");
-+ goto force_mc_promisc;
-+ }
++ /* Repeat argument list here */
++ TP_ARGS(netdev, fd),
+
-+ /* Neither unicast, nor multicast promisc will be on... eventually.
-+ * For now, rebuild mac filtering tables while forcing both of them on.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(u64, fd_addr)
++ __field(u32, fd_len)
++ __field(u16, fd_offset)
++ __string(name, netdev->name)
++ ),
+
-+ /* Actual mac filtering tables reconstruction */
-+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear mac filters\n");
-+ add_mc_hw_addr(net_dev, priv);
-+ add_uc_hw_addr(net_dev, priv);
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->fd_addr = dpaa2_fd_get_addr(fd);
++ __entry->fd_len = dpaa2_fd_get_len(fd);
++ __entry->fd_offset = dpaa2_fd_get_offset(fd);
++ __assign_str(name, netdev->name);
++ ),
+
-+ /* Now we can clear both ucast and mcast promisc, without risking
-+ * to drop legitimate frames anymore.
-+ */
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear ucast promisc\n");
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
-+ if (err)
-+ netdev_warn(net_dev, "Can't clear mcast promisc\n");
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_FMT,
++ __get_str(name),
++ __entry->fd_addr,
++ __entry->fd_len,
++ __entry->fd_offset)
++);
+
-+ return;
++/* Now declare events of the above type. Format is:
++ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
++ */
+
-+force_promisc:
-+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set ucast promisc\n");
-+force_mc_promisc:
-+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
-+ if (err)
-+ netdev_warn(net_dev, "Can't set mcast promisc\n");
-+}
++/* Tx (egress) fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+static int dpaa2_eth_set_features(struct net_device *net_dev,
-+ netdev_features_t features)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ netdev_features_t changed = features ^ net_dev->features;
-+ bool enable;
-+ int err;
++ TP_ARGS(netdev, fd)
++);
+
-+ if (changed & NETIF_F_RXCSUM) {
-+ enable = !!(features & NETIF_F_RXCSUM);
-+ err = set_rx_csum(priv, enable);
-+ if (err)
-+ return err;
-+ }
++/* Rx fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
-+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
-+ err = set_tx_csum(priv, enable);
-+ if (err)
-+ return err;
-+ }
++ TP_ARGS(netdev, fd)
++);
+
-+ return 0;
-+}
++/* Tx confirmation fd */
++DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
++ TP_PROTO(struct net_device *netdev,
++ const struct dpaa2_fd *fd),
+
-+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
-+ struct hwtstamp_config config;
++ TP_ARGS(netdev, fd)
++);
+
-+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-+ return -EFAULT;
++/* Log data about raw buffers. Useful for tracing DPBP content. */
++TRACE_EVENT(dpaa2_eth_buf_seed,
++ /* Trace function prototype */
++ TP_PROTO(struct net_device *netdev,
++ /* virtual address and size */
++ void *vaddr,
++ size_t size,
++ /* dma map address and size */
++ dma_addr_t dma_addr,
++ size_t map_size,
++ /* buffer pool id, if relevant */
++ u16 bpid),
+
-+ switch (config.tx_type) {
-+ case HWTSTAMP_TX_OFF:
-+ priv->ts_tx_en = false;
-+ break;
-+ case HWTSTAMP_TX_ON:
-+ priv->ts_tx_en = true;
-+ break;
-+ default:
-+ return -ERANGE;
-+ }
++ /* Repeat argument list here */
++ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
-+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
-+ priv->ts_rx_en = false;
-+ } else {
-+ priv->ts_rx_en = true;
-+ /* TS is set for all frame types, not only those requested */
-+ config.rx_filter = HWTSTAMP_FILTER_ALL;
-+ }
++ /* A structure containing the relevant information we want
++ * to record. Declare name and type for each normal element,
++ * name, type and size for arrays. Use __string for variable
++ * length strings.
++ */
++ TP_STRUCT__entry(
++ __field(void *, vaddr)
++ __field(size_t, size)
++ __field(dma_addr_t, dma_addr)
++ __field(size_t, map_size)
++ __field(u16, bpid)
++ __string(name, netdev->name)
++ ),
+
-+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-+ -EFAULT : 0;
-+}
++ /* The function that assigns values to the above declared
++ * fields
++ */
++ TP_fast_assign(
++ __entry->vaddr = vaddr;
++ __entry->size = size;
++ __entry->dma_addr = dma_addr;
++ __entry->map_size = map_size;
++ __entry->bpid = bpid;
++ __assign_str(name, netdev->name);
++ ),
+
-+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++ /* This is what gets printed when the trace event is
++ * triggered.
++ */
++ TP_printk(TR_BUF_FMT,
++ __get_str(name),
++ __entry->vaddr,
++ __entry->size,
++ &__entry->dma_addr,
++ __entry->map_size,
++ __entry->bpid)
++);
++
++/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
++ * The syntax is the same as for DECLARE_EVENT_CLASS().
++ */
++
++#endif /* _DPAA2_ETH_TRACE_H */
++
++/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE dpaa2-eth-trace
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -0,0 +1,3734 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/etherdevice.h>
++#include <linux/of_net.h>
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/kthread.h>
++#include <linux/iommu.h>
++#include <linux/net_tstamp.h>
++#include <linux/bpf.h>
++#include <linux/filter.h>
++#include <linux/atomic.h>
++#include <net/sock.h>
++#include <linux/fsl/mc.h>
++#include "dpaa2-eth.h"
++#include "dpaa2-eth-ceetm.h"
++
++/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
++ * using trace events only need to #include <trace/events/sched.h>
++ */
++#define CREATE_TRACE_POINTS
++#include "dpaa2-eth-trace.h"
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Freescale Semiconductor, Inc");
++MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
++
++const char dpaa2_eth_drv_version[] = "0.1";
++
++static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
++ dma_addr_t iova_addr)
+{
-+ if (cmd == SIOCSHWTSTAMP)
-+ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
++ phys_addr_t phys_addr;
+
-+ return -EINVAL;
-+}
++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
-+static const struct net_device_ops dpaa2_eth_ops = {
-+ .ndo_open = dpaa2_eth_open,
-+ .ndo_start_xmit = dpaa2_eth_tx,
-+ .ndo_stop = dpaa2_eth_stop,
-+ .ndo_init = dpaa2_eth_init,
-+ .ndo_set_mac_address = dpaa2_eth_set_addr,
-+ .ndo_get_stats64 = dpaa2_eth_get_stats,
-+ .ndo_change_mtu = dpaa2_eth_change_mtu,
-+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
-+ .ndo_set_features = dpaa2_eth_set_features,
-+ .ndo_do_ioctl = dpaa2_eth_ioctl,
-+};
++ return phys_to_virt(phys_addr);
++}
+
-+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++static void validate_rx_csum(struct dpaa2_eth_priv *priv,
++ u32 fd_status,
++ struct sk_buff *skb)
+{
-+ struct dpaa2_eth_channel *ch;
++ skb_checksum_none_assert(skb);
+
-+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
++ /* HW checksum validation is disabled, nothing to do here */
++ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
++ return;
+
-+ /* Update NAPI statistics */
-+ ch->stats.cdan++;
++ /* Read checksum validation bits */
++ if (!((fd_status & DPAA2_FAS_L3CV) &&
++ (fd_status & DPAA2_FAS_L4CV)))
++ return;
+
-+ napi_schedule_irqoff(&ch->napi);
++ /* Inform the stack there's no need to compute L3/L4 csum anymore */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
-+/* Allocate and configure a DPCON object */
-+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
++/* Free a received FD.
++ * Not to be used for Tx conf FDs or on any other paths.
++ */
++static void free_rx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ void *vaddr)
+{
-+ struct fsl_mc_device *dpcon;
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpcon_attr attrs;
-+ int err;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ struct dpaa2_sg_entry *sgt;
++ void *sg_vaddr;
++ int i;
+
-+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
-+ FSL_MC_POOL_DPCON, &dpcon);
-+ if (err) {
-+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
-+ return NULL;
-+ }
++ /* If single buffer frame, just free the data buffer */
++ if (fd_format == dpaa2_fd_single)
++ goto free_buf;
++ else if (fd_format != dpaa2_fd_sg)
++ /* We don't support any other format */
++ return;
+
-+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_open() failed\n");
-+ goto err_open;
-+ }
++ /* For S/G frames, we first need to free all SG entries
++ * except the first one, which was taken care of already
++ */
++ sgt = vaddr + dpaa2_fd_get_offset(fd);
++ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ addr = dpaa2_sg_get_addr(&sgt[i]);
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_reset() failed\n");
-+ goto err_reset;
++ skb_free_frag(sg_vaddr);
++ if (dpaa2_sg_is_final(&sgt[i]))
++ break;
+ }
+
-+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
-+ if (err) {
-+ dev_err(dev, "dpcon_get_attributes() failed\n");
-+ goto err_get_attr;
-+ }
++free_buf:
++ skb_free_frag(vaddr);
++}
+
-+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpcon_enable() failed\n");
-+ goto err_enable;
-+ }
++/* Build a linear skb based on a single-buffer frame descriptor */
++static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ void *fd_vaddr)
++{
++ struct sk_buff *skb = NULL;
++ u16 fd_offset = dpaa2_fd_get_offset(fd);
++ u32 fd_length = dpaa2_fd_get_len(fd);
+
-+ return dpcon;
++ ch->buf_count--;
+
-+err_enable:
-+err_get_attr:
-+err_reset:
-+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-+err_open:
-+ fsl_mc_object_free(dpcon);
++ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb))
++ return NULL;
+
-+ return NULL;
-+}
++ skb_reserve(skb, fd_offset);
++ skb_put(skb, fd_length);
+
-+static void free_dpcon(struct dpaa2_eth_priv *priv,
-+ struct fsl_mc_device *dpcon)
-+{
-+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
-+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-+ fsl_mc_object_free(dpcon);
++ return skb;
+}
+
-+static struct dpaa2_eth_channel *
-+alloc_channel(struct dpaa2_eth_priv *priv)
++/* Build a non linear (fragmented) skb based on a S/G table */
++static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ struct dpaa2_sg_entry *sgt)
+{
-+ struct dpaa2_eth_channel *channel;
-+ struct dpcon_attr attr;
++ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
-+
-+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-+ if (!channel)
-+ return NULL;
++ void *sg_vaddr;
++ dma_addr_t sg_addr;
++ u16 sg_offset;
++ u32 sg_length;
++ struct page *page, *head_page;
++ int page_offset;
++ int i;
+
-+ channel->dpcon = setup_dpcon(priv);
-+ if (!channel->dpcon)
-+ goto err_setup;
++ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
++ struct dpaa2_sg_entry *sge = &sgt[i];
+
-+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
-+ &attr);
-+ if (err) {
-+ dev_err(dev, "dpcon_get_attributes() failed\n");
-+ goto err_get_attr;
-+ }
++ /* NOTE: We only support SG entries in dpaa2_sg_single format,
++ * but this is the only format we may receive from HW anyway
++ */
+
-+ channel->dpcon_id = attr.id;
-+ channel->ch_id = attr.qbman_ch_id;
-+ channel->priv = priv;
++ /* Get the address and length from the S/G entry */
++ sg_addr = dpaa2_sg_get_addr(sge);
++ sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
++ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ return channel;
++ sg_length = dpaa2_sg_get_len(sge);
+
-+err_get_attr:
-+ free_dpcon(priv, channel->dpcon);
-+err_setup:
-+ kfree(channel);
-+ return NULL;
-+}
++ if (i == 0) {
++ /* We build the skb around the first data buffer */
++ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
++ if (unlikely(!skb)) {
++ /* Free the first SG entry now, since we already
++ * unmapped it and obtained the virtual address
++ */
++ skb_free_frag(sg_vaddr);
++
++ /* We still need to subtract the buffers used
++ * by this FD from our software counter
++ */
++ while (!dpaa2_sg_is_final(&sgt[i]) &&
++ i < DPAA2_ETH_MAX_SG_ENTRIES)
++ i++;
++ break;
++ }
+
-+static void free_channel(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_channel *channel)
-+{
-+ free_dpcon(priv, channel->dpcon);
-+ kfree(channel);
-+}
++ sg_offset = dpaa2_sg_get_offset(sge);
++ skb_reserve(skb, sg_offset);
++ skb_put(skb, sg_length);
++ } else {
++ /* Rest of the data buffers are stored as skb frags */
++ page = virt_to_page(sg_vaddr);
++ head_page = virt_to_head_page(sg_vaddr);
+
-+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
-+ * and register data availability notifications
-+ */
-+static int setup_dpio(struct dpaa2_eth_priv *priv)
-+{
-+ struct dpaa2_io_notification_ctx *nctx;
-+ struct dpaa2_eth_channel *channel;
-+ struct dpcon_notification_cfg dpcon_notif_cfg;
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int i, err;
++ /* Offset in page (which may be compound).
++ * Data in subsequent SG entries is stored from the
++ * beginning of the buffer, so we don't need to add the
++ * sg_offset.
++ */
++ page_offset = ((unsigned long)sg_vaddr &
++ (PAGE_SIZE - 1)) +
++ (page_address(page) - page_address(head_page));
+
-+ /* We want the ability to spread ingress traffic (RX, TX conf) to as
-+ * many cores as possible, so we need one channel for each core
-+ * (unless there's fewer queues than cores, in which case the extra
-+ * channels would be wasted).
-+ * Allocate one channel per core and register it to the core's
-+ * affine DPIO. If not enough channels are available for all cores
-+ * or if some cores don't have an affine DPIO, there will be no
-+ * ingress frame processing on those cores.
-+ */
-+ cpumask_clear(&priv->dpio_cpumask);
-+ for_each_online_cpu(i) {
-+ /* Try to allocate a channel */
-+ channel = alloc_channel(priv);
-+ if (!channel) {
-+ dev_info(dev,
-+ "No affine channel for cpu %d and above\n", i);
-+ goto err_alloc_ch;
++ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
++ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
-+ priv->channel[priv->num_channels] = channel;
++ if (dpaa2_sg_is_final(sge))
++ break;
++ }
+
-+ nctx = &channel->nctx;
-+ nctx->is_cdan = 1;
-+ nctx->cb = cdan_cb;
-+ nctx->id = channel->ch_id;
-+ nctx->desired_cpu = i;
++ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
-+ /* Register the new context */
-+ err = dpaa2_io_service_register(NULL, nctx);
-+ if (err) {
-+ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
-+ /* If no affine DPIO for this core, there's probably
-+ * none available for next cores either.
-+ */
-+ goto err_service_reg;
-+ }
++ /* Count all data buffers + SG table buffer */
++ ch->buf_count -= i + 2;
+
-+ /* Register DPCON notification with MC */
-+ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
-+ dpcon_notif_cfg.priority = 0;
-+ dpcon_notif_cfg.user_ctx = nctx->qman64;
-+ err = dpcon_set_notification(priv->mc_io, 0,
-+ channel->dpcon->mc_handle,
-+ &dpcon_notif_cfg);
-+ if (err) {
-+ dev_err(dev, "dpcon_set_notification failed()\n");
-+ goto err_set_cdan;
-+ }
++ return skb;
++}
+
-+ /* If we managed to allocate a channel and also found an affine
-+ * DPIO for this core, add it to the final mask
-+ */
-+ cpumask_set_cpu(i, &priv->dpio_cpumask);
-+ priv->num_channels++;
++static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_fd *fd,
++ void *buf_start,
++ u16 queue_id)
++{
++ struct dpaa2_eth_fq *fq;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ struct dpaa2_faead *faead;
++ u32 ctrl, frc;
++ int i, err;
+
-+ /* Stop if we already have enough channels to accommodate all
-+ * RX and TX conf queues
-+ */
-+ if (priv->num_channels == dpaa2_eth_queue_count(priv))
-+ break;
-+ }
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
-+ /* Tx confirmation queues can only be serviced by cpus
-+ * with an affine DPIO/channel
-+ */
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
++ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
++ faead = dpaa2_get_faead(buf_start, false);
++ faead->ctrl = cpu_to_le32(ctrl);
++ faead->conf_fqid = 0;
+
-+ return 0;
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
-+err_set_cdan:
-+ dpaa2_io_service_deregister(NULL, nctx);
-+err_service_reg:
-+ free_channel(priv, channel);
-+err_alloc_ch:
-+ if (cpumask_empty(&priv->dpio_cpumask)) {
-+ dev_dbg(dev, "No cpu with an affine DPIO/DPCON\n");
-+ return -ENODEV;
++ fq = &priv->fq[queue_id];
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, 0,
++ fq->tx_qdbin, fd);
++ if (err != -EBUSY)
++ break;
+ }
-+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask);
+
-+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
-+ cpumask_pr_args(&priv->dpio_cpumask));
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err)) {
++ percpu_stats->tx_errors++;
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
++ }
+
-+ return 0;
++ return err;
+}
+
-+static void free_dpio(struct dpaa2_eth_priv *priv)
++static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
++ struct device *dev = priv->net_dev->dev.parent;
++ void *vaddr;
+ int i;
-+ struct dpaa2_eth_channel *ch;
+
-+ /* deregister CDAN notifications and free channels */
-+ for (i = 0; i < priv->num_channels; i++) {
-+ ch = priv->channel[i];
-+ dpaa2_io_service_deregister(NULL, &ch->nctx);
-+ free_channel(priv, ch);
++ for (i = 0; i < count; i++) {
++ /* Same logic as on regular Rx path */
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
++ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb_free_frag(vaddr);
+ }
+}
+
-+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
-+ int cpu)
++static void release_fd_buf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ dma_addr_t addr)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ int i;
++ int err;
+
-+ for (i = 0; i < priv->num_channels; i++)
-+ if (priv->channel[i]->nctx.desired_cpu == cpu)
-+ return priv->channel[i];
++ ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
++ if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
++ return;
+
-+ /* We should never get here. Issue a warning and return
-+ * the first channel, because it's still better than nothing
-+ */
-+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
++ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
++ ch->rel_buf_array,
++ ch->rel_buf_cnt)) == -EBUSY)
++ cpu_relax();
+
-+ return priv->channel[0];
++ if (err)
++ free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
++
++ ch->rel_buf_cnt = 0;
+}
+
-+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
++/* Main Rx frame processing routine */
++static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi,
++ u16 queue_id)
+{
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ u8 fd_format = dpaa2_fd_get_format(fd);
++ void *vaddr;
++ struct sk_buff *skb;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct cpumask xps_mask = CPU_MASK_NONE;
-+ struct dpaa2_eth_fq *fq;
-+ int rx_cpu, txc_cpu;
-+ int i, err;
++ struct dpaa2_fas *fas;
++ void *buf_data;
++ u32 status = 0;
++ struct bpf_prog *xdp_prog;
++ struct xdp_buff xdp;
++ u32 xdp_act;
+
-+ /* For each FQ, pick one channel/CPU to deliver frames to.
-+ * This may well change at runtime, either through irqbalance or
-+ * through direct user intervention.
-+ */
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
++ /* Tracing point */
++ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ switch (fq->type) {
-+ case DPAA2_RX_FQ:
-+ case DPAA2_RX_ERR_FQ:
-+ fq->target_cpu = rx_cpu;
-+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
-+ if (rx_cpu >= nr_cpu_ids)
-+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
-+ break;
-+ case DPAA2_TX_CONF_FQ:
-+ fq->target_cpu = txc_cpu;
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
+
-+ /* register txc_cpu to XPS */
-+ cpumask_set_cpu(txc_cpu, &xps_mask);
-+ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
-+ fq->flowid);
-+ if (err)
-+ dev_info_once(dev,
-+ "Tx: error setting XPS queue\n");
-+ cpumask_clear_cpu(txc_cpu, &xps_mask);
++ fas = dpaa2_get_fas(vaddr, false);
++ prefetch(fas);
++ buf_data = vaddr + dpaa2_fd_get_offset(fd);
++ prefetch(buf_data);
+
-+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask);
-+ if (txc_cpu >= nr_cpu_ids)
-+ txc_cpu = cpumask_first(&priv->txconf_cpumask);
-+ break;
-+ default:
-+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
-+ }
-+ fq->channel = get_affine_channel(priv, fq->target_cpu);
-+ }
-+}
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
-+static void setup_fqs(struct dpaa2_eth_priv *priv)
-+{
-+ int i, j;
++ xdp_prog = READ_ONCE(ch->xdp_prog);
+
-+ /* We have one TxConf FQ per Tx flow. Tx queues MUST be at the
-+ * beginning of the queue array.
-+ * Number of Rx and Tx queues are the same.
-+ * We only support one traffic class for now.
-+ */
-+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
-+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
-+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
-+ priv->fq[priv->num_fqs++].flowid = (u16)i;
-+ }
++ if (fd_format == dpaa2_fd_single) {
++ if (xdp_prog) {
++ xdp.data = buf_data;
++ xdp.data_end = buf_data + dpaa2_fd_get_len(fd);
++ /* for now, we don't support changes in header size */
++ xdp.data_hard_start = buf_data;
+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
-+ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
-+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
-+ priv->fq[priv->num_fqs].tc = (u8)i;
-+ priv->fq[priv->num_fqs++].flowid = (u16)j;
++ /* update stats here, as we won't reach the code
++ * that does that for standard frames
++ */
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
++
++ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
++ switch (xdp_act) {
++ case XDP_PASS:
++ break;
++ default:
++ bpf_warn_invalid_xdp_action(xdp_act);
++ case XDP_ABORTED:
++ case XDP_DROP:
++ release_fd_buf(priv, ch, addr);
++ goto drop_cnt;
++ case XDP_TX:
++ if (dpaa2_eth_xdp_tx(priv, (struct dpaa2_fd *)fd, vaddr,
++ queue_id)) {
++ dma_unmap_single(dev, addr,
++ DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ free_rx_fd(priv, fd, vaddr);
++ ch->buf_count--;
++ }
++ return;
++ }
+ }
-+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ /* We have exactly one Rx error queue per DPNI */
-+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
-+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
-+#endif
-+
-+ /* For each FQ, decide on which core to process incoming frames */
-+ set_fq_affinity(priv);
-+}
-+
-+/* Allocate and configure one buffer pool for each interface */
-+static int setup_dpbp(struct dpaa2_eth_priv *priv)
-+{
-+ int err;
-+ struct fsl_mc_device *dpbp_dev;
-+ struct dpbp_attr dpbp_attrs;
-+ struct device *dev = priv->net_dev->dev.parent;
-+
-+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
-+ &dpbp_dev);
-+ if (err) {
-+ dev_err(dev, "DPBP device allocation failed\n");
-+ return err;
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb = build_linear_skb(priv, ch, fd, vaddr);
++ } else if (fd_format == dpaa2_fd_sg) {
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ skb = build_frag_skb(priv, ch, buf_data);
++ skb_free_frag(vaddr);
++ percpu_extras->rx_sg_frames++;
++ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
++ } else {
++ /* We don't support any other format */
++ goto drop_cnt;
+ }
+
-+ priv->dpbp_dev = dpbp_dev;
++ if (unlikely(!skb))
++ goto drop_fd;
+
-+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
-+ &dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_open() failed\n");
-+ goto err_open;
-+ }
++ prefetch(skb->data);
+
-+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_reset() failed\n");
-+ goto err_reset;
-+ }
++ /* Get the timestamp value */
++ if (priv->ts_rx_en) {
++ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
++ u64 *ns = dpaa2_get_ts(vaddr, false);
+
-+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
-+ if (err) {
-+ dev_err(dev, "dpbp_enable() failed\n");
-+ goto err_enable;
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
++ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
-+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
-+ &dpbp_attrs);
-+ if (err) {
-+ dev_err(dev, "dpbp_get_attributes() failed\n");
-+ goto err_get_attr;
++ /* Check if we need to validate the L4 csum */
++ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
++ status = le32_to_cpu(fas->status);
++ validate_rx_csum(priv, status, skb);
+ }
+
-+ priv->bpid = dpbp_attrs.bpid;
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
++ skb->protocol = eth_type_trans(skb, priv->net_dev);
+
-+ return 0;
++ /* Record Rx queue - this will be used when picking a Tx queue to
++ * forward the frames. We're keeping flow affinity through the
++ * network stack.
++ */
++ skb_record_rx_queue(skb, queue_id);
+
-+err_get_attr:
-+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
-+err_enable:
-+err_reset:
-+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
-+err_open:
-+ fsl_mc_object_free(dpbp_dev);
++ percpu_stats->rx_packets++;
++ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
-+ return err;
-+}
++ napi_gro_receive(napi, skb);
+
-+static void free_dpbp(struct dpaa2_eth_priv *priv)
-+{
-+ drain_pool(priv);
-+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
-+ fsl_mc_object_free(priv->dpbp_dev);
++ return;
++
++drop_fd:
++ free_rx_fd(priv, fd, vaddr);
++drop_cnt:
++ percpu_stats->rx_dropped++;
+}
+
-+static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++/* Processing of Rx frames received on the error FQ
++ * We check and print the error bits and then free the frame
++ */
++static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id __always_unused)
+{
-+ struct dpni_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
++ dma_addr_t addr = dpaa2_fd_get_addr(fd);
++ void *vaddr;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_fas *fas;
++ u32 status = 0;
++ u32 fd_errors;
++ bool has_fas_errors = false;
+
-+ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
-+ GFP_KERNEL);
-+ if (!priv->cscn_unaligned)
-+ return -ENOMEM;
++ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
++ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+
-+ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
-+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, priv->cscn_dma)) {
-+ dev_err(dev, "Error mapping CSCN memory area\n");
-+ err = -ENOMEM;
-+ goto err_dma_map;
++ /* check frame errors in the FD field */
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
++ if (likely(fd_errors)) {
++ has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
++ !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "RX frame FD err: %08x\n",
++ fd_errors);
+ }
+
-+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
-+ cong_notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
-+ cong_notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
-+ cong_notif_cfg.message_ctx = (u64)priv;
-+ cong_notif_cfg.message_iova = priv->cscn_dma;
-+ cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
-+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
-+ DPNI_CONG_OPT_COHERENT_WRITE;
-+ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0,
-+ &cong_notif_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_congestion_notification failed\n");
-+ goto err_set_cong;
++ /* check frame errors in the FAS field */
++ if (has_fas_errors) {
++ fas = dpaa2_get_fas(vaddr, false);
++ status = le32_to_cpu(fas->status);
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
++ status & DPAA2_FAS_RX_ERR_MASK);
+ }
++ free_rx_fd(priv, fd, vaddr);
+
-+ return 0;
-+
-+err_set_cong:
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+err_dma_map:
-+ kfree(priv->cscn_unaligned);
-+
-+ return err;
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_stats->rx_errors++;
++ ch->buf_count--;
+}
++#endif
+
-+/* Configure the DPNI object this interface is associated with */
-+static int setup_dpni(struct fsl_mc_device *ls_dev)
++/* Consume all frames pull-dequeued into the store. This is the simplest way to
++ * make sure we don't accidentally issue another volatile dequeue which would
++ * overwrite (leak) frames already in the store.
++ *
++ * The number of frames is returned using the last 2 output arguments,
++ * separately for Rx and Tx confirmations.
++ *
++ * Observance of NAPI budget is not our concern, leaving that to the caller.
++ */
++static bool consume_frames(struct dpaa2_eth_channel *ch, int *rx_cleaned,
++ int *tx_conf_cleaned)
+{
-+ struct device *dev = &ls_dev->dev;
-+ struct dpaa2_eth_priv *priv;
-+ struct net_device *net_dev;
-+ struct dpni_buffer_layout buf_layout;
-+ struct dpni_link_cfg cfg = {0};
-+ int err;
++ struct dpaa2_eth_priv *priv = ch->priv;
++ struct dpaa2_eth_fq *fq = NULL;
++ struct dpaa2_dq *dq;
++ const struct dpaa2_fd *fd;
++ int cleaned = 0;
++ int is_last;
+
-+ net_dev = dev_get_drvdata(dev);
-+ priv = netdev_priv(net_dev);
++ do {
++ dq = dpaa2_io_store_next(ch->store, &is_last);
++ if (unlikely(!dq)) {
++ /* If we're here, we *must* have placed a
++ * volatile dequeue command, so keep reading through
++ * the store until we get some sort of valid response
++ * token (either a valid frame or an "empty dequeue")
++ */
++ continue;
++ }
+
-+ priv->dpni_id = ls_dev->obj_desc.id;
++ fd = dpaa2_dq_fd(dq);
++ prefetch(fd);
+
-+ /* get a handle for the DPNI object */
-+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
-+ if (err) {
-+ dev_err(dev, "dpni_open() failed\n");
-+ goto err_open;
-+ }
++ fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
++ fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
++ cleaned++;
++ } while (!is_last);
+
-+ ls_dev->mc_io = priv->mc_io;
-+ ls_dev->mc_handle = priv->mc_token;
++ if (!cleaned)
++ return false;
+
-+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
-+ if (err) {
-+ dev_err(dev, "dpni_reset() failed\n");
-+ goto err_reset;
-+ }
++ /* All frames brought in store by a volatile dequeue
++ * come from the same queue
++ */
++ if (fq->type == DPAA2_TX_CONF_FQ)
++ *tx_conf_cleaned += cleaned;
++ else
++ *rx_cleaned += cleaned;
+
-+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
-+ &priv->dpni_attrs);
++ fq->stats.frames += cleaned;
++ ch->stats.frames += cleaned;
+
-+ if (err) {
-+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
-+ goto err_get_attr;
-+ }
++ return true;
++}
++
++/* Configure the egress frame annotation for timestamp update */
++static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
++{
++ struct dpaa2_faead *faead;
++ u32 ctrl, frc;
+
-+ /* due to a limitation in WRIOP 1.0.0 (ERR009354), the Rx buf
-+ * align value must be a multiple of 256.
++ /* Mark the egress frame annotation area as valid */
++ frc = dpaa2_fd_get_frc(fd);
++ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
++
++ /* Set hardware annotation size */
++ ctrl = dpaa2_fd_get_ctrl(fd);
++ dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
++
++ /* enable UPD (update prepended data) bit in FAEAD field of
++ * hardware frame annotation area
+ */
-+ priv->rx_buf_align =
-+ priv->dpni_attrs.wriop_version & 0x3ff ?
-+ DPAA2_ETH_RX_BUF_ALIGN : DPAA2_ETH_RX_BUF_ALIGN_V1;
++ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
++ faead = dpaa2_get_faead(buf_start, true);
++ faead->ctrl = cpu_to_le32(ctrl);
++}
+
-+ /* Update number of logical FQs in netdev */
-+ err = netif_set_real_num_tx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_tx_queues failed (%d)\n", err);
-+ goto err_set_tx_queues;
-+ }
++/* Create a frame descriptor based on a fragmented skb */
++static int build_sg_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ void *sgt_buf = NULL;
++ dma_addr_t addr;
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ struct dpaa2_sg_entry *sgt;
++ int i, err;
++ int sgt_buf_size;
++ struct scatterlist *scl, *crt_scl;
++ int num_sg;
++ int num_dma_bufs;
++ struct dpaa2_eth_swa *swa;
+
-+ err = netif_set_real_num_rx_queues(net_dev,
-+ dpaa2_eth_queue_count(priv));
-+ if (err) {
-+ dev_err(dev, "netif_set_real_num_rx_queues failed (%d)\n", err);
-+ goto err_set_rx_queues;
-+ }
++ /* Create and map scatterlist.
++ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
++ * to go beyond nr_frags+1.
++ * Note: We don't support chained scatterlists
++ */
++ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
++ return -EINVAL;
+
-+ /* Configure buffer layouts */
-+ /* rx buffer */
-+ buf_layout.pass_parser_result = true;
-+ buf_layout.pass_frame_status = true;
-+ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-+ buf_layout.data_align = priv->rx_buf_align;
-+ buf_layout.data_head_room = DPAA2_ETH_RX_HEAD_ROOM;
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
-+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(RX) failed\n");
-+ goto err_buf_layout;
-+ }
++ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
++ if (unlikely(!scl))
++ return -ENOMEM;
+
-+ /* tx buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
-+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
-+ buf_layout.pass_timestamp = true;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, &buf_layout);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_buffer_layout(TX) failed\n");
-+ goto err_buf_layout;
++ sg_init_table(scl, nr_frags + 1);
++ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
++ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
++ if (unlikely(!num_dma_bufs)) {
++ err = -ENOMEM;
++ goto dma_map_sg_failed;
+ }
+
-+ /* tx-confirm buffer */
-+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
-+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
-+ if (err) {
-+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
-+ goto err_buf_layout;
++ /* Prepare the HW SGT structure */
++ sgt_buf_size = priv->tx_data_offset +
++ sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
++ sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
++ if (unlikely(!sgt_buf)) {
++ err = -ENOMEM;
++ goto sgt_buf_alloc_failed;
+ }
++ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++ memset(sgt_buf, 0, sgt_buf_size);
+
-+ /* Now that we've set our tx buffer layout, retrieve the minimum
-+ * required tx data offset.
++ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
++
++ /* Fill in the HW SGT structure.
++ *
++ * sgt_buf is zeroed out, so the following fields are implicit
++ * in all sgt entries:
++ * - offset is 0
++ * - format is 'dpaa2_sg_single'
+ */
-+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
-+ &priv->tx_data_offset);
-+ if (err) {
-+ dev_err(dev, "dpni_get_tx_data_offset() failed (%d)\n", err);
-+ goto err_data_offset;
++ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
++ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
++ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
++ dpaa2_sg_set_final(&sgt[i - 1], true);
+
-+ if ((priv->tx_data_offset % 64) != 0)
-+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
-+ priv->tx_data_offset);
-+
-+ /* Enable congestion notifications for Tx queues */
-+ err = setup_tx_congestion(priv);
-+ if (err)
-+ goto err_tx_cong;
-+
-+ /* allocate classification rule space */
-+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
-+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
-+ if (!priv->cls_rule)
-+ goto err_cls_rule;
-+
-+ /* Enable flow control */
-+ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
-+ priv->tx_pause_frames = 1;
++ /* Store the skb backpointer in the SGT buffer.
++ * Fit the scatterlist and the number of buffers alongside the
++ * skb backpointer in the software annotation area. We'll need
++ * all of them on Tx Conf.
++ */
++ swa = (struct dpaa2_eth_swa *)sgt_buf;
++ swa->type = DPAA2_ETH_SWA_SG;
++ swa->sg.skb = skb;
++ swa->sg.scl = scl;
++ swa->sg.num_sg = num_sg;
++ swa->sg.sgt_size = sgt_buf_size;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
-+ goto err_set_link_cfg;
++ /* Separately map the SGT buffer */
++ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr))) {
++ err = -ENOMEM;
++ goto dma_map_single_failed;
+ }
++ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
++ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
++
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, sgt_buf);
+
+ return 0;
+
-+err_set_link_cfg:
-+err_cls_rule:
-+err_tx_cong:
-+err_data_offset:
-+err_buf_layout:
-+err_set_rx_queues:
-+err_set_tx_queues:
-+err_get_attr:
-+err_reset:
-+ dpni_close(priv->mc_io, 0, priv->mc_token);
-+err_open:
++dma_map_single_failed:
++ skb_free_frag(sgt_buf);
++sgt_buf_alloc_failed:
++ dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
++dma_map_sg_failed:
++ kfree(scl);
+ return err;
+}
+
-+static void free_dpni(struct dpaa2_eth_priv *priv)
++/* Create a frame descriptor based on a linear skb */
++static int build_single_fd(struct dpaa2_eth_priv *priv,
++ struct sk_buff *skb,
++ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err;
++ u8 *buffer_start, *aligned_start;
++ struct dpaa2_eth_swa *swa;
++ dma_addr_t addr;
+
-+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
-+ if (err)
-+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
-+ err);
++ buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+
-+ dpni_close(priv->mc_io, 0, priv->mc_token);
++ /* If there's enough room to align the FD address, do it.
++ * It will help hardware optimize accesses.
++ */
++ aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
++ DPAA2_ETH_TX_BUF_ALIGN);
++ if (aligned_start >= skb->head)
++ buffer_start = aligned_start;
+
-+ kfree(priv->cls_rule);
++ /* Store a backpointer to the skb at the beginning of the buffer
++ * (in the private data area) such that we can release it
++ * on Tx confirm
++ */
++ swa = (struct dpaa2_eth_swa *)buffer_start;
++ swa->type = DPAA2_ETH_SWA_SINGLE;
++ swa->single.skb = skb;
+
-+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
-+ kfree(priv->cscn_unaligned);
++ addr = dma_map_single(dev, buffer_start,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ return -ENOMEM;
++
++ dpaa2_fd_set_addr(fd, addr);
++ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
++ dpaa2_fd_set_len(fd, skb->len);
++ dpaa2_fd_set_format(fd, dpaa2_fd_single);
++ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
++
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ enable_tx_tstamp(fd, buffer_start);
++
++ return 0;
+}
+
-+static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
-+ struct dpni_taildrop *td)
++/* FD freeing routine on the Tx path
++ *
++ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
++ * back-pointed to is also freed.
++ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
++ * dpaa2_eth_tx().
++ * Optionally, return the frame annotation status word (FAS), which needs
++ * to be checked if we're on the confirmation path.
++ */
++static void free_tx_fd(struct dpaa2_eth_priv *priv,
++ const struct dpaa2_fd *fd,
++ bool in_napi)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ int err, i;
++ dma_addr_t fd_addr;
++ struct sk_buff *skb = NULL;
++ unsigned char *buffer_start;
++ struct dpaa2_eth_swa *swa;
++ u8 fd_format = dpaa2_fd_get_format(fd);
+
++ fd_addr = dpaa2_fd_get_addr(fd);
++ buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
++ swa = (struct dpaa2_eth_swa *)buffer_start;
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ if (priv->fq[i].type != DPAA2_RX_FQ)
-+ continue;
++ if (fd_format == dpaa2_fd_single) {
++ skb = swa->single.skb;
++ /* Accessing the skb buffer is safe before dma unmap, because
++ * we didn't map the actual skb shell.
++ */
++ dma_unmap_single(dev, fd_addr,
++ skb_tail_pointer(skb) - buffer_start,
++ DMA_BIDIRECTIONAL);
++ } else if (fd_format == dpaa2_fd_sg) {
++ skb = swa->sg.skb;
+
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
-+ priv->fq[i].tc, priv->fq[i].flowid,
-+ td);
-+ if (err) {
-+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
-+ return err;
-+ }
++ /* Unmap the scatterlist */
++ dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
++ kfree(swa->sg.scl);
++
++ /* Unmap the SGT buffer */
++ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
++ DMA_BIDIRECTIONAL);
++ } else {
++ netdev_dbg(priv->net_dev, "Invalid FD format\n");
++ return;
+ }
+
-+ return 0;
++ /* Get the timestamp value */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ struct skb_shared_hwtstamps shhwtstamps;
++ u64 *ns;
++
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++
++ ns = dpaa2_get_ts(buffer_start, true);
++ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns);
++ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ }
++
++ /* Free SGT buffer allocated on tx */
++ if (fd_format != dpaa2_fd_single)
++ skb_free_frag(buffer_start);
++
++ /* Move on with skb release */
++ napi_consume_skb(skb, in_napi);
+}
+
-+static int set_group_taildrop(struct dpaa2_eth_priv *priv,
-+ struct dpni_taildrop *td)
++static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_taildrop disable_td, *tc_td;
-+ int i, err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ struct dpaa2_fd fd;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ unsigned int needed_headroom;
++ struct dpaa2_eth_fq *fq;
++ u16 queue_mapping;
++ int err, i, ch_id = 0, qpri = 0;
+
-+ memset(&disable_td, 0, sizeof(struct dpni_taildrop));
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
-+ /* Do not set taildrop thresholds for PFC-enabled
-+ * traffic classes. We will enable congestion
-+ * notifications for them.
-+ */
-+ tc_td = &disable_td;
-+ else
-+ tc_td = td;
++ queue_mapping = skb_get_queue_mapping(skb);
++ fq = &priv->fq[queue_mapping];
+
-+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
-+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
-+ i, 0, tc_td);
-+ if (err) {
-+ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
-+ return err;
++ /* If we're congested, stop this tx queue; transmission of
++ * the current skb happens regardless of congestion state
++ */
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
++ netif_stop_subqueue(net_dev, queue_mapping);
++ fq->stats.congestion_entry++;
++ }
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++
++ /* For non-linear skb we don't need a minimum headroom */
++ needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
++ if (skb_headroom(skb) < needed_headroom) {
++ struct sk_buff *ns;
++
++ ns = skb_realloc_headroom(skb, needed_headroom);
++ if (unlikely(!ns)) {
++ percpu_stats->tx_dropped++;
++ goto err_alloc_headroom;
+ }
++ percpu_extras->tx_reallocs++;
++ if (skb->sk)
++ skb_set_owner_w(ns, skb->sk);
++ dev_kfree_skb(skb);
++ skb = ns;
+ }
-+ return 0;
-+}
+
-+/* Enable/disable Rx FQ taildrop
-+ *
-+ * Rx FQ taildrop is mutually exclusive with flow control and it only gets
-+ * disabled when FC is active. Depending on FC status, we need to compute
-+ * the maximum number of buffers in the pool differently, so use the
-+ * opportunity to update max number of buffers as well.
-+ */
-+int set_rx_taildrop(struct dpaa2_eth_priv *priv)
-+{
-+ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
-+ struct dpni_taildrop td_queue, td_group;
-+ int err = 0;
++ /* We'll be holding a back-reference to the skb until Tx Confirmation;
++ * we don't want that overwritten by a concurrent Tx with a cloned skb.
++ */
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ /* skb_unshare() has already freed the skb */
++ percpu_stats->tx_dropped++;
++ return NETDEV_TX_OK;
++ }
+
-+ switch (cfg) {
-+ case DPAA2_ETH_TD_NONE:
-+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
-+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_FC /
-+ priv->num_channels;
-+ break;
-+ case DPAA2_ETH_TD_QUEUE:
-+ memset(&td_group, 0, sizeof(struct dpni_taildrop));
-+ td_queue.enable = 1;
-+ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
-+ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
-+ dpaa2_eth_tc_count(priv);
-+ priv->num_bufs = DPAA2_ETH_NUM_BUFS_TD;
-+ break;
-+ case DPAA2_ETH_TD_GROUP:
-+ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
-+ td_group.enable = 1;
-+ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
-+ td_group.threshold = NAPI_POLL_WEIGHT *
-+ dpaa2_eth_queue_count(priv);
-+ priv->num_bufs = NAPI_POLL_WEIGHT *
-+ dpaa2_eth_tc_count(priv);
-+ break;
-+ default:
-+ break;
++ /* Setup the FD fields */
++ memset(&fd, 0, sizeof(fd));
++
++ if (skb_is_nonlinear(skb)) {
++ err = build_sg_fd(priv, skb, &fd);
++ percpu_extras->tx_sg_frames++;
++ percpu_extras->tx_sg_bytes += skb->len;
++ } else {
++ err = build_single_fd(priv, skb, &fd);
+ }
+
-+ err = set_queue_taildrop(priv, &td_queue);
-+ if (err)
-+ return err;
++ if (unlikely(err)) {
++ percpu_stats->tx_dropped++;
++ goto err_build_fd;
++ }
+
-+ err = set_group_taildrop(priv, &td_group);
-+ if (err)
-+ return err;
++ /* Tracing point */
++ trace_dpaa2_tx_fd(net_dev, &fd);
+
-+ priv->refill_thresh = priv->num_bufs - DPAA2_ETH_BUFS_PER_CMD;
++ if (dpaa2_eth_ceetm_is_enabled(priv)) {
++ err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &qpri);
++ if (err)
++ goto err_ceetm_classify;
++ }
+
-+ return 0;
++ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
++ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
++ priv->tx_qdid, qpri,
++ fq->tx_qdbin, &fd);
++ if (err != -EBUSY)
++ break;
++ }
++ percpu_extras->tx_portal_busy += i;
++ if (unlikely(err < 0)) {
++ percpu_stats->tx_errors++;
++ /* Clean up everything, including freeing the skb */
++ free_tx_fd(priv, &fd, false);
++ } else {
++ percpu_stats->tx_packets++;
++ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
++ }
++
++ return NETDEV_TX_OK;
++
++err_ceetm_classify:
++ free_tx_fd(priv, &fd, false);
++err_build_fd:
++err_alloc_headroom:
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
+}
+
-+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
++/* Tx confirmation frame processing routine */
++static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ const struct dpaa2_fd *fd,
++ struct napi_struct *napi __always_unused,
++ u16 queue_id)
+{
+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
-+ int err;
++ struct rtnl_link_stats64 *percpu_stats;
++ struct dpaa2_eth_drv_stats *percpu_extras;
++ u32 fd_errors;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
++ /* Tracing point */
++ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
-+ fq->fqid = qid.fqid;
++ percpu_extras = this_cpu_ptr(priv->percpu_extras);
++ percpu_extras->tx_conf_frames++;
++ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX, fq->tc, fq->flowid, q_opt, &q);
-+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
-+ return err;
++ /* Check congestion state and wake all queues if necessary */
++ if (unlikely(__netif_subqueue_stopped(priv->net_dev, queue_id))) {
++ dma_sync_single_for_cpu(dev, priv->cscn_dma,
++ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ if (!dpaa2_cscn_state_congested(priv->cscn_mem))
++ netif_tx_wake_all_queues(priv->net_dev);
+ }
+
-+ return 0;
++ /* Check frame errors in the FD field */
++ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
++ free_tx_fd(priv, fd, true);
++
++ if (likely(!fd_errors))
++ return;
++
++ if (net_ratelimit())
++ netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
++ fd_errors);
++
++ percpu_stats = this_cpu_ptr(priv->percpu_stats);
++ /* Tx-conf logically pertains to the egress path. */
++ percpu_stats->tx_errors++;
+}
+
-+static int setup_tx_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
++static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX, 0, fq->flowid, &q, &qid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ fq->tx_qdbin = qid.qdbin;
-+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, &q, &qid);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev,
++ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 0;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, q_opt, &q);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev,
++ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
-+ struct dpaa2_eth_fq *fq)
++static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpni_queue q = { { 0 } };
-+ struct dpni_queue_id qid;
-+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
-+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
-+ fq->fqid = qid.fqid;
-+
-+ q.destination.id = fq->channel->dpcon_id;
-+ q.destination.type = DPNI_DEST_DPCON;
-+ q.destination.priority = 1;
-+ q.user_context = (u64)fq;
-+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
-+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
++ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
++ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
-+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
-+#endif
+
-+/* default hash key fields */
-+static struct dpaa2_eth_hash_fields default_hash_fields[] = {
-+ {
-+ /* L2 header */
-+ .rxnfc_field = RXH_L2DA,
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_DA,
-+ .size = 6,
-+ }, {
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_SA,
-+ .size = 6,
-+ }, {
-+ /* This is the last ethertype field parsed:
-+ * depending on frame format, it can be the MAC ethertype
-+ * or the VLAN etype.
++/* Perform a single release command to add buffers
++ * to the specified buffer pool
++ */
++static int add_bufs(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch, u16 bpid)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ void *buf;
++ dma_addr_t addr;
++ int i, err;
++
++ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
++ /* Allocate buffer visible to WRIOP + skb shared info +
++ * alignment padding
+ */
-+ .cls_prot = NET_PROT_ETH,
-+ .cls_field = NH_FLD_ETH_TYPE,
-+ .size = 2,
-+ }, {
-+ /* VLAN header */
-+ .rxnfc_field = RXH_VLAN,
-+ .cls_prot = NET_PROT_VLAN,
-+ .cls_field = NH_FLD_VLAN_TCI,
-+ .size = 2,
-+ }, {
-+ /* IP header */
-+ .rxnfc_field = RXH_IP_SRC,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_SRC,
-+ .size = 4,
-+ }, {
-+ .rxnfc_field = RXH_IP_DST,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_DST,
-+ .size = 4,
-+ }, {
-+ .rxnfc_field = RXH_L3_PROTO,
-+ .cls_prot = NET_PROT_IP,
-+ .cls_field = NH_FLD_IP_PROTO,
-+ .size = 1,
-+ }, {
-+ /* Using UDP ports, this is functionally equivalent to raw
-+ * byte pairs from L4 header.
-+ */
-+ .rxnfc_field = RXH_L4_B_0_1,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_SRC,
-+ .size = 2,
-+ }, {
-+ .rxnfc_field = RXH_L4_B_2_3,
-+ .cls_prot = NET_PROT_UDP,
-+ .cls_field = NH_FLD_UDP_PORT_DST,
-+ .size = 2,
-+ },
-+};
-+
-+/* Set RX hash options */
-+static int set_hash(struct dpaa2_eth_priv *priv)
-+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg cls_cfg;
-+ struct dpni_rx_tc_dist_cfg dist_cfg;
-+ u8 *dma_mem;
-+ int i;
-+ int err = 0;
++ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
++ if (unlikely(!buf))
++ goto err_alloc;
+
-+ memset(&cls_cfg, 0, sizeof(cls_cfg));
++ buf = PTR_ALIGN(buf, priv->rx_buf_align);
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ struct dpkg_extract *key =
-+ &cls_cfg.extracts[cls_cfg.num_extracts];
++ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(dev, addr)))
++ goto err_map;
+
-+ key->type = DPKG_EXTRACT_FROM_HDR;
-+ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot;
-+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
-+ key->extract.from_hdr.field = priv->hash_fields[i].cls_field;
-+ cls_cfg.num_extracts++;
++ buf_array[i] = addr;
+
-+ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field;
++ /* tracing point */
++ trace_dpaa2_eth_buf_seed(priv->net_dev,
++ buf, dpaa2_eth_buf_raw_size(priv),
++ addr, DPAA2_ETH_RX_BUF_SIZE,
++ bpid);
+ }
+
-+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
++release_bufs:
++ /* In case the portal is busy, retry until successful */
++ while ((err = dpaa2_io_service_release(ch->dpio, bpid,
++ buf_array, i)) == -EBUSY)
++ cpu_relax();
+
-+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
++ /* If release command failed, clean up and bail out; not much
++ * else we can do about it
++ */
+ if (err) {
-+ dev_err(dev, "dpni_prepare_key_cfg() failed (%d)", err);
-+ goto err_prep_key;
-+ }
-+
-+ memset(&dist_cfg, 0, sizeof(dist_cfg));
-+
-+ /* Prepare for setting the rx dist */
-+ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-+ DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
-+ dev_err(dev, "DMA mapping failed\n");
-+ err = -ENOMEM;
-+ goto err_dma_map;
-+ }
-+
-+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
-+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
-+ } else {
-+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ free_bufs(priv, buf_array, i);
++ return 0;
+ }
+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
-+ &dist_cfg);
-+ if (err)
-+ break;
-+ }
++ return i;
+
-+ dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
-+ if (err)
-+ dev_err(dev, "dpni_set_rx_tc_dist() failed (%d)\n", err);
++err_map:
++ skb_free_frag(buf);
++err_alloc:
++ /* If we managed to allocate at least some buffers, release them */
++ if (i)
++ goto release_bufs;
+
-+err_dma_map:
-+err_prep_key:
-+ kfree(dma_mem);
-+ return err;
++ return 0;
+}
+
-+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
-+ * frame queues and channels
-+ */
-+static int bind_dpni(struct dpaa2_eth_priv *priv)
++static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
-+ struct net_device *net_dev = priv->net_dev;
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpni_pools_cfg pools_params;
-+ struct dpni_error_cfg err_cfg;
-+ int err = 0;
-+ int i;
-+
-+ pools_params.num_dpbp = 1;
-+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
-+ pools_params.pools[0].backup_pool = 0;
-+ pools_params.pools[0].priority_mask = 0xff;
-+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
-+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
-+ if (err) {
-+ dev_err(dev, "dpni_set_pools() failed\n");
-+ return err;
-+ }
-+
-+ /* Verify classification options and disable hashing and/or
-+ * flow steering support in case of invalid configuration values
-+ */
-+ priv->hash_fields = default_hash_fields;
-+ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields);
-+ check_cls_support(priv);
++ int i, j;
++ int new_count;
+
-+ /* have the interface implicitly distribute traffic based on
-+ * a static hash key
++ /* This is the lazy seeding of Rx buffer pools.
++ * dpaa2_add_bufs() is also used on the Rx hotpath and calls
++ * napi_alloc_frag(). The trouble with that is that it in turn ends up
++ * calling this_cpu_ptr(), which mandates execution in atomic context.
++ * Rather than splitting up the code, do a one-off preempt disable.
+ */
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ err = set_hash(priv);
-+ if (err) {
-+ dev_err(dev, "Hashing configuration failed\n");
-+ return err;
-+ }
-+ }
-+
-+ /* Configure handling of error frames */
-+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
-+ err_cfg.set_frame_annotation = 1;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
-+#else
-+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
-+#endif
-+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
-+ &err_cfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_errors_behavior() failed (%d)\n", err);
-+ return err;
-+ }
++ preempt_disable();
++ for (j = 0; j < priv->num_channels; j++) {
++ priv->channel[j]->buf_count = 0;
++ for (i = 0; i < priv->max_bufs_per_ch;
++ i += DPAA2_ETH_BUFS_PER_CMD) {
++ new_count = add_bufs(priv, priv->channel[j], bpid);
++ priv->channel[j]->buf_count += new_count;
+
-+ /* Configure Rx and Tx conf queues to generate CDANs */
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ switch (priv->fq[i].type) {
-+ case DPAA2_RX_FQ:
-+ err = setup_rx_flow(priv, &priv->fq[i]);
-+ break;
-+ case DPAA2_TX_CONF_FQ:
-+ err = setup_tx_flow(priv, &priv->fq[i]);
-+ break;
-+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
-+ case DPAA2_RX_ERR_FQ:
-+ err = setup_rx_err_flow(priv, &priv->fq[i]);
-+ break;
-+#endif
-+ default:
-+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
-+ return -EINVAL;
++ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
++ preempt_enable();
++ return -ENOMEM;
++ }
+ }
-+ if (err)
-+ return err;
-+ }
-+
-+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX,
-+ &priv->tx_qdid);
-+ if (err) {
-+ dev_err(dev, "dpni_get_qdid() failed\n");
-+ return err;
+ }
++ preempt_enable();
+
+ return 0;
+}
+
-+/* Allocate rings for storing incoming frame descriptors */
-+static int alloc_rings(struct dpaa2_eth_priv *priv)
++/**
++ * Drain the specified number of buffers from the DPNI's private buffer pool.
++ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
++ */
++static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
-+ struct net_device *net_dev = priv->net_dev;
-+ struct device *dev = net_dev->dev.parent;
-+ int i;
++ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ int ret;
+
-+ for (i = 0; i < priv->num_channels; i++) {
-+ priv->channel[i]->store =
-+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
-+ if (!priv->channel[i]->store) {
-+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
-+ goto err_ring;
++ do {
++ ret = dpaa2_io_service_acquire(NULL, priv->bpid,
++ buf_array, count);
++ if (ret < 0) {
++ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
++ return;
+ }
-+ }
-+
-+ return 0;
-+
-+err_ring:
-+ for (i = 0; i < priv->num_channels; i++) {
-+ if (!priv->channel[i]->store)
-+ break;
-+ dpaa2_io_store_destroy(priv->channel[i]->store);
-+ }
-+
-+ return -ENOMEM;
++ free_bufs(priv, buf_array, ret);
++ } while (ret);
+}
+
-+static void free_rings(struct dpaa2_eth_priv *priv)
++static void drain_pool(struct dpaa2_eth_priv *priv)
+{
-+ int i;
-+
-+ for (i = 0; i < priv->num_channels; i++)
-+ dpaa2_io_store_destroy(priv->channel[i]->store);
++ preempt_disable();
++ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
++ drain_bufs(priv, 1);
++ preempt_enable();
+}
+
-+static int netdev_init(struct net_device *net_dev)
++/* Function is called from softirq context only, so we don't need to guard
++ * the access to percpu count
++ */
++static int refill_pool(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *ch,
++ u16 bpid)
+{
-+ int err;
-+ struct device *dev = net_dev->dev.parent;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
-+ u8 bcast_addr[ETH_ALEN];
-+ u16 rx_headroom, rx_req_headroom;
++ int new_count;
+
-+ net_dev->netdev_ops = &dpaa2_eth_ops;
++ if (likely(ch->buf_count >= priv->refill_thresh))
++ return 0;
+
-+ /* Get firmware address, if any */
-+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_get_port_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ /* Get DPNI atttributes address, if any */
-+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
-+ dpni_mac_addr);
-+ if (err) {
-+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
-+ return err;
-+ }
-+
-+ /* First check if firmware has any address configured by bootloader */
-+ if (!is_zero_ether_addr(mac_addr)) {
-+ /* If the DPMAC addr != the DPNI addr, update it */
-+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
-+ priv->mc_token,
-+ mac_addr);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n",
-+ err);
-+ return err;
-+ }
-+ }
-+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
-+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
-+ /* Fills in net_dev->dev_addr, as required by
-+ * register_netdevice()
-+ */
-+ eth_hw_addr_random(net_dev);
-+ /* Make the user aware, without cluttering the boot log */
-+ dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
-+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
-+ priv->mc_token, net_dev->dev_addr);
-+ if (err) {
-+ dev_err(dev,
-+ "dpni_set_primary_mac_addr() failed (%d)\n", err);
-+ return err;
++ do {
++ new_count = add_bufs(priv, ch, bpid);
++ if (unlikely(!new_count)) {
++ /* Out of memory; abort for now, we'll try later on */
++ break;
+ }
-+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
-+ * practical purposes, this will be our "permanent" mac address,
-+ * at least until the next reboot. This move will also permit
-+ * register_netdevice() to properly fill up net_dev->perm_addr.
-+ */
-+ net_dev->addr_assign_type = NET_ADDR_PERM;
-+ /* If DPMAC address is non-zero, use that one */
-+ } else {
-+ /* NET_ADDR_PERM is default, all we have to do is
-+ * fill in the device addr.
-+ */
-+ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
-+ }
-+
-+ /* Explicitly add the broadcast address to the MAC filtering table;
-+ * the MC won't do that for us.
-+ */
-+ eth_broadcast_addr(bcast_addr);
-+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
-+ if (err) {
-+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
-+ /* Won't return an error; at least, we'd have egress traffic */
-+ }
-+
-+ /* Reserve enough space to align buffer as per hardware requirement;
-+ * NOTE: priv->tx_data_offset MUST be initialized at this point.
-+ */
-+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
-+
-+ /* Set MTU limits */
-+ net_dev->min_mtu = 68;
-+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
-+
-+ /* Required headroom for Rx skbs, to avoid reallocation on
-+ * forwarding path.
-+ */
-+ rx_req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
-+ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE + DPAA2_ETH_SWA_SIZE +
-+ DPAA2_ETH_RX_HEAD_ROOM, priv->rx_buf_align);
-+ if (rx_req_headroom > rx_headroom)
-+ dev_info_once(dev,
-+ "Required headroom (%d) greater than available (%d).\n"
-+ "This will impact performance due to reallocations.\n",
-+ rx_req_headroom, rx_headroom);
++ ch->buf_count += new_count;
++ } while (ch->buf_count < priv->max_bufs_per_ch);
+
-+ /* Our .ndo_init will be called herein */
-+ err = register_netdev(net_dev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev() failed (%d)\n", err);
-+ return err;
-+ }
++ if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
++ return -ENOMEM;
+
+ return 0;
+}
+
-+static int poll_link_state(void *arg)
++static int pull_channel(struct dpaa2_eth_channel *ch)
+{
-+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+ int err;
++ int dequeues = -1;
+
-+ while (!kthread_should_stop()) {
-+ err = link_state_update(priv);
-+ if (unlikely(err))
-+ return err;
-+
-+ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
-+ }
++ /* Retry while portal is busy */
++ do {
++ err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
++ ch->store);
++ dequeues++;
++ cpu_relax();
++ } while (err == -EBUSY);
+
-+ return 0;
-+}
++ ch->stats.dequeue_portal_busy += dequeues;
++ if (unlikely(err))
++ ch->stats.pull_err++;
+
-+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
-+{
-+ return IRQ_WAKE_THREAD;
++ return err;
+}
+
-+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
++/* NAPI poll routine
++ *
++ * Frames are dequeued from the QMan channel associated with this NAPI context.
++ * Rx and (if configured) Rx error frames count towards the NAPI budget. Tx
++ * confirmation frames are limited by a threshold per NAPI poll cycle.
++ */
++static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
-+ u32 status = 0, clear = 0;
-+ struct device *dev = (struct device *)arg;
-+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
-+ struct net_device *net_dev = dev_get_drvdata(dev);
++ struct dpaa2_eth_channel *ch;
++ int rx_cleaned = 0, tx_conf_cleaned = 0;
++ bool store_cleaned;
++ struct dpaa2_eth_priv *priv;
+ int err;
+
-+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-+ DPNI_IRQ_INDEX, &status);
-+ if (unlikely(err)) {
-+ netdev_err(net_dev, "Can't get irq status (err %d)", err);
-+ clear = 0xffffffff;
-+ goto out;
-+ }
-+
-+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
-+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
-+ link_state_update(netdev_priv(net_dev));
-+ }
-+
-+out:
-+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
-+ DPNI_IRQ_INDEX, clear);
-+ return IRQ_HANDLED;
-+}
-+
-+static int setup_irqs(struct fsl_mc_device *ls_dev)
-+{
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+
-+ err = fsl_mc_allocate_irqs(ls_dev);
-+ if (err) {
-+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
-+ return err;
-+ }
++ ch = container_of(napi, struct dpaa2_eth_channel, napi);
++ priv = ch->priv;
+
-+ irq = ls_dev->irqs[0];
-+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
-+ dpni_irq0_handler,
-+ dpni_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(&ls_dev->dev), &ls_dev->dev);
-+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_mc_irq;
-+ }
++ do {
++ err = pull_channel(ch);
++ if (unlikely(err))
++ break;
+
-+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
-+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
-+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
-+ goto free_irq;
-+ }
++ /* Refill pool if appropriate */
++ refill_pool(priv, ch, priv->bpid);
+
-+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
-+ DPNI_IRQ_INDEX, 1);
-+ if (err < 0) {
-+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
-+ goto free_irq;
-+ }
++ store_cleaned = consume_frames(ch, &rx_cleaned,
++ &tx_conf_cleaned);
+
-+ return 0;
++ /* If we've either consumed the budget with Rx frames,
++ * or reached the Tx conf threshold, we're done.
++ */
++ if (rx_cleaned >= budget ||
++ tx_conf_cleaned >= TX_CONF_PER_NAPI_POLL)
++ return budget;
++ } while (store_cleaned);
+
-+free_irq:
-+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
-+free_mc_irq:
-+ fsl_mc_free_irqs(ls_dev);
++ /* We didn't consume the entire budget, finish napi and
++ * re-enable data availability notifications
++ */
++ napi_complete(napi);
++ do {
++ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
++ cpu_relax();
++ } while (err == -EBUSY);
++ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
++ ch->nctx.desired_cpu);
+
-+ return err;
++ return max(rx_cleaned, 1);
+}
+
-+static void add_ch_napi(struct dpaa2_eth_priv *priv)
++static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ int i;
+ struct dpaa2_eth_channel *ch;
++ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
-+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
-+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
-+ NAPI_POLL_WEIGHT);
++ napi_enable(&ch->napi);
+ }
+}
+
-+static void del_ch_napi(struct dpaa2_eth_priv *priv)
++static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ int i;
+ struct dpaa2_eth_channel *ch;
++ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
-+ netif_napi_del(&ch->napi);
++ napi_disable(&ch->napi);
+ }
+}
+
-+/* SysFS support */
-+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ /* No MC API for getting the shaping config. We're stateful. */
-+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
-+
-+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
-+}
-+
-+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
++static int link_state_update(struct dpaa2_eth_priv *priv)
+{
-+ int err, items;
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpni_tx_shaping_cfg scfg;
++ struct dpni_link_state state;
++ int err;
+
-+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
-+ if (items != 2) {
-+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
-+ return -EINVAL;
-+ }
-+ /* Size restriction as per MC API documentation */
-+ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
-+ pr_err("max_burst_size must be <= %d\n",
-+ DPAA2_ETH_MAX_BURST_SIZE);
-+ return -EINVAL;
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (unlikely(err)) {
++ netdev_err(priv->net_dev,
++ "dpni_get_link_state() failed\n");
++ return err;
+ }
+
-+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg);
-+ if (err) {
-+ dev_err(dev, "dpni_set_tx_shaping() failed\n");
-+ return -EPERM;
-+ }
-+ /* If successful, save the current configuration for future inquiries */
-+ priv->shaping_cfg = scfg;
++	/* Check link state; speed / duplex changes are not treated yet */
++ if (priv->link_state.up == state.up)
++ return 0;
+
-+ return count;
-+}
++ priv->link_state = state;
++ if (state.up) {
++ netif_carrier_on(priv->net_dev);
++ netif_tx_start_all_queues(priv->net_dev);
++ } else {
++ netif_tx_stop_all_queues(priv->net_dev);
++ netif_carrier_off(priv->net_dev);
++ }
+
-+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ netdev_info(priv->net_dev, "Link Event: state %s\n",
++ state.up ? "up" : "down");
+
-+ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask);
++ return 0;
+}
+
-+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev,
-+ struct device_attribute *attr,
-+ const char *buf,
-+ size_t count)
++static int dpaa2_eth_open(struct net_device *net_dev)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
-+ struct dpaa2_eth_fq *fq;
-+ bool running = netif_running(priv->net_dev);
-+ int i, err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
+
-+ err = cpulist_parse(buf, &priv->txconf_cpumask);
-+ if (err)
-+ return err;
++ /* We'll only start the txqs when the link is actually ready; make sure
++ * we don't race against the link up notification, which may come
++ * immediately after dpni_enable();
++ */
++ netif_tx_stop_all_queues(net_dev);
+
-+ /* Only accept CPUs that have an affine DPIO */
-+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) {
-+ netdev_info(priv->net_dev,
-+ "cpumask must be a subset of 0x%lx\n",
-+ *cpumask_bits(&priv->dpio_cpumask));
-+ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask,
-+ &priv->txconf_cpumask);
-+ }
-+
-+ /* Rewiring the TxConf FQs requires interface shutdown.
++ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
++ * return true and cause 'ip link show' to report the LOWER_UP flag,
++ * even though the link notification wasn't even received.
+ */
-+ if (running) {
-+ err = dpaa2_eth_stop(priv->net_dev);
-+ if (err)
-+ return -ENODEV;
-+ }
++ netif_carrier_off(net_dev);
+
-+ /* Set the new TxConf FQ affinities */
-+ set_fq_affinity(priv);
++ err = seed_pool(priv, priv->bpid);
++ if (err) {
++ /* Not much to do; the buffer pool, though not filled up,
++ * may still contain some buffers which would enable us
++ * to limp on.
++ */
++ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
++ priv->dpbp_dev->obj_desc.id, priv->bpid);
++ }
+
-+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit
-+ * link up notification is received. Give the polling thread enough time
-+ * to detect the link state change, or else we'll end up with the
-+ * transmission side forever shut down.
-+ */
-+ if (priv->do_link_poll)
-+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH);
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
-+ for (i = 0; i < priv->num_fqs; i++) {
-+ fq = &priv->fq[i];
-+ if (fq->type != DPAA2_TX_CONF_FQ)
-+ continue;
-+ setup_tx_flow(priv, fq);
++ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
++ if (err < 0) {
++ netdev_err(net_dev, "dpni_enable() failed\n");
++ goto enable_err;
+ }
+
-+ if (running) {
-+ err = dpaa2_eth_open(priv->net_dev);
-+ if (err)
-+ return -ENODEV;
++ /* If the DPMAC object has already processed the link up interrupt,
++ * we have to learn the link state ourselves.
++ */
++ err = link_state_update(priv);
++ if (err < 0) {
++ netdev_err(net_dev, "Can't update link state\n");
++ goto link_state_err;
+ }
+
-+ return count;
-+}
-+
-+static struct device_attribute dpaa2_eth_attrs[] = {
-+ __ATTR(txconf_cpumask,
-+ 0600,
-+ dpaa2_eth_show_txconf_cpumask,
-+ dpaa2_eth_write_txconf_cpumask),
++ return 0;
+
-+ __ATTR(tx_shaping,
-+ 0600,
-+ dpaa2_eth_show_tx_shaping,
-+ dpaa2_eth_write_tx_shaping),
-+};
++link_state_err:
++enable_err:
++ priv->refill_thresh = 0;
++ drain_pool(priv);
++ return err;
++}
+
-+static void dpaa2_eth_sysfs_init(struct device *dev)
++static int dpaa2_eth_stop(struct net_device *net_dev)
+{
-+ int i, err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int dpni_enabled;
++ int retries = 10, i;
++ int err = 0;
+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
-+ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
-+ if (err) {
-+ dev_err(dev, "ERROR creating sysfs file\n");
-+ goto undo;
-+ }
++ netif_tx_stop_all_queues(net_dev);
++ netif_carrier_off(net_dev);
++
++ /* Loop while dpni_disable() attempts to drain the egress FQs
++ * and confirm them back to us.
++ */
++ do {
++ dpni_disable(priv->mc_io, 0, priv->mc_token);
++ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
++ if (dpni_enabled)
++ /* Allow the hardware some slack */
++ msleep(100);
++ } while (dpni_enabled && --retries);
++ if (!retries) {
++ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
++ /* Must go on and finish processing pending frames, so we don't
++ * crash at the next "ifconfig up"
++ */
++ err = -ETIMEDOUT;
+ }
-+ return;
+
-+undo:
-+ while (i > 0)
-+ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
-+}
++ priv->refill_thresh = 0;
+
-+static void dpaa2_eth_sysfs_remove(struct device *dev)
-+{
-+ int i;
++ /* Wait for all running napi poll routines to finish, so that no
++ * new refill operations are started
++ */
++ for (i = 0; i < priv->num_channels; i++)
++ napi_synchronize(&priv->channel[i]->napi);
+
-+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
-+ device_remove_file(dev, &dpaa2_eth_attrs[i]);
++ /* Empty the buffer pool */
++ drain_pool(priv);
++
++ return err;
+}
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
-+static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
-+ struct ieee_pfc *pfc)
++static int dpaa2_eth_init(struct net_device *net_dev)
+{
++ u64 supported = 0;
++ u64 not_supported = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_congestion_notification_cfg notification_cfg;
-+ struct dpni_link_state state;
-+ int err, i;
-+
-+ pfc->pfc_cap = dpaa2_eth_tc_count(priv);
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ return err;
-+ }
++ u32 options = priv->dpni_attrs.options;
+
-+ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
-+ return 0;
++ /* Capabilities listing */
++ supported |= IFF_LIVE_ADDR_CHANGE;
+
-+ priv->pfc.pfc_en = 0;
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ err = dpni_get_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_RX,
-+							       i, &notification_cfg);
-+ if (err) {
-+ netdev_err(net_dev, "Error %d getting congestion notif",
-+ err);
-+ return err;
-+ }
++ if (options & DPNI_OPT_NO_MAC_FILTER)
++ not_supported |= IFF_UNICAST_FLT;
++ else
++ supported |= IFF_UNICAST_FLT;
+
-+ if (notification_cfg.threshold_entry)
-+ priv->pfc.pfc_en |= 1 << i;
-+ }
++ net_dev->priv_flags |= supported;
++ net_dev->priv_flags &= ~not_supported;
+
-+ pfc->pfc_en = priv->pfc.pfc_en;
-+ pfc->mbc = priv->pfc.mbc;
-+ pfc->delay = priv->pfc.delay;
++ /* Features */
++ net_dev->features = NETIF_F_RXCSUM |
++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
++ NETIF_F_SG | NETIF_F_HIGHDMA |
++ NETIF_F_LLTX;
++ net_dev->hw_features = net_dev->features;
+
+ return 0;
+}
+
-+/* Configure ingress classification based on VLAN PCP */
-+static int set_vlan_qos(struct dpaa2_eth_priv *priv)
++static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
-+ struct device *dev = priv->net_dev->dev.parent;
-+ struct dpkg_profile_cfg kg_cfg = {0};
-+ struct dpni_qos_tbl_cfg qos_cfg = {0};
-+ struct dpni_rule_cfg key_params;
-+ u8 *params_iova;
-+ __be16 key, mask = cpu_to_be16(VLAN_PRIO_MASK);
-+ int err = 0, i, j = 0;
-+
-+ if (priv->vlan_clsf_set)
-+ return 0;
-+
-+ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-+ if (!params_iova)
-+ return -ENOMEM;
-+
-+ kg_cfg.num_extracts = 1;
-+ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-+ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
-+ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
-+ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
-+
-+ err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
-+ if (err) {
-+ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
-+ goto out_free;
-+ }
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ int err;
+
-+ /* Set QoS table */
-+ qos_cfg.default_tc = 0;
-+ qos_cfg.discard_on_miss = 0;
-+ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
-+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
-+ err = -ENOMEM;
-+ goto out_free;
++ err = eth_mac_addr(net_dev, addr);
++ if (err < 0) {
++ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
++ return err;
+ }
-+ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
-+ dma_unmap_single(dev, qos_cfg.key_cfg_iova,
-+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
+ if (err) {
-+ dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
-+ goto out_free;
++ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
++ return err;
+ }
+
-+ key_params.key_size = sizeof(key);
++ return 0;
++}
+
-+ if (dpaa2_eth_fs_mask_enabled(priv)) {
-+ key_params.mask_iova = dma_map_single(dev, &mask, sizeof(mask),
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_params.mask_iova)) {
-+ dev_err(dev, "DMA mapping failed %s\n", __func__);
-+ err = -ENOMEM;
-+ goto out_free;
-+ }
-+ } else {
-+ key_params.mask_iova = 0;
-+ }
++/** Fill in counters maintained by the GPP driver. These may be different from
++ * the hardware counters obtained by ethtool.
++ */
++static struct rtnl_link_stats64 *dpaa2_eth_get_stats(struct net_device *net_dev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct rtnl_link_stats64 *percpu_stats;
++ u64 *cpustats;
++ u64 *netstats = (u64 *)stats;
++ int i, j;
++ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
-+ key_params.key_iova = dma_map_single(dev, &key, sizeof(key),
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, key_params.key_iova)) {
-+ dev_err(dev, "%s: DMA mapping failed\n", __func__);
-+ err = -ENOMEM;
-+ goto out_unmap_mask;
++ for_each_possible_cpu(i) {
++ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
++ cpustats = (u64 *)percpu_stats;
++ for (j = 0; j < num; j++)
++ netstats[j] += cpustats[j];
+ }
++ return stats;
++}
+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
-+ dma_sync_single_for_device(dev, key_params.key_iova,
-+ sizeof(key), DMA_TO_DEVICE);
++/* Copy mac unicast addresses from @net_dev to @priv.
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_uc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
+
-+ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
-+ &key_params, i, j++);
-+ if (err) {
-+ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
-+ goto out_unmap;
-+ }
++ netdev_for_each_uc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
+ }
++}
+
-+ priv->vlan_clsf_set = true;
++/* Copy mac multicast addresses from @net_dev to @priv
++ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
++ */
++static void add_mc_hw_addr(const struct net_device *net_dev,
++ struct dpaa2_eth_priv *priv)
++{
++ struct netdev_hw_addr *ha;
++ int err;
+
-+out_unmap:
-+ dma_unmap_single(dev, key_params.key_iova, sizeof(key), DMA_TO_DEVICE);
-+out_unmap_mask:
-+ if (key_params.mask_iova)
-+ dma_unmap_single(dev, key_params.mask_iova, sizeof(mask),
-+ DMA_TO_DEVICE);
-+out_free:
-+ kfree(params_iova);
-+ return err;
++ netdev_for_each_mc_addr(ha, net_dev) {
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
++ ha->addr);
++ if (err)
++ netdev_warn(priv->net_dev,
++ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
++ ha->addr, err);
++ }
+}
+
-+static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
-+ struct ieee_pfc *pfc)
++static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_congestion_notification_cfg notification_cfg = {0};
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ int err = 0, i;
-+
-+ if (priv->pfc.pfc_en == pfc->pfc_en)
-+ /* Same enabled mask, nothing to be done */
-+ return 0;
++ int uc_count = netdev_uc_count(net_dev);
++ int mc_count = netdev_mc_count(net_dev);
++ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
++ u32 options = priv->dpni_attrs.options;
++ u16 mc_token = priv->mc_token;
++ struct fsl_mc_io *mc_io = priv->mc_io;
++ int err;
+
-+ err = set_vlan_qos(priv);
-+ if (err)
-+ return err;
++ /* Basic sanity checks; these probably indicate a misconfiguration */
++ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
++ netdev_info(net_dev,
++ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
++ max_mac);
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ return err;
++ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
++ if (uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count, max_mac);
++ goto force_promisc;
++ }
++ if (mc_count + uc_count > max_mac) {
++ netdev_info(net_dev,
++ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
++ uc_count + mc_count, max_mac);
++ goto force_mc_promisc;
+ }
+
-+ cfg.rate = state.rate;
-+ cfg.options = state.options;
-+ if (pfc->pfc_en)
-+ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
++ /* Adjust promisc settings due to flag combinations */
++ if (net_dev->flags & IFF_PROMISC)
++ goto force_promisc;
++ if (net_dev->flags & IFF_ALLMULTI) {
++ /* First, rebuild unicast filtering table. This should be done
++ * in promisc mode, in order to avoid frame loss while we
++ * progressively add entries to the table.
++ * We don't know whether we had been in promisc already, and
++ * making an MC call to find out is expensive; so set uc promisc
++ * nonetheless.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set uc promisc\n");
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err) {
-+ netdev_err(net_dev, "ERROR %d setting link cfg", err);
-+ return err;
-+ }
++ /* Actual uc table reconstruction. */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc filters\n");
++ add_uc_hw_addr(net_dev, priv);
+
-+ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
++ /* Finally, clear uc promisc and set mc promisc as requested. */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear uc promisc\n");
++ goto force_mc_promisc;
++ }
+
-+ err = set_rx_taildrop(priv);
++ /* Neither unicast, nor multicast promisc will be on... eventually.
++ * For now, rebuild mac filtering tables while forcing both of them on.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
-+ return err;
++ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
-+ /* configure congestion notifications */
-+ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
-+ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
-+ notification_cfg.message_iova = 0ULL;
-+ notification_cfg.message_ctx = 0ULL;
++ /* Actual mac filtering tables reconstruction */
++ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mac filters\n");
++ add_mc_hw_addr(net_dev, priv);
++ add_uc_hw_addr(net_dev, priv);
+
-+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
-+ if (dpaa2_eth_is_pfc_enabled(priv, i)) {
-+ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
-+ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
-+ } else {
-+ notification_cfg.threshold_entry = 0;
-+ notification_cfg.threshold_exit = 0;
-+ }
++ /* Now we can clear both ucast and mcast promisc, without risking
++ * to drop legitimate frames anymore.
++ */
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear ucast promisc\n");
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
++ if (err)
++ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
-+ err = dpni_set_congestion_notification(priv->mc_io, 0,
-+ priv->mc_token,
-+ DPNI_QUEUE_RX,
-+							       i, &notification_cfg);
-+ if (err) {
-+ netdev_err(net_dev, "Error %d setting congestion notif",
-+ err);
-+ return err;
-+ }
-+ }
++ return;
+
-+ return 0;
++force_promisc:
++ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set ucast promisc\n");
++force_mc_promisc:
++ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
++ if (err)
++ netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
-+static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
++static int dpaa2_eth_set_features(struct net_device *net_dev,
++ netdev_features_t features)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ netdev_features_t changed = features ^ net_dev->features;
++ bool enable;
++ int err;
+
-+ return priv->dcbx_mode;
-+}
++ if (changed & NETIF_F_RXCSUM) {
++ enable = !!(features & NETIF_F_RXCSUM);
++ err = set_rx_csum(priv, enable);
++ if (err)
++ return err;
++ }
+
-+static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
++ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
++ err = set_tx_csum(priv, enable);
++ if (err)
++ return err;
++ }
+
-+ priv->dcbx_mode = mode;
+ return 0;
+}
+
-+static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
++static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
++ struct hwtstamp_config config;
+
-+ switch (capid) {
-+ case DCB_CAP_ATTR_PFC:
-+ *cap = true;
-+ break;
-+ case DCB_CAP_ATTR_PFC_TCS:
-+ *cap = 1 << dpaa2_eth_tc_count(priv);
++ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
++ return -EFAULT;
++
++ switch (config.tx_type) {
++ case HWTSTAMP_TX_OFF:
++ priv->ts_tx_en = false;
+ break;
-+ case DCB_CAP_ATTR_DCBX:
-+ *cap = priv->dcbx_mode;
++ case HWTSTAMP_TX_ON:
++ priv->ts_tx_en = true;
+ break;
+ default:
-+ *cap = false;
-+ break;
++ return -ERANGE;
+ }
+
-+ return 0;
++ if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
++ priv->ts_rx_en = false;
++ } else {
++ priv->ts_rx_en = true;
++ /* TS is set for all frame types, not only those requested */
++ config.rx_filter = HWTSTAMP_FILTER_ALL;
++ }
++
++ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
++ -EFAULT : 0;
+}
+
-+const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
-+ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
-+ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
-+ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
-+ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
-+ .getcap = dpaa2_eth_dcbnl_getcap,
-+};
-+#endif
++static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ if (cmd == SIOCSHWTSTAMP)
++ return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+
-+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
++ return -EINVAL;
++}
++
++static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+{
-+ struct device *dev;
-+ struct net_device *net_dev = NULL;
-+ struct dpaa2_eth_priv *priv = NULL;
-+ int err = 0;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_buffer_layout buf_layout = {0};
++ int err;
+
-+ dev = &dpni_dev->dev;
++ /* We need to check for WRIOP version 1.0.0, but depending on the MC
++ * version, this number is not always provided correctly on rev1.
++ * We need to check for both alternatives in this situation.
++ */
++ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
++ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
++ else
++ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+
-+ /* Net device */
-+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
-+ if (!net_dev) {
-+ dev_err(dev, "alloc_etherdev_mq() failed\n");
-+ return -ENOMEM;
++ /* tx buffer */
++ buf_layout.pass_timestamp = true;
++ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
++ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
++ return err;
+ }
+
-+ SET_NETDEV_DEV(net_dev, dev);
-+ dev_set_drvdata(dev, net_dev);
-+
-+ priv = netdev_priv(net_dev);
-+ priv->net_dev = net_dev;
-+
-+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
++ /* tx-confirm buffer */
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, &buf_layout);
++ if (err) {
++ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
++ return err;
++ }
+
-+ /* Obtain a MC portal */
-+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
-+ &priv->mc_io);
++ /* Now that we've set our tx buffer layout, retrieve the minimum
++ * required tx data offset.
++ */
++ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
++ &priv->tx_data_offset);
+ if (err) {
-+ dev_dbg(dev, "MC portal allocation failed\n");
-+ err = -EPROBE_DEFER;
-+ goto err_portal_alloc;
++ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
++ return err;
+ }
+
-+ /* MC objects initialization and configuration */
-+ err = setup_dpni(dpni_dev);
-+ if (err)
-+ goto err_dpni_setup;
++ if ((priv->tx_data_offset % 64) != 0)
++ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
++ priv->tx_data_offset);
+
-+ err = setup_dpio(priv);
++ /* rx buffer */
++ buf_layout.pass_frame_status = true;
++ buf_layout.pass_parser_result = true;
++ buf_layout.data_align = priv->rx_buf_align;
++ buf_layout.private_data_size = 0;
++ buf_layout.data_head_room = dpaa2_eth_rx_headroom(priv);
++ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
++ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
++ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
++ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
++ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
++ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
-+ dev_info(dev, "Defer probing as no DPIO available\n");
-+ err = -EPROBE_DEFER;
-+ goto err_dpio_setup;
++ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
++ return err;
+ }
+
-+ setup_fqs(priv);
++ return 0;
++}
+
-+ err = setup_dpbp(priv);
-+ if (err)
-+ goto err_dpbp_setup;
++static int dpaa2_eth_set_xdp(struct net_device *net_dev, struct bpf_prog *prog)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_channel *ch;
++ struct bpf_prog *old_prog;
++ int i, err;
+
-+ err = bind_dpni(priv);
-+ if (err)
-+ goto err_bind;
++ /* No support for SG frames */
++ if (DPAA2_ETH_L2_MAX_FRM(net_dev->mtu) > DPAA2_ETH_RX_BUF_SIZE)
++ return -EINVAL;
+
-+ /* Percpu statistics */
-+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
-+ if (!priv->percpu_stats) {
-+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
-+ err = -ENOMEM;
-+ goto err_alloc_percpu_stats;
-+ }
-+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
-+ if (!priv->percpu_extras) {
-+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
-+ err = -ENOMEM;
-+ goto err_alloc_percpu_extras;
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_stop(net_dev);
++ if (err)
++ return err;
+ }
+
-+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id);
-+ if (!dev_valid_name(net_dev->name)) {
-+ dev_warn(&net_dev->dev,
-+ "netdevice name \"%s\" cannot be used, reverting to default..\n",
-+ net_dev->name);
-+ dev_alloc_name(net_dev, "eth%d");
-+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name);
++ if (prog) {
++ prog = bpf_prog_add(prog, priv->num_channels - 1);
++ if (IS_ERR(prog))
++ return PTR_ERR(prog);
+ }
+
-+ err = netdev_init(net_dev);
-+ if (err)
-+ goto err_netdev_init;
-+
-+ /* Configure checksum offload based on current interface flags */
-+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
-+ if (err)
-+ goto err_csum;
++ priv->has_xdp_prog = !!prog;
+
-+ err = set_tx_csum(priv, !!(net_dev->features &
-+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
-+ if (err)
-+ goto err_csum;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ old_prog = xchg(&ch->xdp_prog, prog);
++ if (old_prog)
++ bpf_prog_put(old_prog);
++ }
+
-+ err = alloc_rings(priv);
-+ if (err)
-+ goto err_alloc_rings;
++ if (netif_running(net_dev)) {
++ err = dpaa2_eth_open(net_dev);
++ if (err)
++ return err;
++ }
+
-+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
-+#ifdef CONFIG_FSL_DPAA2_ETH_DCB
-+ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
-+ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-+#endif
++ return 0;
++}
+
-+ /* Add a NAPI context for each channel */
-+ add_ch_napi(priv);
-+ enable_ch_napi(priv);
++static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_xdp *xdp)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
-+ err = setup_irqs(dpni_dev);
-+ if (err) {
-+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
-+ priv->poll_thread = kthread_run(poll_link_state, priv,
-+ "%s_poll_link", net_dev->name);
-+ if (IS_ERR(priv->poll_thread)) {
-+ netdev_err(net_dev, "Error starting polling thread\n");
-+ goto err_poll_thread;
-+ }
-+ priv->do_link_poll = true;
++ switch (xdp->command) {
++ case XDP_SETUP_PROG:
++ return dpaa2_eth_set_xdp(dev, xdp->prog);
++ case XDP_QUERY_PROG:
++ xdp->prog_attached = priv->has_xdp_prog;
++ return 0;
++ default:
++ return -EINVAL;
+ }
++}
+
-+ dpaa2_eth_sysfs_init(&net_dev->dev);
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_add(priv);
-+#endif
++static const struct net_device_ops dpaa2_eth_ops = {
++ .ndo_open = dpaa2_eth_open,
++ .ndo_start_xmit = dpaa2_eth_tx,
++ .ndo_stop = dpaa2_eth_stop,
++ .ndo_init = dpaa2_eth_init,
++ .ndo_set_mac_address = dpaa2_eth_set_addr,
++ .ndo_get_stats64 = dpaa2_eth_get_stats,
++ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
++ .ndo_set_features = dpaa2_eth_set_features,
++ .ndo_do_ioctl = dpaa2_eth_ioctl,
++ .ndo_xdp = dpaa2_eth_xdp,
++};
+
-+ dev_info(dev, "Probed interface %s\n", net_dev->name);
-+ return 0;
++static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
++{
++ struct dpaa2_eth_channel *ch;
+
-+err_poll_thread:
-+ free_rings(priv);
-+err_alloc_rings:
-+err_csum:
-+ unregister_netdev(net_dev);
-+err_netdev_init:
-+ free_percpu(priv->percpu_extras);
-+err_alloc_percpu_extras:
-+ free_percpu(priv->percpu_stats);
-+err_alloc_percpu_stats:
-+ disable_ch_napi(priv);
-+ del_ch_napi(priv);
-+err_bind:
-+ free_dpbp(priv);
-+err_dpbp_setup:
-+ free_dpio(priv);
-+err_dpio_setup:
-+ free_dpni(priv);
-+err_dpni_setup:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_portal_alloc:
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
++ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+
-+ return err;
++ /* Update NAPI statistics */
++ ch->stats.cdan++;
++
++ napi_schedule_irqoff(&ch->napi);
+}
+
-+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
++/* Allocate and configure a DPCON object */
++static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+{
-+ struct device *dev;
-+ struct net_device *net_dev;
-+ struct dpaa2_eth_priv *priv;
-+
-+ dev = &ls_dev->dev;
-+ net_dev = dev_get_drvdata(dev);
-+ priv = netdev_priv(net_dev);
++ struct fsl_mc_device *dpcon;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpcon_attr attrs;
++ int err;
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ dpaa2_dbg_remove(priv);
-+#endif
-+ dpaa2_eth_sysfs_remove(&net_dev->dev);
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
++ FSL_MC_POOL_DPCON, &dpcon);
++ if (err) {
++ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
++ return NULL;
++ }
+
-+ disable_ch_napi(priv);
-+ del_ch_napi(priv);
++ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_open() failed\n");
++ goto free;
++ }
+
-+ unregister_netdev(net_dev);
-+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
++ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_reset() failed\n");
++ goto close;
++ }
+
-+ if (priv->do_link_poll)
-+ kthread_stop(priv->poll_thread);
-+ else
-+ fsl_mc_free_irqs(ls_dev);
++ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
++ if (err) {
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto close;
++ }
+
-+ free_rings(priv);
-+ free_percpu(priv->percpu_stats);
-+ free_percpu(priv->percpu_extras);
-+ free_dpbp(priv);
-+ free_dpio(priv);
-+ free_dpni(priv);
++ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
++ if (err) {
++ dev_err(dev, "dpcon_enable() failed\n");
++ goto close;
++ }
+
-+ fsl_mc_portal_free(priv->mc_io);
++ return dpcon;
+
-+ dev_set_drvdata(dev, NULL);
-+ free_netdev(net_dev);
++close:
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++free:
++ fsl_mc_object_free(dpcon);
+
-+ return 0;
++ return NULL;
+}
+
-+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
-+ {
-+ .vendor = FSL_MC_VENDOR_FREESCALE,
-+ .obj_type = "dpni",
-+ },
-+ { .vendor = 0x0 }
-+};
-+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
-+
-+static struct fsl_mc_driver dpaa2_eth_driver = {
-+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
-+ },
-+ .probe = dpaa2_eth_probe,
-+ .remove = dpaa2_eth_remove,
-+ .match_id_table = dpaa2_eth_match_id_table
-+};
-+
-+static int __init dpaa2_eth_driver_init(void)
++static void free_dpcon(struct dpaa2_eth_priv *priv,
++ struct fsl_mc_device *dpcon)
+{
-+ int err;
++ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
++ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
++ fsl_mc_object_free(dpcon);
++}
+
-+ dpaa2_eth_dbg_init();
-+ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++static struct dpaa2_eth_channel *
++alloc_channel(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_attr attr;
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
++
++ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
++ if (!channel)
++ return NULL;
++
++ channel->dpcon = setup_dpcon(priv);
++ if (!channel->dpcon)
++ goto err_setup;
++
++ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
++ &attr);
+ if (err) {
-+ dpaa2_eth_dbg_exit();
-+ return err;
++ dev_err(dev, "dpcon_get_attributes() failed\n");
++ goto err_get_attr;
+ }
+
-+ return 0;
-+}
++ channel->dpcon_id = attr.id;
++ channel->ch_id = attr.qbman_ch_id;
++ channel->priv = priv;
+
-+static void __exit dpaa2_eth_driver_exit(void)
-+{
-+ dpaa2_eth_dbg_exit();
-+ fsl_mc_driver_unregister(&dpaa2_eth_driver);
-+}
++ return channel;
+
-+module_init(dpaa2_eth_driver_init);
-+module_exit(dpaa2_eth_driver_exit);
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
-@@ -0,0 +1,499 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+ */
++err_get_attr:
++ free_dpcon(priv, channel->dpcon);
++err_setup:
++ kfree(channel);
++ return NULL;
++}
+
-+#ifndef __DPAA2_ETH_H
-+#define __DPAA2_ETH_H
++static void free_channel(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_channel *channel)
++{
++ struct bpf_prog *prog;
+
-+#include <linux/atomic.h>
-+#include <linux/dcbnl.h>
-+#include <linux/netdevice.h>
-+#include <linux/if_vlan.h>
-+#include "../../fsl-mc/include/dpaa2-io.h"
-+#include "dpni.h"
-+#include "net.h"
++ free_dpcon(priv, channel->dpcon);
+
-+#include "dpaa2-eth-debugfs.h"
++ prog = READ_ONCE(channel->xdp_prog);
++ if (prog)
++ bpf_prog_put(prog);
+
-+#define DPAA2_ETH_STORE_SIZE 16
++ kfree(channel);
++}
+
-+/* We set a max threshold for how many Tx confirmations we should process
-+ * on a NAPI poll call, they take less processing time.
++/* DPIO setup: allocate and configure QBMan channels, setup core affinity
++ * and register data availability notifications
+ */
-+#define TX_CONF_PER_NAPI_POLL 256
++static int setup_dpio(struct dpaa2_eth_priv *priv)
++{
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_eth_channel *channel;
++ struct dpcon_notification_cfg dpcon_notif_cfg;
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
+
-+/* Maximum number of scatter-gather entries in an ingress frame,
-+ * considering the maximum receive frame size is 64K
-+ */
-+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
++ /* We want the ability to spread ingress traffic (RX, TX conf) to as
++ * many cores as possible, so we need one channel for each core
++ * (unless there's fewer queues than cores, in which case the extra
++ * channels would be wasted).
++ * Allocate one channel per core and register it to the core's
++ * affine DPIO. If not enough channels are available for all cores
++ * or if some cores don't have an affine DPIO, there will be no
++ * ingress frame processing on those cores.
++ */
++ cpumask_clear(&priv->dpio_cpumask);
++ for_each_online_cpu(i) {
++ /* Try to allocate a channel */
++ channel = alloc_channel(priv);
++ if (!channel) {
++ dev_info(dev,
++ "No affine channel for cpu %d and above\n", i);
++ err = -ENODEV;
++ goto err_alloc_ch;
++ }
+
-+/* Maximum acceptable MTU value. It is in direct relation with the hardware
-+ * enforced Max Frame Length (currently 10k).
-+ */
-+#define DPAA2_ETH_MFL (10 * 1024)
-+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
-+/* Convert L3 MTU to L2 MFL */
-+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
++ priv->channel[priv->num_channels] = channel;
+
-+/* Maximum burst size value for Tx shaping */
-+#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
++ nctx = &channel->nctx;
++ nctx->is_cdan = 1;
++ nctx->cb = cdan_cb;
++ nctx->id = channel->ch_id;
++ nctx->desired_cpu = i;
+
-+/* Maximum number of buffers that can be acquired/released through a single
-+ * QBMan command
-+ */
-+#define DPAA2_ETH_BUFS_PER_CMD 7
++ /* Register the new context */
++ channel->dpio = dpaa2_io_service_select(i);
++ err = dpaa2_io_service_register(channel->dpio, nctx);
++ if (err) {
++ dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
++ /* If no affine DPIO for this core, there's probably
++ * none available for next cores either. Signal we want
++ * to retry later, in case the DPIO devices weren't
++ * probed yet.
++ */
++ err = -EPROBE_DEFER;
++ goto err_service_reg;
++ }
+
-+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
-+ * frames in the Rx queues (length of the current frame is not
-+ * taken into account when making the taildrop decision)
-+ */
-+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
++ /* Register DPCON notification with MC */
++ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
++ dpcon_notif_cfg.priority = 0;
++ dpcon_notif_cfg.user_ctx = nctx->qman64;
++ err = dpcon_set_notification(priv->mc_io, 0,
++ channel->dpcon->mc_handle,
++ &dpcon_notif_cfg);
++ if (err) {
++ dev_err(dev, "dpcon_set_notification failed()\n");
++ goto err_set_cdan;
++ }
+
-+/* Buffer quota per queue. Must be large enough such that for minimum sized
-+ * frames taildrop kicks in before the bpool gets depleted, so we compute
-+ * how many 64B frames fit inside the taildrop threshold and add a margin
-+ * to accommodate the buffer refill delay.
-+ */
-+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
-+#define DPAA2_ETH_NUM_BUFS_TD (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-+#define DPAA2_ETH_REFILL_THRESH_TD \
-+ (DPAA2_ETH_NUM_BUFS_TD - DPAA2_ETH_BUFS_PER_CMD)
++ /* If we managed to allocate a channel and also found an affine
++ * DPIO for this core, add it to the final mask
++ */
++ cpumask_set_cpu(i, &priv->dpio_cpumask);
++ priv->num_channels++;
+
-+/* Buffer quota per queue to use when flow control is active. */
-+#define DPAA2_ETH_NUM_BUFS_FC 256
++ /* Stop if we already have enough channels to accommodate all
++ * RX and TX conf queues
++ */
++ if (priv->num_channels == dpaa2_eth_queue_count(priv))
++ break;
++ }
+
-+/* Hardware requires alignment for ingress/egress buffer addresses
-+ * and ingress buffer lengths.
-+ */
-+#define DPAA2_ETH_RX_BUF_SIZE 2048
-+#define DPAA2_ETH_TX_BUF_ALIGN 64
-+#define DPAA2_ETH_RX_BUF_ALIGN 64
-+#define DPAA2_ETH_RX_BUF_ALIGN_V1 256
-+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
-+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD)
++ return 0;
+
-+/* rx_extra_head prevents reallocations in L3 processing. */
-+#define DPAA2_ETH_SKB_SIZE \
-+ (DPAA2_ETH_RX_BUF_SIZE + \
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
++err_set_cdan:
++ dpaa2_io_service_deregister(channel->dpio, nctx);
++err_service_reg:
++ free_channel(priv, channel);
++err_alloc_ch:
++ if (cpumask_empty(&priv->dpio_cpumask)) {
++ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
++ return err;
++ }
+
-+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
-+ * buffers large enough to allow building an skb around them and also account
-+ * for alignment restrictions.
-+ */
-+#define DPAA2_ETH_BUF_RAW_SIZE(p_priv) \
-+ (DPAA2_ETH_SKB_SIZE + \
-+ (p_priv)->rx_buf_align)
++ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
++ cpumask_pr_args(&priv->dpio_cpumask));
+
-+/* PTP nominal frequency 1GHz */
-+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
++ return 0;
++}
+
-+/* We are accommodating a skb backpointer and some S/G info
-+ * in the frame's software annotation. The hardware
-+ * options are either 0 or 64, so we choose the latter.
-+ */
-+#define DPAA2_ETH_SWA_SIZE 64
++static void free_dpio(struct dpaa2_eth_priv *priv)
++{
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+/* Extra headroom space requested to hardware, in order to make sure there's
-+ * no realloc'ing in forwarding scenarios
-+ */
-+#define DPAA2_ETH_RX_HEAD_ROOM \
-+ (DPAA2_ETH_TX_HWA_SIZE - DPAA2_ETH_RX_HWA_SIZE + \
-+ DPAA2_ETH_TX_BUF_ALIGN)
++ /* deregister CDAN notifications and free channels */
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
++ free_channel(priv, ch);
++ }
++}
+
-+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
-+struct dpaa2_eth_swa {
-+ struct sk_buff *skb;
-+ struct scatterlist *scl;
-+ int num_sg;
-+ int num_dma_bufs;
-+};
++static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
++ int cpu)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int i;
+
-+/* Annotation valid bits in FD FRC */
-+#define DPAA2_FD_FRC_FASV 0x8000
-+#define DPAA2_FD_FRC_FAEADV 0x4000
-+#define DPAA2_FD_FRC_FAPRV 0x2000
-+#define DPAA2_FD_FRC_FAIADV 0x1000
-+#define DPAA2_FD_FRC_FASWOV 0x0800
-+#define DPAA2_FD_FRC_FAICFDV 0x0400
++ for (i = 0; i < priv->num_channels; i++)
++ if (priv->channel[i]->nctx.desired_cpu == cpu)
++ return priv->channel[i];
+
-+#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
-+#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
-+ FD_CTRL_SBE | \
-+ FD_CTRL_FSE | \
-+ FD_CTRL_FAERR)
++ /* We should never get here. Issue a warning and return
++ * the first channel, because it's still better than nothing
++ */
++ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
-+/* Annotation bits in FD CTRL */
-+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
++ return priv->channel[0];
++}
+
-+/* Size of hardware annotation area based on the current buffer layout
-+ * configuration
-+ */
-+#define DPAA2_ETH_RX_HWA_SIZE 64
-+#define DPAA2_ETH_TX_HWA_SIZE 128
++static void set_fq_affinity(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct cpumask xps_mask;
++ struct dpaa2_eth_fq *fq;
++ int rx_cpu, txc_cpu;
++ int i, err;
+
-+/* Frame annotation status */
-+struct dpaa2_fas {
-+ u8 reserved;
-+ u8 ppid;
-+ __le16 ifpid;
-+ __le32 status;
-+} __packed;
++ /* For each FQ, pick one channel/CPU to deliver frames to.
++ * This may well change at runtime, either through irqbalance or
++ * through direct user intervention.
++ */
++ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
-+/* Frame annotation status word is located in the first 8 bytes
-+ * of the buffer's hardware annotation area
-+ */
-+#define DPAA2_FAS_OFFSET 0
-+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
++ for (i = 0; i < priv->num_fqs; i++) {
++ fq = &priv->fq[i];
++ switch (fq->type) {
++ case DPAA2_RX_FQ:
++ case DPAA2_RX_ERR_FQ:
++ fq->target_cpu = rx_cpu;
++ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
++ if (rx_cpu >= nr_cpu_ids)
++ rx_cpu = cpumask_first(&priv->dpio_cpumask);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ fq->target_cpu = txc_cpu;
+
-+/* Timestamp is located in the next 8 bytes of the buffer's
-+ * hardware annotation area
-+ */
-+#define DPAA2_TS_OFFSET 0x8
++ /* Tell the stack to affine to txc_cpu the Tx queue
++ * associated with the confirmation one
++ */
++ cpumask_clear(&xps_mask);
++ cpumask_set_cpu(txc_cpu, &xps_mask);
++ err = netif_set_xps_queue(priv->net_dev, &xps_mask,
++ fq->flowid);
++ if (err)
++ dev_info_once(dev, "Error setting XPS queue\n");
+
-+/* Frame annotation egress action descriptor */
-+#define DPAA2_FAEAD_OFFSET 0x58
++ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
++ if (txc_cpu >= nr_cpu_ids)
++ txc_cpu = cpumask_first(&priv->dpio_cpumask);
++ break;
++ default:
++ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
++ }
++ fq->channel = get_affine_channel(priv, fq->target_cpu);
++ }
++}
+
-+struct dpaa2_faead {
-+ __le32 conf_fqid;
-+ __le32 ctrl;
-+};
++static void setup_fqs(struct dpaa2_eth_priv *priv)
++{
++ int i, j;
+
-+#define DPAA2_FAEAD_A2V 0x20000000
-+#define DPAA2_FAEAD_UPDV 0x00001000
-+#define DPAA2_FAEAD_UPD 0x00000010
++ /* We have one TxConf FQ per Tx flow.
++ * The number of Tx and Rx queues is the same.
++ * Tx queues come first in the fq array.
++ */
++ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
++ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
++ priv->fq[priv->num_fqs++].flowid = (u16)i;
++ }
+
-+/* accessors for the hardware annotation fields that we use */
-+#define dpaa2_eth_get_hwa(buf_addr) \
-+ ((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++)
++ for (j = 0; j < dpaa2_eth_queue_count(priv); j++) {
++ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
++ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
++ priv->fq[priv->num_fqs].tc = (u8)i;
++ priv->fq[priv->num_fqs++].flowid = (u16)j;
++ }
+
-+#define dpaa2_eth_get_fas(buf_addr) \
-+ (struct dpaa2_fas *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ /* We have exactly one Rx error queue per DPNI */
++ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
++ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
++#endif
+
-+#define dpaa2_eth_get_ts(buf_addr) \
-+ (u64 *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_TS_OFFSET)
++ /* For each FQ, decide on which core to process incoming frames */
++ set_fq_affinity(priv);
++}
+
-+#define dpaa2_eth_get_faead(buf_addr) \
-+ (struct dpaa2_faead *)(dpaa2_eth_get_hwa(buf_addr) + DPAA2_FAEAD_OFFSET)
++/* Allocate and configure one buffer pool for each interface */
++static int setup_dpbp(struct dpaa2_eth_priv *priv)
++{
++ int err;
++ struct fsl_mc_device *dpbp_dev;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpbp_attr dpbp_attrs;
+
-+/* Error and status bits in the frame annotation status word */
-+/* Debug frame, otherwise supposed to be discarded */
-+#define DPAA2_FAS_DISC 0x80000000
-+/* MACSEC frame */
-+#define DPAA2_FAS_MS 0x40000000
-+#define DPAA2_FAS_PTP 0x08000000
-+/* Ethernet multicast frame */
-+#define DPAA2_FAS_MC 0x04000000
-+/* Ethernet broadcast frame */
-+#define DPAA2_FAS_BC 0x02000000
-+#define DPAA2_FAS_KSE 0x00040000
-+#define DPAA2_FAS_EOFHE 0x00020000
-+#define DPAA2_FAS_MNLE 0x00010000
-+#define DPAA2_FAS_TIDE 0x00008000
-+#define DPAA2_FAS_PIEE 0x00004000
-+/* Frame length error */
-+#define DPAA2_FAS_FLE 0x00002000
-+/* Frame physical error */
-+#define DPAA2_FAS_FPE 0x00001000
-+#define DPAA2_FAS_PTE 0x00000080
-+#define DPAA2_FAS_ISP 0x00000040
-+#define DPAA2_FAS_PHE 0x00000020
-+#define DPAA2_FAS_BLE 0x00000010
-+/* L3 csum validation performed */
-+#define DPAA2_FAS_L3CV 0x00000008
-+/* L3 csum error */
-+#define DPAA2_FAS_L3CE 0x00000004
-+/* L4 csum validation performed */
-+#define DPAA2_FAS_L4CV 0x00000002
-+/* L4 csum error */
-+#define DPAA2_FAS_L4CE 0x00000001
-+/* Possible errors on the ingress path */
-+#define DPAA2_FAS_RX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE) | \
-+ (DPAA2_FAS_PIEE) | \
-+ (DPAA2_FAS_FLE) | \
-+ (DPAA2_FAS_FPE) | \
-+ (DPAA2_FAS_PTE) | \
-+ (DPAA2_FAS_ISP) | \
-+ (DPAA2_FAS_PHE) | \
-+ (DPAA2_FAS_BLE) | \
-+ (DPAA2_FAS_L3CE) | \
-+ (DPAA2_FAS_L4CE))
-+/* Tx errors */
-+#define DPAA2_FAS_TX_ERR_MASK ((DPAA2_FAS_KSE) | \
-+ (DPAA2_FAS_EOFHE) | \
-+ (DPAA2_FAS_MNLE) | \
-+ (DPAA2_FAS_TIDE))
++ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
++ &dpbp_dev);
++ if (err) {
++ dev_err(dev, "DPBP device allocation failed\n");
++ return err;
++ }
+
-+/* Time in milliseconds between link state updates */
-+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
++ priv->dpbp_dev = dpbp_dev;
+
-+/* Number of times to retry a frame enqueue before giving up.
-+ * Value determined empirically, in order to minimize the number
-+ * of frames dropped on Tx
-+ */
-+#define DPAA2_ETH_ENQUEUE_RETRIES 10
++ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
++ &dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_open() failed\n");
++ goto err_open;
++ }
+
-+/* Tx congestion entry & exit thresholds, in number of bytes.
-+ * We allow a maximum of 512KB worth of frames pending processing on the Tx
-+ * queues of an interface
-+ */
-+#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
-+#define DPAA2_ETH_TX_CONG_EXIT_THRESH (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9/10)
++ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_reset() failed\n");
++ goto err_reset;
++ }
+
-+/* Driver statistics, other than those in struct rtnl_link_stats64.
-+ * These are usually collected per-CPU and aggregated by ethtool.
-+ */
-+struct dpaa2_eth_drv_stats {
-+ __u64 tx_conf_frames;
-+ __u64 tx_conf_bytes;
-+ __u64 tx_sg_frames;
-+ __u64 tx_sg_bytes;
-+ __u64 rx_sg_frames;
-+ __u64 rx_sg_bytes;
-+ /* Enqueues retried due to portal busy */
-+ __u64 tx_portal_busy;
-+};
++ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
++ if (err) {
++ dev_err(dev, "dpbp_enable() failed\n");
++ goto err_enable;
++ }
+
-+/* Per-FQ statistics */
-+struct dpaa2_eth_fq_stats {
-+ /* Number of frames received on this queue */
-+ __u64 frames;
-+ /* Number of times this queue entered congestion */
-+ __u64 congestion_entry;
-+};
++ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
++ &dpbp_attrs);
++ if (err) {
++ dev_err(dev, "dpbp_get_attributes() failed\n");
++ goto err_get_attr;
++ }
++ priv->bpid = dpbp_attrs.bpid;
+
-+/* Per-channel statistics */
-+struct dpaa2_eth_ch_stats {
-+ /* Volatile dequeues retried due to portal busy */
-+ __u64 dequeue_portal_busy;
-+ /* Number of CDANs; useful to estimate avg NAPI len */
-+ __u64 cdan;
-+ /* Number of frames received on queues from this channel */
-+ __u64 frames;
-+ /* Pull errors */
-+ __u64 pull_err;
-+};
++ /* By default we start with flow control enabled */
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC / priv->num_channels;
+
-+#define DPAA2_ETH_MAX_DPCONS NR_CPUS
-+#define DPAA2_ETH_MAX_TCS 8
++ return 0;
+
-+/* Maximum number of queues associated with a DPNI */
-+#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
-+#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
-+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
-+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
-+ DPAA2_ETH_MAX_TX_QUEUES + \
-+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
++err_get_attr:
++ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_enable:
++err_reset:
++ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
++err_open:
++ fsl_mc_object_free(dpbp_dev);
+
-+enum dpaa2_eth_fq_type {
-+ DPAA2_RX_FQ = 0,
-+ DPAA2_TX_CONF_FQ,
-+ DPAA2_RX_ERR_FQ
-+};
++ return err;
++}
+
-+struct dpaa2_eth_priv;
++static void free_dpbp(struct dpaa2_eth_priv *priv)
++{
++ drain_pool(priv);
++ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
++ fsl_mc_object_free(priv->dpbp_dev);
++}
+
-+struct dpaa2_eth_fq {
-+ u32 fqid;
-+ u32 tx_qdbin;
-+ u16 flowid;
-+ u8 tc;
-+ int target_cpu;
-+ struct dpaa2_eth_channel *channel;
-+ enum dpaa2_eth_fq_type type;
++static int setup_tx_congestion(struct dpaa2_eth_priv *priv)
++{
++ struct dpni_congestion_notification_cfg notif_cfg = {0};
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
-+ void (*consume)(struct dpaa2_eth_priv *,
-+ struct dpaa2_eth_channel *,
-+ const struct dpaa2_fd *,
-+ struct napi_struct *,
-+ u16 queue_id);
-+ struct dpaa2_eth_fq_stats stats;
-+};
++ priv->cscn_unaligned = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
++ GFP_KERNEL);
+
-+struct dpaa2_eth_channel {
-+ struct dpaa2_io_notification_ctx nctx;
-+ struct fsl_mc_device *dpcon;
-+ int dpcon_id;
-+ int ch_id;
-+ int dpio_id;
-+ struct napi_struct napi;
-+ struct dpaa2_io_store *store;
-+ struct dpaa2_eth_priv *priv;
-+ int buf_count;
-+ struct dpaa2_eth_ch_stats stats;
-+};
++ if (!priv->cscn_unaligned)
++ return -ENOMEM;
+
-+struct dpaa2_eth_cls_rule {
-+ struct ethtool_rx_flow_spec fs;
-+ bool in_use;
-+};
++ priv->cscn_mem = PTR_ALIGN(priv->cscn_unaligned, DPAA2_CSCN_ALIGN);
++ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(dev, priv->cscn_dma)) {
++ dev_err(dev, "Error mapping CSCN memory area\n");
++ err = -ENOMEM;
++ goto err_dma_map;
++ }
+
-+struct dpaa2_eth_hash_fields {
-+ u64 rxnfc_field;
-+ enum net_prot cls_prot;
-+ int cls_field;
-+ int offset;
-+ int size;
-+};
++ notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
++ notif_cfg.threshold_entry = DPAA2_ETH_TX_CONG_ENTRY_THRESH;
++ notif_cfg.threshold_exit = DPAA2_ETH_TX_CONG_EXIT_THRESH;
++ notif_cfg.message_ctx = (u64)priv;
++ notif_cfg.message_iova = priv->cscn_dma;
++ notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
++ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
++ DPNI_CONG_OPT_COHERENT_WRITE;
++ err = dpni_set_congestion_notification(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0, ¬if_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_congestion_notification failed\n");
++ goto err_set_cong;
++ }
+
-+/* Driver private data */
-+struct dpaa2_eth_priv {
-+ struct net_device *net_dev;
++ return 0;
+
-+ /* Standard statistics */
-+ struct rtnl_link_stats64 __percpu *percpu_stats;
-+ /* Extra stats, in addition to the ones known by the kernel */
-+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
-+ struct iommu_domain *iommu_domain;
++err_set_cong:
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++err_dma_map:
++ kfree(priv->cscn_unaligned);
+
-+ bool ts_tx_en; /* Tx timestamping enabled */
-+ bool ts_rx_en; /* Rx timestamping enabled */
++ return err;
++}
+
-+ u16 tx_data_offset;
-+ u16 rx_buf_align;
++/* Configure the DPNI object this interface is associated with */
++static int setup_dpni(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev = &ls_dev->dev;
++ struct dpaa2_eth_priv *priv;
++ struct net_device *net_dev;
++ struct dpni_link_cfg cfg = {0};
++ int err;
+
-+ u16 bpid;
-+ u16 tx_qdid;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
+
-+ int tx_pause_frames;
-+ int num_bufs;
-+ int refill_thresh;
++ /* get a handle for the DPNI object */
++ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_open() failed\n");
++ return err;
++ }
+
-+ /* Tx congestion notifications are written here */
-+ void *cscn_mem;
-+ void *cscn_unaligned;
-+ dma_addr_t cscn_dma;
++ /* Check if we can work with this DPNI object */
++ err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
++ &priv->dpni_ver_minor);
++ if (err) {
++ dev_err(dev, "dpni_get_api_version() failed\n");
++ goto close;
++ }
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
++ dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
++ priv->dpni_ver_major, priv->dpni_ver_minor,
++ DPNI_VER_MAJOR, DPNI_VER_MINOR);
++ err = -ENOTSUPP;
++ goto close;
++ }
+
-+ u8 num_fqs;
-+ /* Tx queues are at the beginning of the array */
-+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
++ ls_dev->mc_io = priv->mc_io;
++ ls_dev->mc_handle = priv->mc_token;
+
-+ u8 num_channels;
-+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err) {
++ dev_err(dev, "dpni_reset() failed\n");
++ goto close;
++ }
+
-+ int dpni_id;
-+ struct dpni_attr dpni_attrs;
-+ struct fsl_mc_device *dpbp_dev;
++ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
++ &priv->dpni_attrs);
++ if (err) {
++ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
++ goto close;
++ }
+
-+ struct fsl_mc_io *mc_io;
-+ /* SysFS-controlled affinity mask for TxConf FQs */
-+ struct cpumask txconf_cpumask;
-+ /* Cores which have an affine DPIO/DPCON.
-+ * This is the cpu set on which Rx frames are processed;
-+ * Tx confirmation frames are processed on a subset of this,
-+ * depending on user settings.
-+ */
-+ struct cpumask dpio_cpumask;
++ err = set_buffer_layout(priv);
++ if (err)
++ goto close;
+
-+ u16 mc_token;
++ /* Enable congestion notifications for Tx queues */
++ err = setup_tx_congestion(priv);
++ if (err)
++ goto close;
+
-+ struct dpni_link_state link_state;
-+ bool do_link_poll;
-+ struct task_struct *poll_thread;
++ /* allocate classification rule space */
++ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) *
++ dpaa2_eth_fs_count(priv), GFP_KERNEL);
++ if (!priv->cls_rule)
++ goto close;
+
-+ struct dpaa2_eth_hash_fields *hash_fields;
-+ u8 num_hash_fields;
-+ /* enabled ethtool hashing bits */
-+ u64 rx_flow_hash;
++ /* Enable flow control */
++ cfg.options = DPNI_LINK_OPT_AUTONEG | DPNI_LINK_OPT_PAUSE;
++ priv->tx_pause_frames = true;
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_link_cfg() failed\n");
++ goto cls_free;
++ }
+
-+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
-+ struct dpaa2_debugfs dbg;
-+#endif
++ return 0;
+
-+ /* array of classification rules */
-+ struct dpaa2_eth_cls_rule *cls_rule;
++cls_free:
++ kfree(priv->cls_rule);
++close:
++ dpni_close(priv->mc_io, 0, priv->mc_token);
+
-+ struct dpni_tx_shaping_cfg shaping_cfg;
++ return err;
++}
+
-+ u8 dcbx_mode;
-+ struct ieee_pfc pfc;
-+ bool vlan_clsf_set;
-+};
++static void free_dpni(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ int err;
+
-+#define dpaa2_eth_hash_enabled(priv) \
-+ ((priv)->dpni_attrs.num_queues > 1)
++ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
++ if (err)
++ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
++ err);
+
-+#define dpaa2_eth_fs_enabled(priv) \
-+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++ dpni_close(priv->mc_io, 0, priv->mc_token);
+
-+#define dpaa2_eth_fs_mask_enabled(priv) \
-+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++ kfree(priv->cls_rule);
+
-+#define dpaa2_eth_fs_count(priv) \
-+ ((priv)->dpni_attrs.fs_entries)
++ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
++ kfree(priv->cscn_unaligned);
++}
+
-+/* size of DMA memory used to pass configuration to classifier, in bytes */
-+#define DPAA2_CLASSIFIER_DMA_SIZE 256
++static int setup_rx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid;
++ int err;
+
-+extern const struct ethtool_ops dpaa2_ethtool_ops;
-+extern const char dpaa2_eth_drv_version[];
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
++ if (err) {
++ dev_err(dev, "dpni_get_queue(RX) failed\n");
++ return err;
++ }
+
-+static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
-+{
-+ return priv->dpni_attrs.num_queues;
++ fq->fqid = qid.fqid;
++
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 1;
++ queue.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX, fq->tc, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
++ if (err) {
++ dev_err(dev, "dpni_set_queue(RX) failed\n");
++ return err;
++ }
++
++ return 0;
+}
+
-+static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
++static int set_queue_taildrop(struct dpaa2_eth_priv *priv,
++ struct dpni_taildrop *td)
+{
-+ return priv->dpni_attrs.num_tcs;
++ struct device *dev = priv->net_dev->dev.parent;
++ int i, err;
++
++ for (i = 0; i < priv->num_fqs; i++) {
++ if (priv->fq[i].type != DPAA2_RX_FQ)
++ continue;
++
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_QUEUE, DPNI_QUEUE_RX,
++ priv->fq[i].tc, priv->fq[i].flowid,
++ td);
++ if (err) {
++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ return err;
++ }
++
++ dev_dbg(dev, "%s taildrop for Rx group tc %d\n",
++ (td->enable ? "Enabled" : "Disabled"),
++ i);
++ }
++
++ return 0;
+}
+
-+static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
-+ int traffic_class)
++static int set_group_taildrop(struct dpaa2_eth_priv *priv,
++ struct dpni_taildrop *td)
+{
-+ return priv->pfc.pfc_en & (1 << traffic_class);
-+}
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_taildrop disable_td, *tc_td;
++ int i, err;
+
-+enum dpaa2_eth_td_cfg {
-+ DPAA2_ETH_TD_NONE,
-+ DPAA2_ETH_TD_QUEUE,
-+ DPAA2_ETH_TD_GROUP
-+};
++ memset(&disable_td, 0, sizeof(struct dpni_taildrop));
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ if (td->enable && dpaa2_eth_is_pfc_enabled(priv, i))
++ /* Do not set taildrop thresholds for PFC-enabled
++ * traffic classes. We will enable congestion
++ * notifications for them.
++ */
++ tc_td = &disable_td;
++ else
++ tc_td = td;
+
-+static inline enum dpaa2_eth_td_cfg
-+dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
-+{
-+ bool pfc_enabled = !!(priv->pfc.pfc_en);
++ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
++ DPNI_CP_GROUP, DPNI_QUEUE_RX,
++ i, 0, tc_td);
++ if (err) {
++ dev_err(dev, "dpni_set_taildrop() failed (%d)\n", err);
++ return err;
++ }
+
-+ if (pfc_enabled)
-+ return DPAA2_ETH_TD_GROUP;
-+ else if (priv->tx_pause_frames)
-+ return DPAA2_ETH_TD_NONE;
-+ else
-+ return DPAA2_ETH_TD_QUEUE;
-+}
++ dev_dbg(dev, "%s taildrop for Rx queue id %d tc %d\n",
++ (tc_td->enable ? "Enabled" : "Disabled"),
++ priv->fq[i].flowid, priv->fq[i].tc);
++ }
+
-+void check_cls_support(struct dpaa2_eth_priv *priv);
++ return 0;
++}
+
-+int set_rx_taildrop(struct dpaa2_eth_priv *priv);
-+#endif /* __DPAA2_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
-@@ -0,0 +1,864 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
++/* Enable/disable Rx FQ taildrop
+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Rx FQ taildrop is mutually exclusive with flow control and it only gets
++ * disabled when FC is active. Depending on FC status, we need to compute
++ * the maximum number of buffers in the pool differently, so use the
++ * opportunity to update max number of buffers as well.
+ */
++int set_rx_taildrop(struct dpaa2_eth_priv *priv)
++{
++ enum dpaa2_eth_td_cfg cfg = dpaa2_eth_get_td_type(priv);
++ struct dpni_taildrop td_queue, td_group;
++ int err = 0;
+
-+#include "dpni.h" /* DPNI_LINK_OPT_* */
-+#include "dpaa2-eth.h"
-+
-+/* To be kept in sync with dpni_statistics */
-+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
-+ "rx frames",
-+ "rx bytes",
-+ "rx mcast frames",
-+ "rx mcast bytes",
-+ "rx bcast frames",
-+ "rx bcast bytes",
-+ "tx frames",
-+ "tx bytes",
-+ "tx mcast frames",
-+ "tx mcast bytes",
-+ "tx bcast frames",
-+ "tx bcast bytes",
-+ "rx filtered frames",
-+ "rx discarded frames",
-+ "rx nobuffer discards",
-+ "tx discarded frames",
-+ "tx confirmed frames",
-+};
-+
-+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
-+
-+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */
-+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
-+ /* per-cpu stats */
++ switch (cfg) {
++ case DPAA2_ETH_TD_NONE:
++ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
++ memset(&td_group, 0, sizeof(struct dpni_taildrop));
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_FC /
++ priv->num_channels;
++ break;
++ case DPAA2_ETH_TD_QUEUE:
++ memset(&td_group, 0, sizeof(struct dpni_taildrop));
++ td_queue.enable = 1;
++ td_queue.units = DPNI_CONGESTION_UNIT_BYTES;
++ td_queue.threshold = DPAA2_ETH_TAILDROP_THRESH /
++ dpaa2_eth_tc_count(priv);
++ priv->max_bufs_per_ch = DPAA2_ETH_NUM_BUFS_PER_CH;
++ break;
++ case DPAA2_ETH_TD_GROUP:
++ memset(&td_queue, 0, sizeof(struct dpni_taildrop));
++ td_group.enable = 1;
++ td_group.units = DPNI_CONGESTION_UNIT_FRAMES;
++ td_group.threshold = NAPI_POLL_WEIGHT *
++ dpaa2_eth_queue_count(priv);
++ priv->max_bufs_per_ch = NAPI_POLL_WEIGHT *
++ dpaa2_eth_tc_count(priv);
++ break;
++ default:
++ break;
++ }
+
-+ "tx conf frames",
-+ "tx conf bytes",
-+ "tx sg frames",
-+ "tx sg bytes",
-+ "rx sg frames",
-+ "rx sg bytes",
-+ /* how many times we had to retry the enqueue command */
-+ "enqueue portal busy",
++ err = set_queue_taildrop(priv, &td_queue);
++ if (err)
++ return err;
+
-+ /* Channel stats */
-+ /* How many times we had to retry the volatile dequeue command */
-+ "dequeue portal busy",
-+ "channel pull errors",
-+ /* Number of notifications received */
-+ "cdan",
-+ "tx congestion state",
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ /* FQ stats */
-+ "rx pending frames",
-+ "rx pending bytes",
-+ "tx conf pending frames",
-+ "tx conf pending bytes",
-+ "buffer count"
-+#endif
-+};
++ err = set_group_taildrop(priv, &td_group);
++ if (err)
++ return err;
+
-+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++ priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
+
-+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
-+ sizeof(drvinfo->version));
-+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
++ return 0;
+}
+
-+static int dpaa2_eth_get_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
++static int setup_tx_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+{
-+ struct dpni_link_state state = {0};
-+ int err = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue queue;
++ struct dpni_queue_id qid;
++ int err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ goto out;
++ dev_err(dev, "dpni_get_queue(TX) failed\n");
++ return err;
+ }
+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPNI side - and for that matter there may exist
-+ * no DPMAC at all. So for now we just don't report anything
-+ * beyond the DPNI attributes.
-+ */
-+ if (state.options & DPNI_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
-+ cmd->duplex = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
-+
-+out:
-+ return err;
-+}
-+
-+static int dpaa2_eth_set_settings(struct net_device *net_dev,
-+ struct ethtool_cmd *cmd)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ int err = 0;
-+
-+ netdev_dbg(net_dev, "Setting link parameters...");
++ fq->tx_qdbin = qid.qdbin;
+
-+ /* Need to interrogate on link state to get flow control params */
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ &queue, &qid);
+ if (err) {
-+ netdev_err(net_dev, "ERROR %d getting link state", err);
-+ goto out;
++ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
++ return err;
+ }
+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPNI_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
++ fq->fqid = qid.fqid;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
++ queue.destination.id = fq->channel->dpcon_id;
++ queue.destination.type = DPNI_DEST_DPCON;
++ queue.destination.priority = 0;
++ queue.user_context = (u64)(uintptr_t)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
++ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
++ &queue);
++ if (err) {
++ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
++ return err;
++ }
+
-+out:
-+ return err;
++ return 0;
+}
+
-+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
++ struct dpaa2_eth_fq *fq)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_queue q = { { 0 } };
++ struct dpni_queue_id qid;
++ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
-+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
-+
-+ /* for now, pause frames autonegotiation is not separate */
-+ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
-+ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
-+ pause->tx_pause = pause->rx_pause ^
-+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
-+}
-+
-+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
-+ struct ethtool_pauseparam *pause)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpni_link_state state = {0};
-+ struct dpni_link_cfg cfg = {0};
-+ u32 current_tx_pause;
-+ int err = 0;
-+
-+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
-+ netdev_dbg(net_dev, "ERROR %d getting link state", err);
-+ goto out;
++ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
++ return err;
+ }
+
-+ cfg.rate = state.rate;
-+ cfg.options = state.options;
-+ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
-+ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
-+
-+ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
-+ netdev_warn(net_dev,
-+ "WARN: Can't change pause frames autoneg separately\n");
-+
-+ if (pause->rx_pause)
-+ cfg.options |= DPNI_LINK_OPT_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
-+
-+ if (pause->rx_pause ^ pause->tx_pause)
-+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
-+ else
-+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
++ fq->fqid = qid.fqid;
+
-+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ q.destination.id = fq->channel->dpcon_id;
++ q.destination.type = DPNI_DEST_DPCON;
++ q.destination.priority = 1;
++ q.user_context = (u64)fq;
++ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+ if (err) {
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
-+ */
-+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
-+ goto out;
-+ }
-+
-+ /* Enable / disable taildrops if Tx pause frames have changed */
-+ if (current_tx_pause == pause->tx_pause)
-+ goto out;
-+
-+ priv->tx_pause_frames = pause->tx_pause;
-+ err = set_rx_taildrop(priv);
-+ if (err)
-+ netdev_dbg(net_dev, "ERROR %d configuring taildrop", err);
-+
-+ priv->tx_pause_frames = pause->tx_pause;
-+out:
-+ return err;
-+}
-+
-+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
-+ u8 *data)
-+{
-+ u8 *p = data;
-+ int i;
-+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
-+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
-+ p += ETH_GSTRING_LEN;
-+ }
-+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
-+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
-+ p += ETH_GSTRING_LEN;
-+ }
-+ break;
++ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
++ return err;
+ }
-+}
+
-+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
-+{
-+ switch (sset) {
-+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
-+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
++ return 0;
+}
++#endif
+
-+/** Fill in hardware counters, as returned by MC.
-+ */
-+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
++/* default hash key fields */
++static struct dpaa2_eth_dist_fields default_dist_fields[] = {
++ {
++ /* L2 header */
++ .rxnfc_field = RXH_L2DA,
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_DA,
++ .id = DPAA2_ETH_DIST_ETHDST,
++ .size = 6,
++ }, {
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_SA,
++ .id = DPAA2_ETH_DIST_ETHSRC,
++ .size = 6,
++ }, {
++ /* This is the last ethertype field parsed:
++ * depending on frame format, it can be the MAC ethertype
++ * or the VLAN etype.
++ */
++ .cls_prot = NET_PROT_ETH,
++ .cls_field = NH_FLD_ETH_TYPE,
++ .id = DPAA2_ETH_DIST_ETHTYPE,
++ .size = 2,
++ }, {
++ /* VLAN header */
++ .rxnfc_field = RXH_VLAN,
++ .cls_prot = NET_PROT_VLAN,
++ .cls_field = NH_FLD_VLAN_TCI,
++ .id = DPAA2_ETH_DIST_VLAN,
++ .size = 2,
++ }, {
++ /* IP header */
++ .rxnfc_field = RXH_IP_SRC,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_SRC,
++ .id = DPAA2_ETH_DIST_IPSRC,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_IP_DST,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_DST,
++ .id = DPAA2_ETH_DIST_IPDST,
++ .size = 4,
++ }, {
++ .rxnfc_field = RXH_L3_PROTO,
++ .cls_prot = NET_PROT_IP,
++ .cls_field = NH_FLD_IP_PROTO,
++ .id = DPAA2_ETH_DIST_IPPROTO,
++ .size = 1,
++ }, {
++ /* Using UDP ports, this is functionally equivalent to raw
++ * byte pairs from L4 header.
++ */
++ .rxnfc_field = RXH_L4_B_0_1,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_SRC,
++ .id = DPAA2_ETH_DIST_L4SRC,
++ .size = 2,
++ }, {
++ .rxnfc_field = RXH_L4_B_2_3,
++ .cls_prot = NET_PROT_UDP,
++ .cls_field = NH_FLD_UDP_PORT_DST,
++ .id = DPAA2_ETH_DIST_L4DST,
++ .size = 2,
++ },
++};
++
++static int legacy_config_dist_key(struct dpaa2_eth_priv *priv,
++ dma_addr_t key_iova)
+{
-+ int i = 0; /* Current index in the data array */
-+ int j = 0, k, err;
-+ union dpni_statistics dpni_stats;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_tc_dist_cfg dist_cfg;
++ int i, err;
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ u32 fcnt, bcnt;
-+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
-+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
-+ u32 buf_cnt;
-+#endif
-+ u64 cdan = 0;
-+ u64 portal_busy = 0, pull_err = 0;
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct dpaa2_eth_drv_stats *extras;
-+ struct dpaa2_eth_ch_stats *ch_stats;
++ /* In legacy mode, we can't configure flow steering independently */
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
+
-+ memset(data, 0,
-+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ /* Print standard counters, from DPNI statistics */
-+ for (j = 0; j <= 2; j++) {
-+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
-+ j, &dpni_stats);
-+ if (err != 0)
-+ netdev_warn(net_dev, "Err %d getting DPNI stats page %d",
-+ err, j);
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ if (dpaa2_eth_fs_enabled(priv)) {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_FS;
++ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH;
++ } else {
++ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ }
+
-+ switch (j) {
-+ case 0:
-+ *(data + i++) = dpni_stats.page_0.ingress_all_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_all_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_0.ingress_broadcast_bytes;
-+ break;
-+ case 1:
-+ *(data + i++) = dpni_stats.page_1.egress_all_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_all_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_multicast_bytes;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_frames;
-+ *(data + i++) = dpni_stats.page_1.egress_broadcast_bytes;
-+ break;
-+ case 2:
-+ *(data + i++) = dpni_stats.page_2.ingress_filtered_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.ingress_nobuffer_discards;
-+ *(data + i++) = dpni_stats.page_2.egress_discarded_frames;
-+ *(data + i++) = dpni_stats.page_2.egress_confirmed_frames;
-+ break;
-+ default:
-+ break;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, i,
++ &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
++ return err;
+ }
+ }
+
-+ /* Print per-cpu extra stats */
-+ for_each_online_cpu(k) {
-+ extras = per_cpu_ptr(priv->percpu_extras, k);
-+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
-+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
-+ }
++ return 0;
++}
+
-+ i += j;
++static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
+
-+ /* We may be using fewer DPIOs than actual CPUs */
-+ for (j = 0; j < priv->num_channels; j++) {
-+ ch_stats = &priv->channel[j]->stats;
-+ cdan += ch_stats->cdan;
-+ portal_busy += ch_stats->dequeue_portal_busy;
-+ pull_err += ch_stats->pull_err;
-+ }
++ if (!dpaa2_eth_hash_enabled(priv))
++ return -EOPNOTSUPP;
+
-+ *(data + i++) = portal_busy;
-+ *(data + i++) = pull_err;
-+ *(data + i++) = cdan;
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
+
-+#ifdef CONFIG_FSL_QBMAN_DEBUG
-+ for (j = 0; j < priv->num_fqs; j++) {
-+ /* Print FQ instantaneous counts */
-+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
-+ &fcnt, &bcnt);
-+ if (err) {
-+ netdev_warn(net_dev, "FQ query error %d", err);
-+ return;
-+ }
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
+
-+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
-+ fcnt_tx_total += fcnt;
-+ bcnt_tx_total += bcnt;
-+ } else {
-+ fcnt_rx_total += fcnt;
-+ bcnt_rx_total += bcnt;
++ err = dpni_set_rx_hash_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
++ return err;
+ }
+ }
+
-+ *(data + i++) = fcnt_rx_total;
-+ *(data + i++) = bcnt_rx_total;
-+ *(data + i++) = fcnt_tx_total;
-+ *(data + i++) = bcnt_tx_total;
-+
-+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
-+ if (err) {
-+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
-+ return;
-+ }
-+ *(data + i++) = buf_cnt;
-+#endif
++ return 0;
+}
+
-+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
++static int config_fs_key(struct dpaa2_eth_priv *priv, dma_addr_t key_iova)
+{
-+ int i, off = 0;
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpni_rx_dist_cfg dist_cfg;
++ int i, err;
+
-+ for (i = 0; i < priv->num_hash_fields; i++) {
-+ if (priv->hash_fields[i].cls_prot == prot &&
-+ priv->hash_fields[i].cls_field == field)
-+ return off;
-+ off += priv->hash_fields[i].size;
-+ }
++ if (!dpaa2_eth_fs_enabled(priv))
++ return -EOPNOTSUPP;
+
-+ return -1;
-+}
++ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
-+static u8 cls_key_size(struct dpaa2_eth_priv *priv)
-+{
-+ u8 i, size = 0;
++ dist_cfg.key_cfg_iova = key_iova;
++ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
++ dist_cfg.enable = true;
+
-+ for (i = 0; i < priv->num_hash_fields; i++)
-+ size += priv->hash_fields[i].size;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ dist_cfg.tc = i;
+
-+ return size;
++ err = dpni_set_rx_fs_dist(priv->mc_io, 0,
++ priv->mc_token, &dist_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
++ return err;
++ }
++ }
++
++ return 0;
+}
+
-+void check_cls_support(struct dpaa2_eth_priv *priv)
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields)
+{
-+ u8 key_size = cls_key_size(priv);
+ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg cls_cfg;
++ struct dpkg_extract *key;
++ u32 hash_fields = 0;
++ dma_addr_t key_iova;
++ u8 *key_mem;
++ int i, err;
+
-+ if (dpaa2_eth_hash_enabled(priv)) {
-+ if (priv->dpni_attrs.fs_key_size < key_size) {
-+ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
-+ priv->dpni_attrs.fs_key_size,
-+ key_size);
-+ goto disable_fs;
-+ }
-+ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
-+ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
-+ DPKG_MAX_NUM_OF_EXTRACTS);
-+ goto disable_fs;
-+ }
-+ }
-+
-+ if (dpaa2_eth_fs_enabled(priv)) {
-+ if (!dpaa2_eth_hash_enabled(priv)) {
-+ dev_info(dev, "Insufficient queues. Steering is disabled\n");
-+ goto disable_fs;
-+ }
++ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
-+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
-+ dev_info(dev, "Key masks not supported. Steering is disabled\n");
-+ goto disable_fs;
-+ }
-+ }
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (!(key_fields & priv->dist_fields[i].id))
++ continue;
+
-+ return;
++ key = &cls_cfg.extracts[cls_cfg.num_extracts];
++ key->type = DPKG_EXTRACT_FROM_HDR;
++ key->extract.from_hdr.prot = priv->dist_fields[i].cls_prot;
++ key->extract.from_hdr.type = DPKG_FULL_FIELD;
++ key->extract.from_hdr.field = priv->dist_fields[i].cls_field;
++ cls_cfg.num_extracts++;
+
-+disable_fs:
-+ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
-+ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
-+}
++ hash_fields |= priv->dist_fields[i].rxnfc_field;
++ }
+
-+static int prep_l4_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_tcpip4_spec *l4_value,
-+ struct ethtool_tcpip4_spec *l4_mask,
-+ void *key, void *mask, u8 l4_proto)
-+{
-+ int offset;
++ key_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!key_mem)
++ return -ENOMEM;
+
-+ if (l4_mask->tos) {
-+ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
-+ return -EOPNOTSUPP;
++ err = dpni_prepare_key_cfg(&cls_cfg, key_mem);
++ if (err) {
++ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
++ goto free_key;
+ }
+
-+ if (l4_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = l4_value->ip4src;
-+ *(u32 *)(mask + offset) = l4_mask->ip4src;
++ key_iova = dma_map_single(dev, key_mem, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_iova)) {
++ dev_err(dev, "DMA mapping failed\n");
++ err = -ENOMEM;
++ goto free_key;
+ }
+
-+ if (l4_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = l4_value->ip4dst;
-+ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++ switch (type) {
++ case DPAA2_ETH_RX_DIST_LEGACY:
++ err = legacy_config_dist_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_HASH:
++ err = config_hash_key(priv, key_iova);
++ break;
++ case DPAA2_ETH_RX_DIST_FS:
++ err = config_fs_key(priv, key_iova);
++ break;
++ default:
++ err = -EINVAL;
++ break;
+ }
+
-+ if (l4_mask->psrc) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u32 *)(key + offset) = l4_value->psrc;
-+ *(u32 *)(mask + offset) = l4_mask->psrc;
++ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (err) {
++ if (err != -EOPNOTSUPP)
++ dev_err(dev, "Distribution key config failed\n");
++ goto free_key;
+ }
+
-+ if (l4_mask->pdst) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u32 *)(key + offset) = l4_value->pdst;
-+ *(u32 *)(mask + offset) = l4_mask->pdst;
-+ }
++ if (type != DPAA2_ETH_RX_DIST_FS)
++ priv->rx_hash_fields = hash_fields;
+
-+ /* Only apply the rule for the user-specified L4 protocol
-+ * and if ethertype matches IPv4
-+ */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
++free_key:
++ kfree(key_mem);
++ return err;
++}
+
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u8 *)(key + offset) = l4_proto;
-+ *(u8 *)(mask + offset) = 0xFF;
++/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
++ * frame queues and channels
++ */
++static int bind_dpni(struct dpaa2_eth_priv *priv)
++{
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ struct dpni_pools_cfg pools_params;
++ struct dpni_error_cfg err_cfg;
++ int err = 0;
++ int i;
+
-+ /* TODO: check IP version */
++ pools_params.num_dpbp = 1;
++ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
++ pools_params.pools[0].backup_pool = 0;
++ pools_params.pools[0].priority_mask = 0xff;
++ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
++ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
++ if (err) {
++ dev_err(dev, "dpni_set_pools() failed\n");
++ return err;
++ }
+
-+ return 0;
-+}
++ /* Verify classification options and disable hashing and/or
++ * flow steering support in case of invalid configuration values
++ */
++ priv->dist_fields = default_dist_fields;
++ priv->num_dist_fields = ARRAY_SIZE(default_dist_fields);
++ check_cls_support(priv);
+
-+static int prep_eth_rule(struct dpaa2_eth_priv *priv,
-+ struct ethhdr *eth_value, struct ethhdr *eth_mask,
-+ void *key, void *mask)
-+{
-+ int offset;
++ /* have the interface implicitly distribute traffic based on
++ * a static hash key. Also configure flow steering key, if supported.
++ * Errors here are not blocking, so just let the called function
++ * print its error message and move along.
++ */
++ if (dpaa2_eth_has_legacy_dist(priv)) {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_LEGACY,
++ DPAA2_ETH_DIST_ALL);
++ } else {
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_DIST_DEFAULT_HASH);
++ dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_DIST_ALL);
++ }
+
-+ if (eth_mask->h_proto) {
-+ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
-+ return -EOPNOTSUPP;
++ /* Configure handling of error frames */
++ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
++ err_cfg.set_frame_annotation = 1;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
++#else
++ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
++#endif
++ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
++ &err_cfg);
++ if (err) {
++ dev_err(dev, "dpni_set_errors_behavior failed\n");
++ return err;
+ }
+
-+ if (!is_zero_ether_addr(eth_mask->h_source)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
-+ ether_addr_copy(key + offset, eth_value->h_source);
-+ ether_addr_copy(mask + offset, eth_mask->h_source);
++ /* Configure Rx and Tx conf queues to generate CDANs */
++ for (i = 0; i < priv->num_fqs; i++) {
++ switch (priv->fq[i].type) {
++ case DPAA2_RX_FQ:
++ err = setup_rx_flow(priv, &priv->fq[i]);
++ break;
++ case DPAA2_TX_CONF_FQ:
++ err = setup_tx_flow(priv, &priv->fq[i]);
++ break;
++#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
++ case DPAA2_RX_ERR_FQ:
++ err = setup_rx_err_flow(priv, &priv->fq[i]);
++ break;
++#endif
++ default:
++ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
++ return -EINVAL;
++ }
++ if (err)
++ return err;
+ }
+
-+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, eth_value->h_dest);
-+ ether_addr_copy(mask + offset, eth_mask->h_dest);
++ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
++ DPNI_QUEUE_TX, &priv->tx_qdid);
++ if (err) {
++ dev_err(dev, "dpni_get_qdid() failed\n");
++ return err;
+ }
+
+ return 0;
+}
+
-+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_usrip4_spec *uip_value,
-+ struct ethtool_usrip4_spec *uip_mask,
-+ void *key, void *mask)
++/* Allocate rings for storing incoming frame descriptors */
++static int alloc_rings(struct dpaa2_eth_priv *priv)
+{
-+ int offset;
-+
-+ if (uip_mask->tos)
-+ return -EOPNOTSUPP;
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ int i;
+
-+ if (uip_mask->ip4src) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
-+ *(u32 *)(key + offset) = uip_value->ip4src;
-+ *(u32 *)(mask + offset) = uip_mask->ip4src;
++ for (i = 0; i < priv->num_channels; i++) {
++ priv->channel[i]->store =
++ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
++ if (!priv->channel[i]->store) {
++ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
++ goto err_ring;
++ }
+ }
+
-+ if (uip_mask->ip4dst) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
-+ *(u32 *)(key + offset) = uip_value->ip4dst;
-+ *(u32 *)(mask + offset) = uip_mask->ip4dst;
-+ }
++ return 0;
+
-+ if (uip_mask->proto) {
-+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
-+ *(u32 *)(key + offset) = uip_value->proto;
-+ *(u32 *)(mask + offset) = uip_mask->proto;
++err_ring:
++ for (i = 0; i < priv->num_channels; i++) {
++ if (!priv->channel[i]->store)
++ break;
++ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
-+ if (uip_mask->l4_4_bytes) {
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
+
-+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
-+ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
-+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
-+ }
++ return -ENOMEM;
++}
+
-+ /* Ethertype must be IP */
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
-+ *(u16 *)(key + offset) = htons(ETH_P_IP);
-+ *(u16 *)(mask + offset) = 0xFFFF;
++static void free_rings(struct dpaa2_eth_priv *priv)
++{
++ int i;
+
-+ return 0;
++ for (i = 0; i < priv->num_channels; i++)
++ dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
-+static int prep_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
++static int set_mac_addr(struct dpaa2_eth_priv *priv)
+{
-+ int offset;
++ struct net_device *net_dev = priv->net_dev;
++ struct device *dev = net_dev->dev.parent;
++ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
++ int err;
+
-+ if (ext_mask->vlan_etype)
-+ return -EOPNOTSUPP;
++ /* Get firmware address, if any */
++ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
++ return err;
++ }
+
-+ if (ext_mask->vlan_tci) {
-+ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
-+ *(u16 *)(key + offset) = ext_value->vlan_tci;
-+ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
++ /* Get DPNI attributes address, if any */
++ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ dpni_mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
++ return err;
+ }
+
-+ return 0;
-+}
++ /* First check if firmware has any address configured by bootloader */
++ if (!is_zero_ether_addr(mac_addr)) {
++ /* If the DPMAC addr != DPNI addr, update it */
++ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
++ priv->mc_token,
++ mac_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
++ return err;
++ }
++ }
++ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
++ } else if (is_zero_ether_addr(dpni_mac_addr)) {
++ /* No MAC address configured, fill in net_dev->dev_addr
++ * with a random one
++ */
++ eth_hw_addr_random(net_dev);
++ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
-+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
-+ struct ethtool_flow_ext *ext_value,
-+ struct ethtool_flow_ext *ext_mask,
-+ void *key, void *mask)
-+{
-+ int offset;
++ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
++ net_dev->dev_addr);
++ if (err) {
++ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
++ return err;
++ }
+
-+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
-+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
-+ ether_addr_copy(key + offset, ext_value->h_dest);
-+ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
++ * practical purposes, this will be our "permanent" mac address,
++ * at least until the next reboot. This move will also permit
++ * register_netdevice() to properly fill up net_dev->perm_addr.
++ */
++ net_dev->addr_assign_type = NET_ADDR_PERM;
++ } else {
++ /* NET_ADDR_PERM is default, all we have to do is
++ * fill in the device addr.
++ */
++ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ }
+
+ return 0;
+}
+
-+static int prep_cls_rule(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ void *key)
++static int netdev_init(struct net_device *net_dev)
+{
++ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const u8 key_size = cls_key_size(priv);
-+ void *msk = key + key_size;
++ u8 bcast_addr[ETH_ALEN];
++ u8 num_queues;
+ int err;
+
-+ memset(key, 0, key_size * 2);
++ net_dev->netdev_ops = &dpaa2_eth_ops;
+
-+ switch (fs->flow_type & 0xff) {
-+ case TCP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
-+ &fs->m_u.tcp_ip4_spec, key, msk,
-+ IPPROTO_TCP);
-+ break;
-+ case UDP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
-+ &fs->m_u.udp_ip4_spec, key, msk,
-+ IPPROTO_UDP);
-+ break;
-+ case SCTP_V4_FLOW:
-+ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
-+ &fs->m_u.sctp_ip4_spec, key, msk,
-+ IPPROTO_SCTP);
-+ break;
-+ case ETHER_FLOW:
-+ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
-+ &fs->m_u.ether_spec, key, msk);
-+ break;
-+ case IP_USER_FLOW:
-+ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
-+ &fs->m_u.usr_ip4_spec, key, msk);
-+ break;
-+ default:
-+ /* TODO: AH, ESP */
-+ return -EOPNOTSUPP;
-+ }
++ err = set_mac_addr(priv);
+ if (err)
+ return err;
+
-+ if (fs->flow_type & FLOW_EXT) {
-+ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
-+ if (err)
-+ return err;
++ /* Explicitly add the broadcast address to the MAC filtering table */
++ eth_broadcast_addr(bcast_addr);
++ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
++ if (err) {
++ dev_err(dev, "dpni_add_mac_addr() failed\n");
++ return err;
+ }
+
-+ if (fs->flow_type & FLOW_MAC_EXT) {
-+ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
-+ if (err)
-+ return err;
++ /* Set MTU upper limit; lower limit is default (68B) */
++ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
++ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
++ (u16)DPAA2_ETH_MFL);
++ if (err) {
++ dev_err(dev, "dpni_set_max_frame_length() failed\n");
++ return err;
++ }
++
++ /* Set actual number of queues in the net device */
++ num_queues = dpaa2_eth_queue_count(priv);
++ err = netif_set_real_num_tx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
++ return err;
++ }
++ err = netif_set_real_num_rx_queues(net_dev, num_queues);
++ if (err) {
++ dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
++ return err;
++ }
++
++ /* Our .ndo_init will be called herein */
++ err = register_netdev(net_dev);
++ if (err < 0) {
++ dev_err(dev, "register_netdev() failed\n");
++ return err;
+ }
+
+ return 0;
+}
+
-+static int del_cls(struct net_device *net_dev, int location);
-+
-+static int do_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs,
-+ bool add)
++static int poll_link_state(void *arg)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ struct device *dev = net_dev->dev.parent;
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ struct dpni_rule_cfg rule_cfg;
-+ struct dpni_fs_action_cfg fs_act = { 0 };
-+ void *dma_mem;
-+ int err = 0, tc;
++ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
++ int err;
+
-+ if (!dpaa2_eth_fs_enabled(priv)) {
-+ netdev_err(net_dev, "dev does not support steering!\n");
-+ /* dev doesn't support steering */
-+ return -EOPNOTSUPP;
-+ }
++ while (!kthread_should_stop()) {
++ err = link_state_update(priv);
++ if (unlikely(err))
++ return err;
+
-+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
-+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
-+ fs->location >= rule_cnt)
-+ return -EINVAL;
++ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
++ }
+
-+ /* When adding a new rule, check if location if available,
-+ * and if not free the existing table entry before inserting
-+ * the new one
-+ */
-+ if (add && (priv->cls_rule[fs->location].in_use == true))
-+ del_cls(net_dev, fs->location);
++ return 0;
++}
+
-+ memset(&rule_cfg, 0, sizeof(rule_cfg));
-+ rule_cfg.key_size = cls_key_size(priv);
++static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
++{
++ u32 status = ~0;
++ struct device *dev = (struct device *)arg;
++ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
++ struct net_device *net_dev = dev_get_drvdata(dev);
++ int err;
+
-+ /* allocate twice the key size, for the actual key and for mask */
-+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
++ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
++ DPNI_IRQ_INDEX, &status);
++ if (unlikely(err)) {
++ netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
++ return IRQ_HANDLED;
++ }
+
-+ err = prep_cls_rule(net_dev, fs, dma_mem);
-+ if (err)
-+ goto err_free_mem;
++ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
++ link_state_update(netdev_priv(net_dev));
+
-+ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
-+ rule_cfg.key_size * 2,
-+ DMA_TO_DEVICE);
++ return IRQ_HANDLED;
++}
+
-+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++static int setup_irqs(struct fsl_mc_device *ls_dev)
++{
++ int err = 0;
++ struct fsl_mc_device_irq *irq;
+
-+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
-+ fs_act.options |= DPNI_FS_OPT_DISCARD;
-+ else
-+ fs_act.flow_id = fs->ring_cookie;
++ err = fsl_mc_allocate_irqs(ls_dev);
++ if (err) {
++ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
++ return err;
++ }
+
-+ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
-+ if (add)
-+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
-+ tc, fs->location, &rule_cfg,
-+ &fs_act);
-+ else
-+ err = dpni_remove_fs_entry(priv->mc_io, 0,
-+ priv->mc_token, tc,
-+ &rule_cfg);
++ irq = ls_dev->irqs[0];
++ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
++ NULL, dpni_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(&ls_dev->dev), &ls_dev->dev);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
++ goto free_mc_irq;
++ }
+
-+ if (err)
-+ break;
++ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
++ goto free_irq;
+ }
+
-+ dma_unmap_single(dev, rule_cfg.key_iova,
-+ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
++ DPNI_IRQ_INDEX, 1);
++ if (err < 0) {
++ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
++ goto free_irq;
++ }
+
-+ if (err)
-+ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
++ return 0;
+
-+err_free_mem:
-+ kfree(dma_mem);
++free_irq:
++ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
++free_mc_irq:
++ fsl_mc_free_irqs(ls_dev);
+
+ return err;
+}
+
-+static int add_cls(struct net_device *net_dev,
-+ struct ethtool_rx_flow_spec *fs)
++static void add_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
-+
-+ err = do_cls(net_dev, fs, true);
-+ if (err)
-+ return err;
-+
-+ priv->cls_rule[fs->location].in_use = true;
-+ priv->cls_rule[fs->location].fs = *fs;
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+ return 0;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
++ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
++ NAPI_POLL_WEIGHT);
++ }
+}
+
-+static int del_cls(struct net_device *net_dev, int location)
++static void del_ch_napi(struct dpaa2_eth_priv *priv)
+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int err;
++ int i;
++ struct dpaa2_eth_channel *ch;
+
-+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
-+ if (err)
-+ return err;
++ for (i = 0; i < priv->num_channels; i++) {
++ ch = priv->channel[i];
++ netif_napi_del(&ch->napi);
++ }
++}
+
-+ priv->cls_rule[location].in_use = false;
++/* SysFS support */
++static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ /* No MC API for getting the shaping config. We're stateful. */
++ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg;
+
-+ return 0;
++ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size);
+}
+
-+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc)
++static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf,
++ size_t count)
+{
-+ int err = 0;
-+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_SRXCLSRLINS:
-+ err = add_cls(net_dev, &rxnfc->fs);
-+ break;
++ int err, items;
++ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev));
++ struct dpni_tx_shaping_cfg scfg, ercfg = { 0 };
+
-+ case ETHTOOL_SRXCLSRLDEL:
-+ err = del_cls(net_dev, rxnfc->fs.location);
-+ break;
++ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size);
++ if (items != 2) {
++ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n");
++ return -EINVAL;
++ }
++ /* Size restriction as per MC API documentation */
++ if (scfg.max_burst_size > DPAA2_ETH_MAX_BURST_SIZE) {
++ pr_err("max_burst_size must be <= %d\n",
++ DPAA2_ETH_MAX_BURST_SIZE);
++ return -EINVAL;
++ }
+
-+ default:
-+ err = -EOPNOTSUPP;
++ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg,
++ &ercfg, 0);
++ if (err) {
++ dev_err(dev, "dpni_set_tx_shaping() failed\n");
++ return -EPERM;
+ }
++ /* If successful, save the current configuration for future inquiries */
++ priv->shaping_cfg = scfg;
+
-+ return err;
++ return count;
+}
+
-+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
-+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
-+{
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ const int rule_cnt = dpaa2_eth_fs_count(priv);
-+ int i, j;
++static struct device_attribute dpaa2_eth_attrs[] = {
++ __ATTR(tx_shaping,
++ 0600,
++ dpaa2_eth_show_tx_shaping,
++ dpaa2_eth_write_tx_shaping),
++};
+
-+ switch (rxnfc->cmd) {
-+ case ETHTOOL_GRXFH:
-+ /* we purposely ignore cmd->flow_type, because the hashing key
-+ * is the same (and fixed) for all protocols
-+ */
-+ rxnfc->data = priv->rx_flow_hash;
-+ break;
++static void dpaa2_eth_sysfs_init(struct device *dev)
++{
++ int i, err;
+
-+ case ETHTOOL_GRXRINGS:
-+ rxnfc->data = dpaa2_eth_queue_count(priv);
-+ break;
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) {
++ err = device_create_file(dev, &dpaa2_eth_attrs[i]);
++ if (err) {
++ dev_err(dev, "ERROR creating sysfs file\n");
++ goto undo;
++ }
++ }
++ return;
+
-+ case ETHTOOL_GRXCLSRLCNT:
-+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
-+ if (priv->cls_rule[i].in_use)
-+ rxnfc->rule_cnt++;
-+ rxnfc->data = rule_cnt;
-+ break;
++undo:
++ while (i > 0)
++ device_remove_file(dev, &dpaa2_eth_attrs[--i]);
++}
+
-+ case ETHTOOL_GRXCLSRULE:
-+ if (!priv->cls_rule[rxnfc->fs.location].in_use)
-+ return -EINVAL;
++static void dpaa2_eth_sysfs_remove(struct device *dev)
++{
++ int i;
+
-+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
-+ break;
++ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++)
++ device_remove_file(dev, &dpaa2_eth_attrs[i]);
++}
+
-+ case ETHTOOL_GRXCLSRLALL:
-+ for (i = 0, j = 0; i < rule_cnt; i++) {
-+ if (!priv->cls_rule[i].in_use)
-+ continue;
-+ if (j == rxnfc->rule_cnt)
-+ return -EMSGSIZE;
-+ rule_locs[j++] = i;
++#ifdef CONFIG_FSL_DPAA2_ETH_DCB
++static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
++ struct ieee_pfc *pfc)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_congestion_notification_cfg notification_cfg;
++ struct dpni_link_state state;
++ int err, i;
++
++ priv->pfc.pfc_cap = dpaa2_eth_tc_count(priv);
++
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ return err;
++ }
++
++ if (!(state.options & DPNI_LINK_OPT_PFC_PAUSE))
++ return 0;
++
++ priv->pfc.pfc_en = 0;
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ err = dpni_get_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_RX,
++ i, ¬ification_cfg);
++ if (err) {
++ netdev_err(net_dev, "Error %d getting congestion notif",
++ err);
++ return err;
+ }
-+ rxnfc->rule_cnt = j;
-+ rxnfc->data = rule_cnt;
-+ break;
+
-+ default:
-+ return -EOPNOTSUPP;
++ if (notification_cfg.threshold_entry)
++ priv->pfc.pfc_en |= 1 << i;
+ }
+
++ memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
++
+ return 0;
+}
+
-+const struct ethtool_ops dpaa2_ethtool_ops = {
-+ .get_drvinfo = dpaa2_eth_get_drvinfo,
-+ .get_link = ethtool_op_get_link,
-+ .get_settings = dpaa2_eth_get_settings,
-+ .set_settings = dpaa2_eth_set_settings,
-+ .get_pauseparam = dpaa2_eth_get_pauseparam,
-+ .set_pauseparam = dpaa2_eth_set_pauseparam,
-+ .get_sset_count = dpaa2_eth_get_sset_count,
-+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
-+ .get_strings = dpaa2_eth_get_strings,
-+ .get_rxnfc = dpaa2_eth_get_rxnfc,
-+ .set_rxnfc = dpaa2_eth_set_rxnfc,
-+};
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
-@@ -0,0 +1,176 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef __FSL_DPKG_H_
-+#define __FSL_DPKG_H_
++/* Configure ingress classification based on VLAN PCP */
++static int set_vlan_qos(struct dpaa2_eth_priv *priv)
++{
++ struct device *dev = priv->net_dev->dev.parent;
++ struct dpkg_profile_cfg kg_cfg = {0};
++ struct dpni_qos_tbl_cfg qos_cfg = {0};
++ struct dpni_rule_cfg key_params;
++ u8 *params_iova, *key, *mask = NULL;
++ /* We only need the trailing 16 bits, without the TPID */
++ u8 key_size = VLAN_HLEN / 2;
++ int err = 0, i, j = 0;
+
-+#include <linux/types.h>
-+#include "net.h"
++ if (priv->vlan_clsf_set)
++ return 0;
+
-+/* Data Path Key Generator API
-+ * Contains initialization APIs and runtime APIs for the Key Generator
-+ */
++ params_iova = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
++ if (!params_iova)
++ return -ENOMEM;
+
-+/** Key Generator properties */
++ kg_cfg.num_extracts = 1;
++ kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
++ kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
++ kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
++ kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
+
-+/**
-+ * Number of masks per key extraction
-+ */
-+#define DPKG_NUM_OF_MASKS 4
-+/**
-+ * Number of extractions per key profile
-+ */
-+#define DPKG_MAX_NUM_OF_EXTRACTS 10
++ err = dpni_prepare_key_cfg(&kg_cfg, params_iova);
++ if (err) {
++ dev_err(dev, "dpkg_prepare_key_cfg failed: %d\n", err);
++ goto out_free;
++ }
+
-+/**
-+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
-+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
-+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
-+ * @DPKG_FULL_FIELD: Extract a full field
-+ */
-+enum dpkg_extract_from_hdr_type {
-+ DPKG_FROM_HDR = 0,
-+ DPKG_FROM_FIELD = 1,
-+ DPKG_FULL_FIELD = 2
-+};
++ /* Set QoS table */
++ qos_cfg.default_tc = 0;
++ qos_cfg.discard_on_miss = 0;
++ qos_cfg.key_cfg_iova = dma_map_single(dev, params_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
++ dev_err(dev, "%s: DMA mapping failed\n", __func__);
++ err = -ENOMEM;
++ goto out_free;
++ }
++ err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
++ dma_unmap_single(dev, qos_cfg.key_cfg_iova,
++ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+
-+/**
-+ * enum dpkg_extract_type - Enumeration for selecting extraction type
-+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
-+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
-+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
-+ * e.g. can be used to extract header existence;
-+ * please refer to 'Parse Result definition' section in the parser BG
-+ */
-+enum dpkg_extract_type {
-+ DPKG_EXTRACT_FROM_HDR = 0,
-+ DPKG_EXTRACT_FROM_DATA = 1,
-+ DPKG_EXTRACT_FROM_PARSE = 3
-+};
++ if (err) {
++ dev_err(dev, "dpni_set_qos_table failed: %d\n", err);
++ goto out_free;
++ }
+
-+/**
-+ * struct dpkg_mask - A structure for defining a single extraction mask
-+ * @mask: Byte mask for the extracted content
-+ * @offset: Offset within the extracted content
-+ */
-+struct dpkg_mask {
-+ u8 mask;
-+ u8 offset;
-+};
++ key_params.key_size = key_size;
+
-+/**
-+ * struct dpkg_extract - A structure for defining a single extraction
-+ * @type: Determines how the union below is interpreted:
-+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
-+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
-+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
-+ * @extract: Selects extraction method
-+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
-+ * This is also the number of bytes to be used as masks
-+ * @masks: Masks parameters
-+ */
-+struct dpkg_extract {
-+ enum dpkg_extract_type type;
-+ /**
-+ * union extract - Selects extraction method
-+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
-+ */
-+ union {
-+ /**
-+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
-+ * @prot: Any of the supported headers
-+ * @type: Defines the type of header extraction:
-+ * DPKG_FROM_HDR: use size & offset below;
-+ * DPKG_FROM_FIELD: use field, size and offset below;
-+ * DPKG_FULL_FIELD: use field below
-+ * @field: One of the supported fields (NH_FLD_)
-+ *
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ * @hdr_index: Clear for cases not listed below;
-+ * Used for protocols that may have more than a single
-+ * header, 0 indicates an outer header;
-+ * Supported protocols (possible values):
-+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
-+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
-+ * NET_PROT_IP(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
-+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
-+ */
++ if (dpaa2_eth_fs_mask_enabled(priv)) {
++ mask = kzalloc(key_size, GFP_KERNEL);
++ if (!mask)
++ goto out_free;
+
-+ struct {
-+ enum net_prot prot;
-+ enum dpkg_extract_from_hdr_type type;
-+ u32 field;
-+ u8 size;
-+ u8 offset;
-+ u8 hdr_index;
-+ } from_hdr;
-+ /**
-+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ u8 size;
-+ u8 offset;
-+ } from_data;
++ *mask = cpu_to_be16(VLAN_PRIO_MASK);
+
-+ /**
-+ * struct from_parse - Used when
-+ * 'type = DPKG_EXTRACT_FROM_PARSE'
-+ * @size: Size in bytes
-+ * @offset: Byte offset
-+ */
-+ struct {
-+ u8 size;
-+ u8 offset;
-+ } from_parse;
-+ } extract;
++ key_params.mask_iova = dma_map_single(dev, mask, key_size,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_params.mask_iova)) {
++ dev_err(dev, "DMA mapping failed %s\n", __func__);
++ err = -ENOMEM;
++ goto out_free_mask;
++ }
++ } else {
++ key_params.mask_iova = 0;
++ }
+
-+ u8 num_of_byte_masks;
-+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
-+};
++ key = kzalloc(key_size, GFP_KERNEL);
++ if (!key)
++ goto out_cleanup_mask;
+
-+/**
-+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
-+ * profile (rule)
-+ * @num_extracts: Defines the number of valid entries in the array below
-+ * @extracts: Array of required extractions
-+ */
-+struct dpkg_profile_cfg {
-+ u8 num_extracts;
-+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
-+};
++ key_params.key_iova = dma_map_single(dev, key, key_size,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, key_params.key_iova)) {
++ dev_err(dev, "%s: DMA mapping failed\n", __func__);
++ err = -ENOMEM;
++ goto out_free_key;
++ }
+
-+#endif /* __FSL_DPKG_H_ */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
-@@ -0,0 +1,658 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
-+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
-+ */
-+#ifndef _FSL_DPNI_CMD_H
-+#define _FSL_DPNI_CMD_H
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ *key = cpu_to_be16(i << VLAN_PRIO_SHIFT);
+
-+/* DPNI Version */
-+#define DPNI_VER_MAJOR 7
-+#define DPNI_VER_MINOR 0
-+#define DPNI_CMD_BASE_VERSION 1
-+#define DPNI_CMD_2ND_VERSION 2
-+#define DPNI_CMD_ID_OFFSET 4
++ dma_sync_single_for_device(dev, key_params.key_iova,
++ key_size, DMA_TO_DEVICE);
+
-+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
-+#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
++ err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
++ &key_params, i, j++);
++ if (err) {
++ dev_err(dev, "dpni_add_qos_entry failed: %d\n", err);
++ goto out_remove;
++ }
++ }
+
-+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
-+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
-+#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
-+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
++ priv->vlan_clsf_set = true;
++ dev_dbg(dev, "Vlan PCP QoS classification set\n");
++ goto out_cleanup;
+
-+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
-+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
-+#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
-+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
-+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
++out_remove:
++ for (j = 0; j < i; j++) {
++ *key = cpu_to_be16(j << VLAN_PRIO_SHIFT);
+
-+#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
-+#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
-+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
-+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
-+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
-+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
-+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
-+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
++ dma_sync_single_for_device(dev, key_params.key_iova, key_size,
++ DMA_TO_DEVICE);
+
-+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
-+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
++ err = dpni_remove_qos_entry(priv->mc_io, 0, priv->mc_token,
++ &key_params);
++ if (err)
++ dev_err(dev, "dpni_remove_qos_entry failed: %d\n", err);
++ }
+
-+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
-+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
-+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
-+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
-+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
-+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
-+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
++out_cleanup:
++ dma_unmap_single(dev, key_params.key_iova, key_size, DMA_TO_DEVICE);
++out_free_key:
++ kfree(key);
++out_cleanup_mask:
++ if (key_params.mask_iova)
++ dma_unmap_single(dev, key_params.mask_iova, key_size,
++ DMA_TO_DEVICE);
++out_free_mask:
++ kfree(mask);
++out_free:
++ kfree(params_iova);
++ return err;
++}
+
-+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
-+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
-+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
-+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
-+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
-+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
-+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
-+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
-+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
++static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
++ struct ieee_pfc *pfc)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_congestion_notification_cfg notification_cfg = {0};
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ struct ieee_pfc old_pfc;
++ int err = 0, i;
+
-+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
++ if (dpaa2_eth_tc_count(priv) == 1) {
++ netdev_dbg(net_dev, "DPNI has 1 TC, PFC configuration N/A\n");
++ return 0;
++ }
+
-+#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
-+#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
-+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
-+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
-+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
++ /* Zero out pfc_enabled prios greater than tc_count */
++ pfc->pfc_en &= (1 << dpaa2_eth_tc_count(priv)) - 1;
+
-+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
-+#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
-+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
-+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
-+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
-+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
++ if (priv->pfc.pfc_en == pfc->pfc_en)
++ /* Same enabled mask, nothing to be done */
++ return 0;
+
-+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
++ err = set_vlan_qos(priv);
++ if (err)
++ return err;
+
-+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
-+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state", err);
++ return err;
++ }
+
-+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
-+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
-+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
-+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
-+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
-+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
-+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
++ cfg.rate = state.rate;
++ cfg.options = state.options;
++ if (pfc->pfc_en)
++ cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
+
-+/* Macros for accessing command fields smaller than 1byte */
-+#define DPNI_MASK(field) \
-+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
-+ DPNI_##field##_SHIFT)
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d setting link cfg", err);
++ return err;
++ }
+
-+#define dpni_set_field(var, field, val) \
-+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
-+#define dpni_get_field(var, field) \
-+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
++ memcpy(&old_pfc, &priv->pfc, sizeof(priv->pfc));
++ memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
+
-+struct dpni_cmd_open {
-+ __le32 dpni_id;
-+};
++ err = set_rx_taildrop(priv);
++ if (err)
++ goto out_restore_config;
+
-+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
-+struct dpni_cmd_set_pools {
-+ u8 num_dpbp;
-+ u8 backup_pool_mask;
-+ __le16 pad;
-+ struct {
-+ __le16 dpbp_id;
-+ u8 priority_mask;
-+ u8 pad;
-+ } pool[DPNI_MAX_DPBP];
-+ __le16 buffer_size[DPNI_MAX_DPBP];
-+};
++ /* configure congestion notifications */
++ notification_cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
++ notification_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
++ notification_cfg.message_iova = 0ULL;
++ notification_cfg.message_ctx = 0ULL;
+
-+/* The enable indication is always the least significant bit */
-+#define DPNI_ENABLE_SHIFT 0
-+#define DPNI_ENABLE_SIZE 1
++ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
++ if (dpaa2_eth_is_pfc_enabled(priv, i)) {
++ notification_cfg.threshold_entry = NAPI_POLL_WEIGHT;
++ notification_cfg.threshold_exit = NAPI_POLL_WEIGHT / 2;
++ } else {
++ notification_cfg.threshold_entry = 0;
++ notification_cfg.threshold_exit = 0;
++ }
+
-+struct dpni_rsp_is_enabled {
-+ u8 enabled;
-+};
++ err = dpni_set_congestion_notification(priv->mc_io, 0,
++ priv->mc_token,
++ DPNI_QUEUE_RX,
++ i, &notification_cfg);
++ if (err) {
++ netdev_err(net_dev, "Error %d setting congestion notif",
++ err);
++ goto out_restore_config;
++ }
+
-+struct dpni_rsp_get_irq {
-+ /* response word 0 */
-+ __le32 irq_val;
-+ __le32 pad;
-+ /* response word 1 */
-+ __le64 irq_addr;
-+ /* response word 2 */
-+ __le32 irq_num;
-+ __le32 type;
-+};
++ netdev_dbg(net_dev, "%s congestion notifications for tc %d\n",
++ (notification_cfg.threshold_entry ?
++ "Enabled" : "Disabled"), i);
++ }
+
-+struct dpni_cmd_set_irq_enable {
-+ u8 enable;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
++ return 0;
+
-+struct dpni_cmd_get_irq_enable {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++out_restore_config:
++ memcpy(&priv->pfc, &old_pfc, sizeof(priv->pfc));
++ return err;
++}
+
-+struct dpni_rsp_get_irq_enable {
-+ u8 enabled;
-+};
++static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
++ return priv->dcbx_mode;
++}
+
-+struct dpni_cmd_get_irq_mask {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_rsp_get_irq_mask {
-+ __le32 mask;
-+};
++ priv->dcbx_mode = mode;
++ return 0;
++}
+
-+struct dpni_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+struct dpni_rsp_get_irq_status {
-+ __le32 status;
-+};
++ switch (capid) {
++ case DCB_CAP_ATTR_PFC:
++ *cap = true;
++ break;
++ case DCB_CAP_ATTR_PFC_TCS:
++ /* bitmap where each bit represents a number of traffic
++ * classes the device can be configured to use for Priority
++ * Flow Control
++ */
++ *cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
++ break;
++ case DCB_CAP_ATTR_DCBX:
++ *cap = priv->dcbx_mode;
++ break;
++ default:
++ *cap = false;
++ break;
++ }
+
-+struct dpni_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++ return 0;
++}
+
-+struct dpni_rsp_get_attr {
-+ /* response word 0 */
-+ __le32 options;
-+ u8 num_queues;
-+ u8 num_tcs;
-+ u8 mac_filter_entries;
-+ u8 pad0;
-+ /* response word 1 */
-+ u8 vlan_filter_entries;
-+ u8 pad1;
-+ u8 qos_entries;
-+ u8 pad2;
-+ __le16 fs_entries;
-+ __le16 pad3;
-+ /* response word 2 */
-+ u8 qos_key_size;
-+ u8 fs_key_size;
-+ __le16 wriop_version;
++const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
++ .ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
++ .ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
++ .getdcbx = dpaa2_eth_dcbnl_getdcbx,
++ .setdcbx = dpaa2_eth_dcbnl_setdcbx,
++ .getcap = dpaa2_eth_dcbnl_getcap,
+};
++#endif
+
-+#define DPNI_ERROR_ACTION_SHIFT 0
-+#define DPNI_ERROR_ACTION_SIZE 4
-+#define DPNI_FRAME_ANN_SHIFT 4
-+#define DPNI_FRAME_ANN_SIZE 1
++static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev = NULL;
++ struct dpaa2_eth_priv *priv = NULL;
++ int err = 0;
+
-+struct dpni_cmd_set_errors_behavior {
-+ __le32 errors;
-+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
-+ u8 flags;
-+};
++ dev = &dpni_dev->dev;
+
-+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
-+ * buffer layouts, but they all share the same parameters.
-+ * If one of the functions changes, below structure needs to be split.
-+ */
++ /* Net device */
++ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
++ if (!net_dev) {
++ dev_err(dev, "alloc_etherdev_mq() failed\n");
++ return -ENOMEM;
++ }
+
-+#define DPNI_PASS_TS_SHIFT 0
-+#define DPNI_PASS_TS_SIZE 1
-+#define DPNI_PASS_PR_SHIFT 1
-+#define DPNI_PASS_PR_SIZE 1
-+#define DPNI_PASS_FS_SHIFT 2
-+#define DPNI_PASS_FS_SIZE 1
++ SET_NETDEV_DEV(net_dev, dev);
++ dev_set_drvdata(dev, net_dev);
+
-+struct dpni_cmd_get_buffer_layout {
-+ u8 qtype;
-+};
++ priv = netdev_priv(net_dev);
++ priv->net_dev = net_dev;
+
-+struct dpni_rsp_get_buffer_layout {
-+ /* response word 0 */
-+ u8 pad0[6];
-+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
-+ u8 flags;
-+ u8 pad1;
-+ /* response word 1 */
-+ __le16 private_data_size;
-+ __le16 data_align;
-+ __le16 head_room;
-+ __le16 tail_room;
-+};
++ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
-+struct dpni_cmd_set_buffer_layout {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 pad0[3];
-+ __le16 options;
-+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
-+ u8 flags;
-+ u8 pad1;
-+ /* cmd word 1 */
-+ __le16 private_data_size;
-+ __le16 data_align;
-+ __le16 head_room;
-+ __le16 tail_room;
-+};
++ /* Obtain a MC portal */
++ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
++ goto err_portal_alloc;
++ }
+
-+struct dpni_cmd_set_offload {
-+ u8 pad[3];
-+ u8 dpni_offload;
-+ __le32 config;
-+};
++ /* MC objects initialization and configuration */
++ err = setup_dpni(dpni_dev);
++ if (err)
++ goto err_dpni_setup;
+
-+struct dpni_cmd_get_offload {
-+ u8 pad[3];
-+ u8 dpni_offload;
-+};
++ err = setup_dpio(priv);
++ if (err)
++ goto err_dpio_setup;
+
-+struct dpni_rsp_get_offload {
-+ __le32 pad;
-+ __le32 config;
-+};
++ setup_fqs(priv);
+
-+struct dpni_cmd_get_qdid {
-+ u8 qtype;
-+};
++ err = setup_dpbp(priv);
++ if (err)
++ goto err_dpbp_setup;
+
-+struct dpni_rsp_get_qdid {
-+ __le16 qdid;
-+};
++ err = bind_dpni(priv);
++ if (err)
++ goto err_bind;
+
-+struct dpni_rsp_get_tx_data_offset {
-+ __le16 data_offset;
-+};
++ /* Percpu statistics */
++ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
++ if (!priv->percpu_stats) {
++ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_stats;
++ }
++ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
++ if (!priv->percpu_extras) {
++ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
++ err = -ENOMEM;
++ goto err_alloc_percpu_extras;
++ }
+
-+struct dpni_cmd_get_statistics {
-+ u8 page_number;
-+};
++ err = netdev_init(net_dev);
++ if (err)
++ goto err_netdev_init;
+
-+struct dpni_rsp_get_statistics {
-+ __le64 counter[DPNI_STATISTICS_CNT];
-+};
++ /* Configure checksum offload based on current interface flags */
++ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
++ if (err)
++ goto err_csum;
+
-+struct dpni_cmd_set_link_cfg {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++ err = set_tx_csum(priv, !!(net_dev->features &
++ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
++ if (err)
++ goto err_csum;
+
-+#define DPNI_LINK_STATE_SHIFT 0
-+#define DPNI_LINK_STATE_SIZE 1
++ err = alloc_rings(priv);
++ if (err)
++ goto err_alloc_rings;
+
-+struct dpni_rsp_get_link_state {
-+ /* response word 0 */
-+ __le32 pad0;
-+ /* from LSB: up:1 */
-+ u8 flags;
-+ u8 pad1[3];
-+ /* response word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* response word 2 */
-+ __le64 options;
-+};
-+
-+struct dpni_cmd_set_tx_shaping {
-+ /* cmd word 0 */
-+ __le16 max_burst_size;
-+ __le16 pad0[3];
-+ /* cmd word 1 */
-+ __le32 rate_limit;
-+};
-+
-+struct dpni_cmd_set_max_frame_length {
-+ __le16 max_frame_length;
-+};
++ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
++#ifdef CONFIG_FSL_DPAA2_ETH_DCB
++ net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
++ priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
++#endif
+
-+struct dpni_rsp_get_max_frame_length {
-+ __le16 max_frame_length;
-+};
++ /* Add a NAPI context for each channel */
++ add_ch_napi(priv);
++ enable_ch_napi(priv);
+
-+struct dpni_cmd_set_multicast_promisc {
-+ u8 enable;
-+};
++ err = setup_irqs(dpni_dev);
++ if (err) {
++ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
++ priv->poll_thread = kthread_run(poll_link_state, priv,
++ "%s_poll_link", net_dev->name);
++ if (IS_ERR(priv->poll_thread)) {
++ netdev_err(net_dev, "Error starting polling thread\n");
++ goto err_poll_thread;
++ }
++ priv->do_link_poll = true;
++ }
+
-+struct dpni_rsp_get_multicast_promisc {
-+ u8 enabled;
-+};
++ dpaa2_eth_sysfs_init(&net_dev->dev);
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_add(priv);
++#endif
+
-+struct dpni_cmd_set_unicast_promisc {
-+ u8 enable;
-+};
++ dev_info(dev, "Probed interface %s\n", net_dev->name);
++ return 0;
+
-+struct dpni_rsp_get_unicast_promisc {
-+ u8 enabled;
-+};
++err_poll_thread:
++ free_rings(priv);
++err_alloc_rings:
++err_csum:
++ unregister_netdev(net_dev);
++err_netdev_init:
++ free_percpu(priv->percpu_extras);
++err_alloc_percpu_extras:
++ free_percpu(priv->percpu_stats);
++err_alloc_percpu_stats:
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
++err_bind:
++ free_dpbp(priv);
++err_dpbp_setup:
++ free_dpio(priv);
++err_dpio_setup:
++ free_dpni(priv);
++err_dpni_setup:
++ fsl_mc_portal_free(priv->mc_io);
++err_portal_alloc:
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
+
-+struct dpni_cmd_set_primary_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ return err;
++}
+
-+struct dpni_rsp_get_primary_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
++{
++ struct device *dev;
++ struct net_device *net_dev;
++ struct dpaa2_eth_priv *priv;
+
-+struct dpni_rsp_get_port_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ dev = &ls_dev->dev;
++ net_dev = dev_get_drvdata(dev);
++ priv = netdev_priv(net_dev);
+
-+struct dpni_cmd_add_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ dpaa2_dbg_remove(priv);
++#endif
++ dpaa2_eth_sysfs_remove(&net_dev->dev);
+
-+struct dpni_cmd_remove_mac_addr {
-+ __le16 pad;
-+ u8 mac_addr[6];
-+};
++ unregister_netdev(net_dev);
+
-+#define DPNI_UNICAST_FILTERS_SHIFT 0
-+#define DPNI_UNICAST_FILTERS_SIZE 1
-+#define DPNI_MULTICAST_FILTERS_SHIFT 1
-+#define DPNI_MULTICAST_FILTERS_SIZE 1
++ disable_ch_napi(priv);
++ del_ch_napi(priv);
+
-+struct dpni_cmd_clear_mac_filters {
-+ /* from LSB: unicast:1, multicast:1 */
-+ u8 flags;
-+};
++ if (priv->do_link_poll)
++ kthread_stop(priv->poll_thread);
++ else
++ fsl_mc_free_irqs(ls_dev);
+
-+#define DPNI_DIST_MODE_SHIFT 0
-+#define DPNI_DIST_MODE_SIZE 4
-+#define DPNI_MISS_ACTION_SHIFT 4
-+#define DPNI_MISS_ACTION_SIZE 4
++ free_rings(priv);
++ free_percpu(priv->percpu_stats);
++ free_percpu(priv->percpu_extras);
++ free_dpbp(priv);
++ free_dpio(priv);
++ free_dpni(priv);
+
-+struct dpni_cmd_set_rx_tc_dist {
-+ /* cmd word 0 */
-+ __le16 dist_size;
-+ u8 tc_id;
-+ /* from LSB: dist_mode:4, miss_action:4 */
-+ u8 flags;
-+ __le16 pad0;
-+ __le16 default_flow_id;
-+ /* cmd word 1..5 */
-+ __le64 pad1[5];
-+ /* cmd word 6 */
-+ __le64 key_cfg_iova;
-+};
++ fsl_mc_portal_free(priv->mc_io);
+
-+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
-+ * key_cfg_iova)
-+ */
-+struct dpni_mask_cfg {
-+ u8 mask;
-+ u8 offset;
-+};
++ dev_set_drvdata(dev, NULL);
++ free_netdev(net_dev);
+
-+#define DPNI_EFH_TYPE_SHIFT 0
-+#define DPNI_EFH_TYPE_SIZE 4
-+#define DPNI_EXTRACT_TYPE_SHIFT 0
-+#define DPNI_EXTRACT_TYPE_SIZE 4
++ dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
-+struct dpni_dist_extract {
-+ /* word 0 */
-+ u8 prot;
-+ /* EFH type stored in the 4 least significant bits */
-+ u8 efh_type;
-+ u8 size;
-+ u8 offset;
-+ __le32 field;
-+ /* word 1 */
-+ u8 hdr_index;
-+ u8 constant;
-+ u8 num_of_repeats;
-+ u8 num_of_byte_masks;
-+ /* Extraction type is stored in the 4 LSBs */
-+ u8 extract_type;
-+ u8 pad[3];
-+ /* word 2 */
-+ struct dpni_mask_cfg masks[4];
-+};
++ return 0;
++}
+
-+struct dpni_ext_set_rx_tc_dist {
-+ /* extension word 0 */
-+ u8 num_extracts;
-+ u8 pad[7];
-+ /* words 1..25 */
-+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
++ {
++ .vendor = FSL_MC_VENDOR_FREESCALE,
++ .obj_type = "dpni",
++ },
++ { .vendor = 0x0 }
+};
++MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
-+struct dpni_cmd_get_queue {
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
++static struct fsl_mc_driver dpaa2_eth_driver = {
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = dpaa2_eth_probe,
++ .remove = dpaa2_eth_remove,
++ .match_id_table = dpaa2_eth_match_id_table
+};
+
-+#define DPNI_DEST_TYPE_SHIFT 0
-+#define DPNI_DEST_TYPE_SIZE 4
-+#define DPNI_STASH_CTRL_SHIFT 6
-+#define DPNI_STASH_CTRL_SIZE 1
-+#define DPNI_HOLD_ACTIVE_SHIFT 7
-+#define DPNI_HOLD_ACTIVE_SIZE 1
++static int __init dpaa2_eth_driver_init(void)
++{
++ int err;
+
-+struct dpni_rsp_get_queue {
-+ /* response word 0 */
-+ __le64 pad0;
-+ /* response word 1 */
-+ __le32 dest_id;
-+ __le16 pad1;
-+ u8 dest_prio;
-+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
-+ u8 flags;
-+ /* response word 2 */
-+ __le64 flc;
-+ /* response word 3 */
-+ __le64 user_context;
-+ /* response word 4 */
-+ __le32 fqid;
-+ __le16 qdbin;
-+};
++ dpaa2_eth_dbg_init();
++ err = fsl_mc_driver_register(&dpaa2_eth_driver);
++ if (err)
++ goto out_debugfs_err;
+
-+struct dpni_cmd_set_queue {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+ u8 options;
-+ __le32 pad0;
-+ /* cmd word 1 */
-+ __le32 dest_id;
-+ __le16 pad1;
-+ u8 dest_prio;
-+ u8 flags;
-+ /* cmd word 2 */
-+ __le64 flc;
-+ /* cmd word 3 */
-+ __le64 user_context;
-+};
++ err = dpaa2_ceetm_register();
++ if (err)
++ goto out_ceetm_err;
+
-+#define DPNI_DISCARD_ON_MISS_SHIFT 0
-+#define DPNI_DISCARD_ON_MISS_SIZE 1
++ return 0;
+
-+struct dpni_cmd_set_qos_table {
-+ u32 pad;
-+ u8 default_tc;
-+ /* only the LSB */
-+ u8 discard_on_miss;
-+ u16 pad1[21];
-+ u64 key_cfg_iova;
-+};
-+
-+struct dpni_cmd_add_qos_entry {
-+ u16 pad;
-+ u8 tc_id;
-+ u8 key_size;
-+ u16 index;
-+ u16 pad2;
-+ u64 key_iova;
-+ u64 mask_iova;
-+};
-+
-+struct dpni_cmd_remove_qos_entry {
-+ u8 pad1[3];
-+ u8 key_size;
-+ u32 pad2;
-+ u64 key_iova;
-+ u64 mask_iova;
-+};
-+
-+struct dpni_cmd_add_fs_entry {
-+ /* cmd word 0 */
-+ u16 options;
-+ u8 tc_id;
-+ u8 key_size;
-+ u16 index;
-+ u16 flow_id;
-+ /* cmd word 1 */
-+ u64 key_iova;
-+ /* cmd word 2 */
-+ u64 mask_iova;
-+ /* cmd word 3 */
-+ u64 flc;
-+};
-+
-+struct dpni_cmd_remove_fs_entry {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ u8 tc_id;
-+ u8 key_size;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ u64 key_iova;
-+ /* cmd word 2 */
-+ u64 mask_iova;
-+};
-+
-+struct dpni_cmd_set_taildrop {
-+ /* cmd word 0 */
-+ u8 congestion_point;
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+ __le32 pad0;
-+ /* cmd word 1 */
-+ /* Only least significant bit is relevant */
-+ u8 enable;
-+ u8 pad1;
-+ u8 units;
-+ u8 pad2;
-+ __le32 threshold;
-+};
-+
-+struct dpni_cmd_get_taildrop {
-+ u8 congestion_point;
-+ u8 qtype;
-+ u8 tc;
-+ u8 index;
-+};
-+
-+struct dpni_rsp_get_taildrop {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ /* only least significant bit is relevant */
-+ u8 enable;
-+ u8 pad1;
-+ u8 units;
-+ u8 pad2;
-+ __le32 threshold;
-+};
-+
-+#define DPNI_DEST_TYPE_SHIFT 0
-+#define DPNI_DEST_TYPE_SIZE 4
-+#define DPNI_CONG_UNITS_SHIFT 4
-+#define DPNI_CONG_UNITS_SIZE 2
-+
-+struct dpni_cmd_set_congestion_notification {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ u32 dest_id;
-+ u16 notification_mode;
-+ u8 dest_priority;
-+ /* from LSB: dest_type: 4 units:2 */
-+ u8 type_units;
-+ /* cmd word 2 */
-+ u64 message_iova;
-+ /* cmd word 3 */
-+ u64 message_ctx;
-+ /* cmd word 4 */
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+};
++out_ceetm_err:
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++out_debugfs_err:
++ dpaa2_eth_dbg_exit();
++ return err;
++}
+
-+struct dpni_cmd_get_congestion_notification {
-+ /* cmd word 0 */
-+ u8 qtype;
-+ u8 tc;
-+};
++static void __exit dpaa2_eth_driver_exit(void)
++{
++ dpaa2_ceetm_unregister();
++ fsl_mc_driver_unregister(&dpaa2_eth_driver);
++ dpaa2_eth_dbg_exit();
++}
+
-+struct dpni_rsp_get_congestion_notification {
-+ /* cmd word 0 */
-+ u64 pad;
-+ /* cmd word 1 */
-+ u32 dest_id;
-+ u16 notification_mode;
-+ u8 dest_priority;
-+ /* from LSB: dest_type: 4 units:2 */
-+ u8 type_units;
-+ /* cmd word 2 */
-+ u64 message_iova;
-+ /* cmd word 3 */
-+ u64 message_ctx;
-+ /* cmd word 4 */
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+};
-+#endif /* _FSL_DPNI_CMD_H */
++module_init(dpaa2_eth_driver_init);
++module_exit(dpaa2_eth_driver_exit);
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
-@@ -0,0 +1,1903 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -0,0 +1,601 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
-+#include "dpni.h"
-+#include "dpni-cmd.h"
+
-+/**
-+ * dpni_prepare_key_cfg() - function prepare extract parameters
-+ * @cfg: defining a full Key Generation profile (rule)
-+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before the following functions:
-+ * - dpni_set_rx_tc_dist()
-+ * - dpni_set_qos_table()
-+ */
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
-+{
-+ int i, j;
-+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
-+ struct dpni_dist_extract *extr;
++#ifndef __DPAA2_ETH_H
++#define __DPAA2_ETH_H
+
-+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
-+ return -EINVAL;
++#include <linux/dcbnl.h>
++#include <linux/netdevice.h>
++#include <linux/if_vlan.h>
+
-+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
-+ dpni_ext->num_extracts = cfg->num_extracts;
++#include "../../fsl-mc/include/dpaa2-io.h"
++#include "../../fsl-mc/include/dpaa2-fd.h"
++#include "dpni.h"
++#include "dpni-cmd.h"
+
-+ for (i = 0; i < cfg->num_extracts; i++) {
-+ extr = &dpni_ext->extracts[i];
++#include "dpaa2-eth-trace.h"
++#include "dpaa2-eth-debugfs.h"
+
-+ switch (cfg->extracts[i].type) {
-+ case DPKG_EXTRACT_FROM_HDR:
-+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
-+ dpni_set_field(extr->efh_type, EFH_TYPE,
-+ cfg->extracts[i].extract.from_hdr.type);
-+ extr->size = cfg->extracts[i].extract.from_hdr.size;
-+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
-+ extr->field = cpu_to_le32(
-+ cfg->extracts[i].extract.from_hdr.field);
-+ extr->hdr_index =
-+ cfg->extracts[i].extract.from_hdr.hdr_index;
-+ break;
-+ case DPKG_EXTRACT_FROM_DATA:
-+ extr->size = cfg->extracts[i].extract.from_data.size;
-+ extr->offset =
-+ cfg->extracts[i].extract.from_data.offset;
-+ break;
-+ case DPKG_EXTRACT_FROM_PARSE:
-+ extr->size = cfg->extracts[i].extract.from_parse.size;
-+ extr->offset =
-+ cfg->extracts[i].extract.from_parse.offset;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
++#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+
-+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
-+ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
-+ cfg->extracts[i].type);
++#define DPAA2_ETH_STORE_SIZE 16
+
-+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
-+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
-+ extr->masks[j].offset =
-+ cfg->extracts[i].masks[j].offset;
-+ }
-+ }
++/* We set a max threshold for how many Tx confirmations we should process
++ * on a NAPI poll call, they take less processing time.
++ */
++#define TX_CONF_PER_NAPI_POLL 256
+
-+ return 0;
-+}
++/* Maximum number of scatter-gather entries in an ingress frame,
++ * considering the maximum receive frame size is 64K
++ */
++#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
-+/**
-+ * dpni_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpni_id: DPNI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpni_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Maximum acceptable MTU value. It is in direct relation with the hardware
++ * enforced Max Frame Length (currently 10k).
+ */
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpni_id,
-+ u16 *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_open *cmd_params;
++#define DPAA2_ETH_MFL (10 * 1024)
++#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
++/* Convert L3 MTU to L2 MFL */
++#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
+
-+ int err;
++/* Maximum burst size value for Tx shaping */
++#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpni_cmd_open *)cmd.params;
-+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
++/* Maximum number of buffers that can be acquired/released through a single
++ * QBMan command
++ */
++#define DPAA2_ETH_BUFS_PER_CMD 7
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
++ * frames in the Rx queues (length of the current frame is not
++ * taken into account when making the taildrop decision)
++ */
++#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
++/* Buffer quota per queue. Must be large enough such that for minimum sized
++ * frames taildrop kicks in before the bpool gets depleted, so we compute
++ * how many 64B frames fit inside the taildrop threshold and add a margin
++ * to accommodate the buffer refill delay.
++ */
++#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
++#define DPAA2_ETH_NUM_BUFS_PER_CH (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
++#define DPAA2_ETH_REFILL_THRESH(priv) \
++ ((priv)->max_bufs_per_ch - DPAA2_ETH_BUFS_PER_CMD)
+
-+ return 0;
-+}
++/* Global buffer quota in case flow control is enabled */
++#define DPAA2_ETH_NUM_BUFS_FC 256
+
-+/**
-+ * dpni_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Hardware requires alignment for ingress/egress buffer addresses */
++#define DPAA2_ETH_TX_BUF_ALIGN 64
++
++/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
++ * to 256B. For newer revisions, the requirement is only for 64B alignment
+ */
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
++#define DPAA2_ETH_RX_BUF_ALIGN 64
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
++#define DPAA2_ETH_RX_BUF_SIZE 2048
++#define DPAA2_ETH_SKB_SIZE \
++ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* PTP nominal frequency 1GHz */
++#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1
+
-+/**
-+ * dpni_set_pools() - Set buffer pools configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Buffer pools configuration
-+ *
-+ * mandatory for DPNI operation
-+ * warning:Allowed only when DPNI is disabled
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Hardware annotation area in RX/TX buffers */
++#define DPAA2_ETH_RX_HWA_SIZE 64
++#define DPAA2_ETH_TX_HWA_SIZE 128
++
++/* We are accommodating a skb backpointer and some S/G info
++ * in the frame's software annotation. The hardware
++ * options are either 0 or 64, so we choose the latter.
+ */
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_pools_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_pools *cmd_params;
-+ int i;
++#define DPAA2_ETH_SWA_SIZE 64
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
-+ cmd_params->num_dpbp = cfg->num_dpbp;
-+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
-+ cmd_params->pool[i].dpbp_id =
-+ cpu_to_le16(cfg->pools[i].dpbp_id);
-+ cmd_params->pool[i].priority_mask =
-+ cfg->pools[i].priority_mask;
-+ cmd_params->buffer_size[i] =
-+ cpu_to_le16(cfg->pools[i].buffer_size);
-+ cmd_params->backup_pool_mask |=
-+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
-+ }
++/* We store different information in the software annotation area of a Tx frame
++ * based on what type of frame it is
++ */
++enum dpaa2_eth_swa_type {
++ DPAA2_ETH_SWA_SINGLE,
++ DPAA2_ETH_SWA_SG,
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
++struct dpaa2_eth_swa {
++ enum dpaa2_eth_swa_type type;
++ union {
++ struct {
++ struct sk_buff *skb;
++ } single;
++ struct {
++ struct sk_buff *skb;
++ struct scatterlist *scl;
++ int num_sg;
++ int sgt_size;
++ } sg;
++ };
++};
+
-+/**
-+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++/* Annotation valid bits in FD FRC */
++#define DPAA2_FD_FRC_FASV 0x8000
++#define DPAA2_FD_FRC_FAEADV 0x4000
++#define DPAA2_FD_FRC_FAPRV 0x2000
++#define DPAA2_FD_FRC_FAIADV 0x1000
++#define DPAA2_FD_FRC_FASWOV 0x0800
++#define DPAA2_FD_FRC_FAICFDV 0x0400
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
++#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR)
++#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \
++ FD_CTRL_SBE | \
++ FD_CTRL_FSE | \
++ FD_CTRL_FAERR)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Annotation bits in FD CTRL */
++#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+
-+/**
-+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Frame annotation status */
++struct dpaa2_fas {
++ u8 reserved;
++ u8 ppid;
++ __le16 ifpid;
++ __le32 status;
++};
++
++/* Frame annotation status word is located in the first 8 bytes
++ * of the buffer's hardware annotation area
+ */
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define DPAA2_FAS_OFFSET 0
++#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
++/* Timestamp is located in the next 8 bytes of the buffer's
++ * hardware annotation area
++ */
++#define DPAA2_TS_OFFSET 0x8
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Frame annotation egress action descriptor */
++#define DPAA2_FAEAD_OFFSET 0x58
+
-+/**
-+ * dpni_is_enabled() - Check if the DPNI is enabled.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_is_enabled *rsp_params;
-+ int err;
++struct dpaa2_faead {
++ __le32 conf_fqid;
++ __le32 ctrl;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
-+ cmd_flags,
-+ token);
++#define DPAA2_FAEAD_A2V 0x20000000
++#define DPAA2_FAEAD_A4V 0x08000000
++#define DPAA2_FAEAD_UPDV 0x00001000
++#define DPAA2_FAEAD_EBDDV 0x00002000
++#define DPAA2_FAEAD_UPD 0x00000010
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* Accessors for the hardware annotation fields that we use */
++static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
++{
++ return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
++}
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
++}
+
-+ return 0;
++static inline u64 *dpaa2_get_ts(void *buf_addr, bool swa)
++{
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
+}
+
-+/**
-+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
+{
-+ struct mc_command cmd = { 0 };
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
+}
+
-+/**
-+ * dpni_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state: - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the
-+ * overall interrupt state. if the interrupt is disabled no causes will cause
-+ * an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_irq_enable *cmd_params;
++/* Error and status bits in the frame annotation status word */
++/* Debug frame, otherwise supposed to be discarded */
++#define DPAA2_FAS_DISC 0x80000000
++/* MACSEC frame */
++#define DPAA2_FAS_MS 0x40000000
++#define DPAA2_FAS_PTP 0x08000000
++/* Ethernet multicast frame */
++#define DPAA2_FAS_MC 0x04000000
++/* Ethernet broadcast frame */
++#define DPAA2_FAS_BC 0x02000000
++#define DPAA2_FAS_KSE 0x00040000
++#define DPAA2_FAS_EOFHE 0x00020000
++#define DPAA2_FAS_MNLE 0x00010000
++#define DPAA2_FAS_TIDE 0x00008000
++#define DPAA2_FAS_PIEE 0x00004000
++/* Frame length error */
++#define DPAA2_FAS_FLE 0x00002000
++/* Frame physical error */
++#define DPAA2_FAS_FPE 0x00001000
++#define DPAA2_FAS_PTE 0x00000080
++#define DPAA2_FAS_ISP 0x00000040
++#define DPAA2_FAS_PHE 0x00000020
++#define DPAA2_FAS_BLE 0x00000010
++/* L3 csum validation performed */
++#define DPAA2_FAS_L3CV 0x00000008
++/* L3 csum error */
++#define DPAA2_FAS_L3CE 0x00000004
++/* L4 csum validation performed */
++#define DPAA2_FAS_L4CV 0x00000002
++/* L4 csum error */
++#define DPAA2_FAS_L4CE 0x00000001
++/* Possible errors on the ingress path */
++#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \
++ DPAA2_FAS_EOFHE | \
++ DPAA2_FAS_MNLE | \
++ DPAA2_FAS_TIDE | \
++ DPAA2_FAS_PIEE | \
++ DPAA2_FAS_FLE | \
++ DPAA2_FAS_FPE | \
++ DPAA2_FAS_PTE | \
++ DPAA2_FAS_ISP | \
++ DPAA2_FAS_PHE | \
++ DPAA2_FAS_BLE | \
++ DPAA2_FAS_L3CE | \
++ DPAA2_FAS_L4CE)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
-+ cmd_params->irq_index = irq_index;
++/* Time in milliseconds between link state updates */
++#define DPAA2_ETH_LINK_STATE_REFRESH 1000
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Number of times to retry a frame enqueue before giving up.
++ * Value determined empirically, in order to minimize the number
++ * of frames dropped on Tx
++ */
++#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
-+/**
-+ * dpni_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Tx congestion entry & exit thresholds, in number of bytes.
++ * We allow a maximum of 512KB worth of frames pending processing on the Tx
++ * queues of an interface
+ */
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_enable *cmd_params;
-+ struct dpni_rsp_get_irq_enable *rsp_params;
++#define DPAA2_ETH_TX_CONG_ENTRY_THRESH (512 * 1024)
++#define DPAA2_ETH_TX_CONG_EXIT_THRESH \
++ (DPAA2_ETH_TX_CONG_ENTRY_THRESH * 9 / 10)
+
-+ int err;
++/* Driver statistics, other than those in struct rtnl_link_stats64.
++ * These are usually collected per-CPU and aggregated by ethtool.
++ */
++struct dpaa2_eth_drv_stats {
++ __u64 tx_conf_frames;
++ __u64 tx_conf_bytes;
++ __u64 tx_sg_frames;
++ __u64 tx_sg_bytes;
++ __u64 tx_reallocs;
++ __u64 rx_sg_frames;
++ __u64 rx_sg_bytes;
++ /* Enqueues retried due to portal busy */
++ __u64 tx_portal_busy;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++/* Per-FQ statistics */
++struct dpaa2_eth_fq_stats {
++ /* Number of frames received on this queue */
++ __u64 frames;
++ /* Number of times this queue entered congestion */
++ __u64 congestion_entry;
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* Per-channel statistics */
++struct dpaa2_eth_ch_stats {
++ /* Volatile dequeues retried due to portal busy */
++ __u64 dequeue_portal_busy;
++ /* Number of CDANs; useful to estimate avg NAPI len */
++ __u64 cdan;
++ /* Number of frames received on queues from this channel */
++ __u64 frames;
++ /* Pull errors */
++ __u64 pull_err;
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++#define DPAA2_ETH_MAX_TCS 8
+
-+ return 0;
-+}
++/* Maximum number of queues associated with a DPNI */
++#define DPAA2_ETH_MAX_RX_QUEUES (DPNI_MAX_DIST_SIZE * DPAA2_ETH_MAX_TCS)
++#define DPAA2_ETH_MAX_TX_QUEUES DPNI_MAX_SENDERS
++#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
++#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
++ DPAA2_ETH_MAX_TX_QUEUES + \
++ DPAA2_ETH_MAX_RX_ERR_QUEUES)
+
-+/**
-+ * dpni_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_irq_mask *cmd_params;
++#define DPAA2_ETH_MAX_DPCONS 16
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
++enum dpaa2_eth_fq_type {
++ DPAA2_RX_FQ = 0,
++ DPAA2_TX_CONF_FQ,
++ DPAA2_RX_ERR_FQ
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpaa2_eth_priv;
+
-+/**
-+ * dpni_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_mask *cmd_params;
-+ struct dpni_rsp_get_irq_mask *rsp_params;
-+ int err;
++struct dpaa2_eth_fq {
++ u32 fqid;
++ u32 tx_qdbin;
++ u16 flowid;
++ u8 tc;
++ int target_cpu;
++ struct dpaa2_eth_channel *channel;
++ enum dpaa2_eth_fq_type type;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++ void (*consume)(struct dpaa2_eth_priv *,
++ struct dpaa2_eth_channel *,
++ const struct dpaa2_fd *,
++ struct napi_struct *,
++ u16 queue_id);
++ struct dpaa2_eth_fq_stats stats;
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++struct dpaa2_eth_channel {
++ struct dpaa2_io_notification_ctx nctx;
++ struct fsl_mc_device *dpcon;
++ int dpcon_id;
++ int ch_id;
++ struct napi_struct napi;
++ struct dpaa2_io *dpio;
++ struct dpaa2_io_store *store;
++ struct dpaa2_eth_priv *priv;
++ int buf_count;
++ struct dpaa2_eth_ch_stats stats;
++ struct bpf_prog *xdp_prog;
++ u64 rel_buf_array[DPAA2_ETH_BUFS_PER_CMD];
++ u8 rel_buf_cnt;
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
-+ *mask = le32_to_cpu(rsp_params->mask);
++struct dpaa2_eth_cls_rule {
++ struct ethtool_rx_flow_spec fs;
++ bool in_use;
++};
+
-+ return 0;
-+}
++struct dpaa2_eth_dist_fields {
++ u64 rxnfc_field;
++ enum net_prot cls_prot;
++ int cls_field;
++ int offset;
++ int size;
++ u32 id;
++};
+
-+/**
-+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_irq_status *cmd_params;
-+ struct dpni_rsp_get_irq_status *rsp_params;
-+ int err;
++/* Driver private data */
++struct dpaa2_eth_priv {
++ struct net_device *net_dev;
++ /* Standard statistics */
++ struct rtnl_link_stats64 __percpu *percpu_stats;
++ /* Extra stats, in addition to the ones known by the kernel */
++ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
++ bool ts_tx_en; /* Tx timestamping enabled */
++ bool ts_rx_en; /* Rx timestamping enabled */
++ u16 tx_data_offset;
++ u16 bpid;
++ u16 tx_qdid;
++ u16 rx_buf_align;
++ struct iommu_domain *iommu_domain;
++ int max_bufs_per_ch;
++ int refill_thresh;
++ bool has_xdp_prog;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
++ void *cscn_mem; /* Tx congestion notifications are written here */
++ void *cscn_unaligned;
++ dma_addr_t cscn_dma;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ u8 num_fqs;
++ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
++ u8 num_channels;
++ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+
-+ return 0;
-+}
++ struct dpni_attr dpni_attrs;
++ u16 dpni_ver_major;
++ u16 dpni_ver_minor;
++ struct fsl_mc_device *dpbp_dev;
+
-+/**
-+ * dpni_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_clear_irq_status *cmd_params;
++ struct fsl_mc_io *mc_io;
++ /* Cores which have an affine DPIO/DPCON.
++ * This is the cpu set on which Rx and Tx conf frames are processed
++ */
++ struct cpumask dpio_cpumask;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->status = cpu_to_le32(status);
++ u16 mc_token;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ struct dpni_link_state link_state;
++ bool do_link_poll;
++ struct task_struct *poll_thread;
+
-+/**
-+ * dpni_get_attributes() - Retrieve DPNI attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @attr: Object's attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_attr *rsp_params;
++ /* Rx distribution (hash and flow steering) header fields
++ * supported by the driver
++ */
++ struct dpaa2_eth_dist_fields *dist_fields;
++ u8 num_dist_fields;
++ /* enabled ethtool hashing bits */
++ u64 rx_hash_fields;
++#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
++ struct dpaa2_debugfs dbg;
++#endif
++ /* array of classification rules */
++ struct dpaa2_eth_cls_rule *cls_rule;
++ struct dpni_tx_shaping_cfg shaping_cfg;
+
-+ int err;
++ u8 dcbx_mode;
++ struct ieee_pfc pfc;
++ bool vlan_clsf_set;
++ bool tx_pause_frames;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
++ bool ceetm_en;
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++enum dpaa2_eth_rx_dist {
++ DPAA2_ETH_RX_DIST_HASH,
++ DPAA2_ETH_RX_DIST_FS,
++ DPAA2_ETH_RX_DIST_LEGACY
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->num_queues = rsp_params->num_queues;
-+ attr->num_tcs = rsp_params->num_tcs;
-+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
-+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
-+ attr->qos_entries = rsp_params->qos_entries;
-+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
-+ attr->qos_key_size = rsp_params->qos_key_size;
-+ attr->fs_key_size = rsp_params->fs_key_size;
-+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
++/* Supported Rx distribution field ids */
++#define DPAA2_ETH_DIST_ETHSRC BIT(0)
++#define DPAA2_ETH_DIST_ETHDST BIT(1)
++#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
++#define DPAA2_ETH_DIST_VLAN BIT(3)
++#define DPAA2_ETH_DIST_IPSRC BIT(4)
++#define DPAA2_ETH_DIST_IPDST BIT(5)
++#define DPAA2_ETH_DIST_IPPROTO BIT(6)
++#define DPAA2_ETH_DIST_L4SRC BIT(7)
++#define DPAA2_ETH_DIST_L4DST BIT(8)
++#define DPAA2_ETH_DIST_ALL (~0U)
+
-+ return 0;
-+}
++/* Default Rx hash key */
++#define DPAA2_ETH_DIST_DEFAULT_HASH \
++ (DPAA2_ETH_DIST_IPPROTO | \
++ DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST | \
++ DPAA2_ETH_DIST_L4SRC | DPAA2_ETH_DIST_L4DST)
+
-+/**
-+ * dpni_set_errors_behavior() - Set errors behavior
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Errors configuration
-+ *
-+ * this function may be called numerous times with different
-+ * error masks
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_error_cfg *cfg)
++#define dpaa2_eth_hash_enabled(priv) \
++ ((priv)->dpni_attrs.num_queues > 1)
++
++#define dpaa2_eth_fs_enabled(priv) \
++ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
++
++#define dpaa2_eth_fs_mask_enabled(priv) \
++ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
++
++#define dpaa2_eth_fs_count(priv) \
++ ((priv)->dpni_attrs.fs_entries)
++
++/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
++#define DPAA2_CLASSIFIER_DMA_SIZE 256
++
++extern const struct ethtool_ops dpaa2_ethtool_ops;
++extern const char dpaa2_eth_drv_version[];
++
++static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
++ u16 ver_major, u16 ver_minor)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_errors_behavior *cmd_params;
++ if (priv->dpni_ver_major == ver_major)
++ return priv->dpni_ver_minor - ver_minor;
++ return priv->dpni_ver_major - ver_major;
++}
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
-+ cmd_params->errors = cpu_to_le32(cfg->errors);
-+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
-+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
++#define DPNI_DIST_KEY_VER_MAJOR 7
++#define DPNI_DIST_KEY_VER_MINOR 5
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static inline bool dpaa2_eth_has_legacy_dist(struct dpaa2_eth_priv *priv)
++{
++ return (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DIST_KEY_VER_MAJOR,
++ DPNI_DIST_KEY_VER_MINOR) < 0);
+}
+
-+/**
-+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue to retrieve configuration for
-+ * @layout: Returns buffer layout attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
++ * the buffer also needs space for its shared info struct, and we need
++ * to allocate enough to accommodate hardware alignment restrictions
+ */
-+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ struct dpni_buffer_layout *layout)
++static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_buffer_layout *cmd_params;
-+ struct dpni_rsp_get_buffer_layout *rsp_params;
-+ int err;
++ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
++}
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
-+ cmd_params->qtype = qtype;
++/* Total headroom needed by the hardware in Tx frame buffers */
++static inline unsigned int
++dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb)
++{
++ unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ /* If we don't have an skb (e.g. XDP buffer), we only need space for
++ * the software annotation area
++ */
++ if (!skb)
++ return headroom;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
-+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
-+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
-+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
-+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
-+ layout->data_align = le16_to_cpu(rsp_params->data_align);
-+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
-+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
++ /* For non-linear skbs we have no headroom requirement, as we build a
++ * SG frame with a newly allocated SGT buffer
++ */
++ if (skb_is_nonlinear(skb))
++ return 0;
+
-+ return 0;
++ /* If we have Tx timestamping, need 128B hardware annotation */
++ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
++ headroom += DPAA2_ETH_TX_HWA_SIZE;
++
++ return headroom;
+}
+
-+/**
-+ * dpni_set_buffer_layout() - Set buffer layout configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue this configuration applies to
-+ * @layout: Buffer layout configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
++/* Extra headroom space requested to hardware, in order to make sure there's
++ * no realloc'ing in forwarding scenarios. We need to reserve enough space
++ * such that we can accommodate the maximum required Tx offset and alignment
++ * in the ingress frame buffer
+ */
-+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ const struct dpni_buffer_layout *layout)
++static inline unsigned int dpaa2_eth_rx_headroom(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_buffer_layout *cmd_params;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->options = cpu_to_le16(layout->options);
-+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
-+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
-+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
-+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
-+ cmd_params->data_align = cpu_to_le16(layout->data_align);
-+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
-+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
-+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
++ DPAA2_ETH_RX_HWA_SIZE;
+}
+
-+/**
-+ * dpni_set_offload() - Set DPNI offload configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @type: Type of DPNI offload
-+ * @config: Offload configuration.
-+ * For checksum offloads, non-zero value enables the offload
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ *
-+ * @warning Allowed only when DPNI is disabled
-+ */
-+
-+int dpni_set_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 config)
++static inline int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_offload *cmd_params;
-+
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
-+ cmd_params->dpni_offload = type;
-+ cmd_params->config = cpu_to_le32(config);
-+
-+ return mc_send_command(mc_io, &cmd);
++ return priv->dpni_attrs.num_queues;
+}
+
-+int dpni_get_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 *config)
++static inline int dpaa2_eth_tc_count(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_offload *cmd_params;
-+ struct dpni_rsp_get_offload *rsp_params;
-+ int err;
++ return priv->dpni_attrs.num_tcs;
++}
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
-+ cmd_params->dpni_offload = type;
++static inline bool dpaa2_eth_is_pfc_enabled(struct dpaa2_eth_priv *priv,
++ int traffic_class)
++{
++ return priv->pfc.pfc_en & (1 << traffic_class);
++}
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++enum dpaa2_eth_td_cfg {
++ DPAA2_ETH_TD_NONE,
++ DPAA2_ETH_TD_QUEUE,
++ DPAA2_ETH_TD_GROUP
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
-+ *config = le32_to_cpu(rsp_params->config);
++static inline enum dpaa2_eth_td_cfg
++dpaa2_eth_get_td_type(struct dpaa2_eth_priv *priv)
++{
++ bool pfc_enabled = !!(priv->pfc.pfc_en);
+
-+ return 0;
++ if (pfc_enabled)
++ return DPAA2_ETH_TD_GROUP;
++ else if (priv->tx_pause_frames)
++ return DPAA2_ETH_TD_NONE;
++ else
++ return DPAA2_ETH_TD_QUEUE;
+}
+
-+/**
-+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
-+ * for enqueue operations
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue to receive QDID for
-+ * @qdid: Returned virtual QDID value that should be used as an argument
-+ * in all enqueue operations
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u16 *qdid)
++static inline int dpaa2_eth_ch_count(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_qdid *cmd_params;
-+ struct dpni_rsp_get_qdid *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
-+ cmd_params->qtype = qtype;
++ return 1;
++}
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++void check_cls_support(struct dpaa2_eth_priv *priv);
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
-+ *qdid = le16_to_cpu(rsp_params->qdid);
++int set_rx_taildrop(struct dpaa2_eth_priv *priv);
+
-+ return 0;
-+}
++int dpaa2_eth_set_dist_key(struct dpaa2_eth_priv *priv,
++ enum dpaa2_eth_rx_dist type, u32 key_fields);
+
-+/**
-+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @data_offset: Tx data offset (from start of buffer)
++#endif /* __DPAA2_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+@@ -0,0 +1,878 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2016-2017 NXP
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *data_offset)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_tx_data_offset *rsp_params;
-+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
-+ cmd_flags,
-+ token);
++#include "dpni.h" /* DPNI_LINK_OPT_* */
++#include "dpaa2-eth.h"
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* To be kept in sync with DPNI statistics */
++static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
++ "[hw] rx frames",
++ "[hw] rx bytes",
++ "[hw] rx mcast frames",
++ "[hw] rx mcast bytes",
++ "[hw] rx bcast frames",
++ "[hw] rx bcast bytes",
++ "[hw] tx frames",
++ "[hw] tx bytes",
++ "[hw] tx mcast frames",
++ "[hw] tx mcast bytes",
++ "[hw] tx bcast frames",
++ "[hw] tx bcast bytes",
++ "[hw] rx filtered frames",
++ "[hw] rx discarded frames",
++ "[hw] rx nobuffer discards",
++ "[hw] tx discarded frames",
++ "[hw] tx confirmed frames",
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
-+ *data_offset = le16_to_cpu(rsp_params->data_offset);
++#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
-+ return 0;
-+}
++static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
++ /* per-cpu stats */
++ "[drv] tx conf frames",
++ "[drv] tx conf bytes",
++ "[drv] tx sg frames",
++ "[drv] tx sg bytes",
++ "[drv] tx realloc frames",
++ "[drv] rx sg frames",
++ "[drv] rx sg bytes",
++ "[drv] enqueue portal busy",
++ /* Channel stats */
++ "[drv] dequeue portal busy",
++ "[drv] channel pull errors",
++ "[drv] cdan",
++ "[drv] tx congestion state",
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ /* FQ stats */
++ "rx pending frames",
++ "rx pending bytes",
++ "tx conf pending frames",
++ "tx conf pending bytes",
++ "buffer count"
++#endif
++};
+
-+/**
-+ * dpni_set_link_cfg() - set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg)
++#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
++
++static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
++ struct ethtool_drvinfo *drvinfo)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_link_cfg *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
++ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
++ sizeof(drvinfo->version));
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
++
++ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
+}
+
-+/**
-+ * dpni_get_link_state() - Return the link state (either up or down)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @state: Returned link state;
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state)
++static int
++dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
++ struct ethtool_link_ksettings *link_settings)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_link_state *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
++ struct dpni_link_state state = {0};
++ int err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "ERROR %d getting link state\n", err);
++ goto out;
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
-+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPNI side - and for that matter there may exist
++ * no DPMAC at all. So for now we just don't report anything
++ * beyond the DPNI attributes.
++ */
++ if (state.options & DPNI_LINK_OPT_AUTONEG)
++ link_settings->base.autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
++ link_settings->base.duplex = DUPLEX_FULL;
++ link_settings->base.speed = state.rate;
+
-+ return 0;
++out:
++ return err;
+}
+
-+/**
-+ * dpni_set_tx_shaping() - Set the transmit shaping
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tx_shaper: tx shaping configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper)
++#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
++#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
++static int
++dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
++ const struct ethtool_link_ksettings *link_settings)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_tx_shaping *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ int err = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
-+ cmd_params->max_burst_size = cpu_to_le16(tx_shaper->max_burst_size);
-+ cmd_params->rate_limit = cpu_to_le32(tx_shaper->rate_limit);
++ /* If using an older MC version, the DPNI must be down
++ * in order to be able to change link settings. Taking steps to let
++ * the user know that.
++ */
++ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
++ DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
++ if (netif_running(net_dev)) {
++ netdev_info(net_dev, "Interface must be brought down first.\n");
++ return -EACCES;
++ }
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ /* Need to interrogate link state to get flow control params */
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_err(net_dev, "Error getting link state\n");
++ goto out;
++ }
+
-+/**
-+ * dpni_set_max_frame_length() - Set the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_max_frame_length *cmd_params;
++ cfg.options = state.options;
++ cfg.rate = link_settings->base.speed;
++ if (link_settings->base.autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPNI_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
++ if (link_settings->base.duplex == DUPLEX_HALF)
++ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
-+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++out:
++ return err;
+}
+
-+/**
-+ * dpni_get_max_frame_length() - Get the maximum received frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @max_frame_length: Maximum received frame length (in
-+ * bytes); frame is discarded if its
-+ * length exceeds this value
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *max_frame_length)
++static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_max_frame_length *rsp_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
-+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
++ netdev_dbg(net_dev, "Error getting link state\n");
+
-+ return 0;
++ /* Report general port autonegotiation status */
++ pause->autoneg = !!(state.options & DPNI_LINK_OPT_AUTONEG);
++ pause->rx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE);
++ pause->tx_pause = pause->rx_pause ^
++ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+}
+
-+/**
-+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en)
++static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
++ struct ethtool_pauseparam *pause)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_multicast_promisc *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpni_link_state state = {0};
++ struct dpni_link_cfg cfg = {0};
++ u32 current_tx_pause;
++ int err = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
++ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
++ if (err) {
++ netdev_dbg(net_dev, "Error getting link state\n");
++ goto out;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ cfg.rate = state.rate;
++ cfg.options = state.options;
++ current_tx_pause = !!(cfg.options & DPNI_LINK_OPT_PAUSE) ^
++ !!(cfg.options & DPNI_LINK_OPT_ASYM_PAUSE);
+
-+/**
-+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_multicast_promisc *rsp_params;
-+ int err;
++ /* We don't support changing pause frame autonegotiation separately
++ * from general port autoneg
++ */
++ if (pause->autoneg != !!(state.options & DPNI_LINK_OPT_AUTONEG))
++ netdev_warn(net_dev,
++ "Cannot change pause frame autoneg separately\n");
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
-+ cmd_flags,
-+ token);
++ if (pause->rx_pause)
++ cfg.options |= DPNI_LINK_OPT_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (pause->rx_pause ^ pause->tx_pause)
++ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
++ else
++ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
++ if (err) {
++ netdev_dbg(net_dev, "Error setting link\n");
++ goto out;
++ }
+
-+ return 0;
++ /* Enable/disable Rx FQ taildrop if Tx pause frames have changed */
++ if (current_tx_pause == pause->tx_pause)
++ goto out;
++
++ priv->tx_pause_frames = pause->tx_pause;
++ err = set_rx_taildrop(priv);
++ if (err)
++ netdev_dbg(net_dev, "Error configuring taildrop\n");
++
++out:
++ return err;
+}
+
-+/**
-+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Set to '1' to enable; '0' to disable
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en)
++static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_unicast_promisc *cmd_params;
++ u8 *p = data;
++ int i;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
-+ dpni_set_field(cmd_params->enable, ENABLE, en);
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
++ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ break;
++ }
++}
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
++ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
++ default:
++ return -EOPNOTSUPP;
++ }
+}
+
-+/**
-+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @en: Returns '1' if enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise.
++/** Fill in hardware counters, as returned by MC.
+ */
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
++static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
++ struct ethtool_stats *stats,
++ u64 *data)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_unicast_promisc *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
-+ cmd_flags,
-+ token);
++ int i = 0;
++ int j, k, err;
++ int num_cnt;
++ union dpni_statistics dpni_stats;
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ u32 fcnt, bcnt;
++ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
++ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
++ u32 buf_cnt;
++#endif
++ u64 cdan = 0;
++ u64 portal_busy = 0, pull_err = 0;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct dpaa2_eth_drv_stats *extras;
++ struct dpaa2_eth_ch_stats *ch_stats;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
-+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++ memset(data, 0,
++ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
-+ return 0;
-+}
++ /* Print standard counters, from DPNI statistics */
++ for (j = 0; j <= 2; j++) {
++ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
++ j, 0, &dpni_stats);
++ if (err != 0)
++ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
++ switch (j) {
++ case 0:
++ num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
++ break;
++ case 1:
++ num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
++ break;
++ case 2:
++ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
++ break;
++ }
++ for (k = 0; k < num_cnt; k++)
++ *(data + i++) = dpni_stats.raw.counter[k];
++ }
+
-+/**
-+ * dpni_set_primary_mac_addr() - Set the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to set as primary address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
-+ int i;
++ /* Print per-cpu extra stats */
++ for_each_online_cpu(k) {
++ extras = per_cpu_ptr(priv->percpu_extras, k);
++ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
++ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
++ }
++ i += j;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ for (j = 0; j < priv->num_channels; j++) {
++ ch_stats = &priv->channel[j]->stats;
++ cdan += ch_stats->cdan;
++ portal_busy += ch_stats->dequeue_portal_busy;
++ pull_err += ch_stats->pull_err;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ *(data + i++) = portal_busy;
++ *(data + i++) = pull_err;
++ *(data + i++) = cdan;
+
-+/**
-+ * dpni_get_primary_mac_addr() - Get the primary MAC address
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: Returned MAC address
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6])
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
-+ int i, err;
++ *(data + i++) = dpaa2_cscn_state_congested(priv->cscn_mem);
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
-+ cmd_flags,
-+ token);
++#ifdef CONFIG_FSL_QBMAN_DEBUG
++ for (j = 0; j < priv->num_fqs; j++) {
++ /* Print FQ instantaneous counts */
++ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
++ &fcnt, &bcnt);
++ if (err) {
++ netdev_warn(net_dev, "FQ query error %d", err);
++ return;
++ }
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
++ fcnt_tx_total += fcnt;
++ bcnt_tx_total += bcnt;
++ } else {
++ fcnt_rx_total += fcnt;
++ bcnt_rx_total += bcnt;
++ }
++ }
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ mac_addr[5 - i] = rsp_params->mac_addr[i];
++ *(data + i++) = fcnt_rx_total;
++ *(data + i++) = bcnt_rx_total;
++ *(data + i++) = fcnt_tx_total;
++ *(data + i++) = bcnt_tx_total;
+
-+ return 0;
++ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
++ if (err) {
++ netdev_warn(net_dev, "Buffer count query error %d\n", err);
++ return;
++ }
++ *(data + i++) = buf_cnt;
++#endif
+}
+
-+/**
-+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
-+ * port the DPNI is attached to
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
-+ *
-+ * The primary MAC address is not cleared by this operation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6])
++static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_rsp_get_port_mac_addr *rsp_params;
-+ int i, err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ int i, off = 0;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ mac_addr[5 - i] = rsp_params->mac_addr[i];
++ for (i = 0; i < priv->num_dist_fields; i++) {
++ if (priv->dist_fields[i].cls_prot == prot &&
++ priv->dist_fields[i].cls_field == field)
++ return off;
++ off += priv->dist_fields[i].size;
++ }
+
-+ return 0;
++ return -1;
+}
+
-+/**
-+ * dpni_add_mac_addr() - Add MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to add
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
++static u8 cls_key_size(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_add_mac_addr *cmd_params;
-+ int i;
++ u8 i, size = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ for (i = 0; i < priv->num_dist_fields; i++)
++ size += priv->dist_fields[i].size;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return size;
+}
+
-+/**
-+ * dpni_remove_mac_addr() - Remove MAC address filter
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @mac_addr: MAC address to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6])
++void check_cls_support(struct dpaa2_eth_priv *priv)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_remove_mac_addr *cmd_params;
-+ int i;
++ u8 key_size = cls_key_size(priv);
++ struct device *dev = priv->net_dev->dev.parent;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = mac_addr[5 - i];
++ if (dpaa2_eth_hash_enabled(priv)) {
++ if (priv->dpni_attrs.fs_key_size < key_size) {
++ dev_info(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n",
++ priv->dpni_attrs.fs_key_size,
++ key_size);
++ goto disable_fs;
++ }
++ if (priv->num_dist_fields > DPKG_MAX_NUM_OF_EXTRACTS) {
++ dev_info(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n",
++ DPKG_MAX_NUM_OF_EXTRACTS);
++ goto disable_fs;
++ }
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (dpaa2_eth_fs_enabled(priv)) {
++ if (!dpaa2_eth_hash_enabled(priv)) {
++ dev_info(dev, "Insufficient queues. Steering is disabled\n");
++ goto disable_fs;
++ }
+
-+/**
-+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @unicast: Set to '1' to clear unicast addresses
-+ * @multicast: Set to '1' to clear multicast addresses
-+ *
-+ * The primary MAC address is not cleared by this operation.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int unicast,
-+ int multicast)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_clear_mac_filters *cmd_params;
++ if (!dpaa2_eth_fs_mask_enabled(priv)) {
++ dev_info(dev, "Key masks not supported. Steering is disabled\n");
++ goto disable_fs;
++ }
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
-+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
-+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
++ return;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++disable_fs:
++ priv->dpni_attrs.options |= DPNI_OPT_NO_FS;
++ priv->dpni_attrs.options &= ~DPNI_OPT_HAS_KEY_MASKING;
+}
+
-+/**
-+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Traffic class distribution configuration
-+ *
-+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
-+ * first to prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg)
++static int prep_l4_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_tcpip4_spec *l4_value,
++ struct ethtool_tcpip4_spec *l4_mask,
++ void *key, void *mask, u8 l4_proto)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
-+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
-+ cmd_params->tc_id = tc_id;
-+ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
-+ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
-+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++ if (l4_mask->tos) {
++ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n");
++ return -EOPNOTSUPP;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (l4_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = l4_value->ip4src;
++ *(u32 *)(mask + offset) = l4_mask->ip4src;
++ }
+
-+/*
-+ * dpni_set_qos_table() - Set QoS mapping table
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS table configuration
-+ *
-+ * This function and all QoS-related functions require that
-+ *'max_tcs > 1' was set at DPNI creation.
-+ *
-+ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
-+ * prepare the key_cfg_iova parameter
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_qos_tbl_cfg *cfg)
-+{
-+ struct dpni_cmd_set_qos_table *cmd_params;
-+ struct mc_command cmd = { 0 };
++ if (l4_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = l4_value->ip4dst;
++ *(u32 *)(mask + offset) = l4_mask->ip4dst;
++ }
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
-+ cmd_params->default_tc = cfg->default_tc;
-+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
-+ dpni_set_field(cmd_params->discard_on_miss,
-+ ENABLE,
-+ cfg->discard_on_miss);
++ if (l4_mask->psrc) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u32 *)(key + offset) = l4_value->psrc;
++ *(u32 *)(mask + offset) = l4_mask->psrc;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ if (l4_mask->pdst) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u32 *)(key + offset) = l4_value->pdst;
++ *(u32 *)(mask + offset) = l4_mask->pdst;
++ }
+
-+/**
-+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cfg: QoS rule to add
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the QoS table where to insert the entry.
-+ * Only relevant if MASKING is enabled for QoS classification on
-+ * this DPNI, it is ignored for exact match.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg,
-+ u8 tc_id,
-+ u16 index)
-+{
-+ struct dpni_cmd_add_qos_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ /* Only apply the rule for the user-specified L4 protocol
++ * and if ethertype matches IPv4
++ */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u8 *)(key + offset) = l4_proto;
++ *(u8 *)(mask + offset) = 0xFF;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ /* TODO: check IP version */
++
++ return 0;
+}
+
-+/**
-+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
-+ * (to select a flow ID)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @index: Location in the QoS table where to insert the entry.
-+ * Only relevant if MASKING is enabled for QoS
-+ * classification on this DPNI, it is ignored for exact match.
-+ * @cfg: Flow steering rule to add
-+ * @action: Action to be taken as result of a classification hit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action)
++static int prep_eth_rule(struct dpaa2_eth_priv *priv,
++ struct ethhdr *eth_value, struct ethhdr *eth_mask,
++ void *key, void *mask)
+{
-+ struct dpni_cmd_add_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->index = cpu_to_le16(index);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
-+ cmd_params->options = cpu_to_le16(action->options);
-+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
-+ cmd_params->flc = cpu_to_le64(action->flc);
++ if (eth_mask->h_proto) {
++ netdev_err(priv->net_dev, "Ethertype is not supported!\n");
++ return -EOPNOTSUPP;
++ }
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ if (!is_zero_ether_addr(eth_mask->h_source)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA);
++ ether_addr_copy(key + offset, eth_value->h_source);
++ ether_addr_copy(mask + offset, eth_mask->h_source);
++ }
++
++ if (!is_zero_ether_addr(eth_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, eth_value->h_dest);
++ ether_addr_copy(mask + offset, eth_mask->h_dest);
++ }
++
++ return 0;
+}
+
-+/**
-+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
-+ * traffic class
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: Flow steering rule to remove
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg)
++static int prep_user_ip_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_usrip4_spec *uip_value,
++ struct ethtool_usrip4_spec *uip_mask,
++ void *key, void *mask)
+{
-+ struct dpni_cmd_remove_fs_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->key_size = cfg->key_size;
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
-+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ if (uip_mask->tos)
++ return -EOPNOTSUPP;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ if (uip_mask->ip4src) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC);
++ *(u32 *)(key + offset) = uip_value->ip4src;
++ *(u32 *)(mask + offset) = uip_mask->ip4src;
++ }
++
++ if (uip_mask->ip4dst) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST);
++ *(u32 *)(key + offset) = uip_value->ip4dst;
++ *(u32 *)(mask + offset) = uip_mask->ip4dst;
++ }
++
++ if (uip_mask->proto) {
++ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO);
++ *(u32 *)(key + offset) = uip_value->proto;
++ *(u32 *)(mask + offset) = uip_mask->proto;
++ }
++ if (uip_mask->l4_4_bytes) {
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
++ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16;
++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16;
++
++ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
++ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF;
++ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF;
++ }
++
++ /* Ethertype must be IP */
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE);
++ *(u16 *)(key + offset) = htons(ETH_P_IP);
++ *(u16 *)(mask + offset) = 0xFFFF;
++
++ return 0;
+}
+
-+/**
-+ * dpni_set_congestion_notification() - Set traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg)
++static int prep_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
+{
-+ struct dpni_cmd_set_congestion_notification *cmd_params;
-+ struct mc_command cmd = { 0 };
++ int offset;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc_id;
-+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
-+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
-+ cmd_params->dest_priority = cfg->dest_cfg.priority;
-+ dpni_set_field(cmd_params->type_units, DEST_TYPE,
-+ cfg->dest_cfg.dest_type);
-+ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
-+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
-+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
-+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
-+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
++ if (ext_mask->vlan_etype)
++ return -EOPNOTSUPP;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ if (ext_mask->vlan_tci) {
++ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI);
++ *(u16 *)(key + offset) = ext_value->vlan_tci;
++ *(u16 *)(mask + offset) = ext_mask->vlan_tci;
++ }
++
++ return 0;
+}
+
-+/**
-+ * dpni_get_congestion_notification() - Get traffic class congestion
-+ * notification configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
-+ * @tc_id: Traffic class selection (0-7)
-+ * @cfg: congestion notification configuration
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpni_get_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ struct dpni_congestion_notification_cfg *cfg)
++static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv,
++ struct ethtool_flow_ext *ext_value,
++ struct ethtool_flow_ext *ext_mask,
++ void *key, void *mask)
+{
-+ struct dpni_rsp_get_congestion_notification *rsp_params;
-+ struct dpni_cmd_get_congestion_notification *cmd_params;
-+ struct mc_command cmd = { 0 };
++ int offset;
++
++ if (!is_zero_ether_addr(ext_mask->h_dest)) {
++ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA);
++ ether_addr_copy(key + offset, ext_value->h_dest);
++ ether_addr_copy(mask + offset, ext_mask->h_dest);
++ }
++
++ return 0;
++}
++
++static int prep_cls_rule(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ void *key)
++{
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const u8 key_size = cls_key_size(priv);
++ void *msk = key + key_size;
+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(
-+ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc_id;
++ memset(key, 0, key_size * 2);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
++ switch (fs->flow_type & 0xff) {
++ case TCP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec,
++ &fs->m_u.tcp_ip4_spec, key, msk,
++ IPPROTO_TCP);
++ break;
++ case UDP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec,
++ &fs->m_u.udp_ip4_spec, key, msk,
++ IPPROTO_UDP);
++ break;
++ case SCTP_V4_FLOW:
++ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec,
++ &fs->m_u.sctp_ip4_spec, key, msk,
++ IPPROTO_SCTP);
++ break;
++ case ETHER_FLOW:
++ err = prep_eth_rule(priv, &fs->h_u.ether_spec,
++ &fs->m_u.ether_spec, key, msk);
++ break;
++ case IP_USER_FLOW:
++ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec,
++ &fs->m_u.usr_ip4_spec, key, msk);
++ break;
++ default:
++ /* TODO: AH, ESP */
++ return -EOPNOTSUPP;
++ }
+ if (err)
+ return err;
+
-+ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
-+ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
-+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
-+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
-+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
-+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
-+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
-+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
-+ cfg->dest_cfg.priority = rsp_params->dest_priority;
-+ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
-+ DEST_TYPE);
++ if (fs->flow_type & FLOW_EXT) {
++ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
++
++ if (fs->flow_type & FLOW_MAC_EXT) {
++ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk);
++ if (err)
++ return err;
++ }
+
+ return 0;
+}
+
-+/**
-+ * dpni_set_queue() - Set queue parameters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - all queue types are supported, although
-+ * the command is ignored for Tx
-+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
-+ * @index: Selects the specific queue out of the set allocated for the
-+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
-+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
-+ * configuration options are set on the queue
-+ * @queue: Queue structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ u8 options,
-+ const struct dpni_queue *queue)
++static int del_cls(struct net_device *net_dev, int location);
++
++static int do_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs,
++ bool add)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_queue *cmd_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ struct device *dev = net_dev->dev.parent;
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ struct dpni_rule_cfg rule_cfg;
++ struct dpni_fs_action_cfg fs_act = { 0 };
++ void *dma_mem;
++ int err = 0, tc;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+ cmd_params->options = options;
-+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
-+ cmd_params->dest_prio = queue->destination.priority;
-+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
-+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
-+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
-+ queue->destination.hold_active);
-+ cmd_params->flc = cpu_to_le64(queue->flc.value);
-+ cmd_params->user_context = cpu_to_le64(queue->user_context);
++ if (!dpaa2_eth_fs_enabled(priv)) {
++ netdev_err(net_dev, "dev does not support steering!\n");
++ /* dev doesn't support steering */
++ return -EOPNOTSUPP;
++ }
+
-+ /* send command to mc */
-+ return mc_send_command(mc_io, &cmd);
++ if ((fs->ring_cookie != RX_CLS_FLOW_DISC &&
++ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) ||
++ fs->location >= rule_cnt)
++ return -EINVAL;
++
++ /* When adding a new rule, check if location if available
++ * and if not, free the existing table entry before inserting
++ * the new one
++ */
++ if (add && (priv->cls_rule[fs->location].in_use == true))
++ del_cls(net_dev, fs->location);
++
++ memset(&rule_cfg, 0, sizeof(rule_cfg));
++ rule_cfg.key_size = cls_key_size(priv);
++
++ /* allocate twice the key size, for the actual key and for mask */
++ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL);
++ if (!dma_mem)
++ return -ENOMEM;
++
++ err = prep_cls_rule(net_dev, fs, dma_mem);
++ if (err)
++ goto err_free_mem;
++
++ rule_cfg.key_iova = dma_map_single(dev, dma_mem,
++ rule_cfg.key_size * 2,
++ DMA_TO_DEVICE);
++
++ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size;
++
++ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
++ fs_act.options |= DPNI_FS_OPT_DISCARD;
++ else
++ fs_act.flow_id = fs->ring_cookie;
++
++ for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
++ if (add)
++ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
++ tc, fs->location, &rule_cfg,
++ &fs_act);
++ else
++ err = dpni_remove_fs_entry(priv->mc_io, 0,
++ priv->mc_token, tc,
++ &rule_cfg);
++
++ if (err)
++ break;
++ }
++
++ dma_unmap_single(dev, rule_cfg.key_iova,
++ rule_cfg.key_size * 2, DMA_TO_DEVICE);
++
++ if (err)
++ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err);
++
++err_free_mem:
++ kfree(dma_mem);
++
++ return err;
+}
+
-+/**
-+ * dpni_get_queue() - Get queue parameters
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @qtype: Type of queue - all queue types are supported
-+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
-+ * @index: Selects the specific queue out of the set allocated for the
-+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
-+ * @queue: Queue configuration structure
-+ * @qid: Queue identification
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_queue *queue,
-+ struct dpni_queue_id *qid)
++static int add_cls(struct net_device *net_dev,
++ struct ethtool_rx_flow_spec *fs)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_queue *cmd_params;
-+ struct dpni_rsp_get_queue *rsp_params;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
++ err = do_cls(net_dev, fs, true);
+ if (err)
+ return err;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
-+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
-+ queue->destination.priority = rsp_params->dest_prio;
-+ queue->destination.type = dpni_get_field(rsp_params->flags,
-+ DEST_TYPE);
-+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
-+ STASH_CTRL);
-+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
-+ HOLD_ACTIVE);
-+ queue->flc.value = le64_to_cpu(rsp_params->flc);
-+ queue->user_context = le64_to_cpu(rsp_params->user_context);
-+ qid->fqid = le32_to_cpu(rsp_params->fqid);
-+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
++ priv->cls_rule[fs->location].in_use = true;
++ priv->cls_rule[fs->location].fs = *fs;
+
+ return 0;
+}
+
-+/**
-+ * dpni_get_statistics() - Get DPNI statistics
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @page: Selects the statistics page to retrieve, see
-+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
-+ * @stat: Structure containing the statistics
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 page,
-+ union dpni_statistics *stat)
++static int del_cls(struct net_device *net_dev, int location)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_statistics *cmd_params;
-+ struct dpni_rsp_get_statistics *rsp_params;
-+ int i, err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
-+ cmd_params->page_number = page;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ int err;
+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
++ err = do_cls(net_dev, &priv->cls_rule[location].fs, false);
+ if (err)
+ return err;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
-+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
-+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
++ priv->cls_rule[location].in_use = false;
+
+ return 0;
+}
+
-+/**
-+ * dpni_reset_statistics() - Clears DPNI statistics
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++static int set_hash(struct net_device *net_dev, u64 data)
+{
-+ struct mc_command cmd = { 0 };
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ u32 key = 0;
++ int i;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
-+ cmd_flags,
-+ token);
++ if (data & RXH_DISCARD)
++ return -EOPNOTSUPP;
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ for (i = 0; i < priv->num_dist_fields; i++)
++ if (priv->dist_fields[i].rxnfc_field & data)
++ key |= priv->dist_fields[i].id;
++
++ return dpaa2_eth_set_dist_key(priv, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
-+/**
-+ * dpni_set_taildrop() - Set taildrop per queue or TC
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cg_point: Congestion point
-+ * @q_type: Queue type on which the taildrop is configured.
-+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
-+ * @q_index: Index of the queue if the DPNI supports multiple queues for
-+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
-+ * @taildrop: Taildrop structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_taildrop *taildrop)
++static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_set_taildrop *cmd_params;
++ int err = 0;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
-+ cmd_params->congestion_point = cg_point;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
-+ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
-+ cmd_params->units = taildrop->units;
-+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
++ switch (rxnfc->cmd) {
++ case ETHTOOL_SRXCLSRLINS:
++ err = add_cls(net_dev, &rxnfc->fs);
++ break;
++ case ETHTOOL_SRXCLSRLDEL:
++ err = del_cls(net_dev, rxnfc->fs.location);
++ break;
++ case ETHTOOL_SRXFH:
++ err = set_hash(net_dev, rxnfc->data);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ }
+
-+ /* send command to mc */
-+ return mc_send_command(mc_io, &cmd);
++ return err;
+}
+
-+/**
-+ * dpni_get_taildrop() - Get taildrop information
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPNI object
-+ * @cg_point: Congestion point
-+ * @q_type: Queue type on which the taildrop is configured.
-+ * Only Rx queues are supported for now
-+ * @tc: Traffic class to apply this taildrop to
-+ * @q_index: Index of the queue if the DPNI supports multiple queues for
-+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
-+ * @taildrop: Taildrop structure
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_taildrop *taildrop)
++static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
++ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpni_cmd_get_taildrop *cmd_params;
-+ struct dpni_rsp_get_taildrop *rsp_params;
-+ int err;
++ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
++ const int rule_cnt = dpaa2_eth_fs_count(priv);
++ int i, j;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
-+ cmd_params->congestion_point = cg_point;
-+ cmd_params->qtype = qtype;
-+ cmd_params->tc = tc;
-+ cmd_params->index = index;
++ switch (rxnfc->cmd) {
++ case ETHTOOL_GRXFH:
++ /* we purposely ignore cmd->flow_type for now, because the
++ * classifier only supports a single set of fields for all
++ * protocols
++ */
++ rxnfc->data = priv->rx_hash_fields;
++ break;
++ case ETHTOOL_GRXRINGS:
++ rxnfc->data = dpaa2_eth_queue_count(priv);
++ break;
+
-+ /* send command to mc */
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++ case ETHTOOL_GRXCLSRLCNT:
++ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++)
++ if (priv->cls_rule[i].in_use)
++ rxnfc->rule_cnt++;
++ rxnfc->data = rule_cnt;
++ break;
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
-+ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
-+ taildrop->units = rsp_params->units;
-+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
++ case ETHTOOL_GRXCLSRULE:
++ if (!priv->cls_rule[rxnfc->fs.location].in_use)
++ return -EINVAL;
++
++ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs;
++ break;
++
++ case ETHTOOL_GRXCLSRLALL:
++ for (i = 0, j = 0; i < rule_cnt; i++) {
++ if (!priv->cls_rule[i].in_use)
++ continue;
++ if (j == rxnfc->rule_cnt)
++ return -EMSGSIZE;
++ rule_locs[j++] = i;
++ }
++ rxnfc->rule_cnt = j;
++ rxnfc->data = rule_cnt;
++ break;
++
++ default:
++ return -EOPNOTSUPP;
++ }
+
+ return 0;
+}
++
++const struct ethtool_ops dpaa2_ethtool_ops = {
++ .get_drvinfo = dpaa2_eth_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
++ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
++ .get_pauseparam = dpaa2_eth_get_pauseparam,
++ .set_pauseparam = dpaa2_eth_set_pauseparam,
++ .get_sset_count = dpaa2_eth_get_sset_count,
++ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
++ .get_strings = dpaa2_eth_get_strings,
++ .get_rxnfc = dpaa2_eth_get_rxnfc,
++ .set_rxnfc = dpaa2_eth_set_rxnfc,
++};
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
-@@ -0,0 +1,1053 @@
-+/* Copyright 2013-2016 Freescale Semiconductor Inc.
-+ * Copyright 2016 NXP
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+@@ -0,0 +1,176 @@
++/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#ifndef __FSL_DPNI_H
-+#define __FSL_DPNI_H
-+
-+#include "dpkg.h"
++#ifndef __FSL_DPKG_H_
++#define __FSL_DPKG_H_
+
-+struct fsl_mc_io;
++#include <linux/types.h>
++#include "net.h"
+
-+/**
-+ * Data Path Network Interface API
-+ * Contains initialization APIs and runtime control APIs for DPNI
++/* Data Path Key Generator API
++ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
-+/** General DPNI macros */
++/** Key Generator properties */
+
+/**
-+ * Maximum number of traffic classes
-+ */
-+#define DPNI_MAX_TC 8
-+/**
-+ * Maximum number of buffer pools per DPNI
-+ */
-+#define DPNI_MAX_DPBP 8
-+/**
-+ * Maximum number of senders
++ * Number of masks per key extraction
+ */
-+#define DPNI_MAX_SENDERS 8
++#define DPKG_NUM_OF_MASKS 4
+/**
-+ * Maximum distribution size
++ * Number of extractions per key profile
+ */
-+#define DPNI_MAX_DIST_SIZE 8
++#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
-+ * All traffic classes considered; see dpni_set_queue()
-+ */
-+#define DPNI_ALL_TCS (u8)(-1)
-+/**
-+ * All flows within traffic class considered; see dpni_set_queue()
-+ */
-+#define DPNI_ALL_TC_FLOWS (u16)(-1)
-+/**
-+ * Generate new flow ID; see dpni_set_queue()
++ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
++ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
++ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
++ * @DPKG_FULL_FIELD: Extract a full field
+ */
-+#define DPNI_NEW_FLOW_ID (u16)(-1)
++enum dpkg_extract_from_hdr_type {
++ DPKG_FROM_HDR = 0,
++ DPKG_FROM_FIELD = 1,
++ DPKG_FULL_FIELD = 2
++};
+
+/**
-+ * Tx traffic is always released to a buffer pool on transmit, there are no
-+ * resources allocated to have the frames confirmed back to the source after
-+ * transmission.
-+ */
-+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
-+/**
-+ * Disables support for MAC address filtering for addresses other than primary
-+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
-+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
-+ * is disabled, only traffic matching the primary MAC address will be accepted.
-+ */
-+#define DPNI_OPT_NO_MAC_FILTER 0x000002
-+/**
-+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
-+ * traffic class (TC) basis.
-+ */
-+#define DPNI_OPT_HAS_POLICING 0x000004
-+/**
-+ * Congestion can be managed in several ways, allowing the buffer pool to
-+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
-+ * of queues. If set, it configures a single congestion groups across all TCs.
-+ * If reset, a congestion group is allocated for each TC. Only relevant if the
-+ * DPNI has multiple traffic classes.
-+ */
-+#define DPNI_OPT_SHARED_CONGESTION 0x000008
-+/**
-+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
-+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
-+ * variants. Setting this bit on these SoCs will trigger an error.
++ * enum dpkg_extract_type - Enumeration for selecting extraction type
++ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
++ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
++ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
++ * e.g. can be used to extract header existence;
++ * please refer to 'Parse Result definition' section in the parser BG
+ */
-+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
++enum dpkg_extract_type {
++ DPKG_EXTRACT_FROM_HDR = 0,
++ DPKG_EXTRACT_FROM_DATA = 1,
++ DPKG_EXTRACT_FROM_PARSE = 3
++};
++
+/**
-+ * Disables the flow steering table.
++ * struct dpkg_mask - A structure for defining a single extraction mask
++ * @mask: Byte mask for the extracted content
++ * @offset: Offset within the extracted content
+ */
-+#define DPNI_OPT_NO_FS 0x000020
-+
-+int dpni_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpni_id,
-+ u16 *token);
-+
-+int dpni_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++struct dpkg_mask {
++ u8 mask;
++ u8 offset;
++};
+
+/**
-+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
++ * struct dpkg_extract - A structure for defining a single extraction
++ * @type: Determines how the union below is interpreted:
++ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
++ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
++ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
++ * @extract: Selects extraction method
++ * @num_of_byte_masks: Defines the number of valid entries in the array below;
++ * This is also the number of bytes to be used as masks
++ * @masks: Masks parameters
+ */
-+struct dpni_pools_cfg {
-+ u8 num_dpbp;
++struct dpkg_extract {
++ enum dpkg_extract_type type;
+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @priority_mask: priorities served by DPBP
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
++ * union extract - Selects extraction method
++ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
-+ struct {
-+ u16 dpbp_id;
-+ u8 priority_mask;
-+ u16 buffer_size;
-+ u8 backup_pool;
-+ } pools[DPNI_MAX_DPBP];
-+};
-+
-+int dpni_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_pools_cfg *cfg);
-+
-+int dpni_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ union {
++ /**
++ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
++ * @prot: Any of the supported headers
++ * @type: Defines the type of header extraction:
++ * DPKG_FROM_HDR: use size & offset below;
++ * DPKG_FROM_FIELD: use field, size and offset below;
++ * DPKG_FULL_FIELD: use field below
++ * @field: One of the supported fields (NH_FLD_)
++ *
++ * @size: Size in bytes
++ * @offset: Byte offset
++ * @hdr_index: Clear for cases not listed below;
++ * Used for protocols that may have more than a single
++ * header, 0 indicates an outer header;
++ * Supported protocols (possible values):
++ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
++ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
++ * NET_PROT_IP(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
++ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
++ */
+
-+int dpni_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ struct {
++ enum net_prot prot;
++ enum dpkg_extract_from_hdr_type type;
++ u32 field;
++ u8 size;
++ u8 offset;
++ u8 hdr_index;
++ } from_hdr;
++ /**
++ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_data;
+
-+int dpni_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++ /**
++ * struct from_parse - Used when
++ * 'type = DPKG_EXTRACT_FROM_PARSE'
++ * @size: Size in bytes
++ * @offset: Byte offset
++ */
++ struct {
++ u8 size;
++ u8 offset;
++ } from_parse;
++ } extract;
+
-+int dpni_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++ u8 num_of_byte_masks;
++ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
++};
+
+/**
-+ * DPNI IRQ Index and Events
++ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
++ * profile (rule)
++ * @num_extracts: Defines the number of valid entries in the array below
++ * @extracts: Array of required extractions
+ */
++struct dpkg_profile_cfg {
++ u8 num_extracts;
++ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
+
-+/**
-+ * IRQ index
-+ */
-+#define DPNI_IRQ_INDEX 0
-+/**
-+ * IRQ event - indicates a change in link state
++#endif /* __FSL_DPKG_H_ */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+@@ -0,0 +1,719 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
++#ifndef _FSL_DPNI_CMD_H
++#define _FSL_DPNI_CMD_H
+
-+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en);
++#include "dpni.h"
+
-+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
++/* DPNI Version */
++#define DPNI_VER_MAJOR 7
++#define DPNI_VER_MINOR 0
++#define DPNI_CMD_BASE_VERSION 1
++#define DPNI_CMD_2ND_VERSION 2
++#define DPNI_CMD_ID_OFFSET 4
+
-+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask);
++#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
++#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+
-+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
++#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
++#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
++#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
++#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
++#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
-+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status);
++#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
++#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
++#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
++#define DPNI_CMDID_RESET DPNI_CMD(0x005)
++#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
-+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status);
++#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
++#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
++#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
++#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
++#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
++#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
++#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
++#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
-+/**
-+ * struct dpni_attr - Structure representing DPNI attributes
-+ * @options: Any combination of the following options:
-+ * DPNI_OPT_TX_FRM_RELEASE
-+ * DPNI_OPT_NO_MAC_FILTER
-+ * DPNI_OPT_HAS_POLICING
-+ * DPNI_OPT_SHARED_CONGESTION
-+ * DPNI_OPT_HAS_KEY_MASKING
-+ * DPNI_OPT_NO_FS
-+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
-+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
-+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
-+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
-+ * @qos_entries: Number of entries in the QoS classification table.
-+ * @fs_entries: Number of entries in the flow steering table.
-+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
-+ * than this when adding QoS entries will result in an error.
-+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
-+ * key larger than this when composing the hash + FS key will
-+ * result in an error.
-+ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
-+ * on 6, 5, 5 bits respectively.
-+ */
-+struct dpni_attr {
-+ u32 options;
-+ u8 num_queues;
-+ u8 num_tcs;
-+ u8 mac_filter_entries;
-+ u8 vlan_filter_entries;
-+ u8 qos_entries;
-+ u16 fs_entries;
-+ u8 qos_key_size;
-+ u8 fs_key_size;
-+ u16 wriop_version;
-+};
++#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200)
++#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
-+int dpni_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_attr *attr);
++#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
++#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
++#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
++#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
++#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
++#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
++#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+
-+/**
-+ * DPNI errors
-+ */
++#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
++#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
++#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
++#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
++#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
++#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
++#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
++#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
++#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
-+/**
-+ * Extract out of frame header error
-+ */
-+#define DPNI_ERROR_EOFHE 0x00020000
-+/**
-+ * Frame length error
-+ */
-+#define DPNI_ERROR_FLE 0x00002000
-+/**
-+ * Frame physical error
-+ */
-+#define DPNI_ERROR_FPE 0x00001000
-+/**
-+ * Parsing header error
-+ */
-+#define DPNI_ERROR_PHE 0x00000020
-+/**
-+ * Parser L3 checksum error
-+ */
-+#define DPNI_ERROR_L3CE 0x00000004
-+/**
-+ * Parser L3 checksum error
-+ */
-+#define DPNI_ERROR_L4CE 0x00000001
++#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+
-+/**
-+ * enum dpni_error_action - Defines DPNI behavior for errors
-+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
-+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
-+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
-+ */
-+enum dpni_error_action {
-+ DPNI_ERROR_ACTION_DISCARD = 0,
-+ DPNI_ERROR_ACTION_CONTINUE = 1,
-+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
-+};
++#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
++#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
++#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
++#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
++#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
++#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
-+/**
-+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
-+ * @errors: Errors mask; use 'DPNI_ERROR__<X>
-+ * @error_action: The desired action for the errors mask
-+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
-+ * status (FAS); relevant only for the non-discard action
-+ */
-+struct dpni_error_cfg {
-+ u32 errors;
-+ enum dpni_error_action error_action;
-+ int set_frame_annotation;
++#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
++#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D)
++#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
++#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
++#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
++#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
++#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
++
++#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
++
++#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
++#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
++
++#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
++#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
++#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
++#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
++#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
++#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
++#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
++
++#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
++#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
++
++/* Macros for accessing command fields smaller than 1byte */
++#define DPNI_MASK(field) \
++ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
++ DPNI_##field##_SHIFT)
++
++#define dpni_set_field(var, field, val) \
++ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
++#define dpni_get_field(var, field) \
++ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
++
++struct dpni_cmd_open {
++ __le32 dpni_id;
+};
+
-+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_error_cfg *cfg);
++#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
++struct dpni_cmd_set_pools {
++ u8 num_dpbp;
++ u8 backup_pool_mask;
++ __le16 pad;
++ struct {
++ __le16 dpbp_id;
++ u8 priority_mask;
++ u8 pad;
++ } pool[DPNI_MAX_DPBP];
++ __le16 buffer_size[DPNI_MAX_DPBP];
++};
+
-+/**
-+ * DPNI buffer layout modification options
-+ */
++/* The enable indication is always the least significant bit */
++#define DPNI_ENABLE_SHIFT 0
++#define DPNI_ENABLE_SIZE 1
+
-+/**
-+ * Select to modify the time-stamp setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
-+/**
-+ * Select to modify the parser-result setting; not applicable for Tx
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
-+/**
-+ * Select to modify the frame-status setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
-+/**
-+ * Select to modify the private-data-size setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
-+/**
-+ * Select to modify the data-alignment setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
-+/**
-+ * Select to modify the data-head-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
-+/**
-+ * Select to modify the data-tail-room setting
-+ */
-+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
++struct dpni_rsp_is_enabled {
++ u8 enabled;
++};
+
-+/**
-+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
-+ * @options: Flags representing the suggested modifications to the buffer
-+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
-+ * @pass_timestamp: Pass timestamp value
-+ * @pass_parser_result: Pass parser results
-+ * @pass_frame_status: Pass frame status
-+ * @private_data_size: Size kept for private data (in bytes)
-+ * @data_align: Data alignment
-+ * @data_head_room: Data head room
-+ * @data_tail_room: Data tail room
-+ */
-+struct dpni_buffer_layout {
-+ u32 options;
-+ int pass_timestamp;
-+ int pass_parser_result;
-+ int pass_frame_status;
-+ u16 private_data_size;
-+ u16 data_align;
-+ u16 data_head_room;
-+ u16 data_tail_room;
++struct dpni_rsp_get_irq {
++ /* response word 0 */
++ __le32 irq_val;
++ __le32 pad;
++ /* response word 1 */
++ __le64 irq_addr;
++ /* response word 2 */
++ __le32 irq_num;
++ __le32 type;
+};
+
-+/**
-+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
-+ * @DPNI_QUEUE_RX: Rx queue
-+ * @DPNI_QUEUE_TX: Tx queue
-+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
-+ * @DPNI_QUEUE_RX_ERR: Rx error queue
-+ */enum dpni_queue_type {
-+ DPNI_QUEUE_RX,
-+ DPNI_QUEUE_TX,
-+ DPNI_QUEUE_TX_CONFIRM,
-+ DPNI_QUEUE_RX_ERR,
++struct dpni_cmd_set_irq_enable {
++ u8 enable;
++ u8 pad[3];
++ u8 irq_index;
+};
+
-+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ struct dpni_buffer_layout *layout);
++struct dpni_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
++};
+
-+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ const struct dpni_buffer_layout *layout);
++struct dpni_rsp_get_irq_enable {
++ u8 enabled;
++};
+
-+/**
-+ * enum dpni_offload - Identifies a type of offload targeted by the command
-+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
-+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
-+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
-+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
-+ */
-+enum dpni_offload {
-+ DPNI_OFF_RX_L3_CSUM,
-+ DPNI_OFF_RX_L4_CSUM,
-+ DPNI_OFF_TX_L3_CSUM,
-+ DPNI_OFF_TX_L4_CSUM,
++struct dpni_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
+};
+
-+int dpni_set_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 config);
++struct dpni_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
++};
+
-+int dpni_get_offload(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_offload type,
-+ u32 *config);
++struct dpni_rsp_get_irq_mask {
++ __le32 mask;
++};
+
-+int dpni_get_qdid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u16 *qdid);
++struct dpni_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *data_offset);
++struct dpni_rsp_get_irq_status {
++ __le32 status;
++};
+
-+#define DPNI_STATISTICS_CNT 7
++struct dpni_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+union dpni_statistics {
-+ /**
-+ * struct page_0 - Page_0 statistics structure
-+ * @ingress_all_frames: Ingress frame count
-+ * @ingress_all_bytes: Ingress byte count
-+ * @ingress_multicast_frames: Ingress multicast frame count
-+ * @ingress_multicast_bytes: Ingress multicast byte count
-+ * @ingress_broadcast_frames: Ingress broadcast frame count
-+ * @ingress_broadcast_bytes: Ingress broadcast byte count
-+ */
-+ struct {
-+ u64 ingress_all_frames;
-+ u64 ingress_all_bytes;
-+ u64 ingress_multicast_frames;
-+ u64 ingress_multicast_bytes;
-+ u64 ingress_broadcast_frames;
-+ u64 ingress_broadcast_bytes;
-+ } page_0;
-+ /**
-+ * struct page_1 - Page_1 statistics structure
-+ * @egress_all_frames: Egress frame count
-+ * @egress_all_bytes: Egress byte count
-+ * @egress_multicast_frames: Egress multicast frame count
-+ * @egress_multicast_bytes: Egress multicast byte count
-+ * @egress_broadcast_frames: Egress broadcast frame count
-+ * @egress_broadcast_bytes: Egress broadcast byte count
-+ */
-+ struct {
-+ u64 egress_all_frames;
-+ u64 egress_all_bytes;
-+ u64 egress_multicast_frames;
-+ u64 egress_multicast_bytes;
-+ u64 egress_broadcast_frames;
-+ u64 egress_broadcast_bytes;
-+ } page_1;
-+ /**
-+ * struct page_2 - Page_2 statistics structure
-+ * @ingress_filtered_frames: Ingress filtered frame count
-+ * @ingress_discarded_frames: Ingress discarded frame count
-+ * @ingress_nobuffer_discards: Ingress discarded frame count
-+ * due to lack of buffers
-+ * @egress_discarded_frames: Egress discarded frame count
-+ * @egress_confirmed_frames: Egress confirmed frame count
-+ */
-+ struct {
-+ u64 ingress_filtered_frames;
-+ u64 ingress_discarded_frames;
-+ u64 ingress_nobuffer_discards;
-+ u64 egress_discarded_frames;
-+ u64 egress_confirmed_frames;
-+ } page_2;
-+ /**
-+ * struct raw - raw statistics structure
-+ */
-+ struct {
-+ u64 counter[DPNI_STATISTICS_CNT];
-+ } raw;
++struct dpni_rsp_get_attr {
++ /* response word 0 */
++ __le32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 pad0;
++ /* response word 1 */
++ u8 vlan_filter_entries;
++ u8 pad1;
++ u8 qos_entries;
++ u8 pad2;
++ __le16 fs_entries;
++ __le16 pad3;
++ /* response word 2 */
++ u8 qos_key_size;
++ u8 fs_key_size;
++ __le16 wriop_version;
+};
+
-+int dpni_get_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 page,
-+ union dpni_statistics *stat);
++#define DPNI_ERROR_ACTION_SHIFT 0
++#define DPNI_ERROR_ACTION_SIZE 4
++#define DPNI_FRAME_ANN_SHIFT 4
++#define DPNI_FRAME_ANN_SIZE 1
+
-+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++struct dpni_cmd_set_errors_behavior {
++ __le32 errors;
++ /* from least significant bit: error_action:4, set_frame_annotation:1 */
++ u8 flags;
++};
+
-+/**
-+ * Enable auto-negotiation
-+ */
-+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
-+/**
-+ * Enable half-duplex mode
-+ */
-+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-+/**
-+ * Enable pause frames
-+ */
-+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
-+/**
-+ * Enable a-symmetric pause frames
-+ */
-+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-+/**
-+ * Enable priority flow control pause frames
++/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
++ * buffer layouts, but they all share the same parameters.
++ * If one of the functions changes, below structure needs to be split.
+ */
-+#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
-+/**
-+ * struct - Structure representing DPNI link configuration
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ */
-+struct dpni_link_cfg {
-+ u32 rate;
-+ u64 options;
-+};
++#define DPNI_PASS_TS_SHIFT 0
++#define DPNI_PASS_TS_SIZE 1
++#define DPNI_PASS_PR_SHIFT 1
++#define DPNI_PASS_PR_SIZE 1
++#define DPNI_PASS_FS_SHIFT 2
++#define DPNI_PASS_FS_SIZE 1
+
-+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_link_cfg *cfg);
++struct dpni_cmd_get_buffer_layout {
++ u8 qtype;
++};
+
-+/**
-+ * struct dpni_link_state - Structure representing DPNI link state
-+ * @rate: Rate
-+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
-+ * @up: Link state; '0' for down, '1' for up
-+ */
-+struct dpni_link_state {
-+ u32 rate;
-+ u64 options;
-+ int up;
++struct dpni_rsp_get_buffer_layout {
++ /* response word 0 */
++ u8 pad0[6];
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* response word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
+};
+
-+int dpni_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpni_link_state *state);
++struct dpni_cmd_set_buffer_layout {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 pad0[3];
++ __le16 options;
++ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
++ u8 flags;
++ u8 pad1;
++ /* cmd word 1 */
++ __le16 private_data_size;
++ __le16 data_align;
++ __le16 head_room;
++ __le16 tail_room;
++};
+
-+/**
-+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
-+ * @rate_limit: rate in Mbps
-+ * @max_burst_size: burst size in bytes (up to 64KB)
-+ */
-+struct dpni_tx_shaping_cfg {
-+ u32 rate_limit;
-+ u16 max_burst_size;
++struct dpni_cmd_set_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++ __le32 config;
+};
+
-+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_tx_shaping_cfg *tx_shaper);
++struct dpni_cmd_get_offload {
++ u8 pad[3];
++ u8 dpni_offload;
++};
+
-+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 max_frame_length);
++struct dpni_rsp_get_offload {
++ __le32 pad;
++ __le32 config;
++};
+
-+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *max_frame_length);
++struct dpni_cmd_get_qdid {
++ u8 qtype;
++};
+
-+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en);
++struct dpni_rsp_get_qdid {
++ __le16 qdid;
++};
+
-+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++struct dpni_rsp_get_tx_data_offset {
++ __le16 data_offset;
++};
+
-+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int en);
++struct dpni_cmd_get_statistics {
++ u8 page_number;
++ u8 param;
++};
+
-+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
++struct dpni_rsp_get_statistics {
++ __le64 counter[DPNI_STATISTICS_CNT];
++};
+
-+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
++struct dpni_cmd_set_link_cfg {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 mac_addr[6]);
++#define DPNI_LINK_STATE_SHIFT 0
++#define DPNI_LINK_STATE_SIZE 1
+
-+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cm_flags,
-+ u16 token,
-+ u8 mac_addr[6]);
++struct dpni_rsp_get_link_state {
++ /* response word 0 */
++ __le32 pad0;
++ /* from LSB: up:1 */
++ u8 flags;
++ u8 pad1[3];
++ /* response word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* response word 2 */
++ __le64 options;
++};
+
-+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
++#define DPNI_COUPLED_SHIFT 0
++#define DPNI_COUPLED_SIZE 1
+
-+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const u8 mac_addr[6]);
++struct dpni_cmd_set_tx_shaping {
++ /* cmd word 0 */
++ __le16 tx_cr_max_burst_size;
++ __le16 tx_er_max_burst_size;
++ __le32 pad;
++ /* cmd word 1 */
++ __le32 tx_cr_rate_limit;
++ __le32 tx_er_rate_limit;
++ /* cmd word 2 */
++ /* from LSB: coupled:1 */
++ u8 coupled;
++};
+
-+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int unicast,
-+ int multicast);
++struct dpni_cmd_set_max_frame_length {
++ __le16 max_frame_length;
++};
+
-+/**
-+ * enum dpni_dist_mode - DPNI distribution mode
-+ * @DPNI_DIST_MODE_NONE: No distribution
-+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
-+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
-+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
-+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
-+ */
-+enum dpni_dist_mode {
-+ DPNI_DIST_MODE_NONE = 0,
-+ DPNI_DIST_MODE_HASH = 1,
-+ DPNI_DIST_MODE_FS = 2
++struct dpni_rsp_get_max_frame_length {
++ __le16 max_frame_length;
+};
+
-+/**
-+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
-+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
-+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
-+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
-+ */
-+enum dpni_fs_miss_action {
-+ DPNI_FS_MISS_DROP = 0,
-+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
-+ DPNI_FS_MISS_HASH = 2
++struct dpni_cmd_set_multicast_promisc {
++ u8 enable;
+};
+
-+/**
-+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
-+ * @miss_action: Miss action selection
-+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
-+ */
-+struct dpni_fs_tbl_cfg {
-+ enum dpni_fs_miss_action miss_action;
-+ u16 default_flow_id;
++struct dpni_rsp_get_multicast_promisc {
++ u8 enabled;
+};
+
-+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
-+ u8 *key_cfg_buf);
++struct dpni_cmd_set_unicast_promisc {
++ u8 enable;
++};
+
-+/**
-+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * key extractions to be used as the QoS criteria by calling
-+ * dpkg_prepare_key_cfg()
-+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
-+ * '0' to use the 'default_tc' in such cases
-+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
-+ */
-+struct dpni_qos_tbl_cfg {
-+ u64 key_cfg_iova;
-+ int discard_on_miss;
-+ u8 default_tc;
++struct dpni_rsp_get_unicast_promisc {
++ u8 enabled;
+};
+
-+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_qos_tbl_cfg *cfg);
++struct dpni_cmd_set_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
+
-+/**
-+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
-+ * @dist_size: Set the distribution size;
-+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
-+ * 112,128,192,224,256,384,448,512,768,896,1024
-+ * @dist_mode: Distribution mode
-+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
-+ * the extractions to be used for the distribution key by calling
-+ * dpni_prepare_key_cfg() relevant only when
-+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
-+ * @fs_cfg: Flow Steering table configuration; only relevant if
-+ * 'dist_mode = DPNI_DIST_MODE_FS'
-+ */
-+struct dpni_rx_tc_dist_cfg {
-+ u16 dist_size;
-+ enum dpni_dist_mode dist_mode;
-+ u64 key_cfg_iova;
-+ struct dpni_fs_tbl_cfg fs_cfg;
++struct dpni_rsp_get_primary_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rx_tc_dist_cfg *cfg);
++struct dpni_rsp_get_port_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
++};
+
-+/**
-+ * enum dpni_dest - DPNI destination types
-+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
-+ * does not generate FQDAN notifications; user is expected to
-+ * dequeue from the queue based on polling or other user-defined
-+ * method
-+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
-+ * notifications to the specified DPIO; user is expected to dequeue
-+ * from the queue only after notification is received
-+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
-+ * FQDAN notifications, but is connected to the specified DPCON
-+ * object; user is expected to dequeue from the DPCON channel
-+ */
-+enum dpni_dest {
-+ DPNI_DEST_NONE = 0,
-+ DPNI_DEST_DPIO = 1,
-+ DPNI_DEST_DPCON = 2
++struct dpni_cmd_add_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+/**
-+ * struct dpni_queue - Queue structure
-+ * @user_context: User data, presented to the user along with any frames from
-+ * this queue. Not relevant for Tx queues.
-+ */
-+struct dpni_queue {
-+/**
-+ * struct destination - Destination structure
-+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
-+ * Identifies either a DPIO or a DPCON object. Not relevant for
-+ * Tx queues.
-+ * @type: May be one of the following:
-+ * 0 - No destination, queue can be manually queried, but will not
-+ * push traffic or notifications to a DPIO;
-+ * 1 - The destination is a DPIO. When traffic becomes available in
-+ * the queue a FQDAN (FQ data available notification) will be
-+ * generated to selected DPIO;
-+ * 2 - The destination is a DPCON. The queue is associated with a
-+ * DPCON object for the purpose of scheduling between multiple
-+ * queues. The DPCON may be independently configured to
-+ * generate notifications. Not relevant for Tx queues.
-+ * @hold_active: Hold active, maintains a queue scheduled for longer
-+ * in a DPIO during dequeue to reduce spread of traffic.
-+ * Only relevant if queues are not affined to a single DPIO.
-+ */
-+ struct {
-+ u16 id;
-+ enum dpni_dest type;
-+ char hold_active;
-+ u8 priority;
-+ } destination;
-+ u64 user_context;
-+ struct {
-+ u64 value;
-+ char stash_control;
-+ } flc;
++struct dpni_cmd_remove_mac_addr {
++ __le16 pad;
++ u8 mac_addr[6];
+};
+
-+/**
-+ * struct dpni_queue_id - Queue identification, used for enqueue commands
-+ * or queue control
-+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
-+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
-+ * for Tx queues.
-+ */
-+struct dpni_queue_id {
-+ u32 fqid;
-+ u16 qdbin;
++#define DPNI_UNICAST_FILTERS_SHIFT 0
++#define DPNI_UNICAST_FILTERS_SIZE 1
++#define DPNI_MULTICAST_FILTERS_SHIFT 1
++#define DPNI_MULTICAST_FILTERS_SIZE 1
++
++struct dpni_cmd_clear_mac_filters {
++ /* from LSB: unicast:1, multicast:1 */
++ u8 flags;
+};
+
-+/**
-+ * Set User Context
-+ */
-+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
-+#define DPNI_QUEUE_OPT_DEST 0x00000002
-+#define DPNI_QUEUE_OPT_FLC 0x00000004
-+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
++#define DPNI_SEPARATE_GRP_SHIFT 0
++#define DPNI_SEPARATE_GRP_SIZE 1
++#define DPNI_MODE_1_SHIFT 0
++#define DPNI_MODE_1_SIZE 4
++#define DPNI_MODE_2_SHIFT 4
++#define DPNI_MODE_2_SIZE 4
+
-+int dpni_set_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ u8 options,
-+ const struct dpni_queue *queue);
++struct dpni_cmd_set_tx_priorities {
++ __le16 flags;
++ u8 prio_group_A;
++ u8 prio_group_B;
++ __le32 pad0;
++ u8 modes[4];
++ __le32 pad1;
++ __le64 pad2;
++ __le16 delta_bandwidth[8];
++};
+
-+int dpni_get_queue(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc,
-+ u8 index,
-+ struct dpni_queue *queue,
-+ struct dpni_queue_id *qid);
++#define DPNI_DIST_MODE_SHIFT 0
++#define DPNI_DIST_MODE_SIZE 4
++#define DPNI_MISS_ACTION_SHIFT 4
++#define DPNI_MISS_ACTION_SIZE 4
+
-+/**
-+ * enum dpni_congestion_unit - DPNI congestion units
-+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
-+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
-+ */
-+enum dpni_congestion_unit {
-+ DPNI_CONGESTION_UNIT_BYTES = 0,
-+ DPNI_CONGESTION_UNIT_FRAMES
++struct dpni_cmd_set_rx_tc_dist {
++ /* cmd word 0 */
++ __le16 dist_size;
++ u8 tc_id;
++ /* from LSB: dist_mode:4, miss_action:4 */
++ u8 flags;
++ __le16 pad0;
++ __le16 default_flow_id;
++ /* cmd word 1..5 */
++ __le64 pad1[5];
++ /* cmd word 6 */
++ __le64 key_cfg_iova;
+};
+
-+/**
-+ * enum dpni_congestion_point - Structure representing congestion point
-+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
-+ * QUEUE_INDEX
-+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
-+ * define the DPNI this can be either per TC (default) or per
-+ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
-+ * QUEUE_INDEX is ignored if this type is used.
++/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
++ * key_cfg_iova)
+ */
-+enum dpni_congestion_point {
-+ DPNI_CP_QUEUE,
-+ DPNI_CP_GROUP,
++struct dpni_mask_cfg {
++ u8 mask;
++ u8 offset;
+};
+
-+/**
-+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
-+ * @dest_type: Destination type
-+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
-+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
-+ * are 0-1 or 0-7, depending on the number of priorities in that
-+ * channel; not relevant for 'DPNI_DEST_NONE' option
-+ */
-+struct dpni_dest_cfg {
-+ enum dpni_dest dest_type;
-+ int dest_id;
-+ u8 priority;
++#define DPNI_EFH_TYPE_SHIFT 0
++#define DPNI_EFH_TYPE_SIZE 4
++#define DPNI_EXTRACT_TYPE_SHIFT 0
++#define DPNI_EXTRACT_TYPE_SIZE 4
++
++struct dpni_dist_extract {
++ /* word 0 */
++ u8 prot;
++ /* EFH type stored in the 4 least significant bits */
++ u8 efh_type;
++ u8 size;
++ u8 offset;
++ __le32 field;
++ /* word 1 */
++ u8 hdr_index;
++ u8 constant;
++ u8 num_of_repeats;
++ u8 num_of_byte_masks;
++ /* Extraction type is stored in the 4 LSBs */
++ u8 extract_type;
++ u8 pad[3];
++ /* word 2 */
++ struct dpni_mask_cfg masks[4];
+};
+
-+/* DPNI congestion options */
++struct dpni_ext_set_rx_tc_dist {
++ /* extension word 0 */
++ u8 num_extracts;
++ u8 pad[7];
++ /* words 1..25 */
++ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
++};
+
-+/**
-+ * CSCN message is written to message_iova once entering a
-+ * congestion state (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
-+/**
-+ * CSCN message is written to message_iova once exiting a
-+ * congestion state (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
-+/**
-+ * CSCN write will attempt to allocate into a cache (coherent write);
-+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
-+ */
-+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once entering a congestion state
-+ * (see 'threshold_entry')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
-+ * DPIO/DPCON's WQ channel once exiting a congestion state
-+ * (see 'threshold_exit')
-+ */
-+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
-+/**
-+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
-+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
-+ */
-+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
-+/**
-+ * This congestion will trigger flow control or priority flow control.
-+ * This will have effect only if flow control is enabled with
-+ * dpni_set_link_cfg().
-+ */
-+#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
++struct dpni_cmd_get_queue {
++ u8 qtype;
++ u8 tc;
++ u8 index;
++};
+
-+/**
-+ * struct dpni_congestion_notification_cfg - congestion notification
-+ * configuration
-+ * @units: units type
-+ * @threshold_entry: above this threshold we enter a congestion state.
-+ * set it to '0' to disable it
-+ * @threshold_exit: below this threshold we exit the congestion state.
-+ * @message_ctx: The context that will be part of the CSCN message
-+ * @message_iova: I/O virtual address (must be in DMA-able memory),
-+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
-+ * contained in 'options'
-+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
-+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
-+ */
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_STASH_CTRL_SHIFT 6
++#define DPNI_STASH_CTRL_SIZE 1
++#define DPNI_HOLD_ACTIVE_SHIFT 7
++#define DPNI_HOLD_ACTIVE_SIZE 1
+
-+struct dpni_congestion_notification_cfg {
-+ enum dpni_congestion_unit units;
-+ u32 threshold_entry;
-+ u32 threshold_exit;
-+ u64 message_ctx;
-+ u64 message_iova;
-+ struct dpni_dest_cfg dest_cfg;
-+ u16 notification_mode;
++struct dpni_rsp_get_queue {
++ /* response word 0 */
++ __le64 pad0;
++ /* response word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
++ u8 flags;
++ /* response word 2 */
++ __le64 flc;
++ /* response word 3 */
++ __le64 user_context;
++ /* response word 4 */
++ __le32 fqid;
++ __le16 qdbin;
+};
+
-+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ const struct dpni_congestion_notification_cfg *cfg);
++struct dpni_cmd_set_queue {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ u8 options;
++ __le32 pad0;
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 pad1;
++ u8 dest_prio;
++ u8 flags;
++ /* cmd word 2 */
++ __le64 flc;
++ /* cmd word 3 */
++ __le64 user_context;
++};
+
-+int dpni_get_congestion_notification(
-+ struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_queue_type qtype,
-+ u8 tc_id,
-+ struct dpni_congestion_notification_cfg *cfg);
++#define DPNI_DISCARD_ON_MISS_SHIFT 0
++#define DPNI_DISCARD_ON_MISS_SIZE 1
+
-+/**
-+ * struct dpni_taildrop - Structure representing the taildrop
-+ * @enable: Indicates whether the taildrop is active or not.
-+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
-+ * byte units, this field is ignored and assumed = 0 if
-+ * CONGESTION_POINT is 0.
-+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
-+ * cannot be used as a valid taildrop threshold, THRESHOLD must
-+ * be > 0 if the taildrop is enabled.
-+ */
-+struct dpni_taildrop {
-+ char enable;
-+ enum dpni_congestion_unit units;
-+ u32 threshold;
++struct dpni_cmd_set_qos_table {
++ __le32 pad;
++ u8 default_tc;
++ /* only the LSB */
++ u8 discard_on_miss;
++ __le16 pad1[21];
++ __le64 key_cfg_iova;
+};
+
-+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type q_type,
-+ u8 tc,
-+ u8 q_index,
-+ struct dpni_taildrop *taildrop);
++struct dpni_cmd_add_qos_entry {
++ __le16 pad;
++ u8 tc_id;
++ u8 key_size;
++ __le16 index;
++ __le16 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
++};
+
-+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ enum dpni_congestion_point cg_point,
-+ enum dpni_queue_type q_type,
-+ u8 tc,
-+ u8 q_index,
-+ struct dpni_taildrop *taildrop);
++struct dpni_cmd_remove_qos_entry {
++ u8 pad1[3];
++ u8 key_size;
++ __le32 pad2;
++ __le64 key_iova;
++ __le64 mask_iova;
++};
+
-+/**
-+ * struct dpni_rule_cfg - Rule configuration for table lookup
-+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
-+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
-+ * @key_size: key and mask size (in bytes)
-+ */
-+struct dpni_rule_cfg {
-+ u64 key_iova;
-+ u64 mask_iova;
-+ u8 key_size;
++struct dpni_cmd_add_fs_entry {
++ /* cmd word 0 */
++ __le16 options;
++ u8 tc_id;
++ u8 key_size;
++ __le16 index;
++ __le16 flow_id;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
++ /* cmd word 3 */
++ __le64 flc;
+};
+
-+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg,
-+ u8 tc_id,
-+ u16 index);
++struct dpni_cmd_remove_fs_entry {
++ /* cmd word 0 */
++ __le16 pad0;
++ u8 tc_id;
++ u8 key_size;
++ __le32 pad1;
++ /* cmd word 1 */
++ __le64 key_iova;
++ /* cmd word 2 */
++ __le64 mask_iova;
++};
+
-+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpni_rule_cfg *cfg);
++struct dpni_cmd_set_taildrop {
++ /* cmd word 0 */
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++ __le32 pad0;
++ /* cmd word 1 */
++ /* Only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
+
-+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++struct dpni_cmd_get_taildrop {
++ u8 congestion_point;
++ u8 qtype;
++ u8 tc;
++ u8 index;
++};
+
-+/**
-+ * Discard matching traffic. If set, this takes precedence over any other
-+ * configuration and matching traffic is always discarded.
-+ */
-+ #define DPNI_FS_OPT_DISCARD 0x1
++struct dpni_rsp_get_taildrop {
++ /* cmd word 0 */
++ __le64 pad0;
++ /* cmd word 1 */
++ /* only least significant bit is relevant */
++ u8 enable;
++ u8 pad1;
++ u8 units;
++ u8 pad2;
++ __le32 threshold;
++};
+
-+/**
-+ * Set FLC value. If set, flc member of truct dpni_fs_action_cfg is used to
-+ * override the FLC value set per queue.
-+ * For more details check the Frame Descriptor section in the hardware
-+ * documentation.
-+ */
-+#define DPNI_FS_OPT_SET_FLC 0x2
++struct dpni_rsp_get_api_version {
++ u16 major;
++ u16 minor;
++};
+
-+/*
-+ * Indicates whether the 6 lowest significant bits of FLC are used for stash
-+ * control. If set, the 6 least significant bits in value are interpreted as
-+ * follows:
-+ * - bits 0-1: indicates the number of 64 byte units of context that are
-+ * stashed. FLC value is interpreted as a memory address in this case,
-+ * excluding the 6 LS bits.
-+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
-+ * to be stashed. Annotation is placed at FD[ADDR].
-+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
-+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
-+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
-+ */
-+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
++#define DPNI_DEST_TYPE_SHIFT 0
++#define DPNI_DEST_TYPE_SIZE 4
++#define DPNI_CONG_UNITS_SHIFT 4
++#define DPNI_CONG_UNITS_SIZE 2
+
-+/**
-+ * struct dpni_fs_action_cfg - Action configuration for table look-up
-+ * @flc: FLC value for traffic matching this rule. Please check the Frame
-+ * Descriptor section in the hardware documentation for more information.
-+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
-+ * values are in range 0 to num_queue-1.
-+ * @options: Any combination of DPNI_FS_OPT_ values.
-+ */
-+struct dpni_fs_action_cfg {
-+ u64 flc;
-+ u16 flow_id;
-+ u16 options;
++struct dpni_cmd_set_congestion_notification {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 dest_priority;
++ /* from LSB: dest_type: 4 units:2 */
++ u8 type_units;
++ /* cmd word 2 */
++ __le64 message_iova;
++ /* cmd word 3 */
++ __le64 message_ctx;
++ /* cmd word 4 */
++ __le32 threshold_entry;
++ __le32 threshold_exit;
+};
+
-+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ u16 index,
-+ const struct dpni_rule_cfg *cfg,
-+ const struct dpni_fs_action_cfg *action);
++struct dpni_cmd_get_congestion_notification {
++ /* cmd word 0 */
++ u8 qtype;
++ u8 tc;
++};
+
-+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 tc_id,
-+ const struct dpni_rule_cfg *cfg);
++struct dpni_rsp_get_congestion_notification {
++ /* cmd word 0 */
++ __le64 pad;
++ /* cmd word 1 */
++ __le32 dest_id;
++ __le16 notification_mode;
++ u8 dest_priority;
++ /* from LSB: dest_type: 4 units:2 */
++ u8 type_units;
++ /* cmd word 2 */
++ __le64 message_iova;
++ /* cmd word 3 */
++ __le64 message_ctx;
++ /* cmd word 4 */
++ __le32 threshold_entry;
++ __le32 threshold_exit;
++};
+
-+#endif /* __FSL_DPNI_H */
++#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_fs_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le16 miss_flow_id;
++ __le16 pad;
++ __le64 key_cfg_iova;
++};
++
++#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
++#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
++struct dpni_cmd_set_rx_hash_dist {
++ __le16 dist_size;
++ u8 enable;
++ u8 tc;
++ __le32 pad;
++ __le64 key_cfg_iova;
++};
++
++#endif /* _FSL_DPNI_CMD_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
-@@ -0,0 +1,480 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+@@ -0,0 +1,2112 @@
++/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#ifndef __FSL_NET_H
-+#define __FSL_NET_H
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fsl/mc.h>
++#include "dpni.h"
++#include "dpni-cmd.h"
+
-+#define LAST_HDR_INDEX 0xFFFFFFFF
++/**
++ * dpni_prepare_key_cfg() - function prepare extract parameters
++ * @cfg: defining a full Key Generation profile (rule)
++ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
++ *
++ * This function has to be called before the following functions:
++ * - dpni_set_rx_tc_dist()
++ * - dpni_set_qos_table()
++ */
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
++{
++ int i, j;
++ struct dpni_ext_set_rx_tc_dist *dpni_ext;
++ struct dpni_dist_extract *extr;
+
-+/*****************************************************************************/
-+/* Protocol fields */
-+/*****************************************************************************/
++ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
++ return -EINVAL;
+
-+/************************* Ethernet fields *********************************/
-+#define NH_FLD_ETH_DA (1)
-+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
-+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
-+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
-+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
-+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
-+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
++ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
++ dpni_ext->num_extracts = cfg->num_extracts;
+
-+#define NH_FLD_ETH_ADDR_SIZE 6
++ for (i = 0; i < cfg->num_extracts; i++) {
++ extr = &dpni_ext->extracts[i];
+
-+/*************************** VLAN fields ***********************************/
-+#define NH_FLD_VLAN_VPRI (1)
-+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
-+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
-+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
-+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
-+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
++ switch (cfg->extracts[i].type) {
++ case DPKG_EXTRACT_FROM_HDR:
++ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
++ dpni_set_field(extr->efh_type, EFH_TYPE,
++ cfg->extracts[i].extract.from_hdr.type);
++ extr->size = cfg->extracts[i].extract.from_hdr.size;
++ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
++ extr->field = cpu_to_le32(
++ cfg->extracts[i].extract.from_hdr.field);
++ extr->hdr_index =
++ cfg->extracts[i].extract.from_hdr.hdr_index;
++ break;
++ case DPKG_EXTRACT_FROM_DATA:
++ extr->size = cfg->extracts[i].extract.from_data.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_data.offset;
++ break;
++ case DPKG_EXTRACT_FROM_PARSE:
++ extr->size = cfg->extracts[i].extract.from_parse.size;
++ extr->offset =
++ cfg->extracts[i].extract.from_parse.offset;
++ break;
++ default:
++ return -EINVAL;
++ }
+
-+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
-+ NH_FLD_VLAN_CFI | \
-+ NH_FLD_VLAN_VID)
++ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
++ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
++ cfg->extracts[i].type);
+
-+/************************ IP (generic) fields ******************************/
-+#define NH_FLD_IP_VER (1)
-+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
-+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
-+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
-+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
-+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
-+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
-+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
-+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
++ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
++ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
++ extr->masks[j].offset =
++ cfg->extracts[i].masks[j].offset;
++ }
++ }
+
-+#define NH_FLD_IP_PROTO_SIZE 1
++ return 0;
++}
+
-+/***************************** IPV4 fields *********************************/
-+#define NH_FLD_IPV4_VER (1)
-+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
-+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
-+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
-+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
-+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
-+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
-+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
-+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
-+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
-+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
-+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
-+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
-+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
-+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
-+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
++/**
++ * dpni_open() - Open a control session for the specified object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @dpni_id: DPNI unique ID
++ * @token: Returned token; use in subsequent API calls
++ *
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpni_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_open *cmd_params;
+
-+#define NH_FLD_IPV4_ADDR_SIZE 4
-+#define NH_FLD_IPV4_PROTO_SIZE 1
++ int err;
+
-+/***************************** IPV6 fields *********************************/
-+#define NH_FLD_IPV6_VER (1)
-+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
-+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
-+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
-+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
-+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
-+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
-+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
-+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
++ cmd_flags,
++ 0);
++ cmd_params = (struct dpni_cmd_open *)cmd.params;
++ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
-+#define NH_FLD_IPV6_ADDR_SIZE 16
-+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/***************************** ICMP fields *********************************/
-+#define NH_FLD_ICMP_TYPE (1)
-+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
-+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
-+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
-+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
-+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
+
-+#define NH_FLD_ICMP_CODE_SIZE 1
-+#define NH_FLD_ICMP_TYPE_SIZE 1
++ return 0;
++}
+
-+/***************************** IGMP fields *********************************/
-+#define NH_FLD_IGMP_VERSION (1)
-+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
-+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
-+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
-+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
++/**
++ * dpni_close() - Close the control session of the object
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/***************************** TCP fields **********************************/
-+#define NH_FLD_TCP_PORT_SRC (1)
-+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
-+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
-+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
-+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
-+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
-+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
-+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
-+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
-+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
-+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
-+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
++ cmd_flags,
++ token);
+
-+#define NH_FLD_TCP_PORT_SIZE 2
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/***************************** UDP fields **********************************/
-+#define NH_FLD_UDP_PORT_SRC (1)
-+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
-+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
-+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
++/**
++ * dpni_set_pools() - Set buffer pools configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Buffer pools configuration
++ *
++ * mandatory for DPNI operation
++ * warning:Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_pools *cmd_params;
++ int i;
+
-+#define NH_FLD_UDP_PORT_SIZE 2
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
++ cmd_params->num_dpbp = cfg->num_dpbp;
++ for (i = 0; i < DPNI_MAX_DPBP; i++) {
++ cmd_params->pool[i].dpbp_id =
++ cpu_to_le16(cfg->pools[i].dpbp_id);
++ cmd_params->pool[i].priority_mask =
++ cfg->pools[i].priority_mask;
++ cmd_params->buffer_size[i] =
++ cpu_to_le16(cfg->pools[i].buffer_size);
++ cmd_params->backup_pool_mask |=
++ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
++ }
+
-+/*************************** UDP-lite fields *******************************/
-+#define NH_FLD_UDP_LITE_PORT_SRC (1)
-+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
-+#define NH_FLD_UDP_LITE_ALL_FIELDS \
-+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+#define NH_FLD_UDP_LITE_PORT_SIZE 2
++/**
++ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/*************************** UDP-encap-ESP fields **************************/
-+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
-+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
-+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
-+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
-+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
-+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
-+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
-+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
++ cmd_flags,
++ token);
+
-+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
-+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/***************************** SCTP fields *********************************/
-+#define NH_FLD_SCTP_PORT_SRC (1)
-+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
-+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
-+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
-+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
++/**
++ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+#define NH_FLD_SCTP_PORT_SIZE 2
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
++ cmd_flags,
++ token);
+
-+/***************************** DCCP fields *********************************/
-+#define NH_FLD_DCCP_PORT_SRC (1)
-+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
-+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+#define NH_FLD_DCCP_PORT_SIZE 2
++/**
++ * dpni_is_enabled() - Check if the DPNI is enabled.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if object is enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_is_enabled *rsp_params;
++ int err;
+
-+/***************************** IPHC fields *********************************/
-+#define NH_FLD_IPHC_CID (1)
-+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
-+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
-+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
-+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
-+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
++ cmd_flags,
++ token);
+
-+/***************************** SCTP fields *********************************/
-+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
-+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
-+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
-+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
-+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
-+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
-+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
-+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
-+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
-+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
-+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/*************************** L2TPV2 fields *********************************/
-+#define NH_FLD_L2TPV2_TYPE_BIT (1)
-+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
-+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
-+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
-+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
-+#define NH_FLD_L2TPV2_ALL_FIELDS \
-+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
-+/*************************** L2TPV3 fields *********************************/
-+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
-+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
-+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
-+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
-+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
-+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
++ return 0;
++}
+
-+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
-+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
-+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
-+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
-+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
-+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
++/**
++ * dpni_reset() - Reset the DPNI, returns the object to initial state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_irq_enable() - Set overall interrupt state.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state: - enable = 1, disable = 0
++ *
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable control's the
++ * overall interrupt state. if the interrupt is disabled no causes will cause
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_enable *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_irq_enable() - Get overall interrupt state
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @en: Returned interrupt state - enable = 1, disable = 0
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_enable *cmd_params;
++ struct dpni_rsp_get_irq_enable *rsp_params;
++
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_irq_mask() - Set interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_irq_mask *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_irq_mask() - Get interrupt mask.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @mask: Returned event mask to trigger interrupt
++ *
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_mask *cmd_params;
++ struct dpni_rsp_get_irq_mask *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
++ *mask = le32_to_cpu(rsp_params->mask);
++
++ return 0;
++}
++
++/**
++ * dpni_get_irq_status() - Get the current status of any pending interrupts.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_irq_status *cmd_params;
++ struct dpni_rsp_get_irq_status *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
++
++ return 0;
++}
++
++/**
++ * dpni_clear_irq_status() - Clear a pending interrupt's status
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_clear_irq_status *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
++ cmd_params->irq_index = irq_index;
++ cmd_params->status = cpu_to_le32(status);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_attributes() - Retrieve DPNI attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @attr: Object's attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_attr *rsp_params;
++
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
++ attr->options = le32_to_cpu(rsp_params->options);
++ attr->num_queues = rsp_params->num_queues;
++ attr->num_tcs = rsp_params->num_tcs;
++ attr->mac_filter_entries = rsp_params->mac_filter_entries;
++ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
++ attr->qos_entries = rsp_params->qos_entries;
++ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
++ attr->qos_key_size = rsp_params->qos_key_size;
++ attr->fs_key_size = rsp_params->fs_key_size;
++ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
++
++ return 0;
++}
++
++/**
++ * dpni_set_errors_behavior() - Set errors behavior
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Errors configuration
++ *
++ * This function may be called numerous times with different
++ * error masks.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_errors_behavior *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
++ cmd_params->errors = cpu_to_le32(cfg->errors);
++ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
++ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to retrieve configuration for
++ * @layout: Returns buffer layout attributes
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_buffer_layout *cmd_params;
++ struct dpni_rsp_get_buffer_layout *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
++ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
++ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
++ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
++ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
++ layout->data_align = le16_to_cpu(rsp_params->data_align);
++ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
++ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
++
++ return 0;
++}
++
++/**
++ * dpni_set_buffer_layout() - Set buffer layout configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue this configuration applies to
++ * @layout: Buffer layout configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_buffer_layout *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->options = cpu_to_le16(layout->options);
++ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
++ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
++ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
++ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
++ cmd_params->data_align = cpu_to_le16(layout->data_align);
++ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
++ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_offload() - Set DPNI offload configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @type: Type of DPNI offload
++ * @config: Offload configuration.
++ * For checksum offloads, non-zero value enables the offload
++ *
++ * Return: '0' on Success; Error code otherwise.
++ *
++ * @warning Allowed only when DPNI is disabled
++ */
++
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_offload *cmd_params;
++
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
++ cmd_params->config = cpu_to_le32(config);
++
++ return mc_send_command(mc_io, &cmd);
++}
++
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_offload *cmd_params;
++ struct dpni_rsp_get_offload *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
++ cmd_params->dpni_offload = type;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
++ *config = le32_to_cpu(rsp_params->config);
++
++ return 0;
++}
++
++/**
++ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
++ * for enqueue operations
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue to receive QDID for
++ * @qdid: Returned virtual QDID value that should be used as an argument
++ * in all enqueue operations
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_qdid *cmd_params;
++ struct dpni_rsp_get_qdid *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
++ cmd_params->qtype = qtype;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
++ *qdid = le16_to_cpu(rsp_params->qdid);
++
++ return 0;
++}
++
++/**
++ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @data_offset: Tx data offset (from start of buffer)
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_tx_data_offset *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
++ *data_offset = le16_to_cpu(rsp_params->data_offset);
++
++ return 0;
++}
++
++/**
++ * dpni_set_link_cfg() - set the link configuration.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Link configuration
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_link_cfg *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_link_state() - Return the link state (either up or down)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @state: Returned link state;
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_link_state *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
++ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++
++ return 0;
++}
++
++/**
++ * dpni_set_tx_shaping() - Set the transmit shaping
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tx_cr_shaper: TX committed rate shaping configuration
++ * @tx_er_shaper: TX excess rate shaping configuration
++ * @coupled: Committed and excess rate shapers are coupled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_tx_shaping *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
++ cmd_params->tx_cr_max_burst_size =
++ cpu_to_le16(tx_cr_shaper->max_burst_size);
++ cmd_params->tx_er_max_burst_size =
++ cpu_to_le16(tx_er_shaper->max_burst_size);
++ cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
++ cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
++ dpni_set_field(cmd_params->coupled, COUPLED, coupled);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_max_frame_length() - Set the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_max_frame_length *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
++ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_max_frame_length() - Get the maximum received frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @max_frame_length: Maximum received frame length (in
++ * bytes); frame is discarded if its
++ * length exceeds this value
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_max_frame_length *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
++ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
++
++ return 0;
++}
++
++/**
++ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_multicast_promisc *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_multicast_promisc *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Set to '1' to enable; '0' to disable
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_unicast_promisc *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
++ dpni_set_field(cmd_params->enable, ENABLE, en);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @en: Returns '1' if enabled; '0' otherwise
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_unicast_promisc *rsp_params;
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
++ *en = dpni_get_field(rsp_params->enabled, ENABLE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_primary_mac_addr() - Set the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to set as primary address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_primary_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_primary_mac_addr() - Get the primary MAC address
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: Returned MAC address
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_primary_mac_addr *rsp_params;
++ int i, err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
++
++ return 0;
++}
++
++/**
++ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
++ * port the DPNI is attached to
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address of the physical port, if any, otherwise 0
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_rsp_get_port_mac_addr *rsp_params;
++ int i, err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
++ cmd_flags,
++ token);
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ mac_addr[5 - i] = rsp_params->mac_addr[i];
++
++ return 0;
++}
++
++/**
++ * dpni_add_mac_addr() - Add MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to add
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_add_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_remove_mac_addr() - Remove MAC address filter
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @mac_addr: MAC address to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6])
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_remove_mac_addr *cmd_params;
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = mac_addr[5 - i];
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @unicast: Set to '1' to clear unicast addresses
++ * @multicast: Set to '1' to clear multicast addresses
++ *
++ * The primary MAC address is not cleared by this operation.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_clear_mac_filters *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
++ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
++ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_tx_priorities() - Set transmission TC priority configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Transmission selection configuration
++ *
++ * warning: Allowed only when DPNI is disabled
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg)
++{
++ struct dpni_cmd_set_tx_priorities *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ int i;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_PRIORITIES,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
++ dpni_set_field(cmd_params->flags,
++ SEPARATE_GRP,
++ cfg->separate_groups);
++ cmd_params->prio_group_A = cfg->prio_group_A;
++ cmd_params->prio_group_B = cfg->prio_group_B;
++
++ for (i = 0; i + 1 < DPNI_MAX_TC; i += 2) {
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_1,
++ cfg->tc_sched[i].mode);
++ dpni_set_field(cmd_params->modes[i / 2],
++ MODE_2,
++ cfg->tc_sched[i + 1].mode);
++ }
++
++ for (i = 0; i < DPNI_MAX_TC; i++) {
++ cmd_params->delta_bandwidth[i] =
++ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
++ }
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Traffic class distribution configuration
++ *
++ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
++ * first to prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_rx_tc_dist *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
++ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
++ cmd_params->tc_id = tc_id;
++ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
++ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
++ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/*
++ * dpni_set_qos_table() - Set QoS mapping table
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS table configuration
++ *
++ * This function and all QoS-related functions require that
++ *'max_tcs > 1' was set at DPNI creation.
++ *
++ * warning: Before calling this function, call dpkg_prepare_key_cfg() to
++ * prepare the key_cfg_iova parameter
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_qos_tbl_cfg *cfg)
++{
++ struct dpni_cmd_set_qos_table *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
++ cmd_params->default_tc = cfg->default_tc;
++ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
++ dpni_set_field(cmd_params->discard_on_miss,
++ ENABLE,
++ cfg->discard_on_miss);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to add
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the QoS table where to insert the entry.
++ * Only relevant if MASKING is enabled for QoS classification on
++ * this DPNI, it is ignored for exact match.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg,
++ u8 tc_id,
++ u16 index)
++{
++ struct dpni_cmd_add_qos_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_remove_qos_entry() - Remove QoS mapping entry
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: QoS rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct dpni_cmd_remove_qos_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
-+/**************************** PPP fields ***********************************/
-+#define NH_FLD_PPP_PID (1)
-+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
-+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
-+/************************** PPPoE fields ***********************************/
-+#define NH_FLD_PPPOE_VER (1)
-+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
-+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
-+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
-+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
-+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
-+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
-+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/************************* PPP-Mux fields **********************************/
-+#define NH_FLD_PPPMUX_PID (1)
-+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
-+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
-+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
++/**
++ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
++ * (to select a flow ID)
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @index: Location in the QoS table where to insert the entry.
++ * Only relevant if MASKING is enabled for QoS
++ * classification on this DPNI, it is ignored for exact match.
++ * @cfg: Flow steering rule to add
++ * @action: Action to be taken as result of a classification hit
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action)
++{
++ struct dpni_cmd_add_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
-+/*********************** PPP-Mux sub-frame fields **************************/
-+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
-+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
-+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
-+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
-+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
-+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
-+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->index = cpu_to_le16(index);
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++ cmd_params->options = cpu_to_le16(action->options);
++ cmd_params->flow_id = cpu_to_le16(action->flow_id);
++ cmd_params->flc = cpu_to_le64(action->flc);
+
-+/*************************** LLC fields ************************************/
-+#define NH_FLD_LLC_DSAP (1)
-+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
-+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
-+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/*************************** NLPID fields **********************************/
-+#define NH_FLD_NLPID_NLPID (1)
-+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
++/**
++ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
++ * traffic class
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Flow steering rule to remove
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg)
++{
++ struct dpni_cmd_remove_fs_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
++ cmd_params->tc_id = tc_id;
++ cmd_params->key_size = cfg->key_size;
++ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_set_congestion_notification() - Set traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
++ * @tc_id: Traffic class selection (0-7)
++ * @cfg: Congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg)
++{
++ struct dpni_cmd_set_congestion_notification *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc_id;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
++ cmd_params->dest_priority = cfg->dest_cfg.priority;
++ dpni_set_field(cmd_params->type_units, DEST_TYPE,
++ cfg->dest_cfg.dest_type);
++ dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
++ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
++ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
++ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
++ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
++
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
++
++/**
++ * dpni_get_congestion_notification() - Get traffic class congestion
++ * notification configuration
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
++ * @tc_id: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc_id parameter.
++ * @cfg: congestion notification configuration
++ *
++ * Return: '0' on Success; error code otherwise.
++ */
++int dpni_get_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ struct dpni_congestion_notification_cfg *cfg)
++{
++ struct dpni_rsp_get_congestion_notification *rsp_params;
++ struct dpni_cmd_get_congestion_notification *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(
++ DPNI_CMDID_GET_CONGESTION_NOTIFICATION,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc_id;
++
++ /* send command to mc*/
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params;
++ cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS);
++ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
++ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
++ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
++ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
++ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
++ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
++ cfg->dest_cfg.priority = rsp_params->dest_priority;
++ cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units,
++ DEST_TYPE);
++
++ return 0;
++}
++
++/**
++ * dpni_set_queue() - Set queue parameters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported, although
++ * the command is ignored for Tx
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
++ * configuration options are set on the queue
++ * @queue: Queue structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_queue *cmd_params;
++
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ cmd_params->options = options;
++ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
++ cmd_params->dest_prio = queue->destination.priority;
++ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
++ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
++ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
++ queue->destination.hold_active);
++ cmd_params->flc = cpu_to_le64(queue->flc.value);
++ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
-+/*************************** SNAP fields ***********************************/
-+#define NH_FLD_SNAP_OUI (1)
-+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
-+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/*************************** LLC SNAP fields *******************************/
-+#define NH_FLD_LLC_SNAP_TYPE (1)
-+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
++/**
++ * dpni_get_queue() - Get queue parameters
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @qtype: Type of queue - all queue types are supported
++ * @tc: Traffic class, in range 0 to NUM_TCS - 1
++ * @index: Selects the specific queue out of the set allocated for the
++ * same TC. Value must be in range 0 to NUM_QUEUES - 1
++ * @queue: Queue configuration structure
++ * @qid: Queue identification
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_queue *cmd_params;
++ struct dpni_rsp_get_queue *rsp_params;
++ int err;
+
-+#define NH_FLD_ARP_HTYPE (1)
-+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
-+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
-+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
-+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
-+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
-+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
-+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
-+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
-+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
+
-+/*************************** RFC2684 fields ********************************/
-+#define NH_FLD_RFC2684_LLC (1)
-+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
-+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
-+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
-+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
-+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
-+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/*************************** User defined fields ***************************/
-+#define NH_FLD_USER_DEFINED_SRCPORT (1)
-+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
-+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
-+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
++ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
++ queue->destination.priority = rsp_params->dest_prio;
++ queue->destination.type = dpni_get_field(rsp_params->flags,
++ DEST_TYPE);
++ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
++ STASH_CTRL);
++ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
++ HOLD_ACTIVE);
++ queue->flc.value = le64_to_cpu(rsp_params->flc);
++ queue->user_context = le64_to_cpu(rsp_params->user_context);
++ qid->fqid = le32_to_cpu(rsp_params->fqid);
++ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
-+/*************************** Payload fields ********************************/
-+#define NH_FLD_PAYLOAD_BUFFER (1)
-+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
-+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
-+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
-+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
-+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
-+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
++ return 0;
++}
+
-+/*************************** GRE fields ************************************/
-+#define NH_FLD_GRE_TYPE (1)
-+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
++/**
++ * dpni_get_statistics() - Get DPNI statistics
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @page: Selects the statistics page to retrieve, see
++ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
++ * @param: Custom parameter for some pages used to select a certain
++ * statistic source, for example the TC.
++ * @stat: Structure containing the statistics
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 page,
++ u8 param,
++ union dpni_statistics *stat)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_statistics *cmd_params;
++ struct dpni_rsp_get_statistics *rsp_params;
++ int i, err;
+
-+/*************************** MINENCAP fields *******************************/
-+#define NH_FLD_MINENCAP_SRC_IP (1)
-+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
-+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
-+#define NH_FLD_MINENCAP_ALL_FIELDS \
-+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
++ cmd_params->page_number = page;
++ cmd_params->param = param;
+
-+/*************************** IPSEC AH fields *******************************/
-+#define NH_FLD_IPSEC_AH_SPI (1)
-+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
-+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/*************************** IPSEC ESP fields ******************************/
-+#define NH_FLD_IPSEC_ESP_SPI (1)
-+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
-+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
++ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
++ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
-+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
++ return 0;
++}
+
-+/*************************** MPLS fields ***********************************/
-+#define NH_FLD_MPLS_LABEL_STACK (1)
-+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
-+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
++/**
++ * dpni_reset_statistics() - Clears DPNI statistics
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
++{
++ struct fsl_mc_command cmd = { 0 };
+
-+/*************************** MACSEC fields *********************************/
-+#define NH_FLD_MACSEC_SECTAG (1)
-+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
++ cmd_flags,
++ token);
+
-+/*************************** GTP fields ************************************/
-+#define NH_FLD_GTP_TEID (1)
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/* Protocol options */
++/**
++ * dpni_set_taildrop() - Set taildrop per queue or TC
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @q_type: Queue type on which the taildrop is configured.
++ * Only Rx queues are supported for now
++ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc parameter.
++ * @q_index: Index of the queue if the DPNI supports multiple queues for
++ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_set_taildrop *cmd_params;
+
-+/* Ethernet options */
-+#define NH_OPT_ETH_BROADCAST 1
-+#define NH_OPT_ETH_MULTICAST 2
-+#define NH_OPT_ETH_UNICAST 3
-+#define NH_OPT_ETH_BPDU 4
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
++ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
++ cmd_params->units = taildrop->units;
++ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
-+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
-+/* also applicable for broadcast */
++ /* send command to mc */
++ return mc_send_command(mc_io, &cmd);
++}
+
-+/* VLAN options */
-+#define NH_OPT_VLAN_CFI 1
++/**
++ * dpni_get_taildrop() - Get taildrop information
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cg_point: Congestion point
++ * @q_type: Queue type on which the taildrop is configured.
++ * Only Rx queues are supported for now
++ * @tc: bits 7-4 contain ceetm channel index (valid only for TX);
++ * bits 3-0 contain traffic class.
++ * Use macro DPNI_BUILD_CH_TC() to build correct value for
++ * tc parameter.
++ * @q_index: Index of the queue if the DPNI supports multiple queues for
++ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
++ * @taildrop: Taildrop structure
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_taildrop *taildrop)
++{
++ struct fsl_mc_command cmd = { 0 };
++ struct dpni_cmd_get_taildrop *cmd_params;
++ struct dpni_rsp_get_taildrop *rsp_params;
++ int err;
+
-+/* IPV4 options */
-+#define NH_OPT_IPV4_UNICAST 1
-+#define NH_OPT_IPV4_MULTICAST 2
-+#define NH_OPT_IPV4_BROADCAST 3
-+#define NH_OPT_IPV4_OPTION 4
-+#define NH_OPT_IPV4_FRAG 5
-+#define NH_OPT_IPV4_INITIAL_FRAG 6
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
++ cmd_params->congestion_point = cg_point;
++ cmd_params->qtype = qtype;
++ cmd_params->tc = tc;
++ cmd_params->index = index;
+
-+/* IPV6 options */
-+#define NH_OPT_IPV6_UNICAST 1
-+#define NH_OPT_IPV6_MULTICAST 2
-+#define NH_OPT_IPV6_OPTION 3
-+#define NH_OPT_IPV6_FRAG 4
-+#define NH_OPT_IPV6_INITIAL_FRAG 5
++ /* send command to mc */
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+/* General IP options (may be used for any version) */
-+#define NH_OPT_IP_FRAG 1
-+#define NH_OPT_IP_INITIAL_FRAG 2
-+#define NH_OPT_IP_OPTION 3
++ /* retrieve response parameters */
++ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
++ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
++ taildrop->units = rsp_params->units;
++ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
-+/* Minenc. options */
-+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
++ return 0;
++}
+
-+/* GRE. options */
-+#define NH_OPT_GRE_ROUTING_PRESENT 1
++/**
++ * dpni_get_api_version() - Get Data Path Network Interface API version
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @major_ver: Major version of data path network interface API
++ * @minor_ver: Minor version of data path network interface API
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver)
++{
++ struct dpni_rsp_get_api_version *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ int err;
+
-+/* TCP options */
-+#define NH_OPT_TCP_OPTIONS 1
-+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
-+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
++ cmd_flags, 0);
+
-+/* CAPWAP options */
-+#define NH_OPT_CAPWAP_DTLS 1
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+enum net_prot {
-+ NET_PROT_NONE = 0,
-+ NET_PROT_PAYLOAD,
-+ NET_PROT_ETH,
-+ NET_PROT_VLAN,
-+ NET_PROT_IPV4,
-+ NET_PROT_IPV6,
-+ NET_PROT_IP,
-+ NET_PROT_TCP,
-+ NET_PROT_UDP,
-+ NET_PROT_UDP_LITE,
-+ NET_PROT_IPHC,
-+ NET_PROT_SCTP,
-+ NET_PROT_SCTP_CHUNK_DATA,
-+ NET_PROT_PPPOE,
-+ NET_PROT_PPP,
-+ NET_PROT_PPPMUX,
-+ NET_PROT_PPPMUX_SUBFRM,
-+ NET_PROT_L2TPV2,
-+ NET_PROT_L2TPV3_CTRL,
-+ NET_PROT_L2TPV3_SESS,
-+ NET_PROT_LLC,
-+ NET_PROT_LLC_SNAP,
-+ NET_PROT_NLPID,
-+ NET_PROT_SNAP,
-+ NET_PROT_MPLS,
-+ NET_PROT_IPSEC_AH,
-+ NET_PROT_IPSEC_ESP,
-+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
-+ NET_PROT_MACSEC,
-+ NET_PROT_GRE,
-+ NET_PROT_MINENCAP,
-+ NET_PROT_DCCP,
-+ NET_PROT_ICMP,
-+ NET_PROT_IGMP,
-+ NET_PROT_ARP,
-+ NET_PROT_CAPWAP_DATA,
-+ NET_PROT_CAPWAP_CTRL,
-+ NET_PROT_RFC2684,
-+ NET_PROT_ICMPV6,
-+ NET_PROT_FCOE,
-+ NET_PROT_FIP,
-+ NET_PROT_ISCSI,
-+ NET_PROT_GTP,
-+ NET_PROT_USER_DEFINED_L2,
-+ NET_PROT_USER_DEFINED_L3,
-+ NET_PROT_USER_DEFINED_L4,
-+ NET_PROT_USER_DEFINED_L5,
-+ NET_PROT_USER_DEFINED_SHIM1,
-+ NET_PROT_USER_DEFINED_SHIM2,
++ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
++ *major_ver = le16_to_cpu(rsp_params->major);
++ *minor_ver = le16_to_cpu(rsp_params->minor);
+
-+ NET_PROT_DUMMY_LAST
-+};
++ return 0;
++}
+
-+/*! IEEE8021.Q */
-+#define NH_IEEE8021Q_ETYPE 0x8100
-+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
-+ ((((u32)((etype) & 0xFFFF)) << 16) | \
-+ (((u32)((pcp) & 0x07)) << 13) | \
-+ (((u32)((dei) & 0x01)) << 12) | \
-+ (((u32)((vlan_id) & 0xFFF))))
++/**
++ * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Distribution configuration
++ * If the FS is already enabled with a previous call the classification
++ * key will be changed but all the table rules are kept. If the
++ * existing rules do not match the key the results will not be
++ * predictable. It is the user responsibility to keep key integrity.
++ * If cfg.enable is set to 1 the command will create a flow steering table
++ * and will classify packets according to this table. The packets that
++ * miss all the table rules will be classified according to settings
++ * made in dpni_set_rx_hash_dist()
++ * If cfg.enable is set to 0 the command will clear flow steering table.
++ * The packets will be classified according to settings made in
++ * dpni_set_rx_hash_dist()
++ */
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg)
++{
++ struct dpni_cmd_set_rx_fs_dist *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
-+#endif /* __FSL_NET_H */
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Kconfig
-@@ -0,0 +1,6 @@
-+config FSL_DPAA2_ETHSW
-+ tristate "DPAA2 Ethernet Switch"
-+ depends on FSL_MC_BUS && FSL_DPAA2
-+ default y
-+ ---help---
-+ Prototype driver for DPAA2 Ethernet Switch.
---- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
-@@ -0,0 +1,10 @@
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
++ cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
++ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++ cmd_params->tc = cfg->tc;
++ cmd_params->miss_flow_id = le16_to_cpu(cfg->fs_miss_flow_id);
++ cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
+
-+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
+
-+dpaa2-ethsw-objs := switch.o dpsw.o
++/**
++ * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPNI object
++ * @cfg: Distribution configuration
++ * If cfg.enable is set to 1 the packets will be classified using a hash
++ * function based on the key received in cfg.key_cfg_iova parameter.
++ * If cfg.enable is set to 0 the packets will be sent to the queue configured
++ * in dpni_set_rx_dist_default_queue() call
++ */
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg)
++{
++ struct dpni_cmd_set_rx_hash_dist *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
-+all:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
++ /* prepare command */
++ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
++ cmd_flags,
++ token);
++ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
++ cmd_params->dist_size = le16_to_cpu(cfg->dist_size);
++ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
++ cmd_params->tc = cfg->tc;
++ cmd_params->key_cfg_iova = le64_to_cpu(cfg->key_cfg_iova);
+
-+clean:
-+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
++}
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
-@@ -0,0 +1,851 @@
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+@@ -0,0 +1,1172 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
++ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#ifndef __FSL_DPSW_CMD_H
-+#define __FSL_DPSW_CMD_H
-+
-+/* DPSW Version */
-+#define DPSW_VER_MAJOR 8
-+#define DPSW_VER_MINOR 0
-+
-+#define DPSW_CMD_BASE_VERSION 1
-+#define DPSW_CMD_ID_OFFSET 4
-+
-+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
-+
-+/* Command IDs */
-+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
-+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
-+
-+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
-+
-+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
-+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
-+#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
-+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
-+#define DPSW_CMDID_IS_ENABLED DPSW_CMD_ID(0x006)
-+
-+#define DPSW_CMDID_SET_IRQ DPSW_CMD_ID(0x010)
-+#define DPSW_CMDID_GET_IRQ DPSW_CMD_ID(0x011)
-+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
-+#define DPSW_CMDID_GET_IRQ_ENABLE DPSW_CMD_ID(0x013)
-+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
-+#define DPSW_CMDID_GET_IRQ_MASK DPSW_CMD_ID(0x015)
-+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
-+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
-+
-+#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
-+
-+#define DPSW_CMDID_ADD_CUSTOM_TPID DPSW_CMD_ID(0x024)
-+
-+#define DPSW_CMDID_REMOVE_CUSTOM_TPID DPSW_CMD_ID(0x026)
-+
-+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
-+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
-+#define DPSW_CMDID_IF_SET_ACCEPTED_FRAMES DPSW_CMD_ID(0x032)
-+#define DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN DPSW_CMD_ID(0x033)
-+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
-+#define DPSW_CMDID_IF_SET_COUNTER DPSW_CMD_ID(0x035)
-+#define DPSW_CMDID_IF_SET_TX_SELECTION DPSW_CMD_ID(0x036)
-+#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
-+#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
-+#define DPSW_CMDID_IF_SET_FLOODING_METERING DPSW_CMD_ID(0x039)
-+#define DPSW_CMDID_IF_SET_METERING DPSW_CMD_ID(0x03A)
-+#define DPSW_CMDID_IF_SET_EARLY_DROP DPSW_CMD_ID(0x03B)
-+
-+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
-+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
-+
-+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
-+
-+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
-+#define DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x045)
-+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
-+#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
-+#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
-+#define DPSW_CMDID_IF_SET_MULTICAST DPSW_CMD_ID(0x049)
-+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
++#ifndef __FSL_DPNI_H
++#define __FSL_DPNI_H
+
-+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
++#include "dpkg.h"
+
-+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
-+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
-+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
-+#define DPSW_CMDID_VLAN_ADD_IF_FLOODING DPSW_CMD_ID(0x063)
-+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
-+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
-+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
-+#define DPSW_CMDID_VLAN_GET_IF DPSW_CMD_ID(0x068)
-+#define DPSW_CMDID_VLAN_GET_IF_FLOODING DPSW_CMD_ID(0x069)
-+#define DPSW_CMDID_VLAN_GET_IF_UNTAGGED DPSW_CMD_ID(0x06A)
-+#define DPSW_CMDID_VLAN_GET_ATTRIBUTES DPSW_CMD_ID(0x06B)
-+
-+#define DPSW_CMDID_FDB_GET_MULTICAST DPSW_CMD_ID(0x080)
-+#define DPSW_CMDID_FDB_GET_UNICAST DPSW_CMD_ID(0x081)
-+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
-+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
-+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
-+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
-+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
-+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
-+#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
-+#define DPSW_CMDID_FDB_GET_ATTR DPSW_CMD_ID(0x089)
++struct fsl_mc_io;
+
-+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
-+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
-+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
-+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
-+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
-+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
-+#define DPSW_CMDID_ACL_GET_ATTR DPSW_CMD_ID(0x096)
++/**
++ * Data Path Network Interface API
++ * Contains initialization APIs and runtime control APIs for DPNI
++ */
+
-+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
-+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
-+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
-+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
++/** General DPNI macros */
+
-+/* Macros for accessing command fields smaller than 1byte */
-+#define DPSW_MASK(field) \
-+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
-+ DPSW_##field##_SHIFT)
-+#define dpsw_set_field(var, field, val) \
-+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
-+#define dpsw_get_field(var, field) \
-+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
-+#define dpsw_get_bit(var, bit) \
-+ (((var) >> (bit)) & GENMASK(0, 0))
++/**
++ * Maximum number of traffic classes
++ */
++#define DPNI_MAX_TC 8
++/**
++ * Maximum number of buffer pools per DPNI
++ */
++#define DPNI_MAX_DPBP 8
++/**
++ * Maximum number of senders
++ */
++#define DPNI_MAX_SENDERS 16
++/**
++ * Maximum distribution size
++ */
++#define DPNI_MAX_DIST_SIZE 16
+
-+static inline u64 dpsw_set_bit(u64 var, unsigned int bit, u8 val)
-+{
-+ var |= (u64)val << bit & GENMASK(bit, bit);
-+ return var;
-+}
++/**
++ * All traffic classes considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TCS (u8)(-1)
++/**
++ * All flows within traffic class considered; see dpni_set_queue()
++ */
++#define DPNI_ALL_TC_FLOWS (u16)(-1)
++/**
++ * Generate new flow ID; see dpni_set_queue()
++ */
++#define DPNI_NEW_FLOW_ID (u16)(-1)
+
-+struct dpsw_cmd_open {
-+ __le32 dpsw_id;
-+};
++/**
++ * Tx traffic is always released to a buffer pool on transmit, there are no
++ * resources allocated to have the frames confirmed back to the source after
++ * transmission.
++ */
++#define DPNI_OPT_TX_FRM_RELEASE 0x000001
++/**
++ * Disables support for MAC address filtering for addresses other than primary
++ * MAC address. This affects both unicast and multicast. Promiscuous mode can
++ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
++ * is disabled, only traffic matching the primary MAC address will be accepted.
++ */
++#define DPNI_OPT_NO_MAC_FILTER 0x000002
++/**
++ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
++ * traffic class (TC) basis.
++ */
++#define DPNI_OPT_HAS_POLICING 0x000004
++/**
++ * Congestion can be managed in several ways, allowing the buffer pool to
++ * deplete on ingress, taildrop on each queue or use congestion groups for sets
++ * of queues. If set, it configures a single congestion groups across all TCs.
++ * If reset, a congestion group is allocated for each TC. Only relevant if the
++ * DPNI has multiple traffic classes.
++ */
++#define DPNI_OPT_SHARED_CONGESTION 0x000008
++/**
++ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
++ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
++ * variants. Setting this bit on these SoCs will trigger an error.
++ */
++#define DPNI_OPT_HAS_KEY_MASKING 0x000010
++/**
++ * Disables the flow steering table.
++ */
++#define DPNI_OPT_NO_FS 0x000020
+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
++int dpni_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpni_id,
++ u16 *token);
+
-+struct dpsw_cmd_create {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 max_meters_per_if;
-+ /* from LSB: only the first 4 bits */
-+ u8 component_type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ __le16 max_vlans;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 max_fdb_mc_groups;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++int dpni_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+struct dpsw_cmd_destroy {
-+ __le32 dpsw_id;
++/**
++ * struct dpni_pools_cfg - Structure representing buffer pools configuration
++ * @num_dpbp: Number of DPBPs
++ * @pools: Array of buffer pools parameters; The number of valid entries
++ * must match 'num_dpbp' value
++ */
++struct dpni_pools_cfg {
++ u8 num_dpbp;
++ /**
++ * struct pools - Buffer pools parameters
++ * @dpbp_id: DPBP object ID
++ * @priority_mask: priorities served by DPBP
++ * @buffer_size: Buffer size
++ * @backup_pool: Backup pool
++ */
++ struct {
++ u16 dpbp_id;
++ u8 priority_mask;
++ u16 buffer_size;
++ u8 backup_pool;
++ } pools[DPNI_MAX_DPBP];
+};
+
-+#define DPSW_ENABLE_SHIFT 0
-+#define DPSW_ENABLE_SIZE 1
++int dpni_set_pools(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_pools_cfg *cfg);
+
-+struct dpsw_rsp_is_enabled {
-+ /* from LSB: enable:1 */
-+ u8 enabled;
-+};
++int dpni_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+struct dpsw_cmd_set_irq {
-+ /* cmd word 0 */
-+ u8 irq_index;
-+ u8 pad[3];
-+ __le32 irq_val;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+};
++int dpni_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+struct dpsw_cmd_get_irq {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++int dpni_is_enabled(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
+
-+struct dpsw_rsp_get_irq {
-+ /* cmd word 0 */
-+ __le32 irq_val;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 irq_addr;
-+ /* cmd word 2 */
-+ __le32 irq_num;
-+ __le32 irq_type;
-+};
++int dpni_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+struct dpsw_cmd_set_irq_enable {
-+ u8 enable_state;
-+ u8 pad[3];
-+ u8 irq_index;
-+};
++/**
++ * DPNI IRQ Index and Events
++ */
+
-+struct dpsw_cmd_get_irq_enable {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++/**
++ * IRQ index
++ */
++#define DPNI_IRQ_INDEX 0
++/**
++ * IRQ event - indicates a change in link state
++ */
++#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
-+struct dpsw_rsp_get_irq_enable {
-+ u8 enable_state;
-+};
++int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en);
+
-+struct dpsw_cmd_set_irq_mask {
-+ __le32 mask;
-+ u8 irq_index;
-+};
++int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 *en);
+
-+struct dpsw_cmd_get_irq_mask {
-+ __le32 pad;
-+ u8 irq_index;
-+};
++int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask);
+
-+struct dpsw_rsp_get_irq_mask {
-+ __le32 mask;
-+};
++int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *mask);
+
-+struct dpsw_cmd_get_irq_status {
-+ __le32 status;
-+ u8 irq_index;
-+};
++int dpni_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status);
+
-+struct dpsw_rsp_get_irq_status {
-+ __le32 status;
-+};
++int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status);
+
-+struct dpsw_cmd_clear_irq_status {
-+ __le32 status;
-+ u8 irq_index;
++/**
++ * struct dpni_attr - Structure representing DPNI attributes
++ * @options: Any combination of the following options:
++ * DPNI_OPT_TX_FRM_RELEASE
++ * DPNI_OPT_NO_MAC_FILTER
++ * DPNI_OPT_HAS_POLICING
++ * DPNI_OPT_SHARED_CONGESTION
++ * DPNI_OPT_HAS_KEY_MASKING
++ * DPNI_OPT_NO_FS
++ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
++ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
++ * @mac_filter_entries: Number of entries in the MAC address filtering table.
++ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
++ * @qos_entries: Number of entries in the QoS classification table.
++ * @fs_entries: Number of entries in the flow steering table.
++ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
++ * than this when adding QoS entries will result in an error.
++ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
++ * key larger than this when composing the hash + FS key will
++ * result in an error.
++ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
++ * on 6, 5, 5 bits respectively.
++ */
++struct dpni_attr {
++ u32 options;
++ u8 num_queues;
++ u8 num_tcs;
++ u8 mac_filter_entries;
++ u8 vlan_filter_entries;
++ u8 qos_entries;
++ u16 fs_entries;
++ u8 qos_key_size;
++ u8 fs_key_size;
++ u16 wriop_version;
+};
+
-+#define DPSW_COMPONENT_TYPE_SHIFT 0
-+#define DPSW_COMPONENT_TYPE_SIZE 4
++int dpni_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_attr *attr);
+
-+struct dpsw_rsp_get_attr {
-+ /* cmd word 0 */
-+ __le16 num_ifs;
-+ u8 max_fdbs;
-+ u8 num_fdbs;
-+ __le16 max_vlans;
-+ __le16 num_vlans;
-+ /* cmd word 1 */
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le32 dpsw_id;
-+ /* cmd word 2 */
-+ __le16 mem_size;
-+ __le16 max_fdb_mc_groups;
-+ u8 max_meters_per_if;
-+ /* from LSB only the ffirst 4 bits */
-+ u8 component_type;
-+ __le16 pad;
-+ /* cmd word 3 */
-+ __le64 options;
-+};
++/**
++ * DPNI errors
++ */
+
-+struct dpsw_cmd_set_reflection_if {
-+ __le16 if_id;
-+};
++/**
++ * Extract out of frame header error
++ */
++#define DPNI_ERROR_EOFHE 0x00020000
++/**
++ * Frame length error
++ */
++#define DPNI_ERROR_FLE 0x00002000
++/**
++ * Frame physical error
++ */
++#define DPNI_ERROR_FPE 0x00001000
++/**
++ * Parsing header error
++ */
++#define DPNI_ERROR_PHE 0x00000020
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L3CE 0x00000004
++/**
++ * Parser L3 checksum error
++ */
++#define DPNI_ERROR_L4CE 0x00000001
+
-+struct dpsw_cmd_if_set_flooding {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
++/**
++ * enum dpni_error_action - Defines DPNI behavior for errors
++ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
++ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
++ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
++ */
++enum dpni_error_action {
++ DPNI_ERROR_ACTION_DISCARD = 0,
++ DPNI_ERROR_ACTION_CONTINUE = 1,
++ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
-+struct dpsw_cmd_if_set_broadcast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
++/**
++ * struct dpni_error_cfg - Structure representing DPNI errors treatment
++ * @errors: Errors mask; use 'DPNI_ERROR__<X>
++ * @error_action: The desired action for the errors mask
++ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
++ * status (FAS); relevant only for the non-discard action
++ */
++struct dpni_error_cfg {
++ u32 errors;
++ enum dpni_error_action error_action;
++ int set_frame_annotation;
+};
+
-+struct dpsw_cmd_if_set_multicast {
-+ __le16 if_id;
-+ /* from LSB: enable:1 */
-+ u8 enable;
-+};
++int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_error_cfg *cfg);
+
-+#define DPSW_VLAN_ID_SHIFT 0
-+#define DPSW_VLAN_ID_SIZE 12
-+#define DPSW_DEI_SHIFT 12
-+#define DPSW_DEI_SIZE 1
-+#define DPSW_PCP_SHIFT 13
-+#define DPSW_PCP_SIZE 3
++/**
++ * DPNI buffer layout modification options
++ */
+
-+struct dpsw_cmd_if_set_tci {
-+ __le16 if_id;
-+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
-+ __le16 conf;
-+};
++/**
++ * Select to modify the time-stamp setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
++/**
++ * Select to modify the parser-result setting; not applicable for Tx
++ */
++#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
++/**
++ * Select to modify the frame-status setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
++/**
++ * Select to modify the private-data-size setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
++/**
++ * Select to modify the data-alignment setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
++/**
++ * Select to modify the data-head-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
++/**
++ * Select to modify the data-tail-room setting
++ */
++#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
-+struct dpsw_cmd_if_get_tci {
-+ __le16 if_id;
++/**
++ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
++ * @options: Flags representing the suggested modifications to the buffer
++ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
++ * @pass_timestamp: Pass timestamp value
++ * @pass_parser_result: Pass parser results
++ * @pass_frame_status: Pass frame status
++ * @private_data_size: Size kept for private data (in bytes)
++ * @data_align: Data alignment
++ * @data_head_room: Data head room
++ * @data_tail_room: Data tail room
++ */
++struct dpni_buffer_layout {
++ u32 options;
++ int pass_timestamp;
++ int pass_parser_result;
++ int pass_frame_status;
++ u16 private_data_size;
++ u16 data_align;
++ u16 data_head_room;
++ u16 data_tail_room;
+};
+
-+struct dpsw_rsp_if_get_tci {
-+ __le16 pad;
-+ __le16 vlan_id;
-+ u8 dei;
-+ u8 pcp;
++/**
++ * enum dpni_queue_type - Identifies a type of queue targeted by the command
++ * @DPNI_QUEUE_RX: Rx queue
++ * @DPNI_QUEUE_TX: Tx queue
++ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
++ * @DPNI_QUEUE_RX_ERR: Rx error queue
++ */enum dpni_queue_type {
++ DPNI_QUEUE_RX,
++ DPNI_QUEUE_TX,
++ DPNI_QUEUE_TX_CONFIRM,
++ DPNI_QUEUE_RX_ERR,
+};
+
-+#define DPSW_STATE_SHIFT 0
-+#define DPSW_STATE_SIZE 4
-+
-+struct dpsw_cmd_if_set_stp {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only the first LSB 4 bits */
-+ u8 state;
-+};
++int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ struct dpni_buffer_layout *layout);
+
-+#define DPSW_FRAME_TYPE_SHIFT 0
-+#define DPSW_FRAME_TYPE_SIZE 4
-+#define DPSW_UNACCEPTED_ACT_SHIFT 4
-+#define DPSW_UNACCEPTED_ACT_SIZE 4
++int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ const struct dpni_buffer_layout *layout);
+
-+struct dpsw_cmd_if_set_accepted_frames {
-+ __le16 if_id;
-+ /* from LSB: type:4 unaccepted_act:4 */
-+ u8 unaccepted;
++/**
++ * enum dpni_offload - Identifies a type of offload targeted by the command
++ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
++ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
++ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
++ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
++ */
++enum dpni_offload {
++ DPNI_OFF_RX_L3_CSUM,
++ DPNI_OFF_RX_L4_CSUM,
++ DPNI_OFF_TX_L3_CSUM,
++ DPNI_OFF_TX_L4_CSUM,
+};
+
-+#define DPSW_ACCEPT_ALL_SHIFT 0
-+#define DPSW_ACCEPT_ALL_SIZE 1
++int dpni_set_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 config);
+
-+struct dpsw_cmd_if_set_accept_all_vlan {
-+ __le16 if_id;
-+ /* only the least significant bit */
-+ u8 accept_all;
-+};
++int dpni_get_offload(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_offload type,
++ u32 *config);
+
-+#define DPSW_COUNTER_TYPE_SHIFT 0
-+#define DPSW_COUNTER_TYPE_SIZE 5
++int dpni_get_qdid(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u16 *qdid);
+
-+struct dpsw_cmd_if_get_counter {
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+};
++int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *data_offset);
+
-+struct dpsw_rsp_if_get_counter {
-+ __le64 pad;
-+ __le64 counter;
-+};
++#define DPNI_STATISTICS_CNT 7
+
-+struct dpsw_cmd_if_set_counter {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ /* from LSB: type:5 */
-+ u8 type;
-+ /* cmd word 1 */
-+ __le64 counter;
++union dpni_statistics {
++ /**
++ * struct page_0 - Page_0 statistics structure
++ * @ingress_all_frames: Ingress frame count
++ * @ingress_all_bytes: Ingress byte count
++ * @ingress_multicast_frames: Ingress multicast frame count
++ * @ingress_multicast_bytes: Ingress multicast byte count
++ * @ingress_broadcast_frames: Ingress broadcast frame count
++ * @ingress_broadcast_bytes: Ingress broadcast byte count
++ */
++ struct {
++ u64 ingress_all_frames;
++ u64 ingress_all_bytes;
++ u64 ingress_multicast_frames;
++ u64 ingress_multicast_bytes;
++ u64 ingress_broadcast_frames;
++ u64 ingress_broadcast_bytes;
++ } page_0;
++ /**
++ * struct page_1 - Page_1 statistics structure
++ * @egress_all_frames: Egress frame count
++ * @egress_all_bytes: Egress byte count
++ * @egress_multicast_frames: Egress multicast frame count
++ * @egress_multicast_bytes: Egress multicast byte count
++ * @egress_broadcast_frames: Egress broadcast frame count
++ * @egress_broadcast_bytes: Egress broadcast byte count
++ */
++ struct {
++ u64 egress_all_frames;
++ u64 egress_all_bytes;
++ u64 egress_multicast_frames;
++ u64 egress_multicast_bytes;
++ u64 egress_broadcast_frames;
++ u64 egress_broadcast_bytes;
++ } page_1;
++ /**
++ * struct page_2 - Page_2 statistics structure
++ * @ingress_filtered_frames: Ingress filtered frame count
++ * @ingress_discarded_frames: Ingress discarded frame count
++ * @ingress_nobuffer_discards: Ingress discarded frame count
++ * due to lack of buffers
++ * @egress_discarded_frames: Egress discarded frame count
++ * @egress_confirmed_frames: Egress confirmed frame count
++ */
++ struct {
++ u64 ingress_filtered_frames;
++ u64 ingress_discarded_frames;
++ u64 ingress_nobuffer_discards;
++ u64 egress_discarded_frames;
++ u64 egress_confirmed_frames;
++ } page_2;
++ /**
++ * struct page_3 - Page_3 statistics structure with values for the
++ * selected TC
++ * @ceetm_dequeue_bytes: Cumulative count of the number of bytes
++ * dequeued
++ * @ceetm_dequeue_frames: Cumulative count of the number of frames
++ * dequeued
++ * @ceetm_reject_bytes: Cumulative count of the number of bytes in all
++ * frames whose enqueue was rejected
++ * @ceetm_reject_frames: Cumulative count of all frame enqueues
++ * rejected
++ */
++ struct {
++ u64 ceetm_dequeue_bytes;
++ u64 ceetm_dequeue_frames;
++ u64 ceetm_reject_bytes;
++ u64 ceetm_reject_frames;
++ } page_3;
++ /**
++ * struct raw - raw statistics structure
++ */
++ struct {
++ u64 counter[DPNI_STATISTICS_CNT];
++ } raw;
+};
+
-+#define DPSW_PRIORITY_SELECTOR_SHIFT 0
-+#define DPSW_PRIORITY_SELECTOR_SIZE 3
-+#define DPSW_SCHED_MODE_SHIFT 0
-+#define DPSW_SCHED_MODE_SIZE 4
++int dpni_get_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 page,
++ u8 param,
++ union dpni_statistics *stat);
+
-+struct dpsw_cmd_if_set_tx_selection {
-+ __le16 if_id;
-+ /* from LSB: priority_selector:3 */
-+ u8 priority_selector;
-+ u8 pad[5];
-+ u8 tc_id[8];
-+
-+ struct dpsw_tc_sched {
-+ __le16 delta_bandwidth;
-+ u8 mode;
-+ u8 pad;
-+ } tc_sched[8];
-+};
++int dpni_reset_statistics(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+#define DPSW_FILTER_SHIFT 0
-+#define DPSW_FILTER_SIZE 2
++/**
++ * Enable auto-negotiation
++ */
++#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
++/**
++ * Enable half-duplex mode
++ */
++#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
++/**
++ * Enable pause frames
++ */
++#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
++/**
++ * Enable a-symmetric pause frames
++ */
++#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
++/**
++ * Enable priority flow control pause frames
++ */
++#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
+
-+struct dpsw_cmd_if_reflection {
-+ __le16 if_id;
-+ __le16 vlan_id;
-+ /* only 2 bits from the LSB */
-+ u8 filter;
++/**
++ * struct - Structure representing DPNI link configuration
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ */
++struct dpni_link_cfg {
++ u32 rate;
++ u64 options;
+};
+
-+#define DPSW_MODE_SHIFT 0
-+#define DPSW_MODE_SIZE 4
-+#define DPSW_UNITS_SHIFT 4
-+#define DPSW_UNITS_SIZE 4
-+
-+struct dpsw_cmd_if_set_flooding_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
-+};
++int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_link_cfg *cfg);
+
-+struct dpsw_cmd_if_set_metering {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 tc_id;
-+ /* from LSB: mode:4 units:4 */
-+ u8 mode_units;
-+ __le32 cir;
-+ /* cmd word 1 */
-+ __le32 eir;
-+ __le32 cbs;
-+ /* cmd word 2 */
-+ __le32 ebs;
++/**
++ * struct dpni_link_state - Structure representing DPNI link state
++ * @rate: Rate
++ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
++ * @up: Link state; '0' for down, '1' for up
++ */
++struct dpni_link_state {
++ u32 rate;
++ u64 options;
++ int up;
+};
+
-+#define DPSW_EARLY_DROP_MODE_SHIFT 0
-+#define DPSW_EARLY_DROP_MODE_SIZE 2
-+#define DPSW_EARLY_DROP_UNIT_SHIFT 2
-+#define DPSW_EARLY_DROP_UNIT_SIZE 2
++int dpni_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpni_link_state *state);
+
-+struct dpsw_prep_early_drop {
-+ /* from LSB: mode:2 units:2 */
-+ u8 conf;
-+ u8 pad0[3];
-+ __le32 tail_drop_threshold;
-+ u8 green_drop_probability;
-+ u8 pad1[7];
-+ __le64 green_max_threshold;
-+ __le64 green_min_threshold;
-+ __le64 pad2;
-+ u8 yellow_drop_probability;
-+ u8 pad3[7];
-+ __le64 yellow_max_threshold;
-+ __le64 yellow_min_threshold;
++/**
++ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
++ * @rate_limit: rate in Mbps
++ * @max_burst_size: burst size in bytes (up to 64KB)
++ */
++struct dpni_tx_shaping_cfg {
++ u32 rate_limit;
++ u16 max_burst_size;
+};
+
-+struct dpsw_cmd_if_set_early_drop {
-+ /* cmd word 0 */
-+ u8 pad0;
-+ u8 tc_id;
-+ __le16 if_id;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 early_drop_iova;
-+};
++int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_shaping_cfg *tx_cr_shaper,
++ const struct dpni_tx_shaping_cfg *tx_er_shaper,
++ int coupled);
+
-+struct dpsw_cmd_custom_tpid {
-+ __le16 pad;
-+ __le16 tpid;
-+};
++int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 max_frame_length);
+
-+struct dpsw_cmd_if {
-+ __le16 if_id;
-+};
++int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 *max_frame_length);
+
-+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
-+#define DPSW_ADMIT_UNTAGGED_SIZE 4
-+#define DPSW_ENABLED_SHIFT 5
-+#define DPSW_ENABLED_SIZE 1
-+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
-+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
++int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
+
-+struct dpsw_rsp_if_get_attr {
-+ /* cmd word 0 */
-+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
-+ u8 conf;
-+ u8 pad1;
-+ u8 num_tcs;
-+ u8 pad2;
-+ __le16 qdid;
-+ /* cmd word 1 */
-+ __le32 options;
-+ __le32 pad3;
-+ /* cmd word 2 */
-+ __le32 rate;
-+};
++int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
+
-+struct dpsw_cmd_if_set_max_frame_length {
-+ __le16 if_id;
-+ __le16 frame_length;
-+};
++int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int en);
+
-+struct dpsw_cmd_if_get_max_frame_length {
-+ __le16 if_id;
-+};
++int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int *en);
+
-+struct dpsw_rsp_if_get_max_frame_length {
-+ __le16 pad;
-+ __le16 frame_length;
-+};
++int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
+
-+struct dpsw_cmd_if_set_link_cfg {
-+ /* cmd word 0 */
-+ __le16 if_id;
-+ u8 pad[6];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad1;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 mac_addr[6]);
+
-+struct dpsw_cmd_if_get_link_state {
-+ __le16 if_id;
-+};
++int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cm_flags,
++ u16 token,
++ u8 mac_addr[6]);
+
-+#define DPSW_UP_SHIFT 0
-+#define DPSW_UP_SIZE 1
++int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
+
-+struct dpsw_rsp_if_get_link_state {
-+ /* cmd word 0 */
-+ __le32 pad0;
-+ u8 up;
-+ u8 pad1[3];
-+ /* cmd word 1 */
-+ __le32 rate;
-+ __le32 pad2;
-+ /* cmd word 2 */
-+ __le64 options;
-+};
++int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const u8 mac_addr[6]);
+
-+struct dpsw_vlan_add {
-+ __le16 fdb_id;
-+ __le16 vlan_id;
-+};
++int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ int unicast,
++ int multicast);
+
-+struct dpsw_cmd_vlan_manage_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 vlan_id;
-+ __le32 pad1;
-+ /* cmd word 1 */
-+ __le64 if_id[4];
++/**
++ * enum dpni_dist_mode - DPNI distribution mode
++ * @DPNI_DIST_MODE_NONE: No distribution
++ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
++ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
++ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
++ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
++ */
++enum dpni_dist_mode {
++ DPNI_DIST_MODE_NONE = 0,
++ DPNI_DIST_MODE_HASH = 1,
++ DPNI_DIST_MODE_FS = 2
+};
+
-+struct dpsw_cmd_vlan_remove {
-+ __le16 pad;
-+ __le16 vlan_id;
++/**
++ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
++ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
++ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
++ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
++ */
++enum dpni_fs_miss_action {
++ DPNI_FS_MISS_DROP = 0,
++ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
++ DPNI_FS_MISS_HASH = 2
+};
+
-+struct dpsw_cmd_vlan_get_attr {
-+ __le16 vlan_id;
++/**
++ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
++ * @miss_action: Miss action selection
++ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
++ */
++struct dpni_fs_tbl_cfg {
++ enum dpni_fs_miss_action miss_action;
++ u16 default_flow_id;
+};
+
-+struct dpsw_rsp_vlan_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ __le16 num_untagged_ifs;
-+ __le16 num_flooding_ifs;
-+};
++int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
++ u8 *key_cfg_buf);
+
-+struct dpsw_cmd_vlan_get_if {
-+ __le16 vlan_id;
++/**
++ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * key extractions to be used as the QoS criteria by calling
++ * dpkg_prepare_key_cfg()
++ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
++ * '0' to use the 'default_tc' in such cases
++ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
++ */
++struct dpni_qos_tbl_cfg {
++ u64 key_cfg_iova;
++ int discard_on_miss;
++ u8 default_tc;
+};
+
-+struct dpsw_rsp_vlan_get_if {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++int dpni_set_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_qos_tbl_cfg *cfg);
+
-+struct dpsw_cmd_vlan_get_if_untagged {
-+ __le16 vlan_id;
++/**
++ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode
++ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority
++ * @DPNI_TX_SCHED_WEIGHTED_A: weighted based scheduling in group A
++ * @DPNI_TX_SCHED_WEIGHTED_B: weighted based scheduling in group B
++ */
++enum dpni_tx_schedule_mode {
++ DPNI_TX_SCHED_STRICT_PRIORITY = 0,
++ DPNI_TX_SCHED_WEIGHTED_A,
++ DPNI_TX_SCHED_WEIGHTED_B,
+};
+
-+struct dpsw_rsp_vlan_get_if_untagged {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
++/**
++ * struct dpni_tx_schedule_cfg - Structure representing Tx scheduling conf
++ * @mode: Scheduling mode
++ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000;
++ * not applicable for 'strict-priority' mode;
++ */
++struct dpni_tx_schedule_cfg {
++ enum dpni_tx_schedule_mode mode;
++ u16 delta_bandwidth;
+};
+
-+struct dpsw_cmd_vlan_get_if_flooding {
-+ __le16 vlan_id;
++/**
++ * struct dpni_tx_priorities_cfg - Structure representing transmission
++ * priorities for DPNI TCs
++ * @tc_sched: An array of traffic-classes
++ * @prio_group_A: Priority of group A
++ * @prio_group_B: Priority of group B
++ * @separate_groups: Treat A and B groups as separate
++ * @ceetm_ch_idx: ceetm channel index to apply the changes
++ */
++struct dpni_tx_priorities_cfg {
++ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
++ u8 prio_group_A;
++ u8 prio_group_B;
++ u8 separate_groups;
+};
+
-+struct dpsw_rsp_vlan_get_if_flooding {
-+ /* cmd word 0 */
-+ __le16 pad0;
-+ __le16 num_ifs;
-+ u8 pad1[4];
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_tx_priorities_cfg *cfg);
+
-+struct dpsw_cmd_fdb_add {
-+ __le32 pad;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_entries;
++/**
++ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
++ * @dist_size: Set the distribution size;
++ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
++ * 112,128,192,224,256,384,448,512,768,896,1024
++ * @dist_mode: Distribution mode
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ * dpni_prepare_key_cfg() relevant only when
++ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
++ * @fs_cfg: Flow Steering table configuration; only relevant if
++ * 'dist_mode = DPNI_DIST_MODE_FS'
++ */
++struct dpni_rx_tc_dist_cfg {
++ u16 dist_size;
++ enum dpni_dist_mode dist_mode;
++ u64 key_cfg_iova;
++ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
-+struct dpsw_rsp_fdb_add {
-+ __le16 fdb_id;
-+};
++int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rx_tc_dist_cfg *cfg);
+
-+struct dpsw_cmd_fdb_remove {
-+ __le16 fdb_id;
++/**
++ * enum dpni_dest - DPNI destination types
++ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
++ * does not generate FQDAN notifications; user is expected to
++ * dequeue from the queue based on polling or other user-defined
++ * method
++ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
++ * notifications to the specified DPIO; user is expected to dequeue
++ * from the queue only after notification is received
++ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
++ * FQDAN notifications, but is connected to the specified DPCON
++ * object; user is expected to dequeue from the DPCON channel
++ */
++enum dpni_dest {
++ DPNI_DEST_NONE = 0,
++ DPNI_DEST_DPIO = 1,
++ DPNI_DEST_DPCON = 2
+};
+
-+#define DPSW_ENTRY_TYPE_SHIFT 0
-+#define DPSW_ENTRY_TYPE_SIZE 4
-+
-+struct dpsw_cmd_fdb_add_unicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ u8 if_egress;
-+ u8 pad;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
++/**
++ * struct dpni_queue - Queue structure
++ * @user_context: User data, presented to the user along with any frames from
++ * this queue. Not relevant for Tx queues.
++ */
++struct dpni_queue {
++/**
++ * struct destination - Destination structure
++ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
++ * Identifies either a DPIO or a DPCON object. Not relevant for
++ * Tx queues.
++ * @type: May be one of the following:
++ * 0 - No destination, queue can be manually queried, but will not
++ * push traffic or notifications to a DPIO;
++ * 1 - The destination is a DPIO. When traffic becomes available in
++ * the queue a FQDAN (FQ data available notification) will be
++ * generated to selected DPIO;
++ * 2 - The destination is a DPCON. The queue is associated with a
++ * DPCON object for the purpose of scheduling between multiple
++ * queues. The DPCON may be independently configured to
++ * generate notifications. Not relevant for Tx queues.
++ * @hold_active: Hold active, maintains a queue scheduled for longer
++ * in a DPIO during dequeue to reduce spread of traffic.
++ * Only relevant if queues are not affined to a single DPIO.
++ */
++ struct {
++ u16 id;
++ enum dpni_dest type;
++ char hold_active;
++ u8 priority;
++ } destination;
++ u64 user_context;
++ struct {
++ u64 value;
++ char stash_control;
++ } flc;
+};
+
-+struct dpsw_cmd_fdb_get_unicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
++/**
++ * struct dpni_queue_id - Queue identification, used for enqueue commands
++ * or queue control
++ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
++ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
++ * for Tx queues.
++ */
++struct dpni_queue_id {
++ u32 fqid;
++ u16 qdbin;
+};
+
-+struct dpsw_rsp_fdb_get_unicast {
-+ __le64 pad;
-+ __le16 if_egress;
-+ /* only first 4 bits from LSB */
-+ u8 type;
-+};
++/**
++ * Set User Context
++ */
++#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
++#define DPNI_QUEUE_OPT_DEST 0x00000002
++#define DPNI_QUEUE_OPT_FLC 0x00000004
++#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
-+struct dpsw_cmd_fdb_remove_unicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
-+ /* cmd word 1 */
-+ __le16 if_egress;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+};
++int dpni_set_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ u8 options,
++ const struct dpni_queue *queue);
+
-+struct dpsw_cmd_fdb_add_multicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2 */
-+ __le64 if_id[4];
-+};
++int dpni_get_queue(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc,
++ u8 index,
++ struct dpni_queue *queue,
++ struct dpni_queue_id *qid);
+
-+struct dpsw_cmd_fdb_get_multicast {
-+ __le16 fdb_id;
-+ u8 mac_addr[6];
++/**
++ * enum dpni_congestion_unit - DPNI congestion units
++ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
++ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
++ */
++enum dpni_congestion_unit {
++ DPNI_CONGESTION_UNIT_BYTES = 0,
++ DPNI_CONGESTION_UNIT_FRAMES
+};
+
-+struct dpsw_rsp_fdb_get_multicast {
-+ /* cmd word 0 */
-+ __le64 pad0;
-+ /* cmd word 1 */
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad1[5];
-+ /* cmd word 2 */
-+ __le64 if_id[4];
++/**
++ * enum dpni_congestion_point - Structure representing congestion point
++ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
++ * QUEUE_INDEX
++ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
++ * define the DPNI this can be either per TC (default) or per
++ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
++ * QUEUE_INDEX is ignored if this type is used.
++ */
++enum dpni_congestion_point {
++ DPNI_CP_QUEUE,
++ DPNI_CP_GROUP,
+};
+
-+struct dpsw_cmd_fdb_remove_multicast {
-+ /* cmd word 0 */
-+ __le16 fdb_id;
-+ __le16 num_ifs;
-+ /* only the first 4 bits from LSB */
-+ u8 type;
-+ u8 pad[3];
-+ /* cmd word 1 */
-+ u8 mac_addr[6];
-+ __le16 pad2;
-+ /* cmd word 2 */
-+ __le64 if_id[4];
++/**
++ * struct dpni_dest_cfg - Structure representing DPNI destination parameters
++ * @dest_type: Destination type
++ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
++ * @priority: Priority selection within the DPIO or DPCON channel; valid
++ * values are 0-1 or 0-7, depending on the number of priorities
++ * in that channel; not relevant for 'DPNI_DEST_NONE' option
++ */
++struct dpni_dest_cfg {
++ enum dpni_dest dest_type;
++ int dest_id;
++ u8 priority;
+};
+
-+#define DPSW_LEARNING_MODE_SHIFT 0
-+#define DPSW_LEARNING_MODE_SIZE 4
++/* DPNI congestion options */
+
-+struct dpsw_cmd_fdb_set_learning_mode {
-+ __le16 fdb_id;
-+ /* only the first 4 bits from LSB */
-+ u8 mode;
-+};
++/**
++ * CSCN message is written to message_iova once entering a
++ * congestion state (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001
++/**
++ * CSCN message is written to message_iova once exiting a
++ * congestion state (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002
++/**
++ * CSCN write will attempt to allocate into a cache (coherent write);
++ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected
++ */
++#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once entering a congestion state
++ * (see 'threshold_entry')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to
++ * DPIO/DPCON's WQ channel once exiting a congestion state
++ * (see 'threshold_exit')
++ */
++#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010
++/**
++ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the
++ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled)
++ */
++#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020
++/**
++ * This congestion will trigger flow control or priority flow control.
++ * This will have effect only if flow control is enabled with
++ * dpni_set_link_cfg().
++ */
++#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
+
-+struct dpsw_cmd_fdb_get_attr {
-+ __le16 fdb_id;
-+};
++/**
++ * struct dpni_congestion_notification_cfg - congestion notification
++ * configuration
++ * @units: Units type
++ * @threshold_entry: Above this threshold we enter a congestion state.
++ * set it to '0' to disable it
++ * @threshold_exit: Below this threshold we exit the congestion state.
++ * @message_ctx: The context that will be part of the CSCN message
++ * @message_iova: I/O virtual address (must be in DMA-able memory),
++ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
++ * is contained in 'options'
++ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
++ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
++ */
+
-+struct dpsw_rsp_fdb_get_attr {
-+ /* cmd word 0 */
-+ __le16 pad;
-+ __le16 max_fdb_entries;
-+ __le16 fdb_aging_time;
-+ __le16 num_fdb_mc_groups;
-+ /* cmd word 1 */
-+ __le16 max_fdb_mc_groups;
-+ /* only the first 4 bits from LSB */
-+ u8 learning_mode;
++struct dpni_congestion_notification_cfg {
++ enum dpni_congestion_unit units;
++ u32 threshold_entry;
++ u32 threshold_exit;
++ u64 message_ctx;
++ u64 message_iova;
++ struct dpni_dest_cfg dest_cfg;
++ u16 notification_mode;
+};
+
-+struct dpsw_cmd_acl_add {
-+ __le16 pad;
-+ __le16 max_entries;
-+};
++/** Compose TC parameter for function dpni_set_congestion_notification()
++ * and dpni_get_congestion_notification().
++ */
++#define DPNI_BUILD_CH_TC(ceetm_ch_idx, tc) \
++ ((((ceetm_ch_idx) & 0x0F) << 4) | ((tc) & 0x0F))
+
-+struct dpsw_rsp_acl_add {
-+ __le16 acl_id;
-+};
++int dpni_set_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ const struct dpni_congestion_notification_cfg *cfg);
++
++int dpni_get_congestion_notification(
++ struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_queue_type qtype,
++ u8 tc_id,
++ struct dpni_congestion_notification_cfg *cfg);
+
-+struct dpsw_cmd_acl_remove {
-+ __le16 acl_id;
++/**
++ * struct dpni_taildrop - Structure representing the taildrop
++ * @enable: Indicates whether the taildrop is active or not.
++ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
++ * byte units, this field is ignored and assumed = 0 if
++ * CONGESTION_POINT is 0.
++ * @threshold: Threshold value, in units identified by UNITS field. Value 0
++ * cannot be used as a valid taildrop threshold, THRESHOLD must
++ * be > 0 if the taildrop is enabled.
++ */
++struct dpni_taildrop {
++ char enable;
++ enum dpni_congestion_unit units;
++ u32 threshold;
+};
+
-+struct dpsw_prep_acl_entry {
-+ u8 match_l2_dest_mac[6];
-+ __le16 match_l2_tpid;
++int dpni_set_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
+
-+ u8 match_l2_source_mac[6];
-+ __le16 match_l2_vlan_id;
++int dpni_get_taildrop(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ enum dpni_congestion_point cg_point,
++ enum dpni_queue_type q_type,
++ u8 tc,
++ u8 q_index,
++ struct dpni_taildrop *taildrop);
+
-+ __le32 match_l3_dest_ip;
-+ __le32 match_l3_source_ip;
++/**
++ * struct dpni_rule_cfg - Rule configuration for table lookup
++ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
++ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
++ * @key_size: key and mask size (in bytes)
++ */
++struct dpni_rule_cfg {
++ u64 key_iova;
++ u64 mask_iova;
++ u8 key_size;
++};
+
-+ __le16 match_l4_dest_port;
-+ __le16 match_l4_source_port;
-+ __le16 match_l2_ether_type;
-+ u8 match_l2_pcp_dei;
-+ u8 match_l3_dscp;
++int dpni_get_api_version(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 *major_ver,
++ u16 *minor_ver);
+
-+ u8 mask_l2_dest_mac[6];
-+ __le16 mask_l2_tpid;
++int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg,
++ u8 tc_id,
++ u16 index);
+
-+ u8 mask_l2_source_mac[6];
-+ __le16 mask_l2_vlan_id;
++int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rule_cfg *cfg);
+
-+ __le32 mask_l3_dest_ip;
-+ __le32 mask_l3_source_ip;
++int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token);
+
-+ __le16 mask_l4_dest_port;
-+ __le16 mask_l4_source_port;
-+ __le16 mask_l2_ether_type;
-+ u8 mask_l2_pcp_dei;
-+ u8 mask_l3_dscp;
++/**
++ * Discard matching traffic. If set, this takes precedence over any other
++ * configuration and matching traffic is always discarded.
++ */
++ #define DPNI_FS_OPT_DISCARD 0x1
+
-+ u8 match_l3_protocol;
-+ u8 mask_l3_protocol;
-+};
++/**
++ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
++ * override the FLC value set per queue.
++ * For more details check the Frame Descriptor section in the hardware
++ * documentation.
++ */
++#define DPNI_FS_OPT_SET_FLC 0x2
+
-+#define DPSW_RESULT_ACTION_SHIFT 0
-+#define DPSW_RESULT_ACTION_SIZE 4
++/*
++ * Indicates whether the 6 lowest significant bits of FLC are used for stash
++ * control. If set, the 6 least significant bits in value are interpreted as
++ * follows:
++ * - bits 0-1: indicates the number of 64 byte units of context that are
++ * stashed. FLC value is interpreted as a memory address in this case,
++ * excluding the 6 LS bits.
++ * - bits 2-3: indicates the number of 64 byte units of frame annotation
++ * to be stashed. Annotation is placed at FD[ADDR].
++ * - bits 4-5: indicates the number of 64 byte units of frame data to be
++ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
++ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
++ */
++#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
-+struct dpsw_cmd_acl_entry {
-+ __le16 acl_id;
-+ __le16 result_if_id;
-+ __le32 precedence;
-+ /* from LSB only the first 4 bits */
-+ u8 result_action;
-+ u8 pad[7];
-+ __le64 pad2[4];
-+ __le64 key_iova;
++/**
++ * struct dpni_fs_action_cfg - Action configuration for table look-up
++ * @flc: FLC value for traffic matching this rule. Please check the
++ * Frame Descriptor section in the hardware documentation for
++ * more information.
++ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
++ * values are in range 0 to num_queue-1.
++ * @options: Any combination of DPNI_FS_OPT_ values.
++ */
++struct dpni_fs_action_cfg {
++ u64 flc;
++ u16 flow_id;
++ u16 options;
+};
+
-+struct dpsw_cmd_acl_if {
-+ /* cmd word 0 */
-+ __le16 acl_id;
-+ __le16 num_ifs;
-+ __le32 pad;
-+ /* cmd word 1 */
-+ __le64 if_id[4];
-+};
++int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ u16 index,
++ const struct dpni_rule_cfg *cfg,
++ const struct dpni_fs_action_cfg *action);
+
-+struct dpsw_cmd_acl_get_attr {
-+ __le16 acl_id;
-+};
++int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 tc_id,
++ const struct dpni_rule_cfg *cfg);
+
-+struct dpsw_rsp_acl_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le16 max_entries;
-+ __le16 num_entries;
-+ __le16 num_ifs;
-+};
++/**
++ * When used for queue_idx in function dpni_set_rx_dist_default_queue
++ * will signal to dpni to drop all unclassified frames
++ */
++#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
-+struct dpsw_rsp_ctrl_if_get_attr {
-+ /* cmd word 0 */
-+ __le64 pad;
-+ /* cmd word 1 */
-+ __le32 rx_fqid;
-+ __le32 rx_err_fqid;
-+ /* cmd word 2 */
-+ __le32 tx_err_conf_fqid;
++/**
++ * struct dpni_rx_dist_cfg - distribution configuration
++ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
++ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
++ * 512,768,896,1024
++ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
++ * the extractions to be used for the distribution key by calling
++ * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
++ * it can be '0'
++ * @enable: enable/disable the distribution.
++ * @tc: TC id for which distribution is set
++ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
++ * hash is disabled it will be put into this queue id; use
++ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
++ * used only when flow steering distribution is enabled and hash
++ * distribution is disabled
++ */
++struct dpni_rx_dist_cfg {
++ u16 dist_size;
++ u64 key_cfg_iova;
++ u8 enable;
++ u8 tc;
++ u16 fs_miss_flow_id;
+};
+
-+struct dpsw_cmd_ctrl_if_set_pools {
-+ u8 num_dpbp;
-+ /* from LSB: POOL0_BACKUP_POOL:1 ... POOL7_BACKUP_POOL */
-+ u8 backup_pool;
-+ __le16 pad;
-+ __le32 dpbp_id[8];
-+ __le16 buffer_size[8];
-+};
++int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
+
-+struct dpsw_rsp_get_api_version {
-+ __le16 version_major;
-+ __le16 version_minor;
-+};
++int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ const struct dpni_rx_dist_cfg *cfg);
+
-+#endif /* __FSL_DPSW_CMD_H */
++#endif /* __FSL_DPNI_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
-@@ -0,0 +1,2762 @@
++++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
+@@ -0,0 +1,480 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
++#ifndef __FSL_NET_H
++#define __FSL_NET_H
++
++#define LAST_HDR_INDEX 0xFFFFFFFF
++
++/*****************************************************************************/
++/* Protocol fields */
++/*****************************************************************************/
+
-+static void build_if_id_bitmap(__le64 *bmap,
-+ const u16 *id,
-+ const u16 num_ifs) {
-+ int i;
++/************************* Ethernet fields *********************************/
++#define NH_FLD_ETH_DA (1)
++#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
++#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
++#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
++#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
++#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
++#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
+
-+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++)
-+ bmap[id[i] / 64] = dpsw_set_bit(bmap[id[i] / 64],
-+ (id[i] % 64),
-+ 1);
-+}
++#define NH_FLD_ETH_ADDR_SIZE 6
+
-+static void read_if_id_bitmap(u16 *if_id,
-+ u16 *num_ifs,
-+ __le64 *bmap) {
-+ int bitmap[DPSW_MAX_IF] = { 0 };
-+ int i, j = 0;
-+ int count = 0;
++/*************************** VLAN fields ***********************************/
++#define NH_FLD_VLAN_VPRI (1)
++#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
++#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
++#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
++#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
++#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
+
-+ for (i = 0; i < DPSW_MAX_IF; i++) {
-+ bitmap[i] = dpsw_get_bit(le64_to_cpu(bmap[i / 64]),
-+ i % 64);
-+ count += bitmap[i];
-+ }
++#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
++ NH_FLD_VLAN_CFI | \
++ NH_FLD_VLAN_VID)
+
-+ *num_ifs = (u16)count;
++/************************ IP (generic) fields ******************************/
++#define NH_FLD_IP_VER (1)
++#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
++#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
++#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
++#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
++#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
++#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
++#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
++#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
+
-+ for (i = 0; (i < DPSW_MAX_IF) && (j < count); i++) {
-+ if (bitmap[i]) {
-+ if_id[j] = (u16)i;
-+ j++;
-+ }
-+ }
-+}
++#define NH_FLD_IP_PROTO_SIZE 1
+
-+/**
-+ * dpsw_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpsw_id: DPSW unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpsw_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_open(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ int dpsw_id,
-+ u16 *token)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_open *cmd_params;
-+ int err;
++/***************************** IPV4 fields *********************************/
++#define NH_FLD_IPV4_VER (1)
++#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
++#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
++#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
++#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
++#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
++#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
++#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
++#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
++#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
++#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
++#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
++#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
++#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
++#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
++#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
-+ cmd_flags,
-+ 0);
-+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
-+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
++#define NH_FLD_IPV4_ADDR_SIZE 4
++#define NH_FLD_IPV4_PROTO_SIZE 1
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/***************************** IPV6 fields *********************************/
++#define NH_FLD_IPV6_VER (1)
++#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
++#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
++#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
++#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
++#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
++#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
++#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
++#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
-+ /* retrieve response parameters */
-+ *token = mc_cmd_hdr_read_token(&cmd);
++#define NH_FLD_IPV6_ADDR_SIZE 16
++#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
-+ return 0;
-+}
++/***************************** ICMP fields *********************************/
++#define NH_FLD_ICMP_TYPE (1)
++#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
++#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
++#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
++#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
++#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
-+/**
-+ * dpsw_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_close(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define NH_FLD_ICMP_CODE_SIZE 1
++#define NH_FLD_ICMP_TYPE_SIZE 1
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
-+ cmd_flags,
-+ token);
++/***************************** IGMP fields *********************************/
++#define NH_FLD_IGMP_VERSION (1)
++#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
++#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
++#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
++#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/***************************** TCP fields **********************************/
++#define NH_FLD_TCP_PORT_SRC (1)
++#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
++#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
++#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
++#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
++#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
++#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
++#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
++#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
++#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
++#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
++#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
-+/**
-+ * dpsw_enable() - Enable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++#define NH_FLD_TCP_PORT_SIZE 2
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
-+ cmd_flags,
-+ token);
++/***************************** UDP fields **********************************/
++#define NH_FLD_UDP_PORT_SRC (1)
++#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
++#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
++#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
++#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define NH_FLD_UDP_PORT_SIZE 2
+
-+/**
-+ * dpsw_disable() - Disable DPSW functionality
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++/*************************** UDP-lite fields *******************************/
++#define NH_FLD_UDP_LITE_PORT_SRC (1)
++#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
++#define NH_FLD_UDP_LITE_ALL_FIELDS \
++ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
-+ cmd_flags,
-+ token);
++#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/*************************** UDP-encap-ESP fields **************************/
++#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
++#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
++#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
++#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
++#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
++#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
++#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
++ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
-+/**
-+ * dpsw_is_enabled() - Check if the DPSW is enabled
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @en: Returns '1' if object is enabled; '0' otherwise
-+ *
-+ * Return: '0' on Success; Error code otherwise
-+ */
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_is_enabled *cmd_rsp;
-+ int err;
++#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
++#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IS_ENABLED, cmd_flags,
-+ token);
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_PORT_SRC (1)
++#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
++#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
++#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
++#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define NH_FLD_SCTP_PORT_SIZE 2
+
-+ /* retrieve response parameters */
-+ cmd_rsp = (struct dpsw_rsp_is_enabled *)cmd.params;
-+ *en = dpsw_get_field(cmd_rsp->enabled, ENABLE);
++/***************************** DCCP fields *********************************/
++#define NH_FLD_DCCP_PORT_SRC (1)
++#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
++#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
++
++#define NH_FLD_DCCP_PORT_SIZE 2
++
++/***************************** IPHC fields *********************************/
++#define NH_FLD_IPHC_CID (1)
++#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
++#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
++#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
++#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
++#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
-+ return 0;
-+}
++/***************************** SCTP fields *********************************/
++#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
++#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
++#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
++#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
++#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
++#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
++#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
++#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
++#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
++#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
++ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
-+/**
-+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_reset(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
-+{
-+ struct mc_command cmd = { 0 };
++/*************************** L2TPV2 fields *********************************/
++#define NH_FLD_L2TPV2_TYPE_BIT (1)
++#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
++#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
++#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
++#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
++#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
++#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
++#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
++#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
++#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
++#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
++#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
++#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
++#define NH_FLD_L2TPV2_ALL_FIELDS \
++ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
-+ cmd_flags,
-+ token);
++/*************************** L2TPV3 fields *********************************/
++#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
++#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
++#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
++#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
++#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
++#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
++#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
++#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
++#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
++#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
++ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
-+/**
-+ * dpsw_set_irq() - Set IRQ information for the DPSW to trigger an interrupt.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: Identifies the interrupt index to configure
-+ * @irq_cfg: IRQ configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq *cmd_params;
++/**************************** PPP fields ***********************************/
++#define NH_FLD_PPP_PID (1)
++#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
++#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
-+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
-+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
-+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
++/************************** PPPoE fields ***********************************/
++#define NH_FLD_PPPOE_VER (1)
++#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
++#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
++#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
++#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
++#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
++#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
++#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/************************* PPP-Mux fields **********************************/
++#define NH_FLD_PPPMUX_PID (1)
++#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
++#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
++#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
-+/**
-+ * dpsw_get_irq() - Get IRQ information from the DPSW
-+ *
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @type: Interrupt type: 0 represents message interrupt
-+ * type (both irq_addr and irq_val are valid)
-+ * @irq_cfg: IRQ attributes
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq *cmd_params;
-+ struct dpsw_rsp_get_irq *rsp_params;
-+ int err;
++/*********************** PPP-Mux sub-frame fields **************************/
++#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
++#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
++#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
++#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
++#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
++#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
++ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq *)cmd.params;
-+ cmd_params->irq_index = irq_index;
++/*************************** LLC fields ************************************/
++#define NH_FLD_LLC_DSAP (1)
++#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
++#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
++#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/*************************** NLPID fields **********************************/
++#define NH_FLD_NLPID_NLPID (1)
++#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq *)cmd.params;
-+ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
-+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
-+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
-+ *type = le32_to_cpu(rsp_params->irq_type);
++/*************************** SNAP fields ***********************************/
++#define NH_FLD_SNAP_OUI (1)
++#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
++#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
-+ return 0;
-+}
++/*************************** LLC SNAP fields *******************************/
++#define NH_FLD_LLC_SNAP_TYPE (1)
++#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
-+/**
-+ * dpsw_set_irq_enable() - Set overall interrupt state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the
-+ * overall interrupt state. if the interrupt is disabled no causes will cause
-+ * an interrupt
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_enable *cmd_params;
++#define NH_FLD_ARP_HTYPE (1)
++#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
++#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
++#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
++#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
++#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
++#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
++#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
++#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
++#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
-+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
-+ cmd_params->irq_index = irq_index;
++/*************************** RFC2684 fields ********************************/
++#define NH_FLD_RFC2684_LLC (1)
++#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
++#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
++#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
++#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
++#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
++#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/*************************** User defined fields ***************************/
++#define NH_FLD_USER_DEFINED_SRCPORT (1)
++#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
++#define NH_FLD_USER_DEFINED_ALL_FIELDS \
++ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
-+/**
-+ * dpsw_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 mask)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_irq_mask *cmd_params;
++/*************************** Payload fields ********************************/
++#define NH_FLD_PAYLOAD_BUFFER (1)
++#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
++#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
++#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
++#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
++#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
++#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
-+ cmd_params->mask = cpu_to_le32(mask);
-+ cmd_params->irq_index = irq_index;
++/*************************** GRE fields ************************************/
++#define NH_FLD_GRE_TYPE (1)
++#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/*************************** MINENCAP fields *******************************/
++#define NH_FLD_MINENCAP_SRC_IP (1)
++#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
++#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
++#define NH_FLD_MINENCAP_ALL_FIELDS \
++ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
-+/**
-+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_get_irq_status *cmd_params;
-+ struct dpsw_rsp_get_irq_status *rsp_params;
-+ int err;
++/*************************** IPSEC AH fields *******************************/
++#define NH_FLD_IPSEC_AH_SPI (1)
++#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
++#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(*status);
-+ cmd_params->irq_index = irq_index;
++/*************************** IPSEC ESP fields ******************************/
++#define NH_FLD_IPSEC_ESP_SPI (1)
++#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
++#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
-+ *status = le32_to_cpu(rsp_params->status);
++/*************************** MPLS fields ***********************************/
++#define NH_FLD_MPLS_LABEL_STACK (1)
++#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
++ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
-+ return 0;
-+}
++/*************************** MACSEC fields *********************************/
++#define NH_FLD_MACSEC_SECTAG (1)
++#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
-+/**
-+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPCI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: bits to clear (W1C) - one bit per cause:
-+ * 0 = don't change
-+ * 1 = clear status bit
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 status)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_clear_irq_status *cmd_params;
++/*************************** GTP fields ************************************/
++#define NH_FLD_GTP_TEID (1)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
-+ cmd_params->status = cpu_to_le32(status);
-+ cmd_params->irq_index = irq_index;
++/* Protocol options */
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* Ethernet options */
++#define NH_OPT_ETH_BROADCAST 1
++#define NH_OPT_ETH_MULTICAST 2
++#define NH_OPT_ETH_UNICAST 3
++#define NH_OPT_ETH_BPDU 4
+
-+/**
-+ * dpsw_get_attributes() - Retrieve DPSW attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @attr: Returned DPSW attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_attr *attr)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_get_attr *rsp_params;
-+ int err;
++#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
++/* also applicable for broadcast */
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
-+ cmd_flags,
-+ token);
++/* VLAN options */
++#define NH_OPT_VLAN_CFI 1
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* IPV4 options */
++#define NH_OPT_IPV4_UNICAST 1
++#define NH_OPT_IPV4_MULTICAST 2
++#define NH_OPT_IPV4_BROADCAST 3
++#define NH_OPT_IPV4_OPTION 4
++#define NH_OPT_IPV4_FRAG 5
++#define NH_OPT_IPV4_INITIAL_FRAG 6
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->max_fdbs = rsp_params->max_fdbs;
-+ attr->num_fdbs = rsp_params->num_fdbs;
-+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
-+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
-+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
-+ attr->options = le64_to_cpu(rsp_params->options);
-+ attr->component_type = dpsw_get_field(rsp_params->component_type,
-+ COMPONENT_TYPE);
++/* IPV6 options */
++#define NH_OPT_IPV6_UNICAST 1
++#define NH_OPT_IPV6_MULTICAST 2
++#define NH_OPT_IPV6_OPTION 3
++#define NH_OPT_IPV6_FRAG 4
++#define NH_OPT_IPV6_INITIAL_FRAG 5
+
-+ return 0;
-+}
++/* General IP options (may be used for any version) */
++#define NH_OPT_IP_FRAG 1
++#define NH_OPT_IP_INITIAL_FRAG 2
++#define NH_OPT_IP_OPTION 3
+
-+/**
-+ * dpsw_set_reflection_if() - Set target interface for reflected interfaces.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Id
-+ *
-+ * Only one reflection receive interface is allowed per switch
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_set_reflection_if *cmd_params;
++/* Minenc. options */
++#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++/* GRE options */
++#define NH_OPT_GRE_ROUTING_PRESENT 1
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++/* TCP options */
++#define NH_OPT_TCP_OPTIONS 1
++#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
++#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
-+/**
-+ * dpsw_if_set_link_cfg() - Set the link configuration.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @cfg: Link configuration
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
++/* CAPWAP options */
++#define NH_OPT_CAPWAP_DTLS 1
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->rate = cpu_to_le32(cfg->rate);
-+ cmd_params->options = cpu_to_le64(cfg->options);
++enum net_prot {
++ NET_PROT_NONE = 0,
++ NET_PROT_PAYLOAD,
++ NET_PROT_ETH,
++ NET_PROT_VLAN,
++ NET_PROT_IPV4,
++ NET_PROT_IPV6,
++ NET_PROT_IP,
++ NET_PROT_TCP,
++ NET_PROT_UDP,
++ NET_PROT_UDP_LITE,
++ NET_PROT_IPHC,
++ NET_PROT_SCTP,
++ NET_PROT_SCTP_CHUNK_DATA,
++ NET_PROT_PPPOE,
++ NET_PROT_PPP,
++ NET_PROT_PPPMUX,
++ NET_PROT_PPPMUX_SUBFRM,
++ NET_PROT_L2TPV2,
++ NET_PROT_L2TPV3_CTRL,
++ NET_PROT_L2TPV3_SESS,
++ NET_PROT_LLC,
++ NET_PROT_LLC_SNAP,
++ NET_PROT_NLPID,
++ NET_PROT_SNAP,
++ NET_PROT_MPLS,
++ NET_PROT_IPSEC_AH,
++ NET_PROT_IPSEC_ESP,
++ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
++ NET_PROT_MACSEC,
++ NET_PROT_GRE,
++ NET_PROT_MINENCAP,
++ NET_PROT_DCCP,
++ NET_PROT_ICMP,
++ NET_PROT_IGMP,
++ NET_PROT_ARP,
++ NET_PROT_CAPWAP_DATA,
++ NET_PROT_CAPWAP_CTRL,
++ NET_PROT_RFC2684,
++ NET_PROT_ICMPV6,
++ NET_PROT_FCOE,
++ NET_PROT_FIP,
++ NET_PROT_ISCSI,
++ NET_PROT_GTP,
++ NET_PROT_USER_DEFINED_L2,
++ NET_PROT_USER_DEFINED_L3,
++ NET_PROT_USER_DEFINED_L4,
++ NET_PROT_USER_DEFINED_L5,
++ NET_PROT_USER_DEFINED_SHIM1,
++ NET_PROT_USER_DEFINED_SHIM2,
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++ NET_PROT_DUMMY_LAST
++};
+
-+/**
-+ * dpsw_if_get_link_state - Return the link state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface id
-+ * @state: Link state 1 - linkup, 0 - link down or disconnected
-+ *
-+ * @Return '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_link_state *state)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_link_state *cmd_params;
-+ struct dpsw_rsp_if_get_link_state *rsp_params;
-+ int err;
++/*! IEEE8021.Q */
++#define NH_IEEE8021Q_ETYPE 0x8100
++#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
++ ((((u32)((etype) & 0xFFFF)) << 16) | \
++ (((u32)((pcp) & 0x07)) << 13) | \
++ (((u32)((dei) & 0x01)) << 12) | \
++ (((u32)((vlan_id) & 0xFFF))))
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++#endif /* __FSL_NET_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
+@@ -0,0 +1,10 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for the Freescale DPAA2 Ethernet Switch
++#
++# Copyright 2014-2017 Freescale Semiconductor, Inc.
++# Copyright 2017-2018 NXP
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
-+ state->rate = le32_to_cpu(rsp_params->rate);
-+ state->options = le64_to_cpu(rsp_params->options);
-+ state->up = dpsw_get_field(rsp_params->up, UP);
++dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/README
+@@ -0,0 +1,106 @@
++DPAA2 Ethernet Switch driver
++============================
+
-+ return 0;
-+}
++This file provides documentation for the DPAA2 Ethernet Switch driver.
+
-+/**
-+ * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding *cmd_params;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
++Contents
++========
++ Supported Platforms
++ Architecture Overview
++ Creating an Ethernet Switch
++ Features
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
+
-+/**
-+ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_broadcast *cmd_params;
++ Supported Platforms
++===================
++This driver provides networking support for Freescale LS2085A, LS2088A
++DPAA2 SoCs.
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++Architecture Overview
++=====================
++The Ethernet Switch in the DPAA2 architecture consists of several hardware
++resources that provide the functionality. These are allocated and
++configured via the Management Complex (MC) portals. MC abstracts most of
++these resources as DPAA2 objects and exposes ABIs through which they can
++be configured and controlled.
+
-+/**
-+ * dpsw_if_set_multicast() - Enable/disable multicast for particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @en: 1 - enable, 0 - disable
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_multicast *cmd_params;
++For a more detailed description of the DPAA2 architecture and its object
++abstractions see:
++ drivers/staging/fsl-mc/README.txt
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MULTICAST,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_multicast *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->enable, ENABLE, en);
++The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++Configuration interface:
+
-+/**
-+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_tci *cmd_params;
++ ---------------------
++ | DPAA2 Switch driver |
++ ---------------------
++ .
++ .
++ ----------
++ | DPSW API |
++ ----------
++ . software
++ ================= . ==============
++ . hardware
++ ---------------------
++ | MC hardware portals |
++ ---------------------
++ .
++ .
++ ------
++ | DPSW |
++ ------
++
++The driver uses the switch device driver model and exposes each switch port as
++a network interface, which can be included in a bridge. Traffic switched
++between ports is offloaded into the hardware. Exposed network interfaces
++are not used for I/O, they are used just for configuration. This
++limitation is going to be addressed in the future.
++
++The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
++
++
++ [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
++ : : : : : :
++ : : : : : :
++[eth drv] [eth drv] [ ethsw drv ]
++ : : : : : : kernel
++========================================================================
++ : : : : : : hardware
++ [DPNI] [DPNI] [============= DPSW =================]
++ | | | | | |
++ | ---------- | [DPMAC] [DPMAC]
++ ------------------------------- | |
++ | |
++ [PHY] [PHY]
++
++For a more detailed description of the Ethernet switch device driver model
++see:
++ Documentation/networking/switchdev.txt
++
++Creating an Ethernet Switch
++===========================
++A device is created for the switch objects probed on the MC bus. Each DPSW
++has a number of properties which determine the configuration options and
++associated hardware resources.
++
++A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
++be added to a container on the MC bus in one of two ways: statically,
++through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
++time; or created dynamically at runtime, via the DPAA2 objects APIs.
++
++Features
++========
++Driver configures DPSW to perform hardware switching offload of
++unicast/multicast/broadcast (VLAN tagged or untagged) traffic between its
++ports.
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
-+ dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
-+ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
-+ cmd_params->conf = cpu_to_le16(cmd_params->conf);
++It allows configuration of hardware learning, flooding, multicast groups,
++port VLAN configuration and STP state.
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++Static entries can be added/removed from the FDB.
+
-+/**
-+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Tag Control Information Configuration
++Hardware statistics for each port are provided through ethtool -S option.
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
+@@ -0,0 +1,14 @@
++* Add I/O capabilities on switch port netdevices. This will allow control
++traffic to reach the CPU.
++* Add ACL to redirect control traffic to CPU.
++* Add support for displaying learned FDB entries
++* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
++need to be kept in sync with binary interface changes in MC
++* refine README file
++* cleanup
++
++NOTE: At least first three of the above are required before getting the
++DPAA2 Ethernet Switch driver out of staging. Another requirement is that
++the fsl-mc bus driver is moved to drivers/bus and dpio driver is moved to
++drivers/soc (this is required for I/O).
++
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+@@ -0,0 +1,359 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_tci_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_tci *cmd_params;
-+ struct dpsw_rsp_if_get_tci *rsp_params;
-+ int err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++#ifndef __FSL_DPSW_CMD_H
++#define __FSL_DPSW_CMD_H
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++/* DPSW Version */
++#define DPSW_VER_MAJOR 8
++#define DPSW_VER_MINOR 0
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
-+ cfg->pcp = rsp_params->pcp;
-+ cfg->dei = rsp_params->dei;
-+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
++#define DPSW_CMD_BASE_VERSION 1
++#define DPSW_CMD_ID_OFFSET 4
+
-+ return 0;
-+}
++#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
+
-+/**
-+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: STP State configuration parameters
-+ *
-+ * The following STP states are supported -
-+ * blocking, listening, learning, forwarding and disabled.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_stp *cmd_params;
++/* Command IDs */
++#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
++#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
++#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
++
++#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
++#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
++#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
++#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
++
++#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
-+/**
-+ * dpsw_if_set_accepted_frames()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Frame types configuration
-+ *
-+ * When is admit_only_vlan_tagged- the device will discard untagged
-+ * frames or Priority-Tagged frames received on this interface.
-+ * When admit_only_untagged- untagged frames or Priority-Tagged
-+ * frames received on this interface will be accepted and assigned
-+ * to a VID based on the PVID and VID Set for this interface.
-+ * When admit_all - the device will accept VLAN tagged, untagged
-+ * and priority tagged frames.
-+ * The default is admit_all
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accepted_frames *cmd_params;
++#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
++#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_ACCEPTED_FRAMES,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accepted_frames *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->unaccepted, FRAME_TYPE, cfg->type);
-+ dpsw_set_field(cmd_params->unaccepted, UNACCEPTED_ACT,
-+ cfg->unaccept_act);
++#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
++#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
+
-+/**
-+ * dpsw_if_set_accept_all_vlan()
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @accept_all: Accept or drop frames having different VLAN
-+ *
-+ * When this is accept (FALSE), the device will discard incoming
-+ * frames for VLANs that do not include this interface in its
-+ * Member set. When accept (TRUE), the interface will accept all incoming frames
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_accept_all_vlan *cmd_params;
++#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
++#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IF_ACCEPT_ALL_VLAN,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_accept_all_vlan *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->accept_all, ACCEPT_ALL, accept_all);
++#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
++#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
++#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
+
-+/**
-+ * dpsw_if_get_counter() - Get specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: return value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_counter *cmd_params;
-+ struct dpsw_rsp_if_get_counter *rsp_params;
-+ int err;
++#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
++#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
++#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
-+ *counter = le64_to_cpu(rsp_params->counter);
++#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
++#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
++#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
++#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
-+ return 0;
-+}
++#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
++#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
++#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
++#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
++#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
+
-+/**
-+ * dpsw_if_set_counter() - Set specific counter of particular interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @type: Counter type
-+ * @counter: New counter value
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_counter *cmd_params;
++/* Macros for accessing command fields smaller than 1byte */
++#define DPSW_MASK(field) \
++ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
++ DPSW_##field##_SHIFT)
++#define dpsw_set_field(var, field, val) \
++ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
++#define dpsw_get_field(var, field) \
++ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
++#define dpsw_get_bit(var, bit) \
++ (((var) >> (bit)) & GENMASK(0, 0))
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_COUNTER,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_counter *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->counter = cpu_to_le64(counter);
-+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
++struct dpsw_cmd_open {
++ __le32 dpsw_id;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_COMPONENT_TYPE_SHIFT 0
++#define DPSW_COMPONENT_TYPE_SIZE 4
+
-+/**
-+ * dpsw_if_set_tx_selection() - Function is used for mapping variety
-+ * of frame fields
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Traffic class mapping configuration
-+ *
-+ * Function is used for mapping variety of frame fields (DSCP, PCP)
-+ * to Traffic Class. Traffic class is a number
-+ * in the range from 0 to 7
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg)
-+{
-+ struct dpsw_cmd_if_set_tx_selection *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int i;
++struct dpsw_cmd_create {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 max_meters_per_if;
++ /* from LSB: only the first 4 bits */
++ u8 component_type;
++ u8 pad[3];
++ /* cmd word 1 */
++ __le16 max_vlans;
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le16 max_fdb_mc_groups;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TX_SELECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_tx_selection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->priority_selector, PRIORITY_SELECTOR,
-+ cfg->priority_selector);
++struct dpsw_cmd_destroy {
++ __le32 dpsw_id;
++};
+
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->tc_sched[i].delta_bandwidth =
-+ cpu_to_le16(cfg->tc_sched[i].delta_bandwidth);
-+ dpsw_set_field(cmd_params->tc_sched[i].mode, SCHED_MODE,
-+ cfg->tc_sched[i].mode);
-+ cmd_params->tc_id[i] = cfg->tc_id[i];
-+ }
++#define DPSW_ENABLE_SHIFT 0
++#define DPSW_ENABLE_SIZE 1
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_is_enabled {
++ /* from LSB: enable:1 */
++ u8 enabled;
++};
+
-+/**
-+ * dpsw_if_add_reflection() - Identify interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
++struct dpsw_cmd_set_irq_enable {
++ u8 enable_state;
++ u8 pad[3];
++ u8 irq_index;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++struct dpsw_cmd_get_irq_enable {
++ __le32 pad;
++ u8 irq_index;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_get_irq_enable {
++ u8 enable_state;
++};
+
-+/**
-+ * dpsw_if_remove_reflection() - Remove interface to be reflected or mirrored
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Reflection configuration
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_reflection *cmd_params;
++struct dpsw_cmd_set_irq_mask {
++ __le32 mask;
++ u8 irq_index;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
-+ dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
++struct dpsw_cmd_get_irq_mask {
++ __le32 pad;
++ u8 irq_index;
++};
++
++struct dpsw_rsp_get_irq_mask {
++ __le32 mask;
++};
++
++struct dpsw_cmd_get_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_get_irq_status {
++ __le32 status;
++};
+
-+/**
-+ * dpsw_if_set_flooding_metering() - Set flooding metering
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_flooding_metering *cmd_params;
++struct dpsw_cmd_clear_irq_status {
++ __le32 status;
++ u8 irq_index;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_flooding_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++#define DPSW_COMPONENT_TYPE_SHIFT 0
++#define DPSW_COMPONENT_TYPE_SIZE 4
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_get_attr {
++ /* cmd word 0 */
++ __le16 num_ifs;
++ u8 max_fdbs;
++ u8 num_fdbs;
++ __le16 max_vlans;
++ __le16 num_vlans;
++ /* cmd word 1 */
++ __le16 max_fdb_entries;
++ __le16 fdb_aging_time;
++ __le32 dpsw_id;
++ /* cmd word 2 */
++ __le16 mem_size;
++ __le16 max_fdb_mc_groups;
++ u8 max_meters_per_if;
++ /* from LSB only the first 4 bits */
++ u8 component_type;
++ __le16 pad;
++ /* cmd word 3 */
++ __le64 options;
++};
+
-+/**
-+ * dpsw_if_set_metering() - Set interface metering for flooding
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class ID
-+ * @cfg: Metering parameters
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_metering *cmd_params;
++struct dpsw_cmd_if_set_flooding {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_METERING,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_metering *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->tc_id = tc_id;
-+ dpsw_set_field(cmd_params->mode_units, MODE, cfg->mode);
-+ dpsw_set_field(cmd_params->mode_units, UNITS, cfg->units);
-+ cmd_params->cir = cpu_to_le32(cfg->cir);
-+ cmd_params->eir = cpu_to_le32(cfg->eir);
-+ cmd_params->cbs = cpu_to_le32(cfg->cbs);
-+ cmd_params->ebs = cpu_to_le32(cfg->ebs);
++struct dpsw_cmd_if_set_broadcast {
++ __le16 if_id;
++ /* from LSB: enable:1 */
++ u8 enable;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++#define DPSW_VLAN_ID_SHIFT 0
++#define DPSW_VLAN_ID_SIZE 12
++#define DPSW_DEI_SHIFT 12
++#define DPSW_DEI_SIZE 1
++#define DPSW_PCP_SHIFT 13
++#define DPSW_PCP_SIZE 3
+
-+/**
-+ * dpsw_prepare_early_drop() - Prepare an early drop for setting in to interface
-+ * @cfg: Early-drop configuration
-+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before dpsw_if_tc_set_early_drop
-+ *
-+ */
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf)
-+{
-+ struct dpsw_prep_early_drop *ext_params;
++struct dpsw_cmd_if_set_tci {
++ __le16 if_id;
++ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
++ __le16 conf;
++};
+
-+ ext_params = (struct dpsw_prep_early_drop *)early_drop_buf;
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_MODE, cfg->drop_mode);
-+ dpsw_set_field(ext_params->conf, EARLY_DROP_UNIT, cfg->units);
-+ ext_params->tail_drop_threshold = cpu_to_le32(cfg->tail_drop_threshold);
-+ ext_params->green_drop_probability = cfg->green.drop_probability;
-+ ext_params->green_max_threshold = cpu_to_le64(cfg->green.max_threshold);
-+ ext_params->green_min_threshold = cpu_to_le64(cfg->green.min_threshold);
-+ ext_params->yellow_drop_probability = cfg->yellow.drop_probability;
-+ ext_params->yellow_max_threshold =
-+ cpu_to_le64(cfg->yellow.max_threshold);
-+ ext_params->yellow_min_threshold =
-+ cpu_to_le64(cfg->yellow.min_threshold);
-+}
++struct dpsw_cmd_if_get_tci {
++ __le16 if_id;
++};
+
-+/**
-+ * dpsw_if_set_early_drop() - Set interface traffic class early-drop
-+ * configuration
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @tc_id: Traffic class selection (0-7)
-+ * @early_drop_iova: I/O virtual address of 64 bytes;
-+ * Must be cacheline-aligned and DMA-able memory
-+ *
-+ * warning: Before calling this function, call dpsw_prepare_if_tc_early_drop()
-+ * to prepare the early_drop_iova parameter
-+ *
-+ * Return: '0' on Success; error code otherwise.
-+ */
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_early_drop *cmd_params;
++struct dpsw_rsp_if_get_tci {
++ __le16 pad;
++ __le16 vlan_id;
++ u8 dei;
++ u8 pcp;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_EARLY_DROP,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_early_drop *)cmd.params;
-+ cmd_params->tc_id = tc_id;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
++#define DPSW_STATE_SHIFT 0
++#define DPSW_STATE_SIZE 4
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_if_set_stp {
++ __le16 if_id;
++ __le16 vlan_id;
++ /* only the first LSB 4 bits */
++ u8 state;
++};
+
-+/**
-+ * dpsw_add_custom_tpid() - API Configures a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * API Configures a distinct Ethernet type value (or TPID value)
-+ * to indicate a VLAN tag in addition to the common
-+ * TPID values 0x8100 and 0x88A8.
-+ * Two additional TPID's are supported
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
++#define DPSW_COUNTER_TYPE_SHIFT 0
++#define DPSW_COUNTER_TYPE_SIZE 5
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ADD_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++struct dpsw_cmd_if_get_counter {
++ __le16 if_id;
++ /* from LSB: type:5 */
++ u8 type;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_if_get_counter {
++ __le64 pad;
++ __le64 counter;
++};
+
-+/**
-+ * dpsw_remove_custom_tpid - API removes a distinct Ethernet type value
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @cfg: Tag Protocol identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_custom_tpid *cmd_params;
++struct dpsw_cmd_if {
++ __le16 if_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_REMOVE_CUSTOM_TPID,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_custom_tpid *)cmd.params;
-+ cmd_params->tpid = cpu_to_le16(cfg->tpid);
++struct dpsw_cmd_if_set_max_frame_length {
++ __le16 if_id;
++ __le16 frame_length;
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_if_set_link_cfg {
++ /* cmd word 0 */
++ __le16 if_id;
++ u8 pad[6];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad1;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+/**
-+ * dpsw_if_enable() - Enable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
++struct dpsw_cmd_if_get_link_state {
++ __le16 if_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++#define DPSW_UP_SHIFT 0
++#define DPSW_UP_SIZE 1
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_rsp_if_get_link_state {
++ /* cmd word 0 */
++ __le32 pad0;
++ u8 up;
++ u8 pad1[3];
++ /* cmd word 1 */
++ __le32 rate;
++ __le32 pad2;
++ /* cmd word 2 */
++ __le64 options;
++};
+
-+/**
-+ * dpsw_if_disable() - Disable Interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if *cmd_params;
++struct dpsw_vlan_add {
++ __le16 fdb_id;
++ __le16 vlan_id;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_cmd_vlan_manage_if {
++ /* cmd word 0 */
++ __le16 pad0;
++ __le16 vlan_id;
++ __le32 pad1;
++ /* cmd word 1-4 */
++ __le64 if_id[4];
++};
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_vlan_remove {
++ __le16 pad;
++ __le16 vlan_id;
++};
+
-+/**
-+ * dpsw_if_get_attributes() - Function obtains attributes of interface
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @attr: Returned interface attributes
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr)
-+{
-+ struct dpsw_rsp_if_get_attr *rsp_params;
-+ struct dpsw_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
-+ int err;
++struct dpsw_cmd_fdb_add {
++ __le32 pad;
++ __le16 fdb_aging_time;
++ __le16 num_fdb_entries;
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
++struct dpsw_rsp_fdb_add {
++ __le16 fdb_id;
++};
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++struct dpsw_cmd_fdb_remove {
++ __le16 fdb_id;
++};
+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
-+ attr->num_tcs = rsp_params->num_tcs;
-+ attr->rate = le32_to_cpu(rsp_params->rate);
-+ attr->options = le32_to_cpu(rsp_params->options);
-+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
-+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
-+ ACCEPT_ALL_VLAN);
-+ attr->admit_untagged = dpsw_get_field(rsp_params->conf, ADMIT_UNTAGGED);
-+ attr->qdid = le16_to_cpu(rsp_params->qdid);
++#define DPSW_ENTRY_TYPE_SHIFT 0
++#define DPSW_ENTRY_TYPE_SIZE 4
+
-+ return 0;
-+}
++struct dpsw_cmd_fdb_unicast_op {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ u8 mac_addr[6];
++ /* cmd word 1 */
++ __le16 if_egress;
++ /* only the first 4 bits from LSB */
++ u8 type;
++};
+
-+/**
-+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Maximum Frame Length
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
++struct dpsw_cmd_fdb_multicast_op {
++ /* cmd word 0 */
++ __le16 fdb_id;
++ __le16 num_ifs;
++ /* only the first 4 bits from LSB */
++ u8 type;
++ u8 pad[3];
++ /* cmd word 1 */
++ u8 mac_addr[6];
++ __le16 pad2;
++ /* cmd word 2-5 */
++ __le64 if_id[4];
++};
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
-+ cmd_params->frame_length = cpu_to_le16(frame_length);
++#define DPSW_LEARNING_MODE_SHIFT 0
++#define DPSW_LEARNING_MODE_SIZE 4
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
++struct dpsw_cmd_fdb_set_learning_mode {
++ __le16 fdb_id;
++ /* only the first 4 bits from LSB */
++ u8 mode;
++};
+
-+/**
-+ * dpsw_if_get_max_frame_length() - Get Maximum Receive frame length.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @if_id: Interface Identifier
-+ * @frame_length: Returned maximum Frame Length
++struct dpsw_rsp_get_api_version {
++ __le16 version_major;
++ __le16 version_minor;
++};
++
++#endif /* __FSL_DPSW_CMD_H */
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+@@ -0,0 +1,1165 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_if_get_max_frame_length *cmd_params;
-+ struct dpsw_rsp_if_get_max_frame_length *rsp_params;
-+ int err;
-+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_MAX_FRAME_LENGTH,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_if_get_max_frame_length *)cmd.params;
-+ cmd_params->if_id = cpu_to_le16(if_id);
+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
++#include <linux/fsl/mc.h>
++#include "dpsw.h"
++#include "dpsw-cmd.h"
+
-+ rsp_params = (struct dpsw_rsp_if_get_max_frame_length *)cmd.params;
-+ *frame_length = le16_to_cpu(rsp_params->frame_length);
++static void build_if_id_bitmap(__le64 *bmap,
++ const u16 *id,
++ const u16 num_ifs)
++{
++ int i;
+
-+ return 0;
++ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
++ if (id[i] < DPSW_MAX_IF)
++ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
++ }
+}
+
+/**
-+ * dpsw_vlan_add() - Adding new VLAN to DPSW.
++ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: VLAN configuration
++ * @dpsw_id: DPSW unique ID
++ * @token: Returned token; use in subsequent API calls
+ *
-+ * Only VLAN ID and FDB ID are required parameters here.
-+ * 12 bit VLAN ID is defined in IEEE802.1Q.
-+ * Adding a duplicate VLAN ID is not allowed.
-+ * FDB ID can be shared across multiple VLANs. Shared learning
-+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
-+ * with same fdb_id
++ * This function can be used to open a control session for an
++ * already created object; an object may have been declared in
++ * the DPL or by calling the dpsw_create() function.
++ * This function returns a unique authentication token,
++ * associated with the specific object ID and the specific MC
++ * portal; this token must be used in all subsequent commands for
++ * this specific object
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_cfg *cfg)
++int dpsw_open(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ int dpsw_id,
++ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_vlan_add *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_open *cmd_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ 0);
++ cmd_params = (struct dpsw_cmd_open *)cmd.params;
++ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
-+}
-+
-+/**
-+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces to add
-+ *
-+ * It adds only interfaces not belonging to this VLAN yet,
-+ * otherwise an error is generated and an entire command is
-+ * ignored. This function can be called numerous times always
-+ * providing required interfaces delta.
-+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
-+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
+
-+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
-+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ /* retrieve response parameters */
++ *token = mc_cmd_hdr_read_token(&cmd);
+
-+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ return 0;
+}
+
+/**
-+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
-+ * transmitted as untagged.
++ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be transmitted as untagged
-+ *
-+ * These interfaces should already belong to this VLAN.
-+ * By default all interfaces are transmitted as tagged.
-+ * Providing un-existing interface or untagged interface that is
-+ * configured untagged already generates an error and the entire
-+ * command is ignored.
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
-+ */
-+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++ * After this function is called, no further operations are
++ * allowed on the object without opening a new control session.
++ *
++ * Return: '0' on Success; Error code otherwise.
++ */
++int dpsw_close(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_add_if_flooding() - Define a set of interfaces that should be
-+ * included in flooding when frame with unknown destination
-+ * unicast MAC arrived.
++ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be used for flooding
-+ *
-+ * These interfaces should belong to this VLAN. By default all
-+ * interfaces are included into flooding list. Providing
-+ * un-existing interface or an interface that already in the
-+ * flooding list generates an error and the entire command is
-+ * ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
++ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces must belong to this VLAN, otherwise an error
-+ * is returned and an the command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
-+ * converted from transmitted as untagged to transmit as tagged.
++ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces that should be removed
-+ *
-+ * Interfaces provided by API have to belong to this VLAN and
-+ * configured untagged, otherwise an error is returned and the
-+ * command is ignored
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_reset(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove_if_flooding() - Define a set of interfaces that should be
-+ * removed from the flooding list.
++ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Set of interfaces used for flooding
++ * @token:	Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @en: Interrupt state - enable = 1, disable = 0
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Allows GPP software to control when interrupts are generated.
++ * Each interrupt can have up to 32 causes. The enable/disable controls the
++ * overall interrupt state. If the interrupt is disabled no causes will cause
++ * an interrupt.
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg)
++int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_manage_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
++ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_remove() - Remove an entire VLAN
++ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
++ * @token:	Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @mask: Event mask to trigger interrupt;
++ * each bit:
++ * 0 = ignore event
++ * 1 = consider event for asserting IRQ
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Every interrupt can have up to 32 causes and the interrupt model supports
++ * masking/unmasking each cause independently
++ *
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id)
++int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
++ cmd_params->mask = cpu_to_le32(mask);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_get_attributes() - Get VLAN attributes
++ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @attr: Returned DPSW attributes
++ * @irq_index: The interrupt index to configure
++ * @status: Returned interrupts status - one bit per cause:
++ * 0 = no interrupt pending
++ * 1 = interrupt pending
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr)
++int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_attr *cmd_params;
-+ struct dpsw_rsp_vlan_get_attr *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_get_irq_status *cmd_params;
++ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_ATTRIBUTES,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_attr *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(*status);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_attr *)cmd.params;
-+ attr->fdb_id = le16_to_cpu(rsp_params->fdb_id);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ attr->num_untagged_ifs = le16_to_cpu(rsp_params->num_untagged_ifs);
-+ attr->num_flooding_ifs = le16_to_cpu(rsp_params->num_flooding_ifs);
++ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
++ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
-+ * dpsw_vlan_get_if() - Get interfaces belong to this VLAN
++ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of interfaces belong to this VLAN
++ * @token:	Token of DPSW object
++ * @irq_index: The interrupt index to configure
++ * @status: bits to clear (W1C) - one bit per cause:
++ * 0 = don't change
++ * 1 = clear status bit
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u8 irq_index,
++ u32 status)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if *cmd_params;
-+ struct dpsw_rsp_vlan_get_if *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
++ cmd_params->status = cpu_to_le32(status);
++ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_vlan_get_if_flooding() - Get interfaces used in flooding for this VLAN
++ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of flooding interfaces
++ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_get_attributes(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ struct dpsw_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_flooding *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_flooding *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_FLOODING,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_flooding *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_flooding *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
++ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
++ attr->max_fdbs = rsp_params->max_fdbs;
++ attr->num_fdbs = rsp_params->num_fdbs;
++ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
++ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
++ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
++ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
++ attr->id = le32_to_cpu(rsp_params->dpsw_id);
++ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
++ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
++ attr->max_meters_per_if = rsp_params->max_meters_per_if;
++ attr->options = le64_to_cpu(rsp_params->options);
++ attr->component_type = dpsw_get_field(rsp_params->component_type,
++ COMPONENT_TYPE);
+
+ return 0;
+}
+
+/**
-+ * dpsw_vlan_get_if_untagged() - Get interfaces that should be transmitted as
-+ * untagged
++ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @vlan_id: VLAN Identifier
-+ * @cfg: Returned set of untagged interfaces
++ * @if_id: Interface id
++ * @cfg: Link configuration
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg)
++int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_vlan_get_if_untagged *cmd_params;
-+ struct dpsw_rsp_vlan_get_if_untagged *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_GET_IF_UNTAGGED,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_vlan_get_if_untagged *)cmd.params;
-+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
-+
-+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_vlan_get_if_untagged *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->rate = cpu_to_le32(cfg->rate);
++ cmd_params->options = cpu_to_le64(cfg->options);
+
-+ return 0;
++ /* send command to mc*/
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_add() - Add FDB to switch and Returns handle to FDB table for
-+ * the reference
++ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Returned Forwarding Database Identifier
-+ * @cfg: FDB Configuration
++ * @if_id: Interface id
++ * @state: Link state 1 - linkup, 0 - link down or disconnected
+ *
-+ * Return: Completion status. '0' on Success; Error code otherwise.
++ * Return: '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg)
++int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add *cmd_params;
-+ struct dpsw_rsp_fdb_add *rsp_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_link_state *cmd_params;
++ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
-+ cmd_params->fdb_aging_time = cpu_to_le16(cfg->fdb_aging_time);
-+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
++ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
-+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
++ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
++ state->rate = le32_to_cpu(rsp_params->rate);
++ state->options = le64_to_cpu(rsp_params->options);
++ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
-+ * dpsw_fdb_remove() - Remove FDB from switch
++ * dpsw_if_set_flooding() - Enable Disable flooding for particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id)
++int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_flooding *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table
++ * dpsw_if_set_broadcast() - Enable/disable broadcast for particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
++ * @if_id: Interface Identifier
++ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u8 en)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_unicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_broadcast *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_get_unicast() - Get unicast entry from MAC lookup table by
-+ * unicast Ethernet address
++ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned unicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_unicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_unicast *rsp_params;
-+ int err, i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_tci *cmd_params;
++ u16 tmp_conf = 0;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
++ dpsw_set_field(tmp_conf, DEI, cfg->dei);
++ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
++ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_unicast *)cmd.params;
-+ cfg->if_egress = le16_to_cpu(rsp_params->if_egress);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
++ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Unicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_unicast_cfg *cfg)
++int dpsw_if_get_tci(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ struct dpsw_tci_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_unicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_tci *cmd_params;
++ struct dpsw_rsp_if_get_tci *rsp_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_unicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
-+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
-+ return mc_send_command(mc_io, &cmd);
++ err = mc_send_command(mc_io, &cmd);
++ if (err)
++ return err;
++
++ /* retrieve response parameters */
++ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
++ cfg->pcp = rsp_params->pcp;
++ cfg->dei = rsp_params->dei;
++ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
++
++ return 0;
+}
+
+/**
-+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
++ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
++ * @if_id: Interface Identifier
++ * @cfg: STP State configuration parameters
+ *
-+ * If group doesn't exist, it will be created.
-+ * It adds only interfaces not belonging to this multicast group
-+ * yet, otherwise error will be generated and the command is
-+ * ignored.
-+ * This function may be called numerous times always providing
-+ * required interfaces delta.
++ * The following STP states are supported -
++ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ const struct dpsw_stp_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_add_multicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_add_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
++ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_get_multicast() - Reading multi-cast group by multi-cast Ethernet
-+ * address.
++ * dpsw_if_get_counter() - Get specific counter of particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Returned multicast entry configuration
++ * @if_id: Interface Identifier
++ * @type: Counter type
++ * @counter:	Returned value of the requested counter
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_multicast *cmd_params;
-+ struct dpsw_rsp_fdb_get_multicast *rsp_params;
-+ int err, i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_get_counter *cmd_params;
++ struct dpsw_rsp_if_get_counter *rsp_params;
++ int err;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_multicast *)cmd.params;
-+ cfg->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+ cfg->type = dpsw_get_field(rsp_params->type, ENTRY_TYPE);
-+ read_if_id_bitmap(cfg->if_id, &cfg->num_ifs, rsp_params->if_id);
++ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
++ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
-+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
-+ * group.
++ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @cfg: Multicast entry configuration
-+ *
-+ * Interfaces provided by this API have to exist in the group,
-+ * otherwise an error will be returned and an entire command
-+ * ignored. If there is no interface left in the group,
-+ * an entire group is deleted
++ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg)
++int dpsw_if_enable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_remove_multicast *cmd_params;
-+ int i;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_remove_multicast *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
-+ for (i = 0; i < 6; i++)
-+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
++ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @mode: Learning mode
++ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode)
++int dpsw_if_disable(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
-+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
-+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
++ token);
++ cmd_params = (struct dpsw_cmd_if *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_fdb_get_attributes() - Get FDB attributes
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPSW object
-+ * @fdb_id: Forwarding Database Identifier
-+ * @attr: Returned FDB attributes
++ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
++ * @mc_io: Pointer to MC portal's I/O object
++ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
++ * @token: Token of DPSW object
++ * @if_id: Interface Identifier
++ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr)
++int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ u16 frame_length)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_fdb_get_attr *cmd_params;
-+ struct dpsw_rsp_fdb_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_fdb_get_attr *)cmd.params;
-+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
++ cmd_params->if_id = cpu_to_le16(if_id);
++ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_fdb_get_attr *)cmd.params;
-+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
-+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
-+ attr->learning_mode = dpsw_get_field(rsp_params->learning_mode,
-+ LEARNING_MODE);
-+ attr->num_fdb_mc_groups = le16_to_cpu(rsp_params->num_fdb_mc_groups);
-+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_add() - Adds ACL to L2 switch.
++ * dpsw_vlan_add() - Adding new VLAN to DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: Returned ACL ID, for the future reference
-+ * @cfg: ACL configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: VLAN configuration
+ *
-+ * Create Access Control List. Multiple ACLs can be created and
-+ * co-exist in L2 switch
++ * Only VLAN ID and FDB ID are required parameters here.
++ * 12 bit VLAN ID is defined in IEEE802.1Q.
++ * Adding a duplicate VLAN ID is not allowed.
++ * FDB ID can be shared across multiple VLANs. Shared learning
++ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
++ * with same fdb_id
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg)
++int dpsw_vlan_add(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_add *cmd_params;
-+ struct dpsw_rsp_acl_add *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
-+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
++ cmd_params = (struct dpsw_vlan_add *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
-+ *acl_id = le16_to_cpu(rsp_params->acl_id);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_remove() - Removes ACL from L2 switch.
++ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces to add
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * It adds only interfaces not belonging to this VLAN yet,
++ * otherwise an error is generated and an entire command is
++ * ignored. This function can be called numerous times always
++ * providing required interfaces delta.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id)
++int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_remove *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_prepare_entry_cfg() - Set an entry to ACL.
-+ * @key: Key
-+ * @entry_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
-+ *
-+ * This function has to be called before adding or removing acl_entry
-+ *
-+ */
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ u8 *entry_cfg_buf)
-+{
-+ struct dpsw_prep_acl_entry *ext_params;
-+ int i;
-+
-+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
-+
-+ for (i = 0; i < 6; i++) {
-+ ext_params->match_l2_dest_mac[i] =
-+ key->match.l2_dest_mac[5 - i];
-+ ext_params->match_l2_source_mac[i] =
-+ key->match.l2_source_mac[5 - i];
-+ ext_params->mask_l2_dest_mac[i] =
-+ key->mask.l2_dest_mac[5 - i];
-+ ext_params->mask_l2_source_mac[i] =
-+ key->mask.l2_source_mac[5 - i];
-+ }
-+
-+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
-+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
-+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
-+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
-+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
-+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
-+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
-+ ext_params->match_l3_dscp = key->match.l3_dscp;
-+ ext_params->match_l4_source_port =
-+ cpu_to_le16(key->match.l4_source_port);
-+
-+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
-+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
-+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
-+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
-+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
-+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
-+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
-+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
-+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
-+ ext_params->match_l3_protocol = key->match.l3_protocol;
-+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
-+}
-+
-+/**
-+ * dpsw_acl_add_entry() - Adds an entry to ACL.
++ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
++ * transmitted as untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
++ * These interfaces should already belong to this VLAN.
++ * By default all interfaces are transmitted as tagged.
++ * Providing a non-existent interface or an untagged interface that is
++ * configured untagged already generates an error and the entire
++ * command is ignored.
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
++int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_remove_entry() - Removes an entry from ACL.
++ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Entry configuration
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
+ *
-+ * warning: This function has to be called after dpsw_acl_set_entry_cfg()
++ * Interfaces must belong to this VLAN, otherwise an error
++ * is returned and the command is ignored
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg)
++int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_entry *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
-+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
-+ dpsw_set_field(cmd_params->result_action, RESULT_ACTION,
-+ cfg->result.action);
-+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_add_if() - Associate interface/interfaces with ACL.
++ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
++ * converted from transmitting untagged to transmitting tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
++ * @vlan_id: VLAN Identifier
++ * @cfg: Set of interfaces that should be removed
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Interfaces provided by API have to belong to this VLAN and
++ * configured untagged, otherwise an error is returned and the
++ * command is ignored
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
++int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id,
++ const struct dpsw_vlan_if_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+}
+
+/**
-+ * dpsw_acl_remove_if() - De-associate interface/interfaces from ACL.
++ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL ID
-+ * @cfg: Interfaces list
++ * @vlan_id: VLAN Identifier
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg)
++int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 vlan_id)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_if *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
-+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
-+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
++ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_acl_get_attributes() - Get specific counter of particular interface
++ * dpsw_fdb_add_unicast() - Function adds an unicast entry into MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @acl_id: ACL Identifier
-+ * @attr: Returned ACL attributes
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr)
++int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_acl_get_attr *cmd_params;
-+ struct dpsw_rsp_acl_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_acl_get_attr *)cmd.params;
-+ cmd_params->acl_id = cpu_to_le16(acl_id);
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_acl_get_attr *)cmd.params;
-+ attr->max_entries = le16_to_cpu(rsp_params->max_entries);
-+ attr->num_entries = le16_to_cpu(rsp_params->num_entries);
-+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
++ * dpsw_fdb_remove_unicast() - removes an entry from MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @attr: Returned control interface attributes
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Unicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr)
++int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_unicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
-+ int err;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_unicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
++ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
-+ err = mc_send_command(mc_io, &cmd);
-+ if (err)
-+ return err;
-+
-+ /* retrieve response parameters */
-+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
-+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
-+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
-+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
-+
-+ return 0;
++ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
++ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to multi-cast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
-+ * @cfg: Buffer pools configuration
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * If group doesn't exist, it will be created.
++ * It adds only interfaces not belonging to this multicast group
++ * yet, otherwise error will be generated and the command is
++ * ignored.
++ * This function may be called numerous times always providing
++ * required interfaces delta.
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *pools)
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
-+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
-+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
-+ cmd_params->num_dpbp = pools->num_dpbp;
-+ for (i = 0; i < 8; i++) {
-+ cmd_params->backup_pool = dpsw_set_bit(cmd_params->backup_pool,
-+ i,
-+ pools->pools[i].backup_pool);
-+ cmd_params->buffer_size[i] =
-+ cpu_to_le16(pools->pools[i].buffer_size);
-+ cmd_params->dpbp_id[i] =
-+ cpu_to_le32(pools->pools[i].dpbp_id);
-+ }
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_enable() - Enable control interface
++ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
++ * group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @cfg: Multicast entry configuration
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Interfaces provided by this API have to exist in the group,
++ * otherwise an error will be returned and an entire command
++ * ignored. If there is no interface left in the group,
++ * an entire group is deleted
++ *
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_multicast_op *cmd_params;
++ int i;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
++ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
++ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
++ for (i = 0; i < 6; i++)
++ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
-+ * dpsw_ctrl_if_disable() - Function disables control interface
++ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
++ * @fdb_id: Forwarding Database Identifier
++ * @mode: Learning mode
+ *
-+ * Return: '0' on Success; Error code otherwise.
++ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token)
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+
+ /* prepare command */
-+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
++ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
++ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
++ cmd_params->fdb_id = cpu_to_le16(fdb_id);
++ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
-+ cmd_flags,
-+ 0);
++ cmd_flags,
++ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+}
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
-@@ -0,0 +1,1269 @@
-+/* Copyright 2013-2015 Freescale Semiconductor Inc.
-+ *
-+ * Redistribution and use in source and binary forms, with or without
-+ * modification, are permitted provided that the following conditions are met:
-+ * * Redistributions of source code must retain the above copyright
-+ * notice, this list of conditions and the following disclaimer.
-+ * * Redistributions in binary form must reproduce the above copyright
-+ * notice, this list of conditions and the following disclaimer in the
-+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of the above-listed copyright holders nor the
-+ * names of any contributors may be used to endorse or promote products
-+ * derived from this software without specific prior written permission.
-+ *
-+ *
-+ * ALTERNATIVELY, this software may be distributed under the terms of the
-+ * GNU General Public License ("GPL") as published by the Free Software
-+ * Foundation, either version 2 of that License or (at your option) any
-+ * later version.
+@@ -0,0 +1,592 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2013-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017-2018 NXP
+ *
-+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
-+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-+ * POSSIBILITY OF SUCH DAMAGE.
+ */
++
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+ } adv;
+};
+
-+int dpsw_create(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ const struct dpsw_cfg *cfg,
-+ u32 *obj_id);
-+
-+int dpsw_destroy(struct fsl_mc_io *mc_io,
-+ u16 dprc_token,
-+ u32 cmd_flags,
-+ u32 object_id);
-+
+int dpsw_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+ u32 cmd_flags,
+ u16 token);
+
-+int dpsw_is_enabled(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ int *en);
-+
+int dpsw_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+ int irq_num;
+};
+
-+int dpsw_set_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
-+int dpsw_get_irq(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ int *type,
-+ struct dpsw_irq_cfg *irq_cfg);
-+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
-+int dpsw_get_irq_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u8 *en);
-+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
-+int dpsw_get_irq_mask(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u8 irq_index,
-+ u32 *mask);
-+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 token,
+ struct dpsw_attr *attr);
+
-+int dpsw_set_reflection_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id);
-+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
-+ int up;
++ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
++ u8 en);
+
+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ int en);
-+
-+int dpsw_if_set_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int en);
++ u8 en);
+
+/**
-+ * struct dpsw_tci_cfg - Tag Contorl Information (TCI) configuration
++ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ *
+ */
+enum dpsw_stp_state {
-+ DPSW_STP_STATE_BLOCKING = 0,
++ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
-+ DPSW_STP_STATE_FORWARDING = 3
++ DPSW_STP_STATE_FORWARDING = 3,
++ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
-+ const struct dpsw_stp_cfg *cfg);
-+
-+/**
-+ * enum dpsw_accepted_frames - Types of frames to accept
-+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
-+ * priority tagged frames
-+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
-+ * Priority-Tagged frames received on this interface.
-+ *
-+ */
-+enum dpsw_accepted_frames {
-+ DPSW_ADMIT_ALL = 1,
-+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
-+};
-+
-+/**
-+ * struct dpsw_accepted_frames_cfg - Types of frames to accept configuration
-+ * @type: Defines ingress accepted frames
-+ * @unaccept_act: When a frame is not accepted, it may be discarded or
-+ * redirected to control interface depending on this mode
-+ */
-+struct dpsw_accepted_frames_cfg {
-+ enum dpsw_accepted_frames type;
-+ enum dpsw_action unaccept_act;
-+};
-+
-+int dpsw_if_set_accepted_frames(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_accepted_frames_cfg *cfg);
-+
-+int dpsw_if_set_accept_all_vlan(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ int accept_all);
-+
-+/**
-+ * enum dpsw_counter - Counters types
-+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
-+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
-+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
-+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
-+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
-+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
-+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
-+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
-+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
-+ * @DPSW_CNT_EGR_BYTE: Counts eEgress bytes
-+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
-+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
-+ */
-+enum dpsw_counter {
-+ DPSW_CNT_ING_FRAME = 0x0,
-+ DPSW_CNT_ING_BYTE = 0x1,
-+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
-+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
-+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
-+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
-+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
-+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
-+ DPSW_CNT_EGR_FRAME = 0x8,
-+ DPSW_CNT_EGR_BYTE = 0x9,
-+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
-+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
-+};
-+
-+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 *counter);
-+
-+int dpsw_if_set_counter(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ enum dpsw_counter type,
-+ u64 counter);
-+
-+/**
-+ * Maximum number of TC
-+ */
-+#define DPSW_MAX_TC 8
-+
-+/**
-+ * enum dpsw_priority_selector - User priority
-+ * @DPSW_UP_PCP: Priority Code Point (PCP): a 3-bit field which
-+ * refers to the IEEE 802.1p priority.
-+ * @DPSW_UP_DSCP: Differentiated services Code Point (DSCP): 6 bit
-+ * field from IP header
-+ *
-+ */
-+enum dpsw_priority_selector {
-+ DPSW_UP_PCP = 0,
-+ DPSW_UP_DSCP = 1
-+};
-+
-+/**
-+ * enum dpsw_schedule_mode - Traffic classes scheduling
-+ * @DPSW_SCHED_STRICT_PRIORITY: schedule strict priority
-+ * @DPSW_SCHED_WEIGHTED: schedule based on token bucket created algorithm
-+ */
-+enum dpsw_schedule_mode {
-+ DPSW_SCHED_STRICT_PRIORITY,
-+ DPSW_SCHED_WEIGHTED
-+};
-+
-+/**
-+ * struct dpsw_tx_schedule_cfg - traffic class configuration
-+ * @mode: Strict or weight-based scheduling
-+ * @delta_bandwidth: weighted Bandwidth in range from 100 to 10000
-+ */
-+struct dpsw_tx_schedule_cfg {
-+ enum dpsw_schedule_mode mode;
-+ u16 delta_bandwidth;
-+};
-+
-+/**
-+ * struct dpsw_tx_selection_cfg - Mapping user priority into traffic
-+ * class configuration
-+ * @priority_selector: Source for user priority regeneration
-+ * @tc_id: The Regenerated User priority that the incoming
-+ * User Priority is mapped to for this interface
-+ * @tc_sched: Traffic classes configuration
-+ */
-+struct dpsw_tx_selection_cfg {
-+ enum dpsw_priority_selector priority_selector;
-+ u8 tc_id[DPSW_MAX_PRIORITIES];
-+ struct dpsw_tx_schedule_cfg tc_sched[DPSW_MAX_TC];
-+};
-+
-+int dpsw_if_set_tx_selection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_tx_selection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_reflection_filter - Filter type for frames to reflect
-+ * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
-+ * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames belong to
-+ * particular VLAN defined by vid parameter
-+ *
-+ */
-+enum dpsw_reflection_filter {
-+ DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
-+ DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
-+};
-+
-+/**
-+ * struct dpsw_reflection_cfg - Structure representing reflection information
-+ * @filter: Filter type for frames to reflect
-+ * @vlan_id: Vlan Id to reflect; valid only when filter type is
-+ * DPSW_INGRESS_VLAN
-+ */
-+struct dpsw_reflection_cfg {
-+ enum dpsw_reflection_filter filter;
-+ u16 vlan_id;
-+};
-+
-+int dpsw_if_add_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_reflection_cfg *cfg);
-+
-+/**
-+ * enum dpsw_metering_mode - Metering modes
-+ * @DPSW_METERING_MODE_NONE: metering disabled
-+ * @DPSW_METERING_MODE_RFC2698: RFC 2698
-+ * @DPSW_METERING_MODE_RFC4115: RFC 4115
-+ */
-+enum dpsw_metering_mode {
-+ DPSW_METERING_MODE_NONE = 0,
-+ DPSW_METERING_MODE_RFC2698,
-+ DPSW_METERING_MODE_RFC4115
-+};
-+
-+/**
-+ * enum dpsw_metering_unit - Metering count
-+ * @DPSW_METERING_UNIT_BYTES: count bytes
-+ * @DPSW_METERING_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_metering_unit {
-+ DPSW_METERING_UNIT_BYTES = 0,
-+ DPSW_METERING_UNIT_FRAMES
-+};
-+
-+/**
-+ * struct dpsw_metering_cfg - Metering configuration
-+ * @mode: metering modes
-+ * @units: Bytes or frame units
-+ * @cir: Committed information rate (CIR) in Kbits/s
-+ * @eir: Peak information rate (PIR) Kbit/s rfc2698
-+ * Excess information rate (EIR) Kbit/s rfc4115
-+ * @cbs: Committed burst size (CBS) in bytes
-+ * @ebs: Peak burst size (PBS) in bytes for rfc2698
-+ * Excess bust size (EBS) in bytes rfc4115
-+ *
-+ */
-+struct dpsw_metering_cfg {
-+ enum dpsw_metering_mode mode;
-+ enum dpsw_metering_unit units;
-+ u32 cir;
-+ u32 eir;
-+ u32 cbs;
-+ u32 ebs;
-+};
-+
-+int dpsw_if_set_flooding_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+int dpsw_if_set_metering(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ const struct dpsw_metering_cfg *cfg);
-+
-+/**
-+ * enum dpsw_early_drop_unit - DPSW early drop unit
-+ * @DPSW_EARLY_DROP_UNIT_BYTE: count bytes
-+ * @DPSW_EARLY_DROP_UNIT_FRAMES: count frames
-+ */
-+enum dpsw_early_drop_unit {
-+ DPSW_EARLY_DROP_UNIT_BYTE = 0,
-+ DPSW_EARLY_DROP_UNIT_FRAMES
-+};
-+
-+/**
-+ * enum dpsw_early_drop_mode - DPSW early drop mode
-+ * @DPSW_EARLY_DROP_MODE_NONE: early drop is disabled
-+ * @DPSW_EARLY_DROP_MODE_TAIL: early drop in taildrop mode
-+ * @DPSW_EARLY_DROP_MODE_WRED: early drop in WRED mode
-+ */
-+enum dpsw_early_drop_mode {
-+ DPSW_EARLY_DROP_MODE_NONE = 0,
-+ DPSW_EARLY_DROP_MODE_TAIL,
-+ DPSW_EARLY_DROP_MODE_WRED
-+};
-+
-+/**
-+ * struct dpsw_wred_cfg - WRED configuration
-+ * @max_threshold: maximum threshold that packets may be discarded. Above this
-+ * threshold all packets are discarded; must be less than 2^39;
-+ * approximated to be expressed as (x+256)*2^(y-1) due to HW
-+ * implementation.
-+ * @min_threshold: minimum threshold that packets may be discarded at
-+ * @drop_probability: probability that a packet will be discarded (1-100,
-+ * associated with the maximum threshold)
-+ */
-+struct dpsw_wred_cfg {
-+ u64 min_threshold;
-+ u64 max_threshold;
-+ u8 drop_probability;
-+};
++ const struct dpsw_stp_cfg *cfg);
+
+/**
-+ * struct dpsw_early_drop_cfg - early-drop configuration
-+ * @drop_mode: drop mode
-+ * @units: count units
-+ * @yellow: WRED - 'yellow' configuration
-+ * @green: WRED - 'green' configuration
-+ * @tail_drop_threshold: tail drop threshold
++ * enum dpsw_accepted_frames - Types of frames to accept
++ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
++ * priority tagged frames
++ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
++ * Priority-Tagged frames received on this interface.
++ *
+ */
-+struct dpsw_early_drop_cfg {
-+ enum dpsw_early_drop_mode drop_mode;
-+ enum dpsw_early_drop_unit units;
-+ struct dpsw_wred_cfg yellow;
-+ struct dpsw_wred_cfg green;
-+ u32 tail_drop_threshold;
++enum dpsw_accepted_frames {
++ DPSW_ADMIT_ALL = 1,
++ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
-+void dpsw_prepare_early_drop(const struct dpsw_early_drop_cfg *cfg,
-+ u8 *early_drop_buf);
-+
-+int dpsw_if_set_early_drop(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u8 tc_id,
-+ u64 early_drop_iova);
-+
+/**
-+ * struct dpsw_custom_tpid_cfg - Structure representing tag Protocol identifier
-+ * @tpid: An additional tag protocol identifier
++ * enum dpsw_counter - Counters types
++ * @DPSW_CNT_ING_FRAME: Counts ingress frames
++ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
++ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
++ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
++ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
++ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
++ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
++ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
++ * @DPSW_CNT_EGR_FRAME: Counts egress frames
++ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
++ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
++ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ */
-+struct dpsw_custom_tpid_cfg {
-+ u16 tpid;
++enum dpsw_counter {
++ DPSW_CNT_ING_FRAME = 0x0,
++ DPSW_CNT_ING_BYTE = 0x1,
++ DPSW_CNT_ING_FLTR_FRAME = 0x2,
++ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
++ DPSW_CNT_ING_MCAST_FRAME = 0x4,
++ DPSW_CNT_ING_MCAST_BYTE = 0x5,
++ DPSW_CNT_ING_BCAST_FRAME = 0x6,
++ DPSW_CNT_ING_BCAST_BYTES = 0x7,
++ DPSW_CNT_EGR_FRAME = 0x8,
++ DPSW_CNT_EGR_BYTE = 0x9,
++ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
++ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
+};
+
-+int dpsw_add_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
-+
-+int dpsw_remove_custom_tpid(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_custom_tpid_cfg *cfg);
++int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 if_id,
++ enum dpsw_counter type,
++ u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
-+/**
-+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
-+ * @num_tcs: Number of traffic classes
-+ * @rate: Transmit rate in bits per second
-+ * @options: Interface configuration options (bitmap)
-+ * @enabled: Indicates if interface is enabled
-+ * @accept_all_vlan: The device discards/accepts incoming frames
-+ * for VLANs that do not include this interface
-+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
-+ * discards untagged frames or priority-tagged frames received on
-+ * this interface;
-+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
-+ * tagged frames received on this interface are accepted
-+ * @qdid: control frames transmit qdid
-+ */
-+struct dpsw_if_attr {
-+ u8 num_tcs;
-+ u32 rate;
-+ u32 options;
-+ int enabled;
-+ int accept_all_vlan;
-+ enum dpsw_accepted_frames admit_untagged;
-+ u16 qdid;
-+};
-+
-+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ struct dpsw_if_attr *attr);
-+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ u16 frame_length);
+
-+int dpsw_if_get_max_frame_length(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 if_id,
-+ u16 *frame_length);
-+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Data Base
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_add_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
-+int dpsw_vlan_remove_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ const struct dpsw_vlan_if_cfg *cfg);
-+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id);
+
+/**
-+ * struct dpsw_vlan_attr - VLAN attributes
-+ * @fdb_id: Associated FDB ID
-+ * @num_ifs: Number of interfaces
-+ * @num_untagged_ifs: Number of untagged interfaces
-+ * @num_flooding_ifs: Number of flooding interfaces
-+ */
-+struct dpsw_vlan_attr {
-+ u16 fdb_id;
-+ u16 num_ifs;
-+ u16 num_untagged_ifs;
-+ u16 num_flooding_ifs;
-+};
-+
-+int dpsw_vlan_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_attr *attr);
-+
-+int dpsw_vlan_get_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_flooding(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+int dpsw_vlan_get_if_untagged(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 vlan_id,
-+ struct dpsw_vlan_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_fdb_cfg - FDB Configuration
-+ * @num_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ */
-+struct dpsw_fdb_cfg {
-+ u16 num_fdb_entries;
-+ u16 fdb_aging_time;
-+};
-+
-+int dpsw_fdb_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *fdb_id,
-+ const struct dpsw_fdb_cfg *cfg);
-+
-+int dpsw_fdb_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id);
-+
-+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg);
+
-+int dpsw_fdb_get_unicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_unicast_cfg *cfg);
-+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id[DPSW_MAX_IF];
+};
+
-+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+int dpsw_fdb_get_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_multicast_cfg *cfg);
-+
-+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ const struct dpsw_fdb_multicast_cfg *cfg);
-+
-+/**
-+ * enum dpsw_fdb_learning_mode - Auto-learning modes
-+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
-+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
-+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable None secure learning by CPU
-+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
-+ *
-+ * NONE - SECURE LEARNING
-+ * SMAC found DMAC found CTLU Action
-+ * v v Forward frame to
-+ * 1. DMAC destination
-+ * - v Forward frame to
-+ * 1. DMAC destination
-+ * 2. Control interface
-+ * v - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * - - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * 2. Control interface
-+ * SECURE LEARING
-+ * SMAC found DMAC found CTLU Action
-+ * v v Forward frame to
-+ * 1. DMAC destination
-+ * - v Forward frame to
-+ * 1. Control interface
-+ * v - Forward frame to
-+ * 1. Flooding list of interfaces
-+ * - - Forward frame to
-+ * 1. Control interface
-+ */
-+enum dpsw_fdb_learning_mode {
-+ DPSW_FDB_LEARNING_MODE_DIS = 0,
-+ DPSW_FDB_LEARNING_MODE_HW = 1,
-+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
-+ DPSW_FDB_LEARNING_MODE_SECURE = 3
-+};
-+
-+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ enum dpsw_fdb_learning_mode mode);
-+
-+/**
-+ * struct dpsw_fdb_attr - FDB Attributes
-+ * @max_fdb_entries: Number of FDB entries
-+ * @fdb_aging_time: Aging time in seconds
-+ * @learning_mode: Learning mode
-+ * @num_fdb_mc_groups: Current number of multicast groups
-+ * @max_fdb_mc_groups: Maximum number of multicast groups
-+ */
-+struct dpsw_fdb_attr {
-+ u16 max_fdb_entries;
-+ u16 fdb_aging_time;
-+ enum dpsw_fdb_learning_mode learning_mode;
-+ u16 num_fdb_mc_groups;
-+ u16 max_fdb_mc_groups;
-+};
-+
-+int dpsw_fdb_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 fdb_id,
-+ struct dpsw_fdb_attr *attr);
-+
-+/**
-+ * struct dpsw_acl_cfg - ACL Configuration
-+ * @max_entries: Number of FDB entries
-+ */
-+struct dpsw_acl_cfg {
-+ u16 max_entries;
-+};
-+
-+/**
-+ * struct dpsw_acl_fields - ACL fields.
-+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
-+ * slow protocols, MVRP, STP
-+ * @l2_source_mac: Source MAC address
-+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
-+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
-+ * Q-in-Q, IPv4, IPv6, PPPoE
-+ * @l2_pcp_dei: indicate which protocol is encapsulated in the payload
-+ * @l2_vlan_id: layer 2 VLAN ID
-+ * @l2_ether_type: layer 2 Ethernet type
-+ * @l3_dscp: Layer 3 differentiated services code point
-+ * @l3_protocol: Tells the Network layer at the destination host, to which
-+ * Protocol this packet belongs to. The following protocol are
-+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
-+ * (encapsulation), GRE, PTP
-+ * @l3_source_ip: Source IPv4 IP
-+ * @l3_dest_ip: Destination IPv4 IP
-+ * @l4_source_port: Source TCP/UDP Port
-+ * @l4_dest_port: Destination TCP/UDP Port
-+ */
-+struct dpsw_acl_fields {
-+ u8 l2_dest_mac[6];
-+ u8 l2_source_mac[6];
-+ u16 l2_tpid;
-+ u8 l2_pcp_dei;
-+ u16 l2_vlan_id;
-+ u16 l2_ether_type;
-+ u8 l3_dscp;
-+ u8 l3_protocol;
-+ u32 l3_source_ip;
-+ u32 l3_dest_ip;
-+ u16 l4_source_port;
-+ u16 l4_dest_port;
-+};
-+
-+/**
-+ * struct dpsw_acl_key - ACL key
-+ * @match: Match fields
-+ * @mask: Mask: b'1 - valid, b'0 don't care
-+ */
-+struct dpsw_acl_key {
-+ struct dpsw_acl_fields match;
-+ struct dpsw_acl_fields mask;
-+};
-+
-+/**
-+ * enum dpsw_acl_action
-+ * @DPSW_ACL_ACTION_DROP: Drop frame
-+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
-+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
-+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
-+ */
-+enum dpsw_acl_action {
-+ DPSW_ACL_ACTION_DROP,
-+ DPSW_ACL_ACTION_REDIRECT,
-+ DPSW_ACL_ACTION_ACCEPT,
-+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
-+};
-+
-+/**
-+ * struct dpsw_acl_result - ACL action
-+ * @action: Action should be taken when ACL entry hit
-+ * @if_id: Interface IDs to redirect frame. Valid only if redirect selected for
-+ * action
-+ */
-+struct dpsw_acl_result {
-+ enum dpsw_acl_action action;
-+ u16 if_id;
-+};
-+
-+/**
-+ * struct dpsw_acl_entry_cfg - ACL entry
-+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
-+ * to dpsw_acl_prepare_entry_cfg()
-+ * @result: Required action when entry hit occurs
-+ * @precedence: Precedence inside ACL 0 is lowest; This priority can not change
-+ * during the lifetime of a Policy. It is user responsibility to
-+ * space the priorities according to consequent rule additions.
-+ */
-+struct dpsw_acl_entry_cfg {
-+ u64 key_iova;
-+ struct dpsw_acl_result result;
-+ int precedence;
-+};
-+
-+int dpsw_acl_add(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 *acl_id,
-+ const struct dpsw_acl_cfg *cfg);
-+
-+int dpsw_acl_remove(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id);
-+
-+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
-+ uint8_t *entry_cfg_buf);
-+
-+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_entry_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_if_cfg - List of interfaces to Associate with ACL
-+ * @num_ifs: Number of interfaces
-+ * @if_id: List of interfaces
-+ */
-+struct dpsw_acl_if_cfg {
-+ u16 num_ifs;
-+ u16 if_id[DPSW_MAX_IF];
-+};
-+
-+int dpsw_acl_add_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ const struct dpsw_acl_if_cfg *cfg);
-+
-+/**
-+ * struct dpsw_acl_attr - ACL Attributes
-+ * @max_entries: Max number of ACL entries
-+ * @num_entries: Number of used ACL entries
-+ * @num_ifs: Number of interfaces associated with ACL
-+ */
-+struct dpsw_acl_attr {
-+ u16 max_entries;
-+ u16 num_entries;
-+ u16 num_ifs;
-+};
-+
-+int dpsw_acl_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ u16 acl_id,
-+ struct dpsw_acl_attr *attr);
-+/**
-+ * struct dpsw_ctrl_if_attr - Control interface attributes
-+ * @rx_fqid: Receive FQID
-+ * @rx_err_fqid: Receive error FQID
-+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
-+ */
-+struct dpsw_ctrl_if_attr {
-+ u32 rx_fqid;
-+ u32 rx_err_fqid;
-+ u32 tx_err_conf_fqid;
-+};
-+
-+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ struct dpsw_ctrl_if_attr *attr);
++int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
+
-+/**
-+ * Maximum number of DPBP
-+ */
-+#define DPSW_MAX_DPBP 8
++int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
-+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
-+ * @num_dpbp: Number of DPBPs
-+ * @pools: Array of buffer pools parameters; The number of valid entries
-+ * must match 'num_dpbp' value
++ * enum dpsw_fdb_learning_mode - Auto-learning modes
++ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
++ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
++ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
++ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
++ *
++ * NON-SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. DMAC destination
++ * 2. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Flooding list of interfaces
++ * 2. Control interface
++ * SECURE LEARNING
++ * SMAC found DMAC found CTLU Action
++ * v v Forward frame to
++ * 1. DMAC destination
++ * - v Forward frame to
++ * 1. Control interface
++ * v - Forward frame to
++ * 1. Flooding list of interfaces
++ * - - Forward frame to
++ * 1. Control interface
+ */
-+struct dpsw_ctrl_if_pools_cfg {
-+ u8 num_dpbp;
-+ /**
-+ * struct pools - Buffer pools parameters
-+ * @dpbp_id: DPBP object ID
-+ * @buffer_size: Buffer size
-+ * @backup_pool: Backup pool
-+ */
-+ struct {
-+ int dpbp_id;
-+ u16 buffer_size;
-+ int backup_pool;
-+ } pools[DPSW_MAX_DPBP];
++enum dpsw_fdb_learning_mode {
++ DPSW_FDB_LEARNING_MODE_DIS = 0,
++ DPSW_FDB_LEARNING_MODE_HW = 1,
++ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
++ DPSW_FDB_LEARNING_MODE_SECURE = 3
+};
+
-+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token,
-+ const struct dpsw_ctrl_if_pools_cfg *cfg);
-+
-+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
++ u32 cmd_flags,
++ u16 token,
++ u16 fdb_id,
++ enum dpsw_fdb_learning_mode mode);
+
-+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io,
-+ u32 cmd_flags,
-+ u16 token);
++/**
++ * struct dpsw_fdb_attr - FDB Attributes
++ * @max_fdb_entries: Number of FDB entries
++ * @fdb_aging_time: Aging time in seconds
++ * @learning_mode: Learning mode
++ * @num_fdb_mc_groups: Current number of multicast groups
++ * @max_fdb_mc_groups: Maximum number of multicast groups
++ */
++struct dpsw_fdb_attr {
++ u16 max_fdb_entries;
++ u16 fdb_aging_time;
++ enum dpsw_fdb_learning_mode learning_mode;
++ u16 num_fdb_mc_groups;
++ u16 max_fdb_mc_groups;
++};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+
+#endif /* __FSL_DPSW_H */
--- /dev/null
-+++ b/drivers/staging/fsl-dpaa2/ethsw/switch.c
-@@ -0,0 +1,1857 @@
-+/* Copyright 2014-2015 Freescale Semiconductor Inc.
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+@@ -0,0 +1,206 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
-+ * * Neither the name of Freescale Semiconductor nor the
-+ * names of its contributors may be used to endorse or promote products
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
-+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include <linux/module.h>
-+#include <linux/msi.h>
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/if_vlan.h>
-+
-+#include <uapi/linux/if_bridge.h>
-+#include <net/netlink.h>
-+
-+#include "../../fsl-mc/include/mc.h"
-+#include "dpsw.h"
-+#include "dpsw-cmd.h"
-+
-+static const char ethsw_drv_version[] = "0.1";
-+
-+/* Minimal supported DPSE version */
-+#define DPSW_MIN_VER_MAJOR 8
-+#define DPSW_MIN_VER_MINOR 0
-+
-+/* IRQ index */
-+#define DPSW_MAX_IRQ_NUM 2
-+
-+#define ETHSW_VLAN_MEMBER 1
-+#define ETHSW_VLAN_UNTAGGED 2
-+#define ETHSW_VLAN_PVID 4
-+#define ETHSW_VLAN_GLOBAL 8
-+
-+/* Maximum Frame Length supported by HW (currently 10k) */
-+#define DPAA2_MFL (10 * 1024)
-+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
-+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
-+
-+struct ethsw_port_priv {
-+ struct net_device *netdev;
-+ struct list_head list;
-+ u16 port_index;
-+ struct ethsw_dev_priv *ethsw_priv;
-+ u8 stp_state;
-+
-+ char vlans[VLAN_VID_MASK + 1];
-+
-+};
-+
-+struct ethsw_dev_priv {
-+ struct net_device *netdev;
-+ struct fsl_mc_io *mc_io;
-+ u16 dpsw_handle;
-+ struct dpsw_attr sw_attr;
-+ int dev_id;
-+ /*TODO: redundant, we can use the slave dev list */
-+ struct list_head port_list;
-+
-+ bool flood;
-+ bool learning;
-+
-+ char vlans[VLAN_VID_MASK + 1];
-+};
-+
-+static int ethsw_port_stop(struct net_device *netdev);
-+static int ethsw_port_open(struct net_device *netdev);
-+
-+static inline void __get_priv(struct net_device *netdev,
-+ struct ethsw_dev_priv **priv,
-+ struct ethsw_port_priv **port_priv)
-+{
-+ struct ethsw_dev_priv *_priv = NULL;
-+ struct ethsw_port_priv *_port_priv = NULL;
-+
-+ if (netdev->flags & IFF_MASTER) {
-+ _priv = netdev_priv(netdev);
-+ } else {
-+ _port_priv = netdev_priv(netdev);
-+ _priv = _port_priv->ethsw_priv;
-+ }
-+
-+ if (priv)
-+ *priv = _priv;
-+ if (port_priv)
-+ *port_priv = _port_priv;
-+}
-+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw netdevice ops */
-+
-+static netdev_tx_t ethsw_dropframe(struct sk_buff *skb, struct net_device *dev)
-+{
-+ /* we don't support I/O for now, drop the frame */
-+ dev_kfree_skb_any(skb);
-+ return NETDEV_TX_OK;
-+}
-+
-+static int ethsw_open(struct net_device *netdev)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
-+
-+ err = dpsw_enable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_enable err %d\n", err);
-+ return err;
-+ }
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_open(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_stop(struct net_device *netdev)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err;
-+
-+ err = dpsw_disable(priv->mc_io, 0, priv->dpsw_handle);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_disable err %d\n", err);
-+ return err;
-+ }
-+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
-+ err = dev_close(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "dev_close err %d\n", err);
-+ }
-+
-+ return 0;
-+}
-+
-+static int ethsw_add_vlan(struct net_device *netdev, u16 vid)
-+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err;
-+
-+ struct dpsw_vlan_cfg vcfg = {
-+ /* TODO: add support for VLAN private FDBs */
-+ .fdb_id = 0,
-+ };
-+ if (priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
-+
-+ err = dpsw_vlan_add(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add err %d\n", err);
-+ return err;
-+ }
-+ priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ return 0;
-+}
-+
-+static int ethsw_port_add_vlan(struct net_device *netdev, u16 vid, u16 flags)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
-+
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+
-+ if (port_priv->vlans[vid]) {
-+ netdev_err(netdev, "VLAN already configured\n");
-+ return -EEXIST;
-+ }
-+
-+ if (flags & BRIDGE_VLAN_INFO_PVID && netif_oper_up(netdev)) {
-+ netdev_err(netdev, "interface must be down to change PVID!\n");
-+ return -EBUSY;
-+ }
-+
-+ err = dpsw_vlan_add_if(priv->mc_io, 0, priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
-+
-+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
-+ err = dpsw_vlan_add_if_untagged(priv->mc_io, 0,
-+ priv->dpsw_handle, vid, &vcfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_vlan_add_if_untagged err %d\n",
-+ err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
-+ }
-+
-+ if (flags & BRIDGE_VLAN_INFO_PVID) {
-+ struct dpsw_tci_cfg tci_cfg = {
-+ /* TODO: at least add better defaults if these cannot
-+ * be configured
-+ */
-+ .pcp = 0,
-+ .dei = 0,
-+ .vlan_id = vid,
-+ };
-+
-+ err = dpsw_if_set_tci(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &tci_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
-+ }
++#include "ethsw.h"
+
-+ return 0;
-+}
++static struct {
++ enum dpsw_counter id;
++ char name[ETH_GSTRING_LEN];
++} ethsw_ethtool_counters[] = {
++ {DPSW_CNT_ING_FRAME, "rx frames"},
++ {DPSW_CNT_ING_BYTE, "rx bytes"},
++ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
++ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
++ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
++ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
++ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
++ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
++ {DPSW_CNT_EGR_FRAME, "tx frames"},
++ {DPSW_CNT_EGR_BYTE, "tx bytes"},
++ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+
-+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
-+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
-+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
-+ .len = sizeof(struct bridge_vlan_info), },
+};
+
-+static int ethsw_setlink_af_spec(struct net_device *netdev,
-+ struct nlattr **tb)
-+{
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv = NULL;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
++#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
-+ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
-+ return -EOPNOTSUPP;
-+ }
++static void ethsw_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u16 version_major, version_minor;
++ int err;
+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
++ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
++ &version_major,
++ &version_minor);
++ if (err)
++ strlcpy(drvinfo->fw_version, "N/A",
++ sizeof(drvinfo->fw_version));
++ else
++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
++ "%u.%u", version_major, version_minor);
+
-+ __get_priv(netdev, &priv, &port_priv);
++ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
++ sizeof(drvinfo->bus_info));
++}
+
-+ if (!port_priv || !priv->vlans[vinfo->vid]) {
-+ /* command targets switch device or this is a new VLAN */
-+ err = ethsw_add_vlan(priv->netdev, vinfo->vid);
-+ if (err)
-+ return err;
++static int
++ethsw_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *link_ksettings)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state = {0};
++ int err = 0;
+
-+ /* command targets switch device; mark it*/
-+ if (!port_priv)
-+ priv->vlans[vinfo->vid] |= ETHSW_VLAN_GLOBAL;
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &state);
++ if (err) {
++ netdev_err(netdev, "ERROR %d getting link state", err);
++ goto out;
+ }
+
-+ if (port_priv) {
-+ /* command targets switch port */
-+ err = ethsw_port_add_vlan(netdev, vinfo->vid, vinfo->flags);
-+ if (err)
-+ return err;
-+ }
++ /* At the moment, we have no way of interrogating the DPMAC
++ * from the DPSW side or there may not exist a DPMAC at all.
++ * Report only autoneg state, duplexity and speed.
++ */
++ if (state.options & DPSW_LINK_OPT_AUTONEG)
++ link_ksettings->base.autoneg = AUTONEG_ENABLE;
++ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
++ link_ksettings->base.duplex = DUPLEX_FULL;
++ link_ksettings->base.speed = state.rate;
+
-+ return 0;
++out:
++ return err;
+}
+
-+static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
-+ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
-+ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
-+ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
-+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
-+};
-+
-+static int ethsw_set_learning(struct net_device *netdev, u8 flag)
++static int
++ethsw_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *link_ksettings)
+{
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ enum dpsw_fdb_learning_mode learn_mode;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_cfg cfg = {0};
++ int err = 0;
+
-+ if (flag)
-+ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
-+ else
-+ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
++ netdev_dbg(netdev, "Setting link parameters...");
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, learn_mode);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ return err;
++ /* Due to a temporary MC limitation, the DPSW port must be down
++ * in order to be able to change link settings. Taking steps to let
++ * the user know that.
++ */
++ if (netif_running(netdev)) {
++ netdev_info(netdev, "Sorry, interface must be brought down first.\n");
++ return -EACCES;
+ }
-+ priv->learning = !!flag;
+
-+ return 0;
++ cfg.rate = link_ksettings->base.speed;
++ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
++ cfg.options |= DPSW_LINK_OPT_AUTONEG;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
++ if (link_ksettings->base.duplex == DUPLEX_HALF)
++ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
++ else
++ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
++
++ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ &cfg);
++ if (err)
++ /* ethtool will be loud enough if we return an error; no point
++ * in putting our own error message on the console by default
++ */
++ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
++
++ return err;
+}
+
-+static int ethsw_port_set_flood(struct net_device *netdev, u8 flag)
++static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ int err;
-+
-+ err = dpsw_if_set_flooding(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, (int)flag);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_fdb_set_learning_mode err %d\n", err);
-+ return err;
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ETHSW_NUM_COUNTERS;
++ default:
++ return -EOPNOTSUPP;
+ }
-+ priv->flood = !!flag;
-+
-+ return 0;
+}
+
-+static int ethsw_port_set_state(struct net_device *netdev, u8 state)
++static void ethsw_ethtool_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
-+ u8 old_state = port_priv->stp_state;
-+ int err;
-+
-+ struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
-+ .state = state,
-+ };
-+ /* TODO: check port state, interface may be down */
++ int i;
+
-+ if (state > BR_STATE_BLOCKING)
-+ return -EINVAL;
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
++ memcpy(data + i * ETH_GSTRING_LEN,
++ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ break;
++ }
++}
+
-+ if (state == port_priv->stp_state)
-+ return 0;
++static void ethsw_ethtool_get_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int i, err;
+
-+ if (state == BR_STATE_DISABLED) {
-+ port_priv->stp_state = state;
++ memset(data, 0,
++ sizeof(u64) * ETHSW_NUM_COUNTERS);
+
-+ err = ethsw_port_stop(netdev);
++ for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ ethsw_ethtool_counters[i].id,
++ &data[i]);
+ if (err)
-+ goto error;
-+ } else {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle,
-+ port_priv->port_index, &stp_cfg);
-+ if (err) {
-+ netdev_err(netdev, "dpsw_if_set_stp err %d\n", err);
-+ return err;
-+ }
-+
-+ port_priv->stp_state = state;
-+
-+ if (old_state == BR_STATE_DISABLED) {
-+ err = ethsw_port_open(netdev);
-+ if (err)
-+ goto error;
-+ }
++ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
++ ethsw_ethtool_counters[i].name, err);
+ }
-+
-+ return 0;
-+error:
-+ port_priv->stp_state = old_state;
-+ return err;
+}
+
-+static int ethsw_setlink_protinfo(struct net_device *netdev,
-+ struct nlattr **tb)
-+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
++const struct ethtool_ops ethsw_port_ethtool_ops = {
++ .get_drvinfo = ethsw_get_drvinfo,
++ .get_link = ethtool_op_get_link,
++ .get_link_ksettings = ethsw_get_link_ksettings,
++ .set_link_ksettings = ethsw_set_link_ksettings,
++ .get_strings = ethsw_ethtool_get_strings,
++ .get_ethtool_stats = ethsw_ethtool_get_stats,
++ .get_sset_count = ethsw_ethtool_get_sset_count,
++};
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+@@ -0,0 +1,1438 @@
++/* Copyright 2014-2016 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
+
-+ __get_priv(netdev, &priv, &port_priv);
++#include <linux/module.h>
+
-+ if (tb[IFLA_BRPORT_LEARNING]) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_LEARNING]);
++#include <linux/interrupt.h>
++#include <linux/msi.h>
++#include <linux/kthread.h>
++#include <linux/workqueue.h>
+
-+ if (port_priv)
-+ netdev_warn(netdev,
-+ "learning set on whole switch dev\n");
++#include <linux/fsl/mc.h>
+
-+ err = ethsw_set_learning(priv->netdev, flag);
-+ if (err)
-+ return err;
++#include "ethsw.h"
+
-+ } else if (tb[IFLA_BRPORT_UNICAST_FLOOD] && port_priv) {
-+ u8 flag = nla_get_u8(tb[IFLA_BRPORT_UNICAST_FLOOD]);
++static struct workqueue_struct *ethsw_owq;
+
-+ err = ethsw_port_set_flood(port_priv->netdev, flag);
-+ if (err)
-+ return err;
++/* Minimal supported DPSW version */
++#define DPSW_MIN_VER_MAJOR 8
++#define DPSW_MIN_VER_MINOR 0
+
-+ } else if (tb[IFLA_BRPORT_STATE] && port_priv) {
-+ u8 state = nla_get_u8(tb[IFLA_BRPORT_STATE]);
++#define DEFAULT_VLAN_ID 1
+
-+ err = ethsw_port_set_state(port_priv->netdev, state);
-+ if (err)
-+ return err;
++static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
++{
++ int err;
+
-+ } else {
-+ return -EOPNOTSUPP;
++ struct dpsw_vlan_cfg vcfg = {
++ .fdb_id = 0,
++ };
++
++ if (ethsw->vlans[vid]) {
++ dev_err(ethsw->dev, "VLAN already configured\n");
++ return -EEXIST;
++ }
++
++ err = dpsw_vlan_add(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, vid, &vcfg);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
++ return err;
+ }
++ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
-+static int ethsw_setlink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
++static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
-+ struct nlattr *attr;
-+ struct nlattr *tb[(IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
-+ IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX + 1];
-+ int err = 0;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_tci_cfg tci_cfg = { 0 };
++ bool is_oper;
++ int err, ret;
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, attr,
-+ ifla_br_policy);
-+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for br_policy err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ err = ethsw_setlink_af_spec(netdev, tb);
++ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
-+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
-+ if (attr) {
-+ err = nla_parse_nested(tb, IFLA_BRPORT_MAX, attr,
-+ ifla_brport_policy);
++ tci_cfg.vlan_id = pvid;
++
++ /* Interface needs to be down to change PVID */
++ is_oper = netif_oper_up(netdev);
++ if (is_oper) {
++ err = dpsw_if_disable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
+ if (err) {
-+ netdev_err(netdev,
-+ "nla_parse_nested for brport_policy err %d\n",
-+ err);
++ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
-+
-+ err = ethsw_setlink_protinfo(netdev, tb);
-+ return err;
-+ }
-+
-+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC/PROTINFO\n");
-+ return -EOPNOTSUPP;
-+}
-+
-+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv)
-+{
-+ u8 operstate = netif_running(netdev) ? netdev->operstate : IF_OPER_DOWN;
-+ int iflink;
-+ int err;
-+
-+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MASTER, priv->netdev->ifindex);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
-+ if (err)
-+ goto nla_put_err;
-+ if (netdev->addr_len) {
-+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
-+ netdev->dev_addr);
-+ if (err)
-+ goto nla_put_err;
+ }
+
-+ iflink = dev_get_iflink(netdev);
-+ if (netdev->ifindex != iflink) {
-+ err = nla_put_u32(skb, IFLA_LINK, iflink);
-+ if (err)
-+ goto nla_put_err;
++ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ port_priv->idx, &tci_cfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
++ goto set_tci_error;
++ }
++
++ /* Delete previous PVID info and mark the new one */
++ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
++ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
++ port_priv->pvid = pvid;
++
++set_tci_error:
++ if (is_oper) {
++ ret = dpsw_if_enable(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ port_priv->idx);
++ if (ret) {
++ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
++ return ret;
++ }
+ }
+
-+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
+ return err;
+}
+
-+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_port_priv *port_priv)
++static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
++ u16 vid, u16 flags)
+{
-+ struct nlattr *nest;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
+ int err;
+
-+ u8 stp_state = port_priv->stp_state;
++ if (port_priv->vlans[vid]) {
++ netdev_warn(netdev, "VLAN %d already configured\n", vid);
++ return -EEXIST;
++ }
+
-+ if (port_priv->stp_state == DPSW_STP_STATE_BLOCKING)
-+ stp_state = BR_STATE_BLOCKING;
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
++ return err;
++ }
+
-+ nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed\n");
-+ return -ENOMEM;
++ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
++
++ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
++ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_add_if_untagged err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
-+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, stp_state);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING,
-+ port_priv->ethsw_priv->learning);
-+ if (err)
-+ goto nla_put_err;
-+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
-+ port_priv->ethsw_priv->flood);
-+ if (err)
-+ goto nla_put_err;
-+ nla_nest_end(skb, nest);
++ if (flags & BRIDGE_VLAN_INFO_PVID) {
++ err = ethsw_port_set_pvid(port_priv, vid);
++ if (err)
++ return err;
++ }
+
+ return 0;
-+
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
+}
+
-+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev,
-+ struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv)
++static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
+{
-+ struct nlattr *nest;
-+ struct bridge_vlan_info vinfo;
-+ const char *vlans;
-+ u16 i;
++ enum dpsw_fdb_learning_mode learn_mode;
+ int err;
+
-+ nest = nla_nest_start(skb, IFLA_AF_SPEC);
-+ if (!nest) {
-+ netdev_err(netdev, "nla_nest_start failed");
-+ return -ENOMEM;
-+ }
-+
-+ if (port_priv)
-+ vlans = port_priv->vlans;
++ if (flag)
++ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
+ else
-+ vlans = priv->vlans;
-+
-+ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
-+ vinfo.flags = 0;
-+ vinfo.vid = i;
-+
-+ if (vlans[i] & ETHSW_VLAN_UNTAGGED)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
-+
-+ if (vlans[i] & ETHSW_VLAN_PVID)
-+ vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
++ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
+
-+ if (vlans[i] & ETHSW_VLAN_MEMBER) {
-+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
-+ sizeof(vinfo), &vinfo);
-+ if (err)
-+ goto nla_put_err;
-+ }
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
++ learn_mode);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
+ }
-+
-+ nla_nest_end(skb, nest);
++ ethsw->learning = !!flag;
+
+ return 0;
-+nla_put_err:
-+ netdev_err(netdev, "nla_put_ err %d\n", err);
-+ nla_nest_cancel(skb, nest);
-+ return err;
+}
+
-+static int ethsw_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-+ struct net_device *netdev, u32 filter_mask,
-+ int nlflags)
++static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
+{
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ struct ifinfomsg *hdr;
-+ struct nlmsghdr *nlh;
+ int err;
+
-+ __get_priv(netdev, &priv, &port_priv);
-+
-+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
-+ if (!nlh)
-+ return -EMSGSIZE;
++ err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, flag);
++ if (err) {
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_set_learning_mode err %d\n", err);
++ return err;
++ }
++ port_priv->flood = !!flag;
+
-+ hdr = nlmsg_data(nlh);
-+ memset(hdr, 0, sizeof(*hdr));
-+ hdr->ifi_family = AF_BRIDGE;
-+ hdr->ifi_type = netdev->type;
-+ hdr->ifi_index = netdev->ifindex;
-+ hdr->ifi_flags = dev_get_flags(netdev);
++ return 0;
++}
+
-+ err = __nla_put_netdev(skb, netdev, priv);
-+ if (err)
-+ goto nla_put_err;
++static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
++{
++ struct dpsw_stp_cfg stp_cfg = {
++ .vlan_id = DEFAULT_VLAN_ID,
++ .state = state,
++ };
++ int err;
+
-+ if (port_priv) {
-+ err = __nla_put_port(skb, netdev, port_priv);
-+ if (err)
-+ goto nla_put_err;
-+ }
++ if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
++ return 0; /* Nothing to do */
+
-+ /* Check if the VID information is requested */
-+ if (filter_mask & RTEXT_FILTER_BRVLAN) {
-+ err = __nla_put_vlan(skb, netdev, priv, port_priv);
-+ if (err)
-+ goto nla_put_err;
++ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &stp_cfg);
++ if (err) {
++ netdev_err(port_priv->netdev,
++ "dpsw_if_set_stp err %d\n", err);
++ return err;
+ }
+
-+ nlmsg_end(skb, nlh);
-+ return skb->len;
++ port_priv->stp_state = state;
+
-+nla_put_err:
-+ nlmsg_cancel(skb, nlh);
-+ return -EMSGSIZE;
++ return 0;
+}
+
-+static int ethsw_dellink_switch(struct ethsw_dev_priv *priv, u16 vid)
++static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ int err = 0;
++ struct ethsw_port_priv *ppriv_local = NULL;
++ int i, err;
+
-+ if (!priv->vlans[vid])
++ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
-+ err = dpsw_vlan_remove(priv->mc_io, 0, priv->dpsw_handle, vid);
++ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove err %d\n", err);
++ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
-+ priv->vlans[vid] = 0;
++ ethsw->vlans[vid] = 0;
+
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ ppriv_local = ethsw->ports[i];
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
-+static int ethsw_dellink_port(struct ethsw_dev_priv *priv,
-+ struct ethsw_port_priv *port_priv,
-+ u16 vid)
++static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *ppriv_local = NULL;
-+ struct dpsw_vlan_if_cfg vcfg = {
-+ .num_ifs = 1,
-+ .if_id[0] = port_priv->port_index,
-+ };
-+ unsigned int count = 0;
-+ int err = 0;
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ if (!port_priv->vlans[vid])
-+ return -ENOENT;
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
+
-+ /* VLAN will be deleted from switch if global flag is not set
-+ * and is configured on only one port
-+ */
-+ if (!(priv->vlans[vid] & ETHSW_VLAN_GLOBAL)) {
-+ list_for_each(pos, &priv->port_list) {
-+ ppriv_local = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ if (ppriv_local->vlans[vid] & ETHSW_VLAN_MEMBER)
-+ count++;
-+ }
++ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ if (err)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_add_unicast err %d\n", err);
++ return err;
++}
+
-+ if (count == 1)
-+ return ethsw_dellink_switch(priv, vid);
-+ }
++static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
++{
++ struct dpsw_fdb_unicast_cfg entry = {0};
++ int err;
+
-+ err = dpsw_vlan_remove_if(priv->mc_io, 0, priv->dpsw_handle,
-+ vid, &vcfg);
-+ if (err) {
-+ netdev_err(priv->netdev, "dpsw_vlan_remove_if err %d\n", err);
-+ return err;
-+ }
-+ port_priv->vlans[vid] = 0;
-+ return 0;
++ entry.if_egress = port_priv->idx;
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ ether_addr_copy(entry.mac_addr, addr);
++
++ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the del command */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_unicast err %d\n", err);
++ return err;
+}
+
-+static int ethsw_dellink(struct net_device *netdev,
-+ struct nlmsghdr *nlh,
-+ u16 flags)
++static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
+{
-+ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
-+ struct nlattr *spec;
-+ struct bridge_vlan_info *vinfo;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv = NULL;
-+ int err = 0;
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
+
-+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
-+ if (!spec)
-+ return 0;
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
+
-+ err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, spec, ifla_br_policy);
++ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the add command */
++ if (err && err != -ENXIO)
++ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
++ err);
++ return err;
++}
++
++static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
++ const unsigned char *addr)
++{
++ struct dpsw_fdb_multicast_cfg entry = {0};
++ int err;
++
++ ether_addr_copy(entry.mac_addr, addr);
++ entry.type = DPSW_FDB_ENTRY_STATIC;
++ entry.num_ifs = 1;
++ entry.if_id[0] = port_priv->idx;
++
++ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ 0, &entry);
++ /* Silently discard calling multiple times the del command */
++ if (err && err != -ENAVAIL)
++ netdev_err(port_priv->netdev,
++ "dpsw_fdb_remove_multicast err %d\n", err);
++ return err;
++}
++
++static void port_get_stats(struct net_device *netdev,
++ struct rtnl_link_stats64 *stats)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ u64 tmp;
++ int err;
++
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
-+ return err;
++ goto error;
+
-+ if (!tb[IFLA_BRIDGE_VLAN_INFO])
-+ return -EOPNOTSUPP;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
++ if (err)
++ goto error;
+
-+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
++ if (err)
++ goto error;
+
-+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
-+ return -EINVAL;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
++ if (err)
++ goto error;
+
-+ __get_priv(netdev, &priv, &port_priv);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FRAME_DISCARD,
++ &stats->rx_dropped);
++ if (err)
++ goto error;
+
-+ /* decide if command targets switch device or port */
-+ if (!port_priv)
-+ err = ethsw_dellink_switch(priv, vinfo->vid);
-+ else
-+ err = ethsw_dellink_port(priv, port_priv, vinfo->vid);
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_ING_FLTR_FRAME,
++ &tmp);
++ if (err)
++ goto error;
++ stats->rx_dropped += tmp;
+
-+ return err;
++ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ DPSW_CNT_EGR_FRAME_DISCARD,
++ &stats->tx_dropped);
++ if (err)
++ goto error;
++
++ return;
++
++error:
++ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
-+static const struct net_device_ops ethsw_ops = {
-+ .ndo_open = ðsw_open,
-+ .ndo_stop = ðsw_stop,
++static bool port_has_offload_stats(const struct net_device *netdev,
++ int attr_id)
++{
++ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
++}
+
-+ .ndo_bridge_setlink = ðsw_setlink,
-+ .ndo_bridge_getlink = ðsw_getlink,
-+ .ndo_bridge_dellink = ðsw_dellink,
++static int port_get_offload_stats(int attr_id,
++ const struct net_device *netdev,
++ void *sp)
++{
++ switch (attr_id) {
++ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
++ port_get_stats((struct net_device *)netdev, sp);
++ return 0;
++ }
+
-+ .ndo_start_xmit = ðsw_dropframe,
-+};
++ return -EINVAL;
++}
++
++static int port_change_mtu(struct net_device *netdev, int mtu)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
++ 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx,
++ (u16)ETHSW_L2_MAX_FRM(mtu));
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_if_set_max_frame_length() err %d\n", err);
++ return err;
++ }
+
-+/*--------------------------------------------------------------------------- */
-+/* switch port netdevice ops */
++ netdev->mtu = mtu;
++ return 0;
++}
+
-+static int _ethsw_port_carrier_state_sync(struct net_device *netdev)
++static int port_carrier_state_sync(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ struct dpsw_link_state state;
++ int err;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index, &state);
-+ if (unlikely(err)) {
++ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx, &state);
++ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
-+ if (state.up)
-+ netif_carrier_on(port_priv->netdev);
-+ else
-+ netif_carrier_off(port_priv->netdev);
-+
++ if (state.up != port_priv->link_state) {
++ if (state.up)
++ netif_carrier_on(netdev);
++ else
++ netif_carrier_off(netdev);
++ port_priv->link_state = state.up;
++ }
+ return 0;
+}
+
-+static int ethsw_port_open(struct net_device *netdev)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++static int port_open(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
++
++ /* No need to allow Tx as control interface is disabled */
++ netif_tx_stop_all_queues(netdev);
+
-+ err = dpsw_if_enable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ /* sync carrier state */
-+ err = _ethsw_port_carrier_state_sync(netdev);
++ err = port_carrier_state_sync(netdev);
+ if (err) {
-+ netdev_err(netdev, "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
++ netdev_err(netdev,
++ "port_carrier_state_sync err %d\n", err);
+ goto err_carrier_sync;
+ }
+
+ return 0;
+
+err_carrier_sync:
-+ dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ return err;
+}
+
-+static int ethsw_port_stop(struct net_device *netdev)
++static int port_stop(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err;
+
-+ err = dpsw_if_disable(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index);
++ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
++ port_priv->ethsw_data->dpsw_handle,
++ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ return 0;
+}
+
-+static int ethsw_port_fdb_add_uc(struct net_device *netdev,
-+ const unsigned char *addr)
++static netdev_tx_t port_dropframe(struct sk_buff *skb,
++ struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
-+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
++ /* we don't support I/O for now, drop the frame */
++ dev_kfree_skb_any(skb);
+
-+ err = dpsw_fdb_add_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_unicast err %d\n", err);
-+ return err;
++ return NETDEV_TX_OK;
+}
+
-+static int ethsw_port_fdb_del_uc(struct net_device *netdev,
-+ const unsigned char *addr)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_unicast_cfg entry = {0};
-+ int err;
++static const struct net_device_ops ethsw_port_ops = {
++ .ndo_open = port_open,
++ .ndo_stop = port_stop,
+
-+ entry.if_egress = port_priv->port_index;
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ ether_addr_copy(entry.mac_addr, addr);
++ .ndo_set_mac_address = eth_mac_addr,
++ .ndo_change_mtu = port_change_mtu,
++ .ndo_has_offload_stats = port_has_offload_stats,
++ .ndo_get_offload_stats = port_get_offload_stats,
+
-+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_unicast err %d\n", err);
-+ return err;
-+}
++ .ndo_start_xmit = port_dropframe,
++};
+
-+static int ethsw_port_fdb_add_mc(struct net_device *netdev,
-+ const unsigned char *addr)
++static void ethsw_links_state_update(struct ethsw_core *ethsw)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
-+ int err;
++ int i;
+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ port_carrier_state_sync(ethsw->ports[i]->netdev);
++}
+
-+ err = dpsw_fdb_add_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_add_multicast err %d\n", err);
-+ return err;
++static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++{
++ return IRQ_WAKE_THREAD;
+}
+
-+static int ethsw_port_fdb_del_mc(struct net_device *netdev,
-+ const unsigned char *addr)
++static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_fdb_multicast_cfg entry = {0};
++ struct device *dev = (struct device *)arg;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++
++ /* Mask the events and the if_id reserved bits to be cleared on read */
++ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
-+ ether_addr_copy(entry.mac_addr, addr);
-+ entry.type = DPSW_FDB_ENTRY_STATIC;
-+ entry.num_ifs = 1;
-+ entry.if_id[0] = port_priv->port_index;
++ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, &status);
++ if (err) {
++ dev_err(dev, "Can't get irq status (err %d)", err);
+
-+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ 0, &entry);
-+ if (err)
-+ netdev_err(netdev, "dpsw_fdb_remove_multicast err %d\n", err);
-+ return err;
-+}
++ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
++ if (err)
++ dev_err(dev, "Can't clear irq status (err %d)", err);
++ goto out;
++ }
+
-+static int _lookup_address(struct net_device *netdev, int is_uc,
-+ const unsigned char *addr)
-+{
-+ struct netdev_hw_addr *ha;
-+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
++ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
++ ethsw_links_state_update(ethsw);
+
-+ netif_addr_lock_bh(netdev);
-+ list_for_each_entry(ha, &list->list, list) {
-+ if (ether_addr_equal(ha->addr, addr)) {
-+ netif_addr_unlock_bh(netdev);
-+ return 1;
-+ }
-+ }
-+ netif_addr_unlock_bh(netdev);
-+ return 0;
++out:
++ return IRQ_HANDLED;
+}
+
-+static int ethsw_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid,
-+ u16 flags)
++static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct ethsw_dev_priv *priv = port_priv->ethsw_priv;
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ struct fsl_mc_device_irq *irq;
+ int err;
+
-+ /* TODO: add replace support when added to iproute bridge */
-+ if (!(flags & NLM_F_REQUEST)) {
-+ netdev_err(netdev,
-+ "ethsw_port_fdb_add unexpected flags value %08x\n",
-+ flags);
-+ return -EINVAL;
++ err = fsl_mc_allocate_irqs(sw_dev);
++ if (err) {
++ dev_err(dev, "MC irqs allocation failed\n");
++ return err;
+ }
+
-+ if (is_unicast_ether_addr(addr)) {
-+ /* if entry cannot be replaced, return error if exists */
-+ if (flags & NLM_F_EXCL || flags & NLM_F_APPEND) {
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos,
-+ struct ethsw_port_priv,
-+ list);
-+ if (_lookup_address(port_priv->netdev,
-+ 1, addr))
-+ return -EEXIST;
-+ }
-+ }
-+
-+ err = ethsw_port_fdb_add_uc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_uc err %d\n",
-+ err);
-+ return err;
-+ }
-+
-+ /* we might have replaced an existing entry for a different
-+ * switch port, make sure the address doesn't linger in any
-+ * port address list
-+ */
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
-+ dev_uc_del(port_priv->netdev, addr);
-+ }
++ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
++ err = -EINVAL;
++ goto free_irq;
++ }
+
-+ err = dev_uc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_uc_add err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ struct dpsw_fdb_multicast_cfg entry = {
-+ .type = DPSW_FDB_ENTRY_STATIC,
-+ .num_ifs = 0,
-+ };
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
++ goto free_irq;
++ }
+
-+ /* check if address is already set on this port */
-+ if (_lookup_address(netdev, 0, addr))
-+ return -EEXIST;
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
-+ /* check if the address exists on other port */
-+ ether_addr_copy(entry.mac_addr, addr);
-+ err = dpsw_fdb_get_multicast(priv->mc_io, 0, priv->dpsw_handle,
-+ 0, &entry);
-+ if (!err) {
-+ /* entry exists, can we replace it? */
-+ if (flags & NLM_F_EXCL)
-+ return -EEXIST;
-+ } else if (err != -ENAVAIL) {
-+ netdev_err(netdev, "dpsw_fdb_get_unicast err %d\n",
-+ err);
-+ return err;
-+ }
++ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
++ ethsw_irq0_handler,
++ ethsw_irq0_handler_thread,
++ IRQF_NO_SUSPEND | IRQF_ONESHOT,
++ dev_name(dev), dev);
++ if (err) {
++ dev_err(dev, "devm_request_threaded_irq(): %d", err);
++ goto free_irq;
++ }
+
-+ err = ethsw_port_fdb_add_mc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_add_mc err %d\n",
-+ err);
-+ return err;
-+ }
++ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, mask);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
++ goto free_devm_irq;
++ }
+
-+ err = dev_mc_add(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_add err %d\n", err);
-+ return err;
-+ }
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 1);
++ if (err) {
++ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
++ goto free_devm_irq;
+ }
+
+ return 0;
++
++free_devm_irq:
++ devm_free_irq(dev, irq->msi_desc->irq, dev);
++free_irq:
++ fsl_mc_free_irqs(sw_dev);
++ return err;
+}
+
-+static int ethsw_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-+ struct net_device *netdev,
-+ const unsigned char *addr, u16 vid)
++static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ struct fsl_mc_device_irq *irq;
+ int err;
+
-+ if (is_unicast_ether_addr(addr)) {
-+ err = ethsw_port_fdb_del_uc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_uc err %d\n",
-+ err);
-+ return err;
-+ }
++ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
++ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DPSW_IRQ_INDEX_IF, 0);
++ if (err)
++ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
-+ /* also delete if configured on port */
-+ err = dev_uc_del(netdev, addr);
-+ if (err && err != -ENOENT) {
-+ netdev_err(netdev, "dev_uc_del err %d\n", err);
-+ return err;
-+ }
-+ } else {
-+ if (!_lookup_address(netdev, 0, addr))
-+ return -ENOENT;
++ fsl_mc_free_irqs(sw_dev);
++}
+
-+ err = dev_mc_del(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "dev_mc_del err %d\n", err);
-+ return err;
-+ }
++static int swdev_port_attr_get(struct net_device *netdev,
++ struct switchdev_attr *attr)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ err = ethsw_port_fdb_del_mc(netdev, addr);
-+ if (err) {
-+ netdev_err(netdev, "ethsw_port_fdb_del_mc err %d\n",
-+ err);
-+ return err;
-+ }
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
++ attr->u.ppid.id_len = 1;
++ attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ attr->u.brport_flags =
++ (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
++ (port_priv->flood ? BR_FLOOD : 0);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
++ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
++ break;
++ default:
++ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
-+struct rtnl_link_stats64 *ethsw_port_get_stats(struct net_device *netdev,
-+ struct rtnl_link_stats64 *storage)
++static int port_attr_stp_state_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ u8 state)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u64 tmp;
-+ int err;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME, &storage->rx_packets);
-+ if (err)
-+ goto error;
-+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME, &storage->tx_packets);
-+ if (err)
-+ goto error;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_BYTE, &storage->rx_bytes);
-+ if (err)
-+ goto error;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_BYTE, &storage->tx_bytes);
-+ if (err)
-+ goto error;
++ return ethsw_port_set_stp_state(port_priv, state);
++}
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FRAME_DISCARD,
-+ &storage->rx_dropped);
-+ if (err)
-+ goto error;
++static int port_attr_br_flags_set(struct net_device *netdev,
++ struct switchdev_trans *trans,
++ unsigned long flags)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int err = 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_ING_FLTR_FRAME,
-+ &tmp);
-+ if (err)
-+ goto error;
-+ storage->rx_dropped += tmp;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
+
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ DPSW_CNT_EGR_FRAME_DISCARD,
-+ &storage->tx_dropped);
++ /* Learning is enabled per switch */
++ err = ethsw_set_learning(port_priv->ethsw_data, !!(flags & BR_LEARNING));
+ if (err)
-+ goto error;
++ goto exit;
+
-+ return storage;
++ err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
+
-+error:
-+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
++exit:
++ return err;
+}
+
-+static int ethsw_port_change_mtu(struct net_device *netdev, int mtu)
++static int swdev_port_attr_set(struct net_device *netdev,
++ const struct switchdev_attr *attr,
++ struct switchdev_trans *trans)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ int err;
-+
-+ if (mtu < ETH_ZLEN || mtu > ETHSW_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, ETH_ZLEN, ETHSW_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
++ int err = 0;
+
-+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_priv->mc_io,
-+ 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ (u16)ETHSW_L2_MAX_FRM(mtu));
-+ if (err) {
-+ netdev_err(netdev,
-+ "dpsw_if_set_max_frame_length() err %d\n", err);
-+ return err;
++ switch (attr->id) {
++ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
++ err = port_attr_stp_state_set(netdev, trans,
++ attr->u.stp_state);
++ break;
++ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
++ err = port_attr_br_flags_set(netdev, trans,
++ attr->u.brport_flags);
++ break;
++ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
++ /* VLANs are supported by default */
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
+ }
-+
-+ netdev->mtu = mtu;
-+ return 0;
-+}
-+
-+static const struct net_device_ops ethsw_port_ops = {
-+ .ndo_open = ðsw_port_open,
-+ .ndo_stop = ðsw_port_stop,
-+
-+ .ndo_fdb_add = ðsw_port_fdb_add,
-+ .ndo_fdb_del = ðsw_port_fdb_del,
-+ .ndo_fdb_dump = &ndo_dflt_fdb_dump,
-+
-+ .ndo_get_stats64 = ðsw_port_get_stats,
-+ .ndo_change_mtu = ðsw_port_change_mtu,
-+
-+ .ndo_start_xmit = ðsw_dropframe,
-+};
-+
-+static void ethsw_get_drvinfo(struct net_device *netdev,
-+ struct ethtool_drvinfo *drvinfo)
-+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u16 version_major, version_minor;
-+ int err;
-+
-+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-+ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
-+
-+ err = dpsw_get_api_version(port_priv->ethsw_priv->mc_io, 0,
-+ &version_major,
-+ &version_minor);
-+ if (err)
-+ strlcpy(drvinfo->fw_version, "N/A",
-+ sizeof(drvinfo->fw_version));
-+ else
-+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-+ "%u.%u", version_major, version_minor);
-+
-+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
-+ sizeof(drvinfo->bus_info));
++
++ return err;
+}
+
-+static int ethsw_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int port_vlans_add(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan,
++ struct switchdev_trans *trans)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ int err = 0;
++ int vid, err;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
++ if (switchdev_trans_ph_prepare(trans))
++ return 0;
++
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ if (!port_priv->ethsw_data->vlans[vid]) {
++ /* this is a new VLAN */
++ err = ethsw_add_vlan(port_priv->ethsw_data, vid);
++ if (err)
++ return err;
++
++ port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
++ }
++ err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
++ if (err)
++ break;
+ }
+
-+ /* At the moment, we have no way of interrogating the DPMAC
-+ * from the DPSW side or there may not exist a DPMAC at all.
-+ * Report only autoneg state, duplexity and speed.
-+ */
-+ if (state.options & DPSW_LINK_OPT_AUTONEG)
-+ cmd->autoneg = AUTONEG_ENABLE;
-+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
-+ cmd->autoneg = DUPLEX_FULL;
-+ ethtool_cmd_speed_set(cmd, state.rate);
++ return err;
++}
++
++static int swdev_port_obj_add(struct net_device *netdev,
++ const struct switchdev_obj *obj,
++ struct switchdev_trans *trans)
++{
++ int err;
++
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_add(netdev,
++ SWITCHDEV_OBJ_PORT_VLAN(obj),
++ trans);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ break;
++ }
+
-+out:
+ return err;
+}
+
-+static int ethsw_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ struct dpsw_link_state state = {0};
-+ struct dpsw_link_cfg cfg = {0};
-+ int err = 0;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct net_device *netdev = port_priv->netdev;
++ struct dpsw_vlan_if_cfg vcfg;
++ int i, err;
+
-+ netdev_dbg(netdev, "Setting link parameters...");
++ if (!port_priv->vlans[vid])
++ return -ENOENT;
+
-+ err = dpsw_if_get_link_state(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &state);
-+ if (err) {
-+ netdev_err(netdev, "ERROR %d getting link state", err);
-+ goto out;
++ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
++ err = ethsw_port_set_pvid(port_priv, 0);
++ if (err)
++ return err;
+ }
+
-+ /* Due to a temporary MC limitation, the DPSW port must be down
-+ * in order to be able to change link settings. Taking steps to let
-+ * the user know that.
-+ */
-+ if (netif_running(netdev)) {
-+ netdev_info(netdev,
-+ "Sorry, interface must be brought down first.\n");
-+ return -EACCES;
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
++ ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if_untagged err %d\n",
++ err);
++ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
-+ cfg.options = state.options;
-+ cfg.rate = ethtool_cmd_speed(cmd);
-+ if (cmd->autoneg == AUTONEG_ENABLE)
-+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
-+ if (cmd->duplex == DUPLEX_HALF)
-+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
-+ else
-+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
++ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ vid, &vcfg);
++ if (err) {
++ netdev_err(netdev,
++ "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
-+ err = dpsw_if_set_link_cfg(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ &cfg);
-+ if (err)
-+ /* ethtool will be loud enough if we return an error; no point
-+ * in putting our own error message on the console by default
++ /* Delete VLAN from switch if it is no longer configured on
++ * any port
+ */
-+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
++ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
++ return 0; /* Found a port member in VID */
+
-+out:
-+ return err;
-+}
++ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
-+static struct {
-+ enum dpsw_counter id;
-+ char name[ETH_GSTRING_LEN];
-+} ethsw_ethtool_counters[] = {
-+ {DPSW_CNT_ING_FRAME, "rx frames"},
-+ {DPSW_CNT_ING_BYTE, "rx bytes"},
-+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
-+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
-+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
-+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
-+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
-+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
-+ {DPSW_CNT_EGR_FRAME, "tx frames"},
-+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
-+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
++ err = ethsw_dellink_switch(ethsw, vid);
++ if (err)
++ return err;
++ }
+
-+};
++ return 0;
++}
+
-+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
++static int port_vlans_del(struct net_device *netdev,
++ const struct switchdev_obj_port_vlan *vlan)
+{
-+ switch (sset) {
-+ case ETH_SS_STATS:
-+ return ARRAY_SIZE(ethsw_ethtool_counters);
-+ default:
-+ return -EOPNOTSUPP;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++ int vid, err;
++
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
++ err = ethsw_port_del_vlan(port_priv, vid);
++ if (err)
++ break;
+ }
++
++ return err;
+}
+
-+static void ethsw_ethtool_get_strings(struct net_device *netdev,
-+ u32 stringset, u8 *data)
++static int swdev_port_obj_del(struct net_device *netdev,
++ const struct switchdev_obj *obj)
+{
-+ u32 i;
++ int err;
+
-+ switch (stringset) {
-+ case ETH_SS_STATS:
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++)
-+ memcpy(data + i * ETH_GSTRING_LEN,
-+ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
++ switch (obj->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
++ break;
++ default:
++ err = -EOPNOTSUPP;
+ break;
+ }
++ return err;
+}
+
-+static void ethsw_ethtool_get_stats(struct net_device *netdev,
-+ struct ethtool_stats *stats,
-+ u64 *data)
++static const struct switchdev_ops ethsw_port_switchdev_ops = {
++ .switchdev_port_attr_get = swdev_port_attr_get,
++ .switchdev_port_attr_set = swdev_port_attr_set,
++ .switchdev_port_obj_add = swdev_port_obj_add,
++ .switchdev_port_obj_del = swdev_port_obj_del,
++};
++
++/* For the moment, only flood setting needs to be updated */
++static int port_bridge_join(struct net_device *netdev)
+{
-+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
-+ u32 i;
-+ int err;
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
-+ for (i = 0; i < ARRAY_SIZE(ethsw_ethtool_counters); i++) {
-+ err = dpsw_if_get_counter(port_priv->ethsw_priv->mc_io, 0,
-+ port_priv->ethsw_priv->dpsw_handle,
-+ port_priv->port_index,
-+ ethsw_ethtool_counters[i].id,
-+ &data[i]);
-+ if (err)
-+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
-+ ethsw_ethtool_counters[i].name, err);
++ /* Enable flooding */
++ return ethsw_port_set_flood(port_priv, 1);
++}
++
++static int port_bridge_leave(struct net_device *netdev)
++{
++ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
++
++ /* Disable flooding */
++ return ethsw_port_set_flood(port_priv, 0);
++}
++
++static int port_netdevice_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
++{
++ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_changeupper_info *info = ptr;
++ struct net_device *upper_dev;
++ int err = 0;
++
++ if (netdev->netdev_ops != ðsw_port_ops)
++ return NOTIFY_DONE;
++
++ /* Handle just upper dev link/unlink for the moment */
++ if (event == NETDEV_CHANGEUPPER) {
++ upper_dev = info->upper_dev;
++ if (netif_is_bridge_master(upper_dev)) {
++ if (info->linking)
++ err = port_bridge_join(netdev);
++ else
++ err = port_bridge_leave(netdev);
++ }
+ }
++
++ return notifier_from_errno(err);
+}
+
-+static const struct ethtool_ops ethsw_port_ethtool_ops = {
-+ .get_drvinfo = ðsw_get_drvinfo,
-+ .get_link = ðtool_op_get_link,
-+ .get_settings = ðsw_get_settings,
-+ .set_settings = ðsw_set_settings,
-+ .get_strings = ðsw_ethtool_get_strings,
-+ .get_ethtool_stats = ðsw_ethtool_get_stats,
-+ .get_sset_count = ðsw_ethtool_get_sset_count,
++static struct notifier_block port_nb __read_mostly = {
++ .notifier_call = port_netdevice_event,
+};
+
-+/* -------------------------------------------------------------------------- */
-+/* ethsw driver functions */
++struct ethsw_switchdev_event_work {
++ struct work_struct work;
++ struct switchdev_notifier_fdb_info fdb_info;
++ struct net_device *dev;
++ unsigned long event;
++};
+
-+static int ethsw_links_state_update(struct ethsw_dev_priv *priv)
++static void ethsw_switchdev_event_work(struct work_struct *work)
+{
-+ struct list_head *pos;
-+ struct ethsw_port_priv *port_priv;
-+ int err;
++ struct ethsw_switchdev_event_work *switchdev_work =
++ container_of(work, struct ethsw_switchdev_event_work, work);
++ struct net_device *dev = switchdev_work->dev;
++ struct switchdev_notifier_fdb_info *fdb_info;
++ struct ethsw_port_priv *port_priv;
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv,
-+ list);
++ rtnl_lock();
++ port_priv = netdev_priv(dev);
++ fdb_info = &switchdev_work->fdb_info;
+
-+ err = _ethsw_port_carrier_state_sync(port_priv->netdev);
-+ if (err)
-+ netdev_err(port_priv->netdev,
-+ "_ethsw_port_carrier_state_sync err %d\n",
-+ err);
++ switch (switchdev_work->event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_add_mc(netdev_priv(dev), fdb_info->addr);
++ break;
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ if (is_unicast_ether_addr(fdb_info->addr))
++ ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
++ else
++ ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
++ break;
+ }
+
-+ return 0;
++ rtnl_unlock();
++ kfree(switchdev_work->fdb_info.addr);
++ kfree(switchdev_work);
++ dev_put(dev);
+}
+
-+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
++/* Called under rcu_read_lock() */
++static int port_switchdev_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
+{
-+ return IRQ_WAKE_THREAD;
-+}
++ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
++ struct ethsw_switchdev_event_work *switchdev_work;
++ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
-+static irqreturn_t _ethsw_irq0_handler_thread(int irq_num, void *arg)
-+{
-+ struct device *dev = (struct device *)arg;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
++ if (!switchdev_work)
++ return NOTIFY_BAD;
+
-+ struct fsl_mc_io *io = priv->mc_io;
-+ u16 token = priv->dpsw_handle;
-+ int irq_index = DPSW_IRQ_INDEX_IF;
++ INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
++ switchdev_work->dev = dev;
++ switchdev_work->event = event;
+
-+ /* Mask the events and the if_id reserved bits to be cleared on read */
-+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
-+ int err;
++ switch (event) {
++ case SWITCHDEV_FDB_ADD_TO_DEVICE:
++ case SWITCHDEV_FDB_DEL_TO_DEVICE:
++ memcpy(&switchdev_work->fdb_info, ptr,
++ sizeof(switchdev_work->fdb_info));
++ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
++ if (!switchdev_work->fdb_info.addr)
++ goto err_addr_alloc;
+
-+ err = dpsw_get_irq_status(io, 0, token, irq_index, &status);
-+ if (unlikely(err)) {
-+ netdev_err(netdev, "Can't get irq status (err %d)", err);
++ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
++ fdb_info->addr);
+
-+ err = dpsw_clear_irq_status(io, 0, token, irq_index,
-+ 0xFFFFFFFF);
-+ if (unlikely(err))
-+ netdev_err(netdev, "Can't clear irq status (err %d)",
-+ err);
-+ goto out;
++ /* Take a reference on the device to avoid being freed. */
++ dev_hold(dev);
++ break;
++ default:
++ return NOTIFY_DONE;
+ }
+
-+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
-+ err = ethsw_links_state_update(priv);
-+ if (unlikely(err))
-+ goto out;
-+ }
++ queue_work(ethsw_owq, &switchdev_work->work);
+
-+out:
-+ return IRQ_HANDLED;
++ return NOTIFY_DONE;
++
++err_addr_alloc:
++ kfree(switchdev_work);
++ return NOTIFY_BAD;
+}
+
-+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
++static struct notifier_block port_switchdev_nb = {
++ .notifier_call = port_switchdev_event,
++};
++
++static int ethsw_register_notifier(struct device *dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
-+ int err = 0;
-+ struct fsl_mc_device_irq *irq;
-+ const int irq_index = DPSW_IRQ_INDEX_IF;
-+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
++ int err;
+
-+ err = fsl_mc_allocate_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "MC irqs allocation failed\n");
++ err = register_netdevice_notifier(&port_nb);
++ if (err) {
++ dev_err(dev, "Failed to register netdev notifier\n");
+ return err;
+ }
+
-+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_MAX_IRQ_NUM)) {
-+ err = -EINVAL;
-+ goto free_irq;
++ err = register_switchdev_notifier(&port_switchdev_nb);
++ if (err) {
++ dev_err(dev, "Failed to register switchdev notifier\n");
++ goto err_switchdev_nb;
+ }
+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 0);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
-+ goto free_irq;
-+ }
++ return 0;
+
-+ irq = sw_dev->irqs[irq_index];
++err_switchdev_nb:
++ unregister_netdevice_notifier(&port_nb);
++ return err;
++}
+
-+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
-+ ethsw_irq0_handler,
-+ _ethsw_irq0_handler_thread,
-+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
-+ dev_name(dev), dev);
-+ if (unlikely(err)) {
-+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
-+ goto free_irq;
-+ }
++static int ethsw_open(struct ethsw_core *ethsw)
++{
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ err = dpsw_set_irq_mask(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, mask);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
-+ goto free_devm_irq;
++ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
++ return err;
+ }
+
-+ err = dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ irq_index, 1);
-+ if (unlikely(err)) {
-+ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
-+ goto free_devm_irq;
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ err = dev_open(port_priv->netdev);
++ if (err) {
++ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
++ return err;
++ }
+ }
+
+ return 0;
-+
-+free_devm_irq:
-+ devm_free_irq(dev, irq->msi_desc->irq, dev);
-+free_irq:
-+ fsl_mc_free_irqs(sw_dev);
-+ return err;
+}
+
-+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
++static int ethsw_stop(struct ethsw_core *ethsw)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev = dev_get_drvdata(dev);
-+ struct ethsw_dev_priv *priv = netdev_priv(netdev);
++ struct ethsw_port_priv *port_priv = NULL;
++ int i, err;
+
-+ dpsw_set_irq_enable(priv->mc_io, 0, priv->dpsw_handle,
-+ DPSW_IRQ_INDEX_IF, 0);
-+ devm_free_irq(dev,
-+ sw_dev->irqs[DPSW_IRQ_INDEX_IF]->msi_desc->irq,
-+ dev);
-+ fsl_mc_free_irqs(sw_dev);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
++ dev_close(port_priv->netdev);
++ }
++
++ err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err) {
++ dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
++ return err;
++ }
++
++ return 0;
+}
+
-+static int __cold
-+ethsw_init(struct fsl_mc_device *sw_dev)
++static int ethsw_init(struct fsl_mc_device *sw_dev)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct ethsw_dev_priv *priv;
-+ struct net_device *netdev;
-+ int err = 0;
-+ u16 i;
-+ u16 version_major, version_minor;
-+ const struct dpsw_stp_cfg stp_cfg = {
-+ .vlan_id = 1,
-+ .state = DPSW_STP_STATE_FORWARDING,
-+ };
-+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ u16 version_major, version_minor, i;
++ struct dpsw_stp_cfg stp_cfg;
++ int err;
+
-+ priv->dev_id = sw_dev->obj_desc.id;
++ ethsw->dev_id = sw_dev->obj_desc.id;
+
-+ err = dpsw_open(priv->mc_io, 0, priv->dev_id, &priv->dpsw_handle);
++ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
-+ goto err_exit;
-+ }
-+ if (!priv->dpsw_handle) {
-+ dev_err(dev, "dpsw_open returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_exit;
++ return err;
+ }
+
-+ err = dpsw_get_attributes(priv->mc_io, 0, priv->dpsw_handle,
-+ &priv->sw_attr);
++ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ ðsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_get_api_version(priv->mc_io, 0,
++ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err) {
+ goto err_close;
+ }
+
-+ err = dpsw_reset(priv->mc_io, 0, priv->dpsw_handle);
++ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
-+ err = dpsw_fdb_set_learning_mode(priv->mc_io, 0, priv->dpsw_handle, 0,
++ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ DPSW_FDB_LEARNING_MODE_HW);
+ if (err) {
+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ goto err_close;
+ }
+
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ err = dpsw_if_set_stp(priv->mc_io, 0, priv->dpsw_handle, i,
++ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
++ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
++
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ goto err_close;
+ }
+
-+ err = dpsw_if_set_broadcast(priv->mc_io, 0,
-+ priv->dpsw_handle, i, 1);
++ err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
++ ethsw->dpsw_handle, i, 1);
+ if (err) {
+ dev_err(dev,
+ "dpsw_if_set_broadcast err %d for port %d\n",
+ }
+ }
+
++ ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
++ "ethsw");
++ if (!ethsw_owq) {
++ err = -ENOMEM;
++ goto err_close;
++ }
++
++ err = ethsw_register_notifier(dev);
++ if (err)
++ goto err_destroy_ordered_workqueue;
++
+ return 0;
+
++err_destroy_ordered_workqueue:
++ destroy_workqueue(ethsw_owq);
++
+err_close:
-+ dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
-+err_exit:
++ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
-+static int __cold
-+ethsw_takedown(struct fsl_mc_device *sw_dev)
++static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
-+ struct device *dev = &sw_dev->dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ int err;
++ const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
++ struct net_device *netdev = port_priv->netdev;
++ struct ethsw_core *ethsw = port_priv->ethsw_data;
++ struct dpsw_vlan_if_cfg vcfg;
++ int err;
+
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ /* Switch starts with all ports configured to VLAN 1. Need to
++ * remove this setting to allow configuration at bridge join
++ */
++ vcfg.num_ifs = 1;
++ vcfg.if_id[0] = port_priv->idx;
++
++ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
++ err);
++ return err;
++ }
+
-+ err = dpsw_close(priv->mc_io, 0, priv->dpsw_handle);
++ err = ethsw_port_set_pvid(port_priv, 0);
+ if (err)
-+ dev_warn(dev, "dpsw_close err %d\n", err);
++ return err;
+
-+ return 0;
++ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
++ DEFAULT_VLAN_ID, &vcfg);
++ if (err) {
++ netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
++ return err;
++ }
++
++ err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
++
++ return err;
+}
+
-+static int __cold
-+ethsw_remove(struct fsl_mc_device *sw_dev)
++static void ethsw_unregister_notifier(struct device *dev)
+{
-+ struct device *dev;
-+ struct net_device *netdev;
-+ struct ethsw_dev_priv *priv;
-+ struct ethsw_port_priv *port_priv;
-+ struct list_head *pos;
++ int err;
++
++ err = unregister_switchdev_notifier(&port_switchdev_nb);
++ if (err)
++ dev_err(dev,
++ "Failed to unregister switchdev notifier (%d)\n", err);
++
++ err = unregister_netdevice_notifier(&port_nb);
++ if (err)
++ dev_err(dev,
++ "Failed to unregister netdev notifier (%d)\n", err);
++}
++
++static void ethsw_takedown(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw = dev_get_drvdata(dev);
++ int err;
++
++ ethsw_unregister_notifier(dev);
++
++ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
++ if (err)
++ dev_warn(dev, "dpsw_close err %d\n", err);
++}
++
++static int ethsw_remove(struct fsl_mc_device *sw_dev)
++{
++ struct ethsw_port_priv *port_priv;
++ struct ethsw_core *ethsw;
++ struct device *dev;
++ int i;
+
+ dev = &sw_dev->dev;
-+ netdev = dev_get_drvdata(dev);
-+ priv = netdev_priv(netdev);
++ ethsw = dev_get_drvdata(dev);
+
-+ list_for_each(pos, &priv->port_list) {
-+ port_priv = list_entry(pos, struct ethsw_port_priv, list);
++ ethsw_teardown_irqs(sw_dev);
+
-+ rtnl_lock();
-+ netdev_upper_dev_unlink(port_priv->netdev, netdev);
-+ rtnl_unlock();
++ destroy_workqueue(ethsw_owq);
++
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
+
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
-+
-+ ethsw_teardown_irqs(sw_dev);
-+
-+ unregister_netdev(netdev);
++ kfree(ethsw->ports);
+
+ ethsw_takedown(sw_dev);
-+ fsl_mc_portal_free(priv->mc_io);
++ fsl_mc_portal_free(ethsw->mc_io);
++
++ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return 0;
+}
+
-+static int __cold
-+ethsw_probe(struct fsl_mc_device *sw_dev)
++static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
+{
-+ struct device *dev;
-+ struct net_device *netdev = NULL;
-+ struct ethsw_dev_priv *priv = NULL;
-+ int err = 0;
-+ u16 i;
-+ const char def_mcast[ETH_ALEN] = {
-+ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
-+ };
-+ char port_name[IFNAMSIZ];
-+
-+ dev = &sw_dev->dev;
++ struct ethsw_port_priv *port_priv;
++ struct device *dev = ethsw->dev;
++ struct net_device *port_netdev;
++ int err;
+
-+ /* register switch device, it's for management only - no I/O */
-+ netdev = alloc_etherdev(sizeof(*priv));
-+ if (!netdev) {
++ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
++ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
-+ netdev->netdev_ops = ðsw_ops;
+
-+ SET_NETDEV_DEV(netdev, dev);
-+ dev_set_drvdata(dev, netdev);
++ port_priv = netdev_priv(port_netdev);
++ port_priv->netdev = port_netdev;
++ port_priv->ethsw_data = ethsw;
+
-+ priv = netdev_priv(netdev);
-+ priv->netdev = netdev;
++ port_priv->idx = port_idx;
++ port_priv->stp_state = BR_STATE_FORWARDING;
+
-+ err = fsl_mc_portal_allocate(sw_dev, 0, &priv->mc_io);
-+ if (err) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
-+ goto err_free_netdev;
-+ }
-+ if (!priv->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
-+ err = -EFAULT;
-+ goto err_free_netdev;
-+ }
++ /* Flooding is implicitly enabled */
++ port_priv->flood = true;
+
-+ err = ethsw_init(sw_dev);
-+ if (err) {
-+ dev_err(dev, "switch init err %d\n", err);
-+ goto err_free_cmdport;
-+ }
++ SET_NETDEV_DEV(port_netdev, dev);
++ port_netdev->netdev_ops = ðsw_port_ops;
++ port_netdev->ethtool_ops = ðsw_port_ethtool_ops;
++ port_netdev->switchdev_ops = ðsw_port_switchdev_ops;
+
-+ netdev->flags = netdev->flags | IFF_PROMISC | IFF_MASTER;
++ /* Set MTU limits */
++ port_netdev->min_mtu = ETH_MIN_MTU;
++ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
-+ /* TODO: should we hold rtnl_lock here? We can't register_netdev under
-+ * lock
-+ */
-+ dev_alloc_name(netdev, "sw%d");
-+ err = register_netdev(netdev);
++ err = register_netdev(port_netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
-+ goto err_takedown;
-+ }
-+ if (err)
-+ dev_info(dev, "register_netdev res %d\n", err);
-+
-+ /* VLAN 1 is implicitly configured on the switch */
-+ priv->vlans[1] = ETHSW_VLAN_MEMBER;
-+ /* Flooding, learning are implicitly enabled */
-+ priv->learning = true;
-+ priv->flood = true;
-+
-+ /* register switch ports */
-+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
-+
-+ INIT_LIST_HEAD(&priv->port_list);
-+ for (i = 0; i < priv->sw_attr.num_ifs; i++) {
-+ struct net_device *port_netdev;
-+ struct ethsw_port_priv *port_priv;
-+
-+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
-+ if (!port_netdev) {
-+ dev_err(dev, "alloc_etherdev error\n");
-+ goto err_takedown;
++ free_netdev(port_netdev);
++ return err;
+ }
+
-+ port_priv = netdev_priv(port_netdev);
-+ port_priv->netdev = port_netdev;
-+ port_priv->ethsw_priv = priv;
++ ethsw->ports[port_idx] = port_priv;
+
-+ port_priv->port_index = i;
-+ port_priv->stp_state = BR_STATE_FORWARDING;
-+ /* VLAN 1 is configured by default on all switch ports */
-+ port_priv->vlans[1] = ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED |
-+ ETHSW_VLAN_PVID;
++ return ethsw_port_init(port_priv, port_idx);
++}
+
-+ SET_NETDEV_DEV(port_netdev, dev);
-+ port_netdev->netdev_ops = ðsw_port_ops;
-+ port_netdev->ethtool_ops = ðsw_port_ethtool_ops;
++static int ethsw_probe(struct fsl_mc_device *sw_dev)
++{
++ struct device *dev = &sw_dev->dev;
++ struct ethsw_core *ethsw;
++ int i, err;
+
-+ port_netdev->flags = port_netdev->flags |
-+ IFF_PROMISC | IFF_SLAVE;
++ /* Allocate switch core*/
++ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+
-+ dev_alloc_name(port_netdev, port_name);
-+ err = register_netdev(port_netdev);
-+ if (err < 0) {
-+ dev_err(dev, "register_netdev error %d\n", err);
-+ free_netdev(port_netdev);
-+ goto err_takedown;
-+ }
++ if (!ethsw)
++ return -ENOMEM;
+
-+ rtnl_lock();
++ ethsw->dev = dev;
++ dev_set_drvdata(dev, ethsw);
+
-+ err = netdev_master_upper_dev_link(port_netdev, netdev,
-+ NULL, NULL);
-+ if (err) {
-+ dev_err(dev, "netdev_master_upper_dev_link error %d\n",
-+ err);
-+ unregister_netdev(port_netdev);
-+ free_netdev(port_netdev);
-+ rtnl_unlock();
-+ goto err_takedown;
-+ }
++ err = fsl_mc_portal_allocate(sw_dev, 0, ðsw->mc_io);
++ if (err) {
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ goto err_free_drvdata;
++ }
++
++ err = ethsw_init(sw_dev);
++ if (err)
++ goto err_free_cmdport;
+
-+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev, IFF_SLAVE, GFP_KERNEL);
++ /* DEFAULT_VLAN_ID is implicitly configured on the switch */
++ ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
+
-+ rtnl_unlock();
++ /* Learning is implicitly enabled */
++ ethsw->learning = true;
+
-+ list_add(&port_priv->list, &priv->port_list);
++ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
++ GFP_KERNEL);
++ if (!(ethsw->ports)) {
++ err = -ENOMEM;
++ goto err_takedown;
++ }
+
-+ /* TODO: implmenet set_rm_mode instead of this */
-+ err = ethsw_port_fdb_add_mc(port_netdev, def_mcast);
++ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
++ err = ethsw_probe_port(ethsw, i);
+ if (err)
-+ dev_warn(&netdev->dev,
-+ "ethsw_port_fdb_add_mc err %d\n", err);
++ goto err_free_ports;
+ }
+
-+ /* the switch starts up enabled */
++ /* Switch starts up enabled */
+ rtnl_lock();
-+ err = dev_open(netdev);
++ err = ethsw_open(ethsw);
+ rtnl_unlock();
+ if (err)
-+ dev_warn(dev, "dev_open err %d\n", err);
++ goto err_free_ports;
+
-+ /* setup irqs */
++ /* Setup IRQs */
+ err = ethsw_setup_irqs(sw_dev);
-+ if (unlikely(err)) {
-+ dev_warn(dev, "ethsw_setup_irqs err %d\n", err);
-+ goto err_takedown;
-+ }
++ if (err)
++ goto err_stop;
+
-+ dev_info(&netdev->dev,
-+ "probed %d port switch\n", priv->sw_attr.num_ifs);
++ dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
+ return 0;
+
++err_stop:
++ rtnl_lock();
++ ethsw_stop(ethsw);
++ rtnl_unlock();
++
++err_free_ports:
++ /* Cleanup registered ports only */
++ for (i--; i >= 0; i--) {
++ unregister_netdev(ethsw->ports[i]->netdev);
++ free_netdev(ethsw->ports[i]->netdev);
++ }
++ kfree(ethsw->ports);
++
+err_takedown:
-+ ethsw_remove(sw_dev);
++ ethsw_takedown(sw_dev);
++
+err_free_cmdport:
-+ fsl_mc_portal_free(priv->mc_io);
-+err_free_netdev:
++ fsl_mc_portal_free(ethsw->mc_io);
++
++err_free_drvdata:
++ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
-+ free_netdev(netdev);
+
+ return err;
+}
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
-+ {}
++ { .vendor = 0x0 }
+};
++MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
+
+static struct fsl_mc_driver eth_sw_drv = {
+ .driver = {
-+ .name = KBUILD_MODNAME,
-+ .owner = THIS_MODULE,
++ .name = KBUILD_MODNAME,
++ .owner = THIS_MODULE,
+ },
-+ .probe = ethsw_probe,
-+ .remove = ethsw_remove,
-+ .match_id_table = ethsw_match_id_table,
++ .probe = ethsw_probe,
++ .remove = ethsw_remove,
++ .match_id_table = ethsw_match_id_table
+};
+
+module_fsl_mc_driver(eth_sw_drv);
+
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver (prototype)");
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
+--- /dev/null
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+@@ -0,0 +1,90 @@
++/* Copyright 2014-2017 Freescale Semiconductor Inc.
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __ETHSW_H
++#define __ETHSW_H
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_vlan.h>
++#include <uapi/linux/if_bridge.h>
++#include <net/switchdev.h>
++#include <linux/if_bridge.h>
++
++#include "dpsw.h"
++
++/* Number of IRQs supported */
++#define DPSW_IRQ_NUM 2
++
++#define ETHSW_VLAN_MEMBER 1
++#define ETHSW_VLAN_UNTAGGED 2
++#define ETHSW_VLAN_PVID 4
++#define ETHSW_VLAN_GLOBAL 8
++
++/* Maximum Frame Length supported by HW (currently 10k) */
++#define DPAA2_MFL (10 * 1024)
++#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
++#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
++
++extern const struct ethtool_ops ethsw_port_ethtool_ops;
++
++struct ethsw_core;
++
++/* Per port private data */
++struct ethsw_port_priv {
++ struct net_device *netdev;
++ u16 idx;
++ struct ethsw_core *ethsw_data;
++ u8 link_state;
++ u8 stp_state;
++ bool flood;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ u16 pvid;
++};
++
++/* Switch data */
++struct ethsw_core {
++ struct device *dev;
++ struct fsl_mc_io *mc_io;
++ u16 dpsw_handle;
++ struct dpsw_attr sw_attr;
++ int dev_id;
++ struct ethsw_port_priv **ports;
++
++ u8 vlans[VLAN_VID_MASK + 1];
++ bool learning;
++};
++
++#endif /* __ETHSW_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
@@ -0,0 +1,7 @@
+#endif /* _FSL_DPDMUX_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
-@@ -0,0 +1,1112 @@
+@@ -0,0 +1,1111 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
+ int dpdmux_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_open *cmd_params;
+ int err;
+
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
+ const struct dpdmux_cfg *cfg,
+ u32 *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_create *cmd_params;
+ int err;
+
+ u32 cmd_flags,
+ u32 object_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_destroy *cmd_params;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
+ u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_is_enabled *rsp_params;
+ int err;
+
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
+ u8 irq_index,
+ u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ u8 irq_index,
+ u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_enable *cmd_params;
+ struct dpdmux_rsp_get_irq_enable *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ u8 irq_index,
+ u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_mask *cmd_params;
+ struct dpdmux_rsp_get_irq_mask *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_status *cmd_params;
+ struct dpdmux_rsp_get_irq_status *rsp_params;
+ int err;
+ u8 irq_index,
+ u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ u16 token,
+ struct dpdmux_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_attr *rsp_params;
+ int err;
+
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
+ u16 token,
+ u16 max_frame_length)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
+ u16 if_id,
+ const struct dpdmux_accepted_frames *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ struct dpdmux_if_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if *cmd_params;
+ struct dpdmux_rsp_if_get_attr *rsp_params;
+ int err;
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ enum dpdmux_counter_type counter_type,
+ u64 *counter)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_counter *cmd_params;
+ struct dpdmux_rsp_if_get_counter *rsp_params;
+ int err;
+ u16 if_id,
+ struct dpdmux_link_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ u16 if_id,
+ struct dpdmux_link_state *state)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_link_state *cmd_params;
+ struct dpdmux_rsp_if_get_link_state *rsp_params;
+ int err;
+ u64 key_cfg_iova)
+{
+ struct dpdmux_set_custom_key *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
+ struct dpdmux_cls_action *action)
+{
+ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
+ struct dpdmux_rule_cfg *rule)
+{
+ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_api_version *rsp_params;
+ int err;
+
+#endif /* __FSL_DPDMUX_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/evb.c
-@@ -0,0 +1,1350 @@
+@@ -0,0 +1,1354 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
-+#include "../../fsl-mc/include/mc.h"
++#include <linux/fsl/mc.h>
+
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+#define DPDMUX_MAX_IRQ_NUM 2
+
+/* MAX FRAME LENGTH (currently 10k) */
-+#define EVB_MAX_FRAME_LENGTH (10 * 1024)
-+/* MIN FRAME LENGTH (64 bytes + 4 bytes CRC) */
-+#define EVB_MIN_FRAME_LENGTH 68
++#define EVB_MAX_FRAME_LENGTH (10 * 1024)
++#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
++#define EVB_MIN_MTU 68
+
+struct evb_port_priv {
+ struct net_device *netdev;
+ if (port_priv->port_index > 0)
+ return -EPERM;
+
-+ if (mtu < EVB_MIN_FRAME_LENGTH || mtu > EVB_MAX_FRAME_LENGTH) {
-+ netdev_err(netdev, "Invalid MTU %d. Valid range is: %d..%d\n",
-+ mtu, EVB_MIN_FRAME_LENGTH, EVB_MAX_FRAME_LENGTH);
-+ return -EINVAL;
-+ }
-+
+ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
+ 0,
+ evb_priv->mux_handle,
-+ (uint16_t)mtu);
++ (uint16_t)(mtu + VLAN_ETH_HLEN));
+
+ if (unlikely(err)) {
+ netdev_err(netdev, "dpdmux_ul_set_max_frame_length err %d\n",
+
+error:
+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
++ return storage;
+}
+
+static const struct net_device_ops evb_port_ops = {
+
+ priv = netdev_priv(netdev);
+
-+ err = fsl_mc_portal_allocate(evb_dev, 0, &priv->mc_io);
-+ if (unlikely(err)) {
-+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &priv->mc_io);
++ if (err) {
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_netdev;
+ }
++
+ if (!priv->mc_io) {
+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+ err = -EFAULT;
+
+ list_add(&port_priv->list, &priv->port_list);
+ } else {
++ /* Set MTU limits only on uplink */
++ port_netdev->min_mtu = EVB_MIN_MTU;
++ port_netdev->max_mtu = EVB_MAX_MTU;
++
+ err = register_netdev(netdev);
+
+ if (err < 0) {
+#endif /* _FSL_DPMAC_CMD_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
-@@ -0,0 +1,620 @@
+@@ -0,0 +1,619 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 cmd_flags,
+ u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ u32 *obj_id)
+{
+ struct dpmac_cmd_create *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 object_id)
+{
+ struct dpmac_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
+ u8 en)
+{
+ struct dpmac_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
+{
+ struct dpmac_cmd_get_irq_enable *cmd_params;
+ struct dpmac_rsp_get_irq_enable *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 mask)
+{
+ struct dpmac_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
+{
+ struct dpmac_cmd_get_irq_mask *cmd_params;
+ struct dpmac_rsp_get_irq_mask *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+{
+ struct dpmac_cmd_get_irq_status *cmd_params;
+ struct dpmac_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ u32 status)
+{
+ struct dpmac_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ struct dpmac_link_cfg *cfg)
+{
+ struct dpmac_rsp_get_link_cfg *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ const u8 addr[6])
+{
+ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
+ u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+#endif /* __FSL_DPMAC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
-@@ -0,0 +1,670 @@
+@@ -0,0 +1,673 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+ return NETDEV_TX_OK;
+}
+
-+static int dpaa2_mac_get_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_gset(netdev->phydev, cmd);
++ phy_ethtool_ksettings_get(netdev->phydev, ks);
++
++ return 0;
+}
+
-+static int dpaa2_mac_set_settings(struct net_device *netdev,
-+ struct ethtool_cmd *cmd)
++static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *ks)
+{
-+ return phy_ethtool_sset(netdev->phydev, cmd);
++ return phy_ethtool_ksettings_set(netdev->phydev, ks);
+}
+
+static struct rtnl_link_stats64 *dpaa2_mac_get_stats(struct net_device *netdev,
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
-+ .get_settings = &dpaa2_mac_get_settings,
-+ .set_settings = &dpaa2_mac_set_settings,
++ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
++ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
+ .get_strings = &dpaa2_mac_get_strings,
+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
+ .get_sset_count = &dpaa2_mac_get_sset_count,
+
+ dev_set_drvdata(dev, priv);
+
-+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
++ /* We may need to issue MC commands while in atomic context */
++ err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
++ &mc_dev->mc_io);
+ if (err || !mc_dev->mc_io) {
-+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err);
-+ err = -ENODEV;
++ dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
++ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
-+#include "../../fsl-mc/include/mc-sys.h"
-+#include "../../fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
++
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+ uint16_t *token)
+{
+ struct dprtc_cmd_open *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ const struct dprtc_cfg *cfg,
+ uint32_t *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
+ uint32_t object_id)
+{
+ struct dprtc_cmd_destroy *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
+ int *en)
+{
+ struct dprtc_rsp_is_enabled *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
+ uint8_t en)
+{
+ struct dprtc_cmd_set_irq_enable *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
+{
+ struct dprtc_rsp_get_irq_enable *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t mask)
+{
+ struct dprtc_cmd_set_irq_mask *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
+{
+ struct dprtc_rsp_get_irq_mask *rsp_params;
+ struct dprtc_cmd_get_irq *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+{
+ struct dprtc_cmd_get_irq_status *cmd_params;
+ struct dprtc_rsp_get_irq_status *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t status)
+{
+ struct dprtc_cmd_clear_irq_status *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
+ struct dprtc_attr *attr)
+{
+ struct dprtc_rsp_get_attributes *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ int64_t offset)
+{
+ struct dprtc_cmd_set_clock_offset *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
+ uint32_t freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ uint32_t *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ uint16_t token, uint64_t time)
+{
+ struct dprtc_time *cmd_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
+ uint16_t *minor_ver)
+{
+ struct dprtc_rsp_get_api_version *rsp_params;
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
+#endif /* __FSL_DPRTC_H */
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
-@@ -0,0 +1,243 @@
+@@ -0,0 +1,242 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+
-+#include "../../fsl-mc/include/mc.h"
-+#include "../../fsl-mc/include/mc-sys.h"
++#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 RTC (PTP 1588 clock) driver (prototype)");
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -429,12 +429,15 @@ struct sk_filter {
+
+ struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
++ void *data_meta;
+ void *data_end;
+ };
+
+ struct xdp_buff {
+ void *data;
+ void *data_end;
++ void *data_meta;
++ void *data_hard_start;
+ };
+
+ /* compute the linear packet data range [data, data_end) which
-From 79fb41b6040d00d3bdfca9eb70a7848441eb7447 Mon Sep 17 00:00:00 2001
+From 50fb2f2e93aeae0baed156eb4794a2f358376b77 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:14:12 +0800
-Subject: [PATCH] fsl_ppfe: support layercape
+Date: Thu, 5 Jul 2018 17:19:20 +0800
+Subject: [PATCH 12/32] fsl_ppfe: support layerscape
This is an integrated patch for layerscape pfe support.
Calvin Johnson <calvin.johnson@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/staging/fsl_ppfe/Kconfig | 20 +
- drivers/staging/fsl_ppfe/Makefile | 19 +
- drivers/staging/fsl_ppfe/TODO | 2 +
- drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
- drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
- .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 289 +++
- .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
- drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
- drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
- .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
- .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
- .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
- drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
- drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
- drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
- drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
- drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
- drivers/staging/fsl_ppfe/pfe_eth.c | 2474 ++++++++++++++++++++
- drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
- drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
- drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
- drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++++
- drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++++
- drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
- drivers/staging/fsl_ppfe/pfe_hif_lib.c | 637 +++++
- drivers/staging/fsl_ppfe/pfe_hif_lib.h | 240 ++
- drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
- drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
- drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
- drivers/staging/fsl_ppfe/pfe_mod.c | 141 ++
- drivers/staging/fsl_ppfe/pfe_mod.h | 112 +
- drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
- drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 +++++++
- drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
- 34 files changed, 10434 insertions(+)
+ drivers/staging/fsl_ppfe/Kconfig | 20 +
+ drivers/staging/fsl_ppfe/Makefile | 19 +
+ drivers/staging/fsl_ppfe/TODO | 2 +
+ drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78 +
+ .../staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55 +
+ .../fsl_ppfe/include/pfe/cbus/class_csr.h | 289 ++
+ .../fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242 ++
+ .../staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86 +
+ .../staging/fsl_ppfe/include/pfe/cbus/hif.h | 100 +
+ .../fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50 +
+ .../fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168 ++
+ .../fsl_ppfe/include/pfe/cbus/util_csr.h | 61 +
+ drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372 +++
+ drivers/staging/fsl_ppfe/pfe_ctrl.c | 238 ++
+ drivers/staging/fsl_ppfe/pfe_ctrl.h | 112 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.c | 111 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.h | 25 +
+ drivers/staging/fsl_ppfe/pfe_eth.c | 2491 +++++++++++++++++
+ drivers/staging/fsl_ppfe/pfe_eth.h | 184 ++
+ drivers/staging/fsl_ppfe/pfe_firmware.c | 314 +++
+ drivers/staging/fsl_ppfe/pfe_firmware.h | 32 +
+ drivers/staging/fsl_ppfe/pfe_hal.c | 1516 ++++++++++
+ drivers/staging/fsl_ppfe/pfe_hif.c | 1072 +++++++
+ drivers/staging/fsl_ppfe/pfe_hif.h | 211 ++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.c | 640 +++++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.h | 241 ++
+ drivers/staging/fsl_ppfe/pfe_hw.c | 176 ++
+ drivers/staging/fsl_ppfe/pfe_hw.h | 27 +
+ .../staging/fsl_ppfe/pfe_ls1012a_platform.c | 385 +++
+ drivers/staging/fsl_ppfe/pfe_mod.c | 156 ++
+ drivers/staging/fsl_ppfe/pfe_mod.h | 114 +
+ drivers/staging/fsl_ppfe/pfe_perfmon.h | 38 +
+ drivers/staging/fsl_ppfe/pfe_sysfs.c | 818 ++++++
+ drivers/staging/fsl_ppfe/pfe_sysfs.h | 29 +
+ 34 files changed, 10472 insertions(+)
create mode 100644 drivers/staging/fsl_ppfe/Kconfig
create mode 100644 drivers/staging/fsl_ppfe/Makefile
create mode 100644 drivers/staging/fsl_ppfe/TODO
+#endif /* _PFE_DEBUGFS_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
-@@ -0,0 +1,2474 @@
+@@ -0,0 +1,2491 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ goto err0;
+ }
+
++ if (us)
++ emac_txq_cnt = EMAC_TXQ_CNT;
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
+
+ }
+ }
+
++ if (us)
++ goto phy_init;
++
+ ndev->mtu = 1500;
+
+ /* Set MTU limits */
+ netdev_err(ndev, "register_netdev() failed\n");
+ goto err3;
+ }
++
++phy_init:
+ device_init_wakeup(&ndev->dev, WAKE_MAGIC);
+
+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
+ }
+ }
+
++ if (us) {
++ if (priv->phydev)
++ phy_start(priv->phydev);
++ return 0;
++ }
++
+ netif_carrier_on(ndev);
+
+ /* Create all the sysfs files */
+
+ return 0;
+err4:
++ if (us)
++ goto err3;
+ unregister_netdev(ndev);
+err3:
+ pfe_eth_mdio_exit(priv->mii_bus);
+{
+ netif_info(priv, probe, priv->ndev, "%s\n", __func__);
+
-+ pfe_eth_sysfs_exit(priv->ndev);
-+
-+ unregister_netdev(priv->ndev);
++ if (!us)
++ pfe_eth_sysfs_exit(priv->ndev);
+
+ if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
+ pfe_phy_exit(priv->ndev);
+
++ if (!us)
++ unregister_netdev(priv->ndev);
++
+ if (priv->mii_bus)
+ pfe_eth_mdio_exit(priv->mii_bus);
+
+#endif /* _PFE_HIF_H_ */
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
-@@ -0,0 +1,637 @@
+@@ -0,0 +1,640 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
+
+ if (size) {
++ size += PFE_PARSE_INFO_SIZE;
+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
+ PFE_PKT_HEADER_SZ - size;
+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
+ *priv_data = desc->data + PFE_PKT_HEADER_SZ;
+ } else {
+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
-+ PFE_PKT_HEADER_SZ;
-+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
++ PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
++ *ofst = pfe_pkt_headroom
++ + PFE_PKT_HEADER_SZ
++ + PFE_PARSE_INFO_SIZE;
+ *priv_data = NULL;
+ }
+
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
-@@ -0,0 +1,240 @@
+@@ -0,0 +1,241 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+
+#define HIF_CL_REQ_TIMEOUT 10
+#define GFP_DMA_PFE 0
++#define PFE_PARSE_INFO_SIZE 16
+
+enum {
+ REQUEST_CL_REGISTER = 0,
+#define PFE_BUF_SIZE 2048
+#define PFE_PKT_HEADROOM 128
+
-+#define SKB_SHARED_INFO_SIZE (sizeof(struct skb_shared_info))
++#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
+ - SKB_SHARED_INFO_SIZE)
+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
+MODULE_AUTHOR("NXP DNCPE");
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
-@@ -0,0 +1,141 @@
+@@ -0,0 +1,156 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+#include <linux/dma-mapping.h>
+#include "pfe_mod.h"
+
++unsigned int us;
++module_param(us, uint, 0444);
++MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
++ "1: module enabled for userspace networking\n");
+struct pfe *pfe;
+
+/*
+ if (rc < 0)
+ goto err_hw;
+
++ if (us)
++ goto firmware_init;
++
+ rc = pfe_hif_lib_init(pfe);
+ if (rc < 0)
+ goto err_hif_lib;
+ if (rc < 0)
+ goto err_hif;
+
++firmware_init:
+ rc = pfe_firmware_init(pfe);
+ if (rc < 0)
+ goto err_firmware;
+ pfe_firmware_exit(pfe);
+
+err_firmware:
++ if (us)
++ goto err_hif_lib;
++
+ pfe_hif_exit(pfe);
+
+err_hif:
+#endif
+ pfe_firmware_exit(pfe);
+
++ if (us)
++ goto hw_exit;
++
+ pfe_hif_exit(pfe);
+
+ pfe_hif_lib_exit(pfe);
+
++hw_exit:
+ pfe_hw_exit(pfe);
+
+ return 0;
+}
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.h
-@@ -0,0 +1,112 @@
+@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+#include <linux/device.h>
+#include <linux/elf.h>
+
++extern unsigned int us;
++
+struct pfe;
+
+#include "pfe_hw.h"
-From b018e44a68dc2f4df819ae194e39e07313841dad Mon Sep 17 00:00:00 2001
+From d78d78ccbaded757e8bea0d13c4120518bdd4660 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:27:58 +0800
-Subject: [PATCH 15/30] cpufreq: support layerscape
+Date: Thu, 5 Jul 2018 17:21:38 +0800
+Subject: [PATCH 15/32] cpufreq: support layerscape
This is an integrated patch for layerscape pm support.
Signed-off-by: Tang Yuantian <Yuantian.Tang@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/cpufreq/Kconfig | 2 +-
- drivers/cpufreq/qoriq-cpufreq.c | 176 +++++++++++++++-------------------------
- drivers/firmware/psci.c | 12 ++-
- drivers/soc/fsl/rcpm.c | 158 ++++++++++++++++++++++++++++++++++++
- 4 files changed, 235 insertions(+), 113 deletions(-)
+ .../devicetree/bindings/powerpc/fsl/pmc.txt | 59 ++--
+ drivers/cpufreq/Kconfig | 2 +-
+ drivers/cpufreq/qoriq-cpufreq.c | 176 +++++------
+ drivers/firmware/psci.c | 12 +-
+ drivers/soc/fsl/rcpm.c | 158 ++++++++++
+ drivers/soc/fsl/sleep_fsm.c | 279 ++++++++++++++++++
+ drivers/soc/fsl/sleep_fsm.h | 130 ++++++++
+ 7 files changed, 678 insertions(+), 138 deletions(-)
create mode 100644 drivers/soc/fsl/rcpm.c
+ create mode 100644 drivers/soc/fsl/sleep_fsm.c
+ create mode 100644 drivers/soc/fsl/sleep_fsm.h
+--- a/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
++++ b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
+@@ -9,15 +9,20 @@ Properties:
+
+ "fsl,mpc8548-pmc" should be listed for any chip whose PMC is
+ compatible. "fsl,mpc8536-pmc" should also be listed for any chip
+- whose PMC is compatible, and implies deep-sleep capability.
++ whose PMC is compatible, and implies deep-sleep capability and
++ wake on user-defined packet (wakeup on ARP).
++
++ "fsl,p1022-pmc" should be listed for any chip whose PMC is
++ compatible, and implies lossless Ethernet capability during sleep.
+
+ "fsl,mpc8641d-pmc" should be listed for any chip whose PMC is
+ compatible; all statements below that apply to "fsl,mpc8548-pmc" also
+ apply to "fsl,mpc8641d-pmc".
+
+ Compatibility does not include bit assignments in SCCR/PMCDR/DEVDISR; these
+- bit assignments are indicated via the sleep specifier in each device's
+- sleep property.
++ bit assignments are indicated via the clock nodes. Device which has a
++ controllable clock source should have a "fsl,pmc-handle" property pointing
++ to the clock node.
+
+ - reg: For devices compatible with "fsl,mpc8349-pmc", the first resource
+ is the PMC block, and the second resource is the Clock Configuration
+@@ -33,31 +38,35 @@ Properties:
+ this is a phandle to an "fsl,gtm" node on which timer 4 can be used as
+ a wakeup source from deep sleep.
+
+-Sleep specifiers:
+-
+- fsl,mpc8349-pmc: Sleep specifiers consist of one cell. For each bit
+- that is set in the cell, the corresponding bit in SCCR will be saved
+- and cleared on suspend, and restored on resume. This sleep controller
+- supports disabling and resuming devices at any time.
+-
+- fsl,mpc8536-pmc: Sleep specifiers consist of three cells, the third of
+- which will be ORed into PMCDR upon suspend, and cleared from PMCDR
+- upon resume. The first two cells are as described for fsl,mpc8578-pmc.
+- This sleep controller only supports disabling devices during system
+- sleep, or permanently.
+-
+- fsl,mpc8548-pmc: Sleep specifiers consist of one or two cells, the
+- first of which will be ORed into DEVDISR (and the second into
+- DEVDISR2, if present -- this cell should be zero or absent if the
+- hardware does not have DEVDISR2) upon a request for permanent device
+- disabling. This sleep controller does not support configuring devices
+- to disable during system sleep (unless supported by another compatible
+- match), or dynamically.
++Clock nodes:
++The clock nodes are to describe the masks in PM controller registers for each
++soc clock.
++- fsl,pmcdr-mask: For "fsl,mpc8548-pmc"-compatible devices, the mask will be
++ ORed into PMCDR before suspend if the device using this clock is the wake-up
++ source and need to be running during low power mode; clear the mask if
++ otherwise.
++
++- fsl,sccr-mask: For "fsl,mpc8349-pmc"-compatible devices, the corresponding
++ bit specified by the mask in SCCR will be saved and cleared on suspend, and
++ restored on resume.
++
++- fsl,devdisr-mask: Contains one or two cells, depending on the availability of
++ DEVDISR2 register. For compatible devices, the mask will be ORed into DEVDISR
++ or DEVDISR2 when the clock should be permanently disabled.
+
+ Example:
+
+- power@b00 {
+- compatible = "fsl,mpc8313-pmc", "fsl,mpc8349-pmc";
+- reg = <0xb00 0x100 0xa00 0x100>;
+- interrupts = <80 8>;
++ power@e0070 {
++ compatible = "fsl,mpc8536-pmc", "fsl,mpc8548-pmc";
++ reg = <0xe0070 0x20>;
++
++ etsec1_clk: soc-clk@24 {
++ fsl,pmcdr-mask = <0x00000080>;
++ };
++ etsec2_clk: soc-clk@25 {
++ fsl,pmcdr-mask = <0x00000040>;
++ };
++ etsec3_clk: soc-clk@26 {
++ fsl,pmcdr-mask = <0x00000020>;
++ };
+ };
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -334,7 +334,7 @@ endif
+}
+
+subsys_initcall(layerscape_rcpm_init);
+--- /dev/null
++++ b/drivers/soc/fsl/sleep_fsm.c
+@@ -0,0 +1,279 @@
++/*
++ * deep sleep FSM (finite-state machine) configuration
++ *
++ * Copyright 2018 NXP
++ *
++ * Author: Hongbo Zhang <hongbo.zhang@freescale.com>
++ * Chenhui Zhao <chenhui.zhao@freescale.com>
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/types.h>
++
++#include "sleep_fsm.h"
++/*
++ * These values are from chip's reference manual. For example,
++ * the values for T1040 can be found in "8.4.3.8 Programming
++ * supporting deep sleep mode" of Chapter 8 "Run Control and
++ * Power Management (RCPM)".
++ * The default value can be applied to T104x, LS1021.
++ */
++struct fsm_reg_vals epu_default_val[] = {
++ /* EPGCR (Event Processor Global Control Register) */
++ {EPGCR, 0},
++ /* EPECR (Event Processor Event Control Registers) */
++ {EPECR0 + EPECR_STRIDE * 0, 0},
++ {EPECR0 + EPECR_STRIDE * 1, 0},
++ {EPECR0 + EPECR_STRIDE * 2, 0xF0004004},
++ {EPECR0 + EPECR_STRIDE * 3, 0x80000084},
++ {EPECR0 + EPECR_STRIDE * 4, 0x20000084},
++ {EPECR0 + EPECR_STRIDE * 5, 0x08000004},
++ {EPECR0 + EPECR_STRIDE * 6, 0x80000084},
++ {EPECR0 + EPECR_STRIDE * 7, 0x80000084},
++ {EPECR0 + EPECR_STRIDE * 8, 0x60000084},
++ {EPECR0 + EPECR_STRIDE * 9, 0x08000084},
++ {EPECR0 + EPECR_STRIDE * 10, 0x42000084},
++ {EPECR0 + EPECR_STRIDE * 11, 0x90000084},
++ {EPECR0 + EPECR_STRIDE * 12, 0x80000084},
++ {EPECR0 + EPECR_STRIDE * 13, 0x08000084},
++ {EPECR0 + EPECR_STRIDE * 14, 0x02000084},
++ {EPECR0 + EPECR_STRIDE * 15, 0x00000004},
++ /*
++ * EPEVTCR (Event Processor EVT Pin Control Registers)
++ * SCU8 triggers EVT2, and SCU11 triggers EVT9
++ */
++ {EPEVTCR0 + EPEVTCR_STRIDE * 0, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 1, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 2, 0x80000001},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 3, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 4, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 5, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 6, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 7, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 8, 0},
++ {EPEVTCR0 + EPEVTCR_STRIDE * 9, 0xB0000001},
++ /* EPCMPR (Event Processor Counter Compare Registers) */
++ {EPCMPR0 + EPCMPR_STRIDE * 0, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 1, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 2, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 3, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 4, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 5, 0x00000020},
++ {EPCMPR0 + EPCMPR_STRIDE * 6, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 7, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 8, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 9, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 10, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 11, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 12, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 13, 0},
++ {EPCMPR0 + EPCMPR_STRIDE * 14, 0x000000FF},
++ {EPCMPR0 + EPCMPR_STRIDE * 15, 0x000000FF},
++ /* EPCCR (Event Processor Counter Control Registers) */
++ {EPCCR0 + EPCCR_STRIDE * 0, 0},
++ {EPCCR0 + EPCCR_STRIDE * 1, 0},
++ {EPCCR0 + EPCCR_STRIDE * 2, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 3, 0},
++ {EPCCR0 + EPCCR_STRIDE * 4, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 5, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 6, 0},
++ {EPCCR0 + EPCCR_STRIDE * 7, 0},
++ {EPCCR0 + EPCCR_STRIDE * 8, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 9, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 10, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 11, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 12, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 13, 0},
++ {EPCCR0 + EPCCR_STRIDE * 14, 0x92840000},
++ {EPCCR0 + EPCCR_STRIDE * 15, 0x92840000},
++ /* EPSMCR (Event Processor SCU Mux Control Registers) */
++ {EPSMCR0 + EPSMCR_STRIDE * 0, 0},
++ {EPSMCR0 + EPSMCR_STRIDE * 1, 0},
++ {EPSMCR0 + EPSMCR_STRIDE * 2, 0x6C700000},
++ {EPSMCR0 + EPSMCR_STRIDE * 3, 0x2F000000},
++ {EPSMCR0 + EPSMCR_STRIDE * 4, 0x002F0000},
++ {EPSMCR0 + EPSMCR_STRIDE * 5, 0x00002E00},
++ {EPSMCR0 + EPSMCR_STRIDE * 6, 0x7C000000},
++ {EPSMCR0 + EPSMCR_STRIDE * 7, 0x30000000},
++ {EPSMCR0 + EPSMCR_STRIDE * 8, 0x64300000},
++ {EPSMCR0 + EPSMCR_STRIDE * 9, 0x00003000},
++ {EPSMCR0 + EPSMCR_STRIDE * 10, 0x65000030},
++ {EPSMCR0 + EPSMCR_STRIDE * 11, 0x31740000},
++ {EPSMCR0 + EPSMCR_STRIDE * 12, 0x7F000000},
++ {EPSMCR0 + EPSMCR_STRIDE * 13, 0x00003100},
++ {EPSMCR0 + EPSMCR_STRIDE * 14, 0x00000031},
++ {EPSMCR0 + EPSMCR_STRIDE * 15, 0x76000000},
++ /* EPACR (Event Processor Action Control Registers) */
++ {EPACR0 + EPACR_STRIDE * 0, 0},
++ {EPACR0 + EPACR_STRIDE * 1, 0},
++ {EPACR0 + EPACR_STRIDE * 2, 0},
++ {EPACR0 + EPACR_STRIDE * 3, 0x00000080},
++ {EPACR0 + EPACR_STRIDE * 4, 0},
++ {EPACR0 + EPACR_STRIDE * 5, 0x00000040},
++ {EPACR0 + EPACR_STRIDE * 6, 0},
++ {EPACR0 + EPACR_STRIDE * 7, 0},
++ {EPACR0 + EPACR_STRIDE * 8, 0},
++ {EPACR0 + EPACR_STRIDE * 9, 0x0000001C},
++ {EPACR0 + EPACR_STRIDE * 10, 0x00000020},
++ {EPACR0 + EPACR_STRIDE * 11, 0},
++ {EPACR0 + EPACR_STRIDE * 12, 0x00000003},
++ {EPACR0 + EPACR_STRIDE * 13, 0x06000000},
++ {EPACR0 + EPACR_STRIDE * 14, 0x04000000},
++ {EPACR0 + EPACR_STRIDE * 15, 0x02000000},
++ /* EPIMCR (Event Processor Input Mux Control Registers) */
++ {EPIMCR0 + EPIMCR_STRIDE * 0, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 1, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 2, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 3, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 4, 0x44000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 5, 0x40000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 6, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 7, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 8, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 9, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 10, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 11, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 12, 0x44000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 13, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 14, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 15, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 16, 0x6A000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 17, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 18, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 19, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 20, 0x48000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 21, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 22, 0x6C000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 23, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 24, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 25, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 26, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 27, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 28, 0x76000000},
++ {EPIMCR0 + EPIMCR_STRIDE * 29, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 30, 0},
++ {EPIMCR0 + EPIMCR_STRIDE * 31, 0x76000000},
++ /* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
++ {EPXTRIGCR, 0x0000FFDF},
++ /* end */
++ {FSM_END_FLAG, 0},
++};
++
++struct fsm_reg_vals npc_default_val[] = {
++ /* NPC triggered Memory-Mapped Access Registers */
++ {NCR, 0x80000000},
++ {MCCR1, 0},
++ {MCSR1, 0},
++ {MMAR1LO, 0},
++ {MMAR1HI, 0},
++ {MMDR1, 0},
++ {MCSR2, 0},
++ {MMAR2LO, 0},
++ {MMAR2HI, 0},
++ {MMDR2, 0},
++ {MCSR3, 0x80000000},
++ {MMAR3LO, 0x000E2130},
++ {MMAR3HI, 0x00030000},
++ {MMDR3, 0x00020000},
++ /* end */
++ {FSM_END_FLAG, 0},
++};
++
++/**
++ * fsl_fsm_setup - Configure EPU's FSM registers
++ * @base: the base address of registers
++ * @val: Pointer to address-value pairs for FSM registers
++ */
++void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val)
++{
++ struct fsm_reg_vals *data = val;
++
++ WARN_ON(!base || !data);
++ while (data->offset != FSM_END_FLAG) {
++ iowrite32be(data->value, base + data->offset);
++ data++;
++ }
++}
++
++void fsl_epu_setup_default(void __iomem *epu_base)
++{
++ fsl_fsm_setup(epu_base, epu_default_val);
++}
++
++void fsl_npc_setup_default(void __iomem *npc_base)
++{
++ fsl_fsm_setup(npc_base, npc_default_val);
++}
++
++void fsl_epu_clean_default(void __iomem *epu_base)
++{
++ u32 offset;
++
++ /* follow the exact sequence to clear the registers */
++ /* Clear EPACRn */
++ for (offset = EPACR0; offset <= EPACR15; offset += EPACR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPEVTCRn */
++ for (offset = EPEVTCR0; offset <= EPEVTCR9; offset += EPEVTCR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPGCR */
++ iowrite32be(0, epu_base + EPGCR);
++
++ /* Clear EPSMCRn */
++ for (offset = EPSMCR0; offset <= EPSMCR15; offset += EPSMCR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPCCRn */
++ for (offset = EPCCR0; offset <= EPCCR31; offset += EPCCR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPCMPRn */
++ for (offset = EPCMPR0; offset <= EPCMPR31; offset += EPCMPR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPCTRn */
++ for (offset = EPCTR0; offset <= EPCTR31; offset += EPCTR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPIMCRn */
++ for (offset = EPIMCR0; offset <= EPIMCR31; offset += EPIMCR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++
++ /* Clear EPXTRIGCRn */
++ iowrite32be(0, epu_base + EPXTRIGCR);
++
++ /* Clear EPECRn */
++ for (offset = EPECR0; offset <= EPECR15; offset += EPECR_STRIDE)
++ iowrite32be(0, epu_base + offset);
++}
+--- /dev/null
++++ b/drivers/soc/fsl/sleep_fsm.h
+@@ -0,0 +1,130 @@
++/*
++ * deep sleep FSM (finite-state machine) configuration
++ *
++ * Copyright 2018 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _FSL_SLEEP_FSM_H
++#define _FSL_SLEEP_FSM_H
++
++#define FSL_STRIDE_4B 4
++#define FSL_STRIDE_8B 8
++
++/* End flag */
++#define FSM_END_FLAG 0xFFFFFFFFUL
++
++/* Block offsets */
++#define RCPM_BLOCK_OFFSET 0x00022000
++#define EPU_BLOCK_OFFSET 0x00000000
++#define NPC_BLOCK_OFFSET 0x00001000
++
++/* EPGCR (Event Processor Global Control Register) */
++#define EPGCR 0x000
++
++/* EPEVTCR0-9 (Event Processor EVT Pin Control Registers) */
++#define EPEVTCR0 0x050
++#define EPEVTCR9 0x074
++#define EPEVTCR_STRIDE FSL_STRIDE_4B
++
++/* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
++#define EPXTRIGCR 0x090
++
++/* EPIMCR0-31 (Event Processor Input Mux Control Registers) */
++#define EPIMCR0 0x100
++#define EPIMCR31 0x17C
++#define EPIMCR_STRIDE FSL_STRIDE_4B
++
++/* EPSMCR0-15 (Event Processor SCU Mux Control Registers) */
++#define EPSMCR0 0x200
++#define EPSMCR15 0x278
++#define EPSMCR_STRIDE FSL_STRIDE_8B
++
++/* EPECR0-15 (Event Processor Event Control Registers) */
++#define EPECR0 0x300
++#define EPECR15 0x33C
++#define EPECR_STRIDE FSL_STRIDE_4B
++
++/* EPACR0-15 (Event Processor Action Control Registers) */
++#define EPACR0 0x400
++#define EPACR15 0x43C
++#define EPACR_STRIDE FSL_STRIDE_4B
++
++/* EPCCRi0-15 (Event Processor Counter Control Registers) */
++#define EPCCR0 0x800
++#define EPCCR15 0x83C
++#define EPCCR31 0x87C
++#define EPCCR_STRIDE FSL_STRIDE_4B
++
++/* EPCMPR0-15 (Event Processor Counter Compare Registers) */
++#define EPCMPR0 0x900
++#define EPCMPR15 0x93C
++#define EPCMPR31 0x97C
++#define EPCMPR_STRIDE FSL_STRIDE_4B
++
++/* EPCTR0-31 (Event Processor Counter Register) */
++#define EPCTR0 0xA00
++#define EPCTR31 0xA7C
++#define EPCTR_STRIDE FSL_STRIDE_4B
++
++/* NPC triggered Memory-Mapped Access Registers */
++#define NCR 0x000
++#define MCCR1 0x0CC
++#define MCSR1 0x0D0
++#define MMAR1LO 0x0D4
++#define MMAR1HI 0x0D8
++#define MMDR1 0x0DC
++#define MCSR2 0x0E0
++#define MMAR2LO 0x0E4
++#define MMAR2HI 0x0E8
++#define MMDR2 0x0EC
++#define MCSR3 0x0F0
++#define MMAR3LO 0x0F4
++#define MMAR3HI 0x0F8
++#define MMDR3 0x0FC
++
++/* RCPM Core State Action Control Register 0 */
++#define CSTTACR0 0xB00
++
++/* RCPM Core Group 1 Configuration Register 0 */
++#define CG1CR0 0x31C
++
++struct fsm_reg_vals {
++ u32 offset;
++ u32 value;
++};
++
++void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val);
++void fsl_epu_setup_default(void __iomem *epu_base);
++void fsl_npc_setup_default(void __iomem *npc_base);
++void fsl_epu_clean_default(void __iomem *epu_base);
++
++#endif /* _FSL_SLEEP_FSM_H */
-From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001
+From 2a0aa9bd187f6f5693982a8f79665585af772237 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:29:23 +0800
-Subject: [PATCH 16/30] crypto: support layerscape
+Date: Thu, 5 Jul 2018 17:29:41 +0800
+Subject: [PATCH 16/32] crypto: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- crypto/Kconfig | 30 +
- crypto/Makefile | 4 +
- crypto/acompress.c | 169 +
- crypto/algboss.c | 12 +-
- crypto/crypto_user.c | 19 +
- crypto/scompress.c | 356 ++
- crypto/tcrypt.c | 17 +-
- crypto/testmgr.c | 1708 +++---
- crypto/testmgr.h | 1125 ++--
- crypto/tls.c | 607 +++
- drivers/crypto/caam/Kconfig | 77 +-
- drivers/crypto/caam/Makefile | 16 +-
- drivers/crypto/caam/caamalg.c | 2171 ++------
- drivers/crypto/caam/caamalg_desc.c | 1961 +++++++
- drivers/crypto/caam/caamalg_desc.h | 127 +
- drivers/crypto/caam/caamalg_qi.c | 2929 ++++++++++
- drivers/crypto/caam/caamalg_qi2.c | 5920 +++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 281 +
- drivers/crypto/caam/caamhash.c | 550 +-
- drivers/crypto/caam/caamhash_desc.c | 108 +
- drivers/crypto/caam/caamhash_desc.h | 49 +
- drivers/crypto/caam/caampkc.c | 471 +-
- drivers/crypto/caam/caampkc.h | 58 +
- drivers/crypto/caam/caamrng.c | 16 +-
- drivers/crypto/caam/compat.h | 1 +
- drivers/crypto/caam/ctrl.c | 358 +-
- drivers/crypto/caam/ctrl.h | 2 +
- drivers/crypto/caam/desc.h | 84 +-
- drivers/crypto/caam/desc_constr.h | 180 +-
- drivers/crypto/caam/dpseci.c | 859 +++
- drivers/crypto/caam/dpseci.h | 395 ++
- drivers/crypto/caam/dpseci_cmd.h | 261 +
- drivers/crypto/caam/error.c | 127 +-
- drivers/crypto/caam/error.h | 10 +-
- drivers/crypto/caam/intern.h | 31 +-
- drivers/crypto/caam/jr.c | 72 +-
- drivers/crypto/caam/jr.h | 2 +
- drivers/crypto/caam/key_gen.c | 32 +-
- drivers/crypto/caam/key_gen.h | 36 +-
- drivers/crypto/caam/pdb.h | 62 +
- drivers/crypto/caam/pkc_desc.c | 36 +
- drivers/crypto/caam/qi.c | 797 +++
- drivers/crypto/caam/qi.h | 204 +
- drivers/crypto/caam/regs.h | 63 +-
- drivers/crypto/caam/sg_sw_qm.h | 126 +
- drivers/crypto/caam/sg_sw_qm2.h | 81 +
- drivers/crypto/caam/sg_sw_sec4.h | 60 +-
- drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
- drivers/staging/wilc1000/linux_wlan.c | 2 +-
- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
- include/crypto/acompress.h | 269 +
- include/crypto/internal/acompress.h | 81 +
- include/crypto/internal/scompress.h | 136 +
- include/linux/crypto.h | 3 +
- include/uapi/linux/cryptouser.h | 5 +
- scripts/spelling.txt | 3 +
- sound/soc/amd/acp-pcm-dma.c | 2 +-
- 57 files changed, 19177 insertions(+), 3988 deletions(-)
+ crypto/Kconfig | 30 +
+ crypto/Makefile | 4 +
+ crypto/acompress.c | 169 +
+ crypto/algboss.c | 12 +-
+ crypto/crypto_user.c | 19 +
+ crypto/scompress.c | 356 +
+ crypto/tcrypt.c | 17 +-
+ crypto/testmgr.c | 1708 ++---
+ crypto/testmgr.h | 1125 ++--
+ crypto/tls.c | 607 ++
+ drivers/crypto/caam/Kconfig | 77 +-
+ drivers/crypto/caam/Makefile | 16 +-
+ drivers/crypto/caam/caamalg.c | 2185 ++----
+ drivers/crypto/caam/caamalg_desc.c | 1961 ++++++
+ drivers/crypto/caam/caamalg_desc.h | 127 +
+ drivers/crypto/caam/caamalg_qi.c | 3321 +++++++++
+ drivers/crypto/caam/caamalg_qi2.c | 5938 +++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 283 +
+ drivers/crypto/caam/caamhash.c | 555 +-
+ drivers/crypto/caam/caamhash_desc.c | 108 +
+ drivers/crypto/caam/caamhash_desc.h | 49 +
+ drivers/crypto/caam/caampkc.c | 471 +-
+ drivers/crypto/caam/caampkc.h | 58 +
+ drivers/crypto/caam/caamrng.c | 16 +-
+ drivers/crypto/caam/compat.h | 1 +
+ drivers/crypto/caam/ctrl.c | 358 +-
+ drivers/crypto/caam/ctrl.h | 2 +
+ drivers/crypto/caam/desc.h | 84 +-
+ drivers/crypto/caam/desc_constr.h | 180 +-
+ drivers/crypto/caam/dpseci.c | 858 +++
+ drivers/crypto/caam/dpseci.h | 395 ++
+ drivers/crypto/caam/dpseci_cmd.h | 261 +
+ drivers/crypto/caam/error.c | 127 +-
+ drivers/crypto/caam/error.h | 10 +-
+ drivers/crypto/caam/intern.h | 31 +-
+ drivers/crypto/caam/jr.c | 72 +-
+ drivers/crypto/caam/jr.h | 2 +
+ drivers/crypto/caam/key_gen.c | 32 +-
+ drivers/crypto/caam/key_gen.h | 36 +-
+ drivers/crypto/caam/pdb.h | 62 +
+ drivers/crypto/caam/pkc_desc.c | 36 +
+ drivers/crypto/caam/qi.c | 804 +++
+ drivers/crypto/caam/qi.h | 204 +
+ drivers/crypto/caam/regs.h | 63 +-
+ drivers/crypto/caam/sg_sw_qm.h | 126 +
+ drivers/crypto/caam/sg_sw_qm2.h | 81 +
+ drivers/crypto/caam/sg_sw_sec4.h | 60 +-
+ drivers/crypto/talitos.c | 8 +
+ drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
+ drivers/staging/wilc1000/linux_wlan.c | 2 +-
+ .../staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
+ include/crypto/acompress.h | 269 +
+ include/crypto/internal/acompress.h | 81 +
+ include/crypto/internal/scompress.h | 136 +
+ include/linux/crypto.h | 3 +
+ include/uapi/linux/cryptouser.h | 5 +
+ scripts/spelling.txt | 3 +
+ sound/soc/amd/acp-pcm-dma.c | 2 +-
+ 58 files changed, 19620 insertions(+), 3990 deletions(-)
create mode 100644 crypto/acompress.c
create mode 100644 crypto/scompress.c
create mode 100644 crypto/tls.c
bool rfc3686;
bool geniv;
};
-@@ -163,302 +96,70 @@ struct caam_aead_alg {
+@@ -163,302 +96,71 @@ struct caam_aead_alg {
bool registered;
};
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
++ enum dma_data_direction dir;
+ struct device *jrdev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -470,11 +171,12 @@ static int aead_set_sh_desc(struct crypt
+@@ -470,11 +172,12 @@ static int aead_set_sh_desc(struct crypt
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -482,7 +184,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -482,7 +185,7 @@ static int aead_set_sh_desc(struct crypt
return 0;
/* NULL encryption / decryption */
return aead_null_set_sh_desc(aead);
/*
-@@ -497,8 +199,14 @@ static int aead_set_sh_desc(struct crypt
+@@ -497,8 +200,14 @@ static int aead_set_sh_desc(struct crypt
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
if (alg->caam.geniv)
goto skip_enc;
-@@ -507,146 +215,64 @@ static int aead_set_sh_desc(struct crypt
+@@ -507,146 +216,64 @@ static int aead_set_sh_desc(struct crypt
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+ false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
skip_enc:
/*
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
-@@ -655,107 +281,32 @@ skip_enc:
+@@ -655,107 +282,32 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
-@@ -776,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto
+@@ -776,12 +328,12 @@ static int gcm_set_sh_desc(struct crypto
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -789,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto
+@@ -789,175 +341,35 @@ static int gcm_set_sh_desc(struct crypto
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
-#endif
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
-#endif
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -976,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -976,11 +388,12 @@ static int rfc4106_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -988,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -988,148 +401,37 @@ static int rfc4106_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -1149,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1149,12 +451,12 @@ static int rfc4543_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -1162,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1162,151 +464,37 @@ static int rfc4543_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -1322,74 +509,67 @@ static int rfc4543_setauthsize(struct cr
+@@ -1322,74 +510,67 @@ static int rfc4543_setauthsize(struct cr
return 0;
}
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
- }
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
-@@ -1400,7 +580,6 @@ static int gcm_setkey(struct crypto_aead
+@@ -1400,7 +581,6 @@ static int gcm_setkey(struct crypto_aead
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-@@ -1408,21 +587,10 @@ static int gcm_setkey(struct crypto_aead
+@@ -1408,21 +588,10 @@ static int gcm_setkey(struct crypto_aead
#endif
memcpy(ctx->key, key, keylen);
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
- ret = gcm_set_sh_desc(aead);
}
static int rfc4106_setkey(struct crypto_aead *aead,
-@@ -1430,7 +598,6 @@ static int rfc4106_setkey(struct crypto_
+@@ -1430,7 +599,6 @@ static int rfc4106_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1446,22 +613,10 @@ static int rfc4106_setkey(struct crypto_
+@@ -1446,22 +614,10 @@ static int rfc4106_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ return rfc4106_set_sh_desc(aead);
}
static int rfc4543_setkey(struct crypto_aead *aead,
-@@ -1469,7 +624,6 @@ static int rfc4543_setkey(struct crypto_
+@@ -1469,7 +625,6 @@ static int rfc4543_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1485,43 +639,28 @@ static int rfc4543_setkey(struct crypto_
+@@ -1485,43 +640,28 @@ static int rfc4543_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ return rfc4543_set_sh_desc(aead);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-@@ -1544,215 +683,33 @@ static int ablkcipher_setkey(struct cryp
+@@ -1544,215 +684,33 @@ static int ablkcipher_setkey(struct cryp
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
-+ ctx1_iv_off);
-+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
-
+-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
--
++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
++ ctx1_iv_off);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), ctx->dir);
+
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-@@ -1760,8 +717,7 @@ static int xts_ablkcipher_setkey(struct
+@@ -1760,8 +718,7 @@ static int xts_ablkcipher_setkey(struct
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
-@@ -1771,126 +727,38 @@ static int xts_ablkcipher_setkey(struct
+@@ -1771,126 +728,38 @@ static int xts_ablkcipher_setkey(struct
}
memcpy(ctx->key, key, keylen);
-#endif
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-#endif
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
-@@ -1899,12 +767,12 @@ struct aead_edesc {
+@@ -1899,12 +768,12 @@ struct aead_edesc {
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
-@@ -1924,10 +792,11 @@ static void caam_unmap(struct device *de
+@@ -1924,10 +793,11 @@ static void caam_unmap(struct device *de
int sec4_sg_bytes)
{
if (dst != src) {
}
if (iv_dma)
-@@ -2021,8 +890,7 @@ static void ablkcipher_encrypt_done(stru
+@@ -2021,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2031,10 +899,10 @@ static void ablkcipher_encrypt_done(stru
+@@ -2031,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2062,8 +930,7 @@ static void ablkcipher_decrypt_done(stru
+@@ -2062,8 +931,7 @@ static void ablkcipher_decrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2071,10 +938,10 @@ static void ablkcipher_decrypt_done(stru
+@@ -2071,10 +939,10 @@ static void ablkcipher_decrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2114,7 +981,7 @@ static void init_aead_job(struct aead_re
+@@ -2114,7 +982,7 @@ static void init_aead_job(struct aead_re
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
-@@ -2129,7 +996,7 @@ static void init_aead_job(struct aead_re
+@@ -2129,7 +997,7 @@ static void init_aead_job(struct aead_re
out_options = in_options;
if (unlikely(req->src != req->dst)) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2147,9 +1014,6 @@ static void init_aead_job(struct aead_re
+@@ -2147,9 +1015,6 @@ static void init_aead_job(struct aead_re
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
}
static void init_gcm_job(struct aead_request *req,
-@@ -2164,6 +1028,7 @@ static void init_gcm_job(struct aead_req
+@@ -2164,6 +1029,7 @@ static void init_gcm_job(struct aead_req
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
/* BUG This should not be specific to generic GCM. */
last = 0;
-@@ -2175,7 +1040,7 @@ static void init_gcm_job(struct aead_req
+@@ -2175,7 +1041,7 @@ static void init_gcm_job(struct aead_req
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
-@@ -2190,7 +1055,8 @@ static void init_authenc_job(struct aead
+@@ -2190,7 +1056,8 @@ static void init_authenc_job(struct aead
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
-@@ -2213,6 +1079,15 @@ static void init_authenc_job(struct aead
+@@ -2213,6 +1080,15 @@ static void init_authenc_job(struct aead
init_aead_job(req, edesc, all_contig, encrypt);
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
-@@ -2236,16 +1111,15 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2236,16 +1112,15 @@ static void init_ablkcipher_job(u32 *sh_
int len, sec4_sg_index = 0;
#ifdef DEBUG
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-@@ -2261,7 +1135,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2261,7 +1136,7 @@ static void init_ablkcipher_job(u32 *sh_
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
if (likely(req->src == req->dst)) {
dst_dma = sg_dma_address(req->src);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2269,7 +1143,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2269,7 +1144,7 @@ static void init_ablkcipher_job(u32 *sh_
out_options = LDST_SGF;
}
} else {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2296,20 +1170,18 @@ static void init_ablkcipher_giv_job(u32
+@@ -2296,20 +1171,18 @@ static void init_ablkcipher_giv_job(u32
int len, sec4_sg_index = 0;
#ifdef DEBUG
src_dma = sg_dma_address(req->src);
in_options = 0;
} else {
-@@ -2340,87 +1212,100 @@ static struct aead_edesc *aead_edesc_all
+@@ -2340,87 +1213,100 @@ static struct aead_edesc *aead_edesc_all
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
edesc->sec4_sg + sec4_sg_index, 0);
}
-@@ -2573,13 +1458,9 @@ static int aead_decrypt(struct aead_requ
+@@ -2573,13 +1459,9 @@ static int aead_decrypt(struct aead_requ
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-@@ -2619,51 +1500,80 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2619,51 +1501,80 @@ static struct ablkcipher_edesc *ablkciph
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
return ERR_PTR(-ENOMEM);
}
-@@ -2673,23 +1583,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2673,23 +1584,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
-@@ -2701,7 +1612,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2701,7 +1613,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2792,30 +1703,54 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2792,30 +1704,54 @@ static struct ablkcipher_edesc *ablkciph
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
}
/*
-@@ -2825,21 +1760,29 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2825,21 +1761,29 @@ static struct ablkcipher_edesc *ablkciph
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
return ERR_PTR(-ENOMEM);
}
-@@ -2849,24 +1792,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2849,24 +1793,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
-@@ -2878,7 +1821,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2878,7 +1822,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2889,7 +1832,7 @@ static int ablkcipher_givencrypt(struct
+@@ -2889,7 +1833,7 @@ static int ablkcipher_givencrypt(struct
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
-@@ -2933,7 +1876,6 @@ struct caam_alg_template {
+@@ -2933,7 +1877,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
};
static struct caam_alg_template driver_algs[] = {
-@@ -3118,7 +2060,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3118,7 +2061,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3140,7 +2081,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3140,7 +2082,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3162,7 +2102,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3162,7 +2103,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3184,7 +2123,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3184,7 +2124,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3206,7 +2144,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3206,7 +2145,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3228,7 +2165,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3228,7 +2166,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3250,7 +2186,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3250,7 +2187,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3273,7 +2208,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3273,7 +2209,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3296,7 +2230,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3296,7 +2231,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3319,7 +2252,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3319,7 +2253,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3342,7 +2274,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3342,7 +2275,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3365,7 +2296,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3365,7 +2297,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3388,7 +2318,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3388,7 +2319,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3411,7 +2340,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3411,7 +2341,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3434,7 +2362,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3434,7 +2363,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3457,7 +2384,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3457,7 +2385,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3480,7 +2406,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3480,7 +2407,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3503,7 +2428,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3503,7 +2429,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3526,7 +2450,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3526,7 +2451,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
}
},
{
-@@ -3549,7 +2472,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3549,7 +2473,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
}
},
-@@ -3573,7 +2495,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3573,7 +2496,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3597,7 +2518,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3597,7 +2519,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3621,7 +2541,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3621,7 +2542,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3645,7 +2564,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3645,7 +2565,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3669,7 +2587,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3669,7 +2588,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3693,7 +2610,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3693,7 +2611,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3717,7 +2633,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3717,7 +2634,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3741,7 +2656,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3741,7 +2657,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3765,7 +2679,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3765,7 +2680,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3789,7 +2702,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3789,7 +2703,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3812,7 +2724,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3812,7 +2725,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3835,7 +2746,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3835,7 +2747,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3858,7 +2768,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3858,7 +2769,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3881,7 +2790,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3881,7 +2791,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3904,7 +2812,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3904,7 +2813,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3927,7 +2834,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3927,7 +2835,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3950,7 +2856,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3950,7 +2857,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3973,7 +2878,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3973,7 +2879,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3996,7 +2900,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3996,7 +2901,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4019,7 +2922,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4019,7 +2923,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4042,7 +2944,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4042,7 +2945,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4065,7 +2966,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4065,7 +2967,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4090,7 +2990,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4090,7 +2991,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4115,7 +3014,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4115,7 +3015,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4141,7 +3039,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4141,7 +3040,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4166,7 +3063,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4166,7 +3064,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4192,7 +3088,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4192,7 +3089,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4217,7 +3112,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4217,7 +3113,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4243,7 +3137,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4243,7 +3138,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4268,7 +3161,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4268,7 +3162,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4294,7 +3186,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4294,7 +3187,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4319,7 +3210,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4319,7 +3211,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4345,7 +3235,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4345,7 +3236,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4370,7 +3259,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4370,7 +3260,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4385,16 +3273,34 @@ struct caam_crypto_alg {
+@@ -4383,18 +3272,44 @@ struct caam_crypto_alg {
+ struct caam_alg_entry caam;
+ };
- static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++ bool uses_dkp)
{
+ dma_addr_t dma_addr;
++ struct caam_drv_private *priv;
+
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
return PTR_ERR(ctx->jrdev);
}
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ if (priv->era >= 6 && uses_dkp)
++ ctx->dir = DMA_BIDIRECTIONAL;
++ else
++ ctx->dir = DMA_TO_DEVICE;
++
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+ offsetof(struct caam_ctx,
+ sh_desc_enc_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
return 0;
}
-@@ -4421,25 +3327,9 @@ static int caam_aead_init(struct crypto_
+@@ -4406,7 +3321,7 @@ static int caam_cra_init(struct crypto_t
+ container_of(alg, struct caam_crypto_alg, crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam, false);
+ }
+
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -4416,30 +3331,15 @@ static int caam_aead_init(struct crypto_
+ container_of(alg, struct caam_aead_alg, aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+- return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam,
++ alg->setkey == aead_setkey);
+ }
static void caam_exit_common(struct caam_ctx *ctx)
{
-
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+ offsetof(struct caam_ctx, sh_desc_enc_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
-@@ -4515,7 +3405,6 @@ static struct caam_crypto_alg *caam_alg_
+@@ -4515,7 +3415,6 @@ static struct caam_crypto_alg *caam_alg_
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
+ /* VSOL = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
+
-+#ifdef __LITTLE_ENDIAN
-+ append_moveb(desc, MOVE_WAITCOMP |
-+ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
-+#endif
++ if (caam_little_end)
++ append_moveb(desc, MOVE_WAITCOMP |
++ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
++
+ /* update Len field */
+ append_math_sub(desc, REG0, REG0, REG2, 8);
+
+ * SEQ OUT PTR command, Output Pointer (2 words) and
+ * Output Length into math registers.
+ */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+ 20);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 20);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
++
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+ 24);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 24);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
++
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+ * Move the SEQ OUT PTR command, Output Pointer (1 word) and
+ * Output Length into math registers.
+ */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 12);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+ 12);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
++
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u64(desc, REG0, REG0, IMM,
+ ~(((u64)(CMD_SEQ_IN_PTR ^
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 16);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+ 16);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
++
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+#endif /* _CAAMALG_DESC_H_ */
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
-@@ -0,0 +1,2929 @@
+@@ -0,0 +1,3321 @@
+/*
+ * Freescale FSL CAAM support for crypto API over QI backend.
+ * Based on caamalg.c
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
++ enum dma_data_direction dir;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ return -EINVAL;
+}
+
++static int gcm_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ return 0;
++}
++
++static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ gcm_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int gcm_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ int ret;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
++ ctx->cdata.keylen = keylen;
++
++ ret = gcm_set_sh_desc(aead);
++ if (ret)
++ return ret;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ return ret;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
++static int rfc4106_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ ctx->cdata.key_virt = ctx->key;
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ return 0;
++}
++
++static int rfc4106_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ rfc4106_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int rfc4106_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ int ret;
++
++ if (keylen < 4)
++ return -EINVAL;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
++
++ ret = rfc4106_set_sh_desc(aead);
++ if (ret)
++ return ret;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ return ret;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
++static int rfc4543_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ ctx->cdata.key_virt = ctx->key;
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ return 0;
++}
++
++static int rfc4543_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ rfc4543_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int rfc4543_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ int ret;
++
++ if (keylen < 4)
++ return -EINVAL;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
++
++ ret = rfc4543_set_sh_desc(aead);
++ if (ret)
++ return ret;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ return ret;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
++ /*
++ * verify hw auth check passed else return -EBADMSG
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+ return aead_crypt(req, false);
+}
+
++static int ipsec_gcm_encrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_crypt(req, true);
++}
++
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_crypt(req, false);
++}
++
+static void tls_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+};
+
+static struct caam_aead_alg driver_aeads[] = {
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc4106(gcm(aes))",
++ .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
++ .cra_blocksize = 1,
++ },
++ .setkey = rfc4106_setkey,
++ .setauthsize = rfc4106_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc4543(gcm(aes))",
++ .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
++ .cra_blocksize = 1,
++ },
++ .setkey = rfc4543_setkey,
++ .setauthsize = rfc4543_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ /* Galois Counter Mode */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "gcm(aes)",
++ .cra_driver_name = "gcm-aes-caam-qi",
++ .cra_blocksize = 1,
++ },
++ .setkey = gcm_setkey,
++ .setauthsize = gcm_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = 12,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ }
++ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ struct caam_alg_entry caam;
+};
+
-+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++ bool uses_dkp)
+{
+ struct caam_drv_private *priv;
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ return PTR_ERR(ctx->jrdev);
+ }
+
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ if (priv->era >= 6 && uses_dkp)
++ ctx->dir = DMA_BIDIRECTIONAL;
++ else
++ ctx->dir = DMA_TO_DEVICE;
++
+ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ ctx->authsize = 0;
+ }
+
-+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->qidev = priv->qidev;
+
+ spin_lock_init(&ctx->lock);
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
-+ return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+ aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
-+ return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam,
++ (alg->setkey == aead_setkey) ||
++ (alg->setkey == tls_setkey));
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
-+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
-+ DMA_TO_DEVICE);
++ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+
+ caam_jr_free(ctx->jrdev);
+}
+MODULE_AUTHOR("Freescale Semiconductor");
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,5920 @@
+@@ -0,0 +1,5938 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <linux/fsl/mc.h>
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+ * @key: virtual address of the key(s): [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
++ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
++ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_BIDIRECTIONAL);
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
-+ edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize,
+ priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc));
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ desc = flc->sh_desc;
+ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
+ ctx->authsize, blocksize, priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_BIDIRECTIONAL);
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+#endif
+
+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(dev, ctx->key_dma, keylen,
-+ DMA_BIDIRECTIONAL);
++ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+ desc = flc->sh_desc;
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /* ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /* ablkcipher_givencrypt shared descriptor */
+ flc = &ctx->flc[GIVENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ /* xts_ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_BIDIRECTIONAL);
++ ctx->dir);
+
+ return 0;
+}
+ struct caam_alg_entry caam;
+};
+
-+static int caam_cra_init(struct crypto_tfm *tfm)
++static int caam_cra_init(struct crypto_tfm *tfm, bool uses_dkp)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ caam_alg->caam.class2_alg_type;
+
+ ctx->dev = caam_alg->caam.dev;
++ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
-+ DMA_BIDIRECTIONAL,
-+ DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
+
+ ablkcipher_tfm->reqsize = sizeof(struct caam_request);
-+ return caam_cra_init(tfm);
++ return caam_cra_init(tfm, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
-+ return caam_cra_init(crypto_aead_tfm(tfm));
++ return caam_cra_init(crypto_aead_tfm(tfm),
++ (alg->setkey == aead_setkey) ||
++ (alg->setkey == tls_setkey));
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
-+ offsetof(struct caam_ctx, flc_dma),
-+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
++ offsetof(struct caam_ctx, flc_dma), ctx->dir,
++ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
-+ flc->flc[1] = desc_len(desc); /* SDL */
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
++ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
-+ dev_err(dev, "notification register failed\n");
++ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
++ /*
++ * If no affine DPIO for this core, there's probably
++ * none available for next cores either. Signal we want
++ * to retry later, in case the DPIO devices weren't
++ * probed yet.
++ */
++ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
-+ dev_err(dev, "MC portal allocation failed\n");
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
++
+ goto err_dma_mask;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
-+ dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
++ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+module_fsl_mc_driver(dpaa2_caam_driver);
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,281 @@
+@@ -0,0 +1,283 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table
+ */
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
-@@ -103,20 +96,14 @@ struct caam_hash_ctx {
+@@ -103,20 +96,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
++ enum dma_data_direction dir;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
};
/* ahash state */
-@@ -143,6 +130,31 @@ struct caam_export_state {
+@@ -143,6 +131,31 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-@@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr
+@@ -175,40 +188,31 @@ static inline dma_addr_t map_seq_out_ptr
return dst_dma;
}
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
-@@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32
+@@ -224,124 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
return 0;
}
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -350,17 +252,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp
+@@ -369,53 +264,22 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp
+@@ -426,14 +290,6 @@ static int ahash_set_sh_desc(struct cryp
return 0;
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
-@@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h
+@@ -469,7 +325,7 @@ static int hash_digest_key(struct caam_h
}
/* Job descriptor to perform unkeyed hash on key_in */
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
-@@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h
+@@ -513,12 +369,10 @@ static int hash_digest_key(struct caam_h
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
int ret;
u8 *hashed_key = NULL;
-@@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah
+@@ -539,43 +393,29 @@ static int ahash_setkey(struct crypto_ah
key = hashed_key;
}
bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-@@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de
+@@ -604,6 +444,8 @@ static inline void ahash_unmap(struct de
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
-@@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de
+@@ -612,6 +454,12 @@ static inline void ahash_unmap(struct de
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
-@@ -643,8 +490,7 @@ static void ahash_done(struct device *jr
+@@ -643,8 +491,7 @@ static void ahash_done(struct device *jr
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -671,19 +517,19 @@ static void ahash_done_bi(struct device
+@@ -671,19 +518,19 @@ static void ahash_done_bi(struct device
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de
+@@ -713,8 +560,7 @@ static void ahash_done_ctx_src(struct de
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de
+@@ -741,19 +587,19 @@ static void ahash_done_ctx_dst(struct de
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash
+@@ -835,13 +681,12 @@ static int ahash_update_ctx(struct ahash
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
-@@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash
+@@ -890,15 +735,14 @@ static int ahash_update_ctx(struct ahash
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
if (mapped_nents) {
sg_to_sec4_sg_last(req->src, mapped_nents,
-@@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash
+@@ -909,12 +753,10 @@ static int ahash_update_ctx(struct ahash
to_hash - *buflen,
*next_buflen, 0);
} else {
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_
+@@ -969,12 +811,9 @@ static int ahash_final_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_
+@@ -994,18 +833,17 @@ static int ahash_final_ctx(struct ahash_
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
-@@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1048,12 +886,9 @@ static int ahash_finup_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
-@@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1082,7 +917,7 @@ static int ahash_finup_ctx(struct ahash_
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-@@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1093,14 +928,14 @@ static int ahash_finup_ctx(struct ahash_
edesc->src_nents = src_nents;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
sec4_sg_src_index, ctx->ctx_len + buflen,
-@@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req
+@@ -1136,15 +971,18 @@ static int ahash_digest(struct ahash_req
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
-@@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha
+@@ -1215,10 +1053,10 @@ static int ahash_final_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
-@@ -1246,7 +1083,6 @@ static int ahash_final_no_ctx(struct aha
+@@ -1246,7 +1084,6 @@ static int ahash_final_no_ctx(struct aha
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-@@ -1276,13 +1112,12 @@ static int ahash_update_no_ctx(struct ah
+@@ -1276,13 +1113,12 @@ static int ahash_update_no_ctx(struct ah
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
-@@ -1329,10 +1164,11 @@ static int ahash_update_no_ctx(struct ah
+@@ -1329,10 +1165,11 @@ static int ahash_update_no_ctx(struct ah
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + 1, 0);
-@@ -1342,8 +1178,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1342,8 +1179,6 @@ static int ahash_update_no_ctx(struct ah
*next_buflen, 0);
}
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -1403,12 +1237,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1403,12 +1238,9 @@ static int ahash_finup_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1450,9 +1281,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1450,9 +1282,9 @@ static int ahash_finup_no_ctx(struct aha
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
req->nbytes);
-@@ -1496,11 +1327,10 @@ static int ahash_update_first(struct aha
+@@ -1496,11 +1328,10 @@ static int ahash_update_first(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int to_hash;
u32 *desc;
int src_nents, mapped_nents;
-@@ -1545,7 +1375,6 @@ static int ahash_update_first(struct aha
+@@ -1545,7 +1376,6 @@ static int ahash_update_first(struct aha
}
edesc->src_nents = src_nents;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
-@@ -1582,6 +1411,7 @@ static int ahash_update_first(struct aha
+@@ -1582,6 +1412,7 @@ static int ahash_update_first(struct aha
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-@@ -1688,7 +1518,6 @@ struct caam_hash_template {
+@@ -1688,7 +1519,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
};
/* ahash descriptors */
-@@ -1714,7 +1543,6 @@ static struct caam_hash_template driver_
+@@ -1714,7 +1544,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
-@@ -1736,7 +1564,6 @@ static struct caam_hash_template driver_
+@@ -1736,7 +1565,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
-@@ -1758,7 +1585,6 @@ static struct caam_hash_template driver_
+@@ -1758,7 +1586,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
-@@ -1780,7 +1606,6 @@ static struct caam_hash_template driver_
+@@ -1780,7 +1607,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
-@@ -1802,7 +1627,6 @@ static struct caam_hash_template driver_
+@@ -1802,7 +1628,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
}, {
.name = "md5",
.driver_name = "md5-caam",
-@@ -1824,14 +1648,12 @@ static struct caam_hash_template driver_
+@@ -1824,14 +1649,12 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
struct ahash_alg ahash_alg;
};
-@@ -1853,6 +1675,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1853,6 +1676,8 @@ static int caam_hash_cra_init(struct cry
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
++ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
-@@ -1863,11 +1686,31 @@ static int caam_hash_cra_init(struct cry
+@@ -1863,11 +1688,34 @@ static int caam_hash_cra_init(struct cry
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
+
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-@@ -1879,30 +1722,10 @@ static void caam_hash_cra_exit(struct cr
+@@ -1879,30 +1727,10 @@ static void caam_hash_cra_exit(struct cr
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
-@@ -1961,7 +1784,6 @@ caam_hash_alloc(struct caam_hash_templat
+@@ -1961,7 +1789,6 @@ caam_hash_alloc(struct caam_hash_templat
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
+#endif /* DESC_CONSTR_H */
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
-@@ -0,0 +1,859 @@
+@@ -0,0 +1,858 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ const struct dpseci_cfg *cfg, u32 *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_create *cmd_params;
+ int i, err;
+
+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ u32 object_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_destroy *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+ struct dpseci_rsp_get_irq_enable *rsp_params;
+ int err;
+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+ int err;
+
+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+ int err;
+
+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_counters *counters)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_counters *rsp_params;
+ int err;
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ u8 options, struct opr_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ struct opr_cfg *cfg, struct opr_qry *qry)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+ struct dpseci_rsp_get_opr *rsp_params;
+ int err;
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+}
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
-@@ -0,0 +1,797 @@
+@@ -0,0 +1,804 @@
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+ return qman_cb_dqrr_stop;
+
+ fd = &dqrr->fd;
-+ if (unlikely(fd->status))
-+ dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
++ if (unlikely(fd->status)) {
++ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
++ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
++
++ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
++ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
++ dev_err(qidev, "Error: %#x in CAAM response FD\n",
++ fd->status);
++ }
+
+ if (unlikely(fd->format != fd->format)) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
- return sg_nents;
-}
+#endif /* _SG_SW_SEC4_H_ */
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
+
++ /*
++ * In case of SEC 2.x+, cipher in len must include only the ciphertext,
++ * while extent is used for ICV len.
++ */
++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
++ (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
++ desc->ptr[4].len = cpu_to_be16(cryptlen);
++
+ if (ret > 1) {
+ tbl_off += ret;
+ sync_needed = true;
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -516,7 +516,7 @@ err:
-From 515d590e3d5313110faa4f2c86f7784d9b070fa9 Mon Sep 17 00:00:00 2001
+From d3d537ebe9884e7d945ab74bb02312d0c2c9b08d Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:30:59 +0800
-Subject: [PATCH 17/30] dma: support layerscape
+Date: Thu, 5 Jul 2018 17:32:53 +0800
+Subject: [PATCH 17/32] dma: support layerscape
This is an integrated patch for layerscape dma support.
---
drivers/dma/Kconfig | 31 +
drivers/dma/Makefile | 3 +
- drivers/dma/caam_dma.c | 563 ++++++++++++++
+ drivers/dma/caam_dma.c | 563 ++++++++++
drivers/dma/dpaa2-qdma/Kconfig | 8 +
drivers/dma/dpaa2-qdma/Makefile | 8 +
- drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 ++++++++++++++++++++++++
- drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
- drivers/dma/dpaa2-qdma/dpdmai.c | 454 +++++++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++++++
- drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
- drivers/dma/fsl-qdma.c | 1243 +++++++++++++++++++++++++++++++
- 11 files changed, 4301 insertions(+)
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 +++++++++++++++++
+ drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++++
+ drivers/dma/dpaa2-qdma/dpdmai.c | 515 ++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++
+ drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++
+ drivers/dma/fsl-qdma.c | 1243 +++++++++++++++++++++++
+ 11 files changed, 4281 insertions(+)
create mode 100644 drivers/dma/caam_dma.c
create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
create mode 100644 drivers/dma/dpaa2-qdma/Makefile
+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
-@@ -0,0 +1,986 @@
+@@ -0,0 +1,940 @@
+/*
+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+ *
+
+#include "../virt-dma.h"
+
-+#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include <linux/fsl/mc.h>
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include "fsl_dpdmai_cmd.h"
+ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
+ sizeof(struct dpaa2_fd);
+ comp_temp->desc_virt_addr =
-+ (void *)((struct dpaa2_frame_list *)
++ (void *)((struct dpaa2_fl_entry *)
+ comp_temp->fl_virt_addr + 3);
+ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
-+ sizeof(struct dpaa2_frame_list) * 3;
++ sizeof(struct dpaa2_fl_entry) * 3;
+
+ comp_temp->qchan = dpaa2_chan;
+ comp_temp->sg_blk_num = 0;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
-+ fd->simple.addr = dpaa2_comp->fl_bus_addr;
++ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+ /* Bypass memory translation, Frame list format, short length disable */
+ /* we need to disable BMT if fsl-mc use iova addr */
+ if (smmu_disable)
-+ fd->simple.bpid = QMAN_FD_BMT_ENABLE;
-+ fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
++ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
++ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
-+ fd->simple.frc = format | QDMA_SER_CTX;
++ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void dpaa2_qdma_populate_first_framel(
-+ struct dpaa2_frame_list *f_list,
++ struct dpaa2_fl_entry *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+ /* source and destination descriptor */
-+ sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
++ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
+ sdd++;
-+ sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
++ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */
+
-+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
++ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+ /* first frame list to source descriptor */
-+ f_list->addr_lo = dpaa2_comp->desc_bus_addr;
-+ f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
-+ f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
-+ f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
++
++ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
++ dpaa2_fl_set_len(f_list, 0x20);
++ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
++
+ if (smmu_disable)
-+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
-+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
-+ f_list->f = 0; /* not the last frame list */
++ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
+}
+
+/* source and destination frame list */
-+static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
++static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
-+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
-+ f_list->addr_lo = src;
-+ f_list->addr_hi = (src >> 32);
-+ f_list->data_len.data_len_sl0 = len;
-+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
++
++
++ dpaa2_fl_set_addr(f_list, src);
++ dpaa2_fl_set_len(f_list, len);
++ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
-+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
-+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
-+ f_list->f = 0; /* not the last frame list */
++ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
+
+ f_list++;
+ /* destination frame list to destination buffer */
-+ memset(f_list, 0, sizeof(struct dpaa2_frame_list));
-+ f_list->addr_lo = dst;
-+ f_list->addr_hi = (dst >> 32);
-+ f_list->data_len.data_len_sl0 = len;
-+ f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
++ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
++
++ dpaa2_fl_set_addr(f_list, dst);
++ dpaa2_fl_set_len(f_list, len);
++ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
++ dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
+ if (smmu_disable)
-+ f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
-+ f_list->sl = QDMA_FL_SL_LONG; /* long length */
-+ f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
++ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
+}
+
+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
-+ struct dpaa2_frame_list *f_list;
++ struct dpaa2_fl_entry *f_list;
+ uint32_t format;
+
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(format, dpaa2_comp);
+
-+ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
++ f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
+
+#ifdef LONG_FORMAT
+ /* first frame list for descriptor buffer (logn format) */
+ return total_len;
+}
+
-+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
-+ struct dma_chan *chan,
-+ struct scatterlist *dst_sg, u32 dst_nents,
-+ struct scatterlist *src_sg, u32 src_nents,
-+ unsigned long flags)
-+{
-+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
-+ struct dpaa2_qdma_comp *dpaa2_comp;
-+ struct dpaa2_frame_list *f_list;
-+ struct device *dev = dpaa2_chan->qdma->priv->dev;
-+ uint32_t total_len = 0;
-+
-+ /* basic sanity checks */
-+ if (dst_nents == 0 || src_nents == 0)
-+ return NULL;
-+
-+ if (dst_sg == NULL || src_sg == NULL)
-+ return NULL;
-+
-+ /* get the descriptors required */
-+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
-+
-+ /* populate Frame descriptor */
-+ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
-+
-+ /* prepare Scatter gather entry for source and destination */
-+ total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
-+ dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
-+
-+ f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
-+ /* first frame list for descriptor buffer */
-+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
-+ f_list++;
-+ /* prepare Scatter gather entry for source and destination */
-+ /* populate source and destination frame list table */
-+ dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
-+ dpaa2_comp->sge_src_bus_addr,
-+ total_len, QDMA_FL_FMT_SGE);
-+
-+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
-+}
-+
+static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+
+ /* obtain FD and process the error */
+ fd = dpaa2_dq_fd(dq);
-+ status = fd->simple.ctrl & 0xff;
++
++ status = dpaa2_fd_get_ctrl(fd) & 0xff;
+ if (status)
+ dev_err(priv->dev, "FD error occurred\n");
+ found = 0;
+ fd_eq = (struct dpaa2_fd *)
+ dpaa2_comp->fd_virt_addr;
+
-+ if (fd_eq->simple.addr ==
-+ fd->simple.addr) {
++ if (le64_to_cpu(fd_eq->simple.addr) ==
++ le64_to_cpu(fd->simple.addr)) {
+
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
-+ dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
+
+ dpaa2_qdma->dma_dev.dev = dev;
+ dpaa2_qdma->dma_dev.device_alloc_chan_resources
+ = dpaa2_qdma_free_chan_resources;
+ dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
-+ dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+MODULE_LICENSE("Dual BSD/GPL");
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
-@@ -0,0 +1,262 @@
+@@ -0,0 +1,227 @@
+/* Copyright 2015 NXP Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ } ctrl;
+} __attribute__((__packed__));
+
-+#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
++#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE (0 << 14) /* short lengthe disabled */
+#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
+#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
+
-+#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
++#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
+#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
-+#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
++#define QDMA_FL_BMT_ENABLE (0x1 << 15)/* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE 0x0 /* enable bypass memory translation */
-+#define QDMA_FL_SL_LONG 0x0 /* long length */
++#define QDMA_FL_SL_LONG (0x0 << 2)/* long length */
+#define QDMA_FL_SL_SHORT 0x1 /* short length */
-+#define QDMA_FL_F 0x1 /* last frame list bit */
++#define QDMA_FL_F (0x1)/* last frame list bit */
+/*Description of Frame list table structure*/
-+struct dpaa2_frame_list {
-+ uint32_t addr_lo; /* lower 32 bits of address */
-+ uint32_t addr_hi:17; /* upper 17 bits of address */
-+ uint32_t resrvd:15;
-+ union {
-+ uint32_t data_len_sl0; /* If SL=0, then data length is 32 */
-+ struct {
-+ uint32_t data_len:18; /* IF SL=1; length is 18bit */
-+ uint32_t resrvd:2;
-+ uint32_t mem:12; /* Valid only when SL=1 */
-+ } data_len_sl1;
-+ } data_len;
-+ /* word 4 */
-+ uint32_t bpid:14; /* Frame buffer pool ID */
-+ uint32_t ivp:1; /* Invalid Pool ID. */
-+ uint32_t bmt:1; /* Bypass Memory Translation */
-+ uint32_t offset:12; /* Frame offset */
-+ uint32_t fmt:2; /* Frame Format */
-+ uint32_t sl:1; /* Short Length */
-+ uint32_t f:1; /* Final bit */
-+
-+ uint32_t frc; /* Frame Context */
-+ /* word 6 */
-+ uint32_t err:8; /* Frame errors */
-+ uint32_t resrvd0:8;
-+ uint32_t asal:4; /* accelerator-specific annotation length */
-+ uint32_t resrvd1:1;
-+ uint32_t ptv2:1;
-+ uint32_t ptv1:1;
-+ uint32_t pta:1; /* pass-through annotation */
-+ uint32_t resrvd2:8;
-+
-+ uint32_t flc_lo; /* lower 32 bits fo flow context */
-+ uint32_t flc_hi; /* higher 32 bits fo flow context */
-+} __attribute__((__packed__));
+
+struct dpaa2_qdma_chan {
+ struct virt_dma_chan vchan;
+
+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
-+ sizeof(struct dpaa2_frame_list) * 3 + \
++ sizeof(struct dpaa2_fl_entry) * 3 + \
+ sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+/* qdma_sg_blk + 16 SGs */
+#endif /* __DPAA2_QDMA_H */
--- /dev/null
+++ b/drivers/dma/dpaa2-qdma/dpdmai.c
-@@ -0,0 +1,454 @@
+@@ -0,0 +1,515 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+#include <linux/io.h>
+#include "fsl_dpdmai.h"
+#include "fsl_dpdmai_cmd.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
++
++struct dpdmai_cmd_open {
++ __le32 dpdmai_id;
++};
++
++struct dpdmai_rsp_get_attributes {
++ __le32 id;
++ u8 num_of_priorities;
++ u8 pad0[3];
++ __le16 major;
++ __le16 minor;
++};
++
++
++struct dpdmai_cmd_queue {
++ __le32 dest_id;
++ u8 priority;
++ u8 queue;
++ u8 dest_type;
++ u8 pad;
++ __le64 user_ctx;
++ union {
++ __le32 options;
++ __le32 fqid;
++ };
++};
++
++struct dpdmai_rsp_get_tx_queue {
++ __le64 pad;
++ __le32 fqid;
++};
++
+
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpdmai_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags,
+ 0);
-+ DPDMAI_CMD_OPEN(cmd, dpdmai_id);
++
++ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
++ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
-+
++ *token = mc_cmd_hdr_read_token(&cmd);
+ return 0;
+}
+
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ const struct dpdmai_cfg *cfg,
+ uint16_t *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ uint16_t token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ int *type,
+ struct dpdmai_irq_cfg *irq_cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint8_t irq_index,
+ struct dpdmai_irq_cfg *irq_cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
+ uint8_t irq_index,
+ uint8_t *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint8_t irq_index,
+ uint8_t en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
+ uint8_t irq_index,
+ uint32_t *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint8_t irq_index,
+ uint32_t mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
+ uint8_t irq_index,
+ uint32_t *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ uint8_t irq_index,
+ uint32_t status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
+ uint16_t token,
+ struct dpdmai_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ int err;
++ struct dpdmai_rsp_get_attributes *rsp_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ return err;
+
+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_ATTR(cmd, attr);
++ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
++ attr->id = le32_to_cpu(rsp_params->id);
++ attr->version.major = le16_to_cpu(rsp_params->major);
++ attr->version.minor = le16_to_cpu(rsp_params->minor);
++ attr->num_of_priorities = rsp_params->num_of_priorities;
++
+
+ return 0;
+}
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpdmai_cmd_queue *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
-+ DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
++
++ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
++ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
++ cmd_params->priority = cfg->dest_cfg.priority;
++ cmd_params->queue = priority;
++ cmd_params->dest_type = cfg->dest_cfg.dest_type;
++ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
++ cmd_params->options = cpu_to_le32(cfg->options);
++
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+ uint16_t token,
+ uint8_t priority, struct dpdmai_rx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpdmai_cmd_queue *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
-+ DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
++
++ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
++ cmd_params->queue = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
++ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
++ attr->dest_cfg.priority = cmd_params->priority;
++ attr->dest_cfg.dest_type = cmd_params->dest_type;
++ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
++ attr->fqid = le32_to_cpu(cmd_params->fqid);
+
+ return 0;
+}
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
++ struct dpdmai_cmd_queue *cmd_params;
++ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
-+ DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
++
++ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
++ cmd_params->queue = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ return err;
+
+ /* retrieve response parameters */
-+ DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
++
++ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
++ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
-From 0a6c701f92e1aa368c44632fa0985e92703354ed Mon Sep 17 00:00:00 2001
+From 89a1f0d7826df69d8e02268b97bc3da02e07203f Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:35:48 +0800
-Subject: [PATCH 22/30] iommu: support layerscape
+Date: Thu, 5 Jul 2018 17:35:15 +0800
+Subject: [PATCH 22/32] iommu: support layerscape
This is an integrated patch for layerscape smmu support.
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/iommu/amd_iommu.c | 56 ++++++----
- drivers/iommu/arm-smmu-v3.c | 111 ++++++++++++++------
- drivers/iommu/arm-smmu.c | 100 +++++++++++++++---
- drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
- drivers/iommu/intel-iommu.c | 92 ++++++++++++----
- drivers/iommu/iommu.c | 219 ++++++++++++++++++++++++++++++++++++---
+ drivers/iommu/amd_iommu.c | 56 +++++---
+ drivers/iommu/arm-smmu-v3.c | 111 +++++++++++-----
+ drivers/iommu/arm-smmu.c | 100 ++++++++++++---
+ drivers/iommu/dma-iommu.c | 242 +++++++++++++++++++++++++++++------
+ drivers/iommu/intel-iommu.c | 92 ++++++++++---
+ drivers/iommu/iommu.c | 240 ++++++++++++++++++++++++++++++++--
drivers/iommu/mtk_iommu.c | 2 +
drivers/iommu/mtk_iommu_v1.c | 2 +
include/linux/dma-iommu.h | 11 ++
- include/linux/iommu.h | 55 +++++++---
- 10 files changed, 739 insertions(+), 151 deletions(-)
+ include/linux/iommu.h | 57 +++++++--
+ 10 files changed, 762 insertions(+), 151 deletions(-)
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
-@@ -1712,6 +1723,9 @@ arm_smmu_iova_to_phys(struct iommu_domai
+@@ -1695,6 +1706,9 @@ arm_smmu_unmap(struct iommu_domain *doma
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
#include <linux/spinlock.h>
#include <linux/amba/bus.h>
-+#include "../staging/fsl-mc/include/mc-bus.h"
++#include <linux/fsl/mc.h>
#include "io-pgtable.h"
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
-@@ -36,6 +36,7 @@
+@@ -33,9 +33,11 @@
+ #include <linux/bitops.h>
+ #include <linux/property.h>
+ #include <trace/events/iommu.h>
++#include <linux/fsl/mc.h>
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
struct iommu_callback_data {
const struct iommu_ops *ops;
-@@ -68,6 +69,13 @@ struct iommu_group_attribute {
+@@ -68,6 +70,13 @@ struct iommu_group_attribute {
const char *buf, size_t count);
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
-@@ -86,6 +94,18 @@ static int __iommu_attach_group(struct i
+@@ -86,6 +95,18 @@ static int __iommu_attach_group(struct i
static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
-@@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(str
+@@ -133,8 +154,131 @@ static ssize_t iommu_group_show_name(str
return sprintf(buf, "%s\n", group->name);
}
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
-@@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(vo
+@@ -212,6 +356,11 @@ struct iommu_group *iommu_group_alloc(vo
*/
kobject_put(&group->kobj);
pr_debug("Allocated group %d\n", group->id);
return group;
-@@ -318,7 +466,7 @@ static int iommu_group_create_direct_map
+@@ -318,7 +467,7 @@ static int iommu_group_create_direct_map
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
struct list_head mappings;
unsigned long pg_size;
int ret = 0;
-@@ -331,18 +479,21 @@ static int iommu_group_create_direct_map
+@@ -331,18 +480,21 @@ static int iommu_group_create_direct_map
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;
-@@ -358,7 +509,7 @@ static int iommu_group_create_direct_map
+@@ -358,7 +510,7 @@ static int iommu_group_create_direct_map
}
out:
return ret;
}
-@@ -563,6 +714,19 @@ struct iommu_group *iommu_group_get(stru
+@@ -563,6 +715,19 @@ struct iommu_group *iommu_group_get(stru
EXPORT_SYMBOL_GPL(iommu_group_get);
/**
* iommu_group_put - Decrement group reference
* @group: the group to use
*
-@@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_
+@@ -812,6 +977,26 @@ struct iommu_group *pci_device_group(str
+ return group;
+ }
+
++/* Get the IOMMU group for device on fsl-mc bus */
++struct iommu_group *fsl_mc_device_group(struct device *dev)
++{
++ struct device *cont_dev = fsl_mc_cont_dev(dev);
++ struct iommu_group *group;
++
++ /* Container device is responsible for creating the iommu group */
++ if (fsl_mc_is_cont_dev(dev)) {
++ group = iommu_group_alloc();
++ if (IS_ERR(group))
++ return NULL;
++ } else {
++ get_device(cont_dev);
++ group = iommu_group_get(cont_dev);
++ put_device(cont_dev);
++ }
++
++ return group;
++}
++
+ /**
+ * iommu_group_get_for_dev - Find or create the IOMMU group for a device
+ * @dev: target device
+@@ -845,10 +1030,19 @@ struct iommu_group *iommu_group_get_for_
* IOMMU driver.
*/
if (!group->default_domain) {
}
ret = iommu_group_add_device(group, dev);
-@@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_d
+@@ -1557,20 +1751,38 @@ int iommu_domain_set_attr(struct iommu_d
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
-@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
+@@ -330,6 +351,8 @@ static inline size_t iommu_map_sg(struct
+ extern struct iommu_group *pci_device_group(struct device *dev);
+ /* Generic device grouping function */
+ extern struct iommu_group *generic_device_group(struct device *dev);
++/* FSL-MC device grouping function */
++struct iommu_group *fsl_mc_device_group(struct device *dev);
+
+ /**
+ * struct iommu_fwspec - per-device IOMMU instance data
+@@ -439,16 +462,22 @@ static inline void iommu_set_fault_handl
{
}
-From 5a5ff01c790d49c0f6fd247f68f2fd9a2128ea91 Mon Sep 17 00:00:00 2001
+From dab02a7cc54494740e849cd51b554d100eb5541d Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:36:28 +0800
-Subject: [PATCH 23/30] irqchip: support layerscape
+Date: Thu, 5 Jul 2018 17:36:09 +0800
+Subject: [PATCH 23/32] irqchip: support layerscape
This is an integrated patch for layerscape gic support.
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
- drivers/irqchip/Makefile | 1 +
- drivers/irqchip/irq-gic-v3-its.c | 1 +
- include/linux/irqdomain.h | 36 ++++++++++++++++++++++++++++++++++++
- kernel/irq/irqdomain.c | 39 +++++++++++++++++++++++++++++++++++++++
- kernel/irq/msi.c | 4 ++--
- 5 files changed, 79 insertions(+), 2 deletions(-)
+ drivers/irqchip/Makefile | 1 +
+ drivers/irqchip/irq-gic-v3-its.c | 1 +
+ include/linux/irqchip/arm-gic-v3.h | 3 +++
+ include/linux/irqdomain.h | 36 +++++++++++++++++++++++++++
+ kernel/irq/irqdomain.c | 39 ++++++++++++++++++++++++++++++
+ kernel/irq/msi.c | 4 +--
+ 6 files changed, 82 insertions(+), 2 deletions(-)
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
-@@ -74,3 +74,4 @@ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scf
+@@ -75,3 +75,4 @@ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scf
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -133,6 +133,9 @@
+ #define GIC_BASER_SHAREABILITY(reg, type) \
+ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+
++/* encode a size field of width @w containing @n - 1 units */
++#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0))
++
+ #define GICR_PROPBASER_SHAREABILITY_SHIFT (10)
+ #define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7)
+ #define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56)
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
-@@ -183,6 +183,12 @@ enum {
+@@ -187,6 +187,12 @@ enum {
/* Irq domain is an IPI domain with single virq */
IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
-@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy
+@@ -220,6 +226,7 @@ struct irq_domain *irq_domain_add_legacy
void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
-@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_sin
+@@ -453,6 +460,19 @@ static inline bool irq_domain_is_ipi_sin
{
return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
-@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_sin
+@@ -484,6 +504,22 @@ static inline bool irq_domain_is_ipi_sin
{
return false;
}
#else /* CONFIG_IRQ_DOMAIN */
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
-@@ -278,6 +278,31 @@ struct irq_domain *irq_find_matching_fws
+@@ -319,6 +319,31 @@ struct irq_domain *irq_find_matching_fws
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
/**
* irq_set_default_host() - Set a "default" irq domain
* @domain: default domain pointer
*
-@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(s
+@@ -1420,6 +1445,20 @@ static void irq_domain_check_hierarchy(s
if (domain->ops->alloc)
domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}
-From a2a97f0d2c07a772899ca09967547bea6c9124c5 Mon Sep 17 00:00:00 2001
+From 1d35e363dd6e8bb1733bca0dfc186e3f70e692fe Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:46:03 +0800
-Subject: [PATCH 29/30] usb: support layerscape
+Date: Thu, 5 Jul 2018 17:38:52 +0800
+Subject: [PATCH 29/32] usb: support layerscape
This is an integrated patch for layerscape usb support.
---
drivers/net/usb/cdc_ether.c | 8 +
drivers/net/usb/r8152.c | 6 +
- drivers/usb/common/common.c | 50 ++++++
+ drivers/usb/common/common.c | 50 +++++
drivers/usb/core/hub.c | 8 +
- drivers/usb/dwc3/core.c | 243 ++++++++++++++++++++++++++++-
- drivers/usb/dwc3/core.h | 51 ++++++-
+ drivers/usb/dwc3/core.c | 243 +++++++++++++++++++++-
+ drivers/usb/dwc3/core.h | 51 ++++-
drivers/usb/dwc3/ep0.c | 4 +-
drivers/usb/dwc3/gadget.c | 7 +
drivers/usb/dwc3/host.c | 24 ++-
- drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++---
+ drivers/usb/gadget/udc/fsl_udc_core.c | 46 +++--
drivers/usb/gadget/udc/fsl_usb2_udc.h | 16 +-
- drivers/usb/host/Kconfig | 2 +-
- drivers/usb/host/ehci-fsl.c | 279 +++++++++++++++++++++++++++++++---
+ drivers/usb/host/Kconfig | 4 +-
+ drivers/usb/host/ehci-fsl.c | 279 ++++++++++++++++++++++++--
drivers/usb/host/ehci-fsl.h | 3 +
drivers/usb/host/ehci-hub.c | 4 +
- drivers/usb/host/ehci.h | 9 ++
- drivers/usb/host/fsl-mph-dr-of.c | 12 ++
- drivers/usb/host/xhci-plat.c | 10 ++
- drivers/usb/host/xhci-ring.c | 29 +++-
- drivers/usb/host/xhci.c | 38 ++++-
- drivers/usb/host/xhci.h | 5 +-
- drivers/usb/phy/phy-fsl-usb.c | 59 +++++--
+ drivers/usb/host/ehci.h | 9 +
+ drivers/usb/host/fsl-mph-dr-of.c | 16 +-
+ drivers/usb/host/xhci-hub.c | 22 ++
+ drivers/usb/host/xhci-plat.c | 16 +-
+ drivers/usb/host/xhci-ring.c | 29 ++-
+ drivers/usb/host/xhci.c | 38 +++-
+ drivers/usb/host/xhci.h | 6 +-
+ drivers/usb/phy/phy-fsl-usb.c | 59 ++++--
drivers/usb/phy/phy-fsl-usb.h | 8 +
include/linux/usb.h | 1 +
include/linux/usb/of.h | 2 +
- 25 files changed, 836 insertions(+), 88 deletions(-)
+ 26 files changed, 867 insertions(+), 92 deletions(-)
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
#endif
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
-@@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX
+@@ -164,8 +164,8 @@ config XPS_USB_HCD_XILINX
+ devices only.
config USB_EHCI_FSL
- tristate "Support for Freescale PPC on-chip EHCI USB controller"
+- tristate "Support for Freescale PPC on-chip EHCI USB controller"
- depends on FSL_SOC
++ tristate "Support for Freescale QorIQ(ARM/PPC) on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD
select USB_EHCI_ROOT_HUB_TT
---help---
/*
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -98,8 +98,8 @@ static struct platform_device *fsl_usb2_
+
+ pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
+
+- if (!pdev->dev.dma_mask)
+- pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
++ if (!pdev->dev.dma_mask && ofdev->dev.of_node)
++ of_dma_configure(&pdev->dev, ofdev->dev.of_node);
+ else
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
@@ -226,6 +226,18 @@ static int fsl_usb2_mph_dr_of_probe(stru
of_property_read_bool(np, "fsl,usb-erratum-a007792");
pdata->has_fsl_erratum_a005275 =
/*
* Determine whether phy_clk_valid needs to be checked
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -562,12 +562,34 @@ void xhci_set_link_state(struct xhci_hcd
+ int port_id, u32 link_state)
+ {
+ u32 temp;
++ u32 portpmsc_u2_backup = 0;
++
++ /* Backup U2 timeout info before initiating U3 entry erratum A-010131 */
++ if (xhci->shared_hcd->speed >= HCD_USB3 &&
++ link_state == USB_SS_PORT_LS_U3 &&
++ (xhci->quirks & XHCI_DIS_U1U2_WHEN_U3)) {
++ portpmsc_u2_backup = readl(port_array[port_id] + PORTPMSC);
++ portpmsc_u2_backup &= PORT_U2_TIMEOUT_MASK;
++ temp = readl(port_array[port_id] + PORTPMSC);
++ temp |= PORT_U2_TIMEOUT_MASK;
++ writel(temp, port_array[port_id] + PORTPMSC);
++ }
+
+ temp = readl(port_array[port_id]);
+ temp = xhci_port_state_to_neutral(temp);
+ temp &= ~PORT_PLS_MASK;
+ temp |= PORT_LINK_STROBE | link_state;
+ writel(temp, port_array[port_id]);
++
++ /* Restore U2 timeout info after U3 entry complete */
++ if (xhci->shared_hcd->speed >= HCD_USB3 &&
++ link_state == USB_SS_PORT_LS_U3 &&
++ (xhci->quirks & XHCI_DIS_U1U2_WHEN_U3)) {
++ temp = readl(port_array[port_id] + PORTPMSC);
++ temp &= ~PORT_U2_TIMEOUT_MASK;
++ temp |= portpmsc_u2_backup;
++ writel(temp, port_array[port_id] + PORTPMSC);
++ }
+ }
+
+ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
-@@ -223,6 +223,16 @@ static int xhci_plat_probe(struct platfo
- if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
- xhci->quirks |= XHCI_LPM_SUPPORT;
+@@ -220,8 +220,22 @@ static int xhci_plat_probe(struct platfo
+ goto disable_clk;
+ }
+- if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
++ if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable")) {
+ xhci->quirks |= XHCI_LPM_SUPPORT;
++ if (device_property_read_bool(&pdev->dev,
++ "snps,dis-u1u2-when-u3-quirk"))
++ xhci->quirks |= XHCI_DIS_U1U2_WHEN_U3;
++ }
++
+ if (device_property_read_bool(&pdev->dev, "quirk-reverse-in-out"))
+ xhci->quirks |= XHCI_REVERSE_IN_OUT;
+
+
+ if (device_property_read_bool(&pdev->dev, "quirk-stop-ep-in-u1"))
+ xhci->quirks |= XHCI_STOP_EP_IN_U1;
-+
+
if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
-
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1852,14 +1852,17 @@ static int finish_td(struct xhci_hcd *xh
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
-@@ -1661,6 +1661,9 @@ struct xhci_hcd {
+@@ -1661,6 +1661,10 @@ struct xhci_hcd {
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
#define XHCI_MISSING_CAS (1 << 24)
+#define XHCI_REVERSE_IN_OUT (1 << 29)
+#define XHCI_STOP_TRANSFER_IN_BLOCK (1 << 30)
+#define XHCI_STOP_EP_IN_U1 (1 << 31)
++#define XHCI_DIS_U1U2_WHEN_U3 (1ULL << 32)
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-From 954edeee88305fecefe3f681e67a298f06e27974 Mon Sep 17 00:00:00 2001
+From e6af99cc1d56322fc960d072af1a7e0e9285b90c Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 17 Jan 2018 15:48:47 +0800
-Subject: [PATCH 30/30] vfio: support layerscape
+Date: Thu, 5 Jul 2018 17:39:43 +0800
+Subject: [PATCH 30/32] vfio: support layerscape
This is an integrated patch for layerscape vfio support.
drivers/vfio/Makefile | 1 +
drivers/vfio/fsl-mc/Kconfig | 9 +
drivers/vfio/fsl-mc/Makefile | 2 +
- drivers/vfio/fsl-mc/vfio_fsl_mc.c | 753 ++++++++++++++++++++++++++++++
- drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++++
- drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 +++
+ drivers/vfio/fsl-mc/vfio_fsl_mc.c | 752 ++++++++++++++++++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++
+ drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 ++
drivers/vfio/vfio_iommu_type1.c | 39 +-
include/uapi/linux/vfio.h | 1 +
- 9 files changed, 1058 insertions(+), 2 deletions(-)
+ 9 files changed, 1057 insertions(+), 2 deletions(-)
create mode 100644 drivers/vfio/fsl-mc/Kconfig
create mode 100644 drivers/vfio/fsl-mc/Makefile
create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
+obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
-@@ -0,0 +1,753 @@
+@@ -0,0 +1,752 @@
+/*
+ * Freescale Management Complex (MC) device passthrough using VFIO
+ *
+#include <linux/vfio.h>
+#include <linux/delay.h>
+
-+#include "../../staging/fsl-mc/include/mc.h"
-+#include "../../staging/fsl-mc/include/mc-bus.h"
-+#include "../../staging/fsl-mc/include/mc-sys.h"
-+#include "../../staging/fsl-mc/bus/dprc-cmd.h"
++#include <linux/fsl/mc.h>
+
+#include "vfio_fsl_mc_private.h"
+
+ uint64_t data[8];
+ int i;
+
-+ /* Read ioctl supported only for DPRC device */
-+ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++ /* Read ioctl supported only for DPRC and DPMCP device */
++ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
++ strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
+ return -EINVAL;
+
+ if (index >= vdev->num_regions)
+ uint64_t data[8];
+ int ret;
+
-+ /* Write ioctl supported only for DPRC device */
-+ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
++ /* Write ioctl supported only for DPRC and DPMCP device */
++ if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
++ strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
+ return -EINVAL;
+
+ if (index >= vdev->num_regions)
+#include <linux/eventfd.h>
+#include <linux/msi.h>
+
-+#include "../../staging/fsl-mc/include/mc.h"
++#include <linux/fsl/mc.h>
+#include "vfio_fsl_mc_private.h"
+
+static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
--- /dev/null
+From 2887442bd13bc8be687afc7172cb01c2b7f0dd3b Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Thu, 5 Jul 2018 17:41:14 +0800
+Subject: [PATCH 31/32] flexcan: support layerscape
+
+This is an integrated patch for layerscape flexcan support.
+
+Signed-off-by: Pankaj Bansal <pankaj.bansal@nxp.com>
+Signed-off-by: Bhupesh Sharma <bhupesh.sharma@freescale.com>
+Signed-off-by: Sakar Arora <Sakar.Arora@freescale.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ drivers/net/can/flexcan.c | 212 ++++++++++++++++++++++----------------
+ 1 file changed, 123 insertions(+), 89 deletions(-)
+
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -184,6 +184,7 @@
+ * MX53 FlexCAN2 03.00.00.00 yes no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no yes
+ * VF610 FlexCAN3 ? no yes yes yes?
++ * LS1021A FlexCAN2 03.00.04.00 no yes no yes
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+@@ -260,6 +261,10 @@ struct flexcan_priv {
+ struct flexcan_platform_data *pdata;
+ const struct flexcan_devtype_data *devtype_data;
+ struct regulator *reg_xceiver;
++
++ /* Read and Write APIs */
++ u32 (*read)(void __iomem *addr);
++ void (*write)(u32 val, void __iomem *addr);
+ };
+
+ static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+@@ -276,6 +281,10 @@ static struct flexcan_devtype_data fsl_v
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
+ };
+
++static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
++ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
++};
++
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 4,
+@@ -288,32 +297,38 @@ static const struct can_bittiming_const
+ .brp_inc = 1,
+ };
+
+-/* Abstract off the read/write for arm versus ppc. This
+- * assumes that PPC uses big-endian registers and everything
+- * else uses little-endian registers, independent of CPU
+- * endianness.
++/* FlexCAN module is essentially modelled as a little-endian IP in most
++ * SoCs, i.e the registers as well as the message buffer areas are
++ * implemented in a little-endian fashion.
++ *
++ * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
++ * module in a big-endian fashion (i.e the registers as well as the
++ * message buffer areas are implemented in a big-endian way).
++ *
++ * In addition, the FlexCAN module can be found on SoCs having ARM or
++ * PPC cores. So, we need to abstract off the register read/write
++ * functions, ensuring that these cater to all the combinations of module
++ * endianness and underlying CPU endianness.
+ */
+-#if defined(CONFIG_PPC)
+-static inline u32 flexcan_read(void __iomem *addr)
++static inline u32 flexcan_read_be(void __iomem *addr)
+ {
+- return in_be32(addr);
++ return ioread32be(addr);
+ }
+
+-static inline void flexcan_write(u32 val, void __iomem *addr)
++static inline void flexcan_write_be(u32 val, void __iomem *addr)
+ {
+- out_be32(addr, val);
++ iowrite32be(val, addr);
+ }
+-#else
+-static inline u32 flexcan_read(void __iomem *addr)
++
++static inline u32 flexcan_read_le(void __iomem *addr)
+ {
+- return readl(addr);
++ return ioread32(addr);
+ }
+
+-static inline void flexcan_write(u32 val, void __iomem *addr)
++static inline void flexcan_write_le(u32 val, void __iomem *addr)
+ {
+- writel(val, addr);
++ iowrite32(val, addr);
+ }
+-#endif
+
+ static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+ {
+@@ -344,14 +359,14 @@ static int flexcan_chip_enable(struct fl
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ reg &= ~FLEXCAN_MCR_MDIS;
+- flexcan_write(reg, ®s->mcr);
++ priv->write(reg, ®s->mcr);
+
+- while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
++ while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
+ udelay(10);
+
+- if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)
++ if (priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+@@ -363,14 +378,14 @@ static int flexcan_chip_disable(struct f
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ reg |= FLEXCAN_MCR_MDIS;
+- flexcan_write(reg, ®s->mcr);
++ priv->write(reg, ®s->mcr);
+
+- while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
++ while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
+ udelay(10);
+
+- if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
++ if (!(priv->read(®s->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+@@ -382,14 +397,14 @@ static int flexcan_chip_freeze(struct fl
+ unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ u32 reg;
+
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ reg |= FLEXCAN_MCR_HALT;
+- flexcan_write(reg, ®s->mcr);
++ priv->write(reg, ®s->mcr);
+
+- while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
++ while (timeout-- && !(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ udelay(100);
+
+- if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
++ if (!(priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+@@ -401,14 +416,14 @@ static int flexcan_chip_unfreeze(struct
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ reg &= ~FLEXCAN_MCR_HALT;
+- flexcan_write(reg, ®s->mcr);
++ priv->write(reg, ®s->mcr);
+
+- while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
++ while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ udelay(10);
+
+- if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)
++ if (priv->read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+@@ -419,11 +434,11 @@ static int flexcan_chip_softreset(struct
+ struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+- flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr);
+- while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST))
++ priv->write(FLEXCAN_MCR_SOFTRST, ®s->mcr);
++ while (timeout-- && (priv->read(®s->mcr) & FLEXCAN_MCR_SOFTRST))
+ udelay(10);
+
+- if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)
++ if (priv->read(®s->mcr) & FLEXCAN_MCR_SOFTRST)
+ return -ETIMEDOUT;
+
+ return 0;
+@@ -434,7 +449,7 @@ static int __flexcan_get_berr_counter(co
+ {
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+- u32 reg = flexcan_read(®s->ecr);
++ u32 reg = priv->read(®s->ecr);
+
+ bec->txerr = (reg >> 0) & 0xff;
+ bec->rxerr = (reg >> 8) & 0xff;
+@@ -491,24 +506,24 @@ static int flexcan_start_xmit(struct sk_
+
+ if (cf->can_dlc > 0) {
+ data = be32_to_cpup((__be32 *)&cf->data[0]);
+- flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[0]);
++ priv->write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[0]);
+ }
+ if (cf->can_dlc > 4) {
+ data = be32_to_cpup((__be32 *)&cf->data[4]);
+- flexcan_write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[1]);
++ priv->write(data, ®s->mb[FLEXCAN_TX_BUF_ID].data[1]);
+ }
+
+ can_put_echo_skb(skb, dev, 0);
+
+- flexcan_write(can_id, ®s->mb[FLEXCAN_TX_BUF_ID].can_id);
+- flexcan_write(ctrl, ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
++ priv->write(can_id, ®s->mb[FLEXCAN_TX_BUF_ID].can_id);
++ priv->write(ctrl, ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+ /* Errata ERR005829 step8:
+ * Write twice INACTIVE(0x8) code to first MB.
+ */
+- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+ return NETDEV_TX_OK;
+@@ -632,8 +647,8 @@ static void flexcan_read_fifo(const stru
+ struct flexcan_mb __iomem *mb = ®s->mb[0];
+ u32 reg_ctrl, reg_id;
+
+- reg_ctrl = flexcan_read(&mb->can_ctrl);
+- reg_id = flexcan_read(&mb->can_id);
++ reg_ctrl = priv->read(&mb->can_ctrl);
++ reg_id = priv->read(&mb->can_id);
+ if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+ cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+@@ -643,12 +658,12 @@ static void flexcan_read_fifo(const stru
+ cf->can_id |= CAN_RTR_FLAG;
+ cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+- *(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
+- *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
++ *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
++ *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
+
+ /* mark as read */
+- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1);
+- flexcan_read(®s->timer);
++ priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1);
++ priv->read(®s->timer);
+ }
+
+ static int flexcan_read_frame(struct net_device *dev)
+@@ -685,17 +700,17 @@ static int flexcan_poll(struct napi_stru
+ /* The error bits are cleared on read,
+ * use saved value from irq handler.
+ */
+- reg_esr = flexcan_read(®s->esr) | priv->reg_esr;
++ reg_esr = priv->read(®s->esr) | priv->reg_esr;
+
+ /* handle state changes */
+ work_done += flexcan_poll_state(dev, reg_esr);
+
+ /* handle RX-FIFO */
+- reg_iflag1 = flexcan_read(®s->iflag1);
++ reg_iflag1 = priv->read(®s->iflag1);
+ while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
+ work_done < quota) {
+ work_done += flexcan_read_frame(dev);
+- reg_iflag1 = flexcan_read(®s->iflag1);
++ reg_iflag1 = priv->read(®s->iflag1);
+ }
+
+ /* report bus errors */
+@@ -705,8 +720,8 @@ static int flexcan_poll(struct napi_stru
+ if (work_done < quota) {
+ napi_complete_done(napi, work_done);
+ /* enable IRQs */
+- flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1);
+- flexcan_write(priv->reg_ctrl_default, ®s->ctrl);
++ priv->write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1);
++ priv->write(priv->reg_ctrl_default, ®s->ctrl);
+ }
+
+ return work_done;
+@@ -720,12 +735,12 @@ static irqreturn_t flexcan_irq(int irq,
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_iflag1, reg_esr;
+
+- reg_iflag1 = flexcan_read(®s->iflag1);
+- reg_esr = flexcan_read(®s->esr);
++ reg_iflag1 = priv->read(®s->iflag1);
++ reg_esr = priv->read(®s->esr);
+
+ /* ACK all bus error and state change IRQ sources */
+ if (reg_esr & FLEXCAN_ESR_ALL_INT)
+- flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr);
++ priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr);
+
+ /* schedule NAPI in case of:
+ * - rx IRQ
+@@ -739,16 +754,16 @@ static irqreturn_t flexcan_irq(int irq,
+ * save them for later use.
+ */
+ priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
+- flexcan_write(FLEXCAN_IFLAG_DEFAULT &
++ priv->write(FLEXCAN_IFLAG_DEFAULT &
+ ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->imask1);
+- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
++ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ ®s->ctrl);
+ napi_schedule(&priv->napi);
+ }
+
+ /* FIFO overflow */
+ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+- flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1);
++ priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, ®s->iflag1);
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ }
+@@ -760,9 +775,9 @@ static irqreturn_t flexcan_irq(int irq,
+ can_led_event(dev, CAN_LED_EVENT_TX);
+
+ /* after sending a RTR frame MB is in RX mode */
+- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+- flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1);
++ priv->write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1);
+ netif_wake_queue(dev);
+ }
+
+@@ -776,7 +791,7 @@ static void flexcan_set_bittiming(struct
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg;
+
+- reg = flexcan_read(®s->ctrl);
++ reg = priv->read(®s->ctrl);
+ reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
+ FLEXCAN_CTRL_RJW(0x3) |
+ FLEXCAN_CTRL_PSEG1(0x7) |
+@@ -800,11 +815,11 @@ static void flexcan_set_bittiming(struct
+ reg |= FLEXCAN_CTRL_SMP;
+
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+- flexcan_write(reg, ®s->ctrl);
++ priv->write(reg, ®s->ctrl);
+
+ /* print chip status */
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+- flexcan_read(®s->mcr), flexcan_read(®s->ctrl));
++ priv->read(®s->mcr), priv->read(®s->ctrl));
+ }
+
+ /* flexcan_chip_start
+@@ -842,13 +857,13 @@ static int flexcan_chip_start(struct net
+ * choose format C
+ * set max mailbox number
+ */
+- reg_mcr = flexcan_read(®s->mcr);
++ reg_mcr = priv->read(®s->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+ reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
+ FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS |
+ FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+ netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+- flexcan_write(reg_mcr, ®s->mcr);
++ priv->write(reg_mcr, ®s->mcr);
+
+ /* CTRL
+ *
+@@ -861,7 +876,7 @@ static int flexcan_chip_start(struct net
+ * enable bus off interrupt
+ * (== FLEXCAN_CTRL_ERR_STATE)
+ */
+- reg_ctrl = flexcan_read(®s->ctrl);
++ reg_ctrl = priv->read(®s->ctrl);
+ reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
+ reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
+ FLEXCAN_CTRL_ERR_STATE;
+@@ -881,29 +896,29 @@ static int flexcan_chip_start(struct net
+ /* leave interrupts disabled for now */
+ reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
+ netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+- flexcan_write(reg_ctrl, ®s->ctrl);
++ priv->write(reg_ctrl, ®s->ctrl);
+
+ /* clear and invalidate all mailboxes first */
+ for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) {
+- flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+ ®s->mb[i].can_ctrl);
+ }
+
+ /* Errata ERR005829: mark first TX mailbox as INACTIVE */
+- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ ®s->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+ /* mark TX mailbox as INACTIVE */
+- flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
++ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ ®s->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+ /* acceptance mask/acceptance code (accept everything) */
+- flexcan_write(0x0, ®s->rxgmask);
+- flexcan_write(0x0, ®s->rx14mask);
+- flexcan_write(0x0, ®s->rx15mask);
++ priv->write(0x0, ®s->rxgmask);
++ priv->write(0x0, ®s->rx14mask);
++ priv->write(0x0, ®s->rx15mask);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+- flexcan_write(0x0, ®s->rxfgmask);
++ priv->write(0x0, ®s->rxfgmask);
+
+ /* On Vybrid, disable memory error detection interrupts
+ * and freeze mode.
+@@ -916,16 +931,16 @@ static int flexcan_chip_start(struct net
+ * and Correction of Memory Errors" to write to
+ * MECR register
+ */
+- reg_ctrl2 = flexcan_read(®s->ctrl2);
++ reg_ctrl2 = priv->read(®s->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+- flexcan_write(reg_ctrl2, ®s->ctrl2);
++ priv->write(reg_ctrl2, ®s->ctrl2);
+
+- reg_mecr = flexcan_read(®s->mecr);
++ reg_mecr = priv->read(®s->mecr);
+ reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+- flexcan_write(reg_mecr, ®s->mecr);
++ priv->write(reg_mecr, ®s->mecr);
+ reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+ FLEXCAN_MECR_FANCEI_MSK);
+- flexcan_write(reg_mecr, ®s->mecr);
++ priv->write(reg_mecr, ®s->mecr);
+ }
+
+ err = flexcan_transceiver_enable(priv);
+@@ -941,13 +956,13 @@ static int flexcan_chip_start(struct net
+
+ /* enable interrupts atomically */
+ disable_irq(dev->irq);
+- flexcan_write(priv->reg_ctrl_default, ®s->ctrl);
+- flexcan_write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1);
++ priv->write(priv->reg_ctrl_default, ®s->ctrl);
++ priv->write(FLEXCAN_IFLAG_DEFAULT, ®s->imask1);
+ enable_irq(dev->irq);
+
+ /* print chip status */
+ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
+- flexcan_read(®s->mcr), flexcan_read(®s->ctrl));
++ priv->read(®s->mcr), priv->read(®s->ctrl));
+
+ return 0;
+
+@@ -972,8 +987,8 @@ static void flexcan_chip_stop(struct net
+ flexcan_chip_disable(priv);
+
+ /* Disable all interrupts */
+- flexcan_write(0, ®s->imask1);
+- flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
++ priv->write(0, ®s->imask1);
++ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ ®s->ctrl);
+
+ flexcan_transceiver_disable(priv);
+@@ -1089,25 +1104,25 @@ static int register_flexcandev(struct ne
+ err = flexcan_chip_disable(priv);
+ if (err)
+ goto out_disable_per;
+- reg = flexcan_read(®s->ctrl);
++ reg = priv->read(®s->ctrl);
+ reg |= FLEXCAN_CTRL_CLK_SRC;
+- flexcan_write(reg, ®s->ctrl);
++ priv->write(reg, ®s->ctrl);
+
+ err = flexcan_chip_enable(priv);
+ if (err)
+ goto out_chip_disable;
+
+ /* set freeze, halt and activate FIFO, restrict register access */
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+ FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+- flexcan_write(reg, ®s->mcr);
++ priv->write(reg, ®s->mcr);
+
+ /* Currently we only support newer versions of this core
+ * featuring a RX FIFO. Older cores found on some Coldfire
+ * derivates are not yet supported.
+ */
+- reg = flexcan_read(®s->mcr);
++ reg = priv->read(®s->mcr);
+ if (!(reg & FLEXCAN_MCR_FEN)) {
+ netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
+ err = -ENODEV;
+@@ -1135,8 +1150,12 @@ static void unregister_flexcandev(struct
+ static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
++ { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
++ { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
++ { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
++ { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+ { /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, flexcan_of_match);
+@@ -1213,6 +1232,21 @@ static int flexcan_probe(struct platform
+ dev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(dev);
++
++ if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
++ priv->read = flexcan_read_be;
++ priv->write = flexcan_write_be;
++ } else {
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "fsl,p1010-flexcan")) {
++ priv->read = flexcan_read_be;
++ priv->write = flexcan_write_be;
++ } else {
++ priv->read = flexcan_read_le;
++ priv->write = flexcan_write_le;
++ }
++ }
++
+ priv->can.clock.freq = clock_freq;
+ priv->can.bittiming_const = &flexcan_bittiming_const;
+ priv->can.do_set_mode = flexcan_set_mode;
--- /dev/null
+From fe22151c95c02c6bb145ea6c3685941e8fb09d60 Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <yangbo.lu@nxp.com>
+Date: Thu, 5 Jul 2018 17:43:16 +0800
+Subject: [PATCH 32/32] kvm: support layerscape
+
+This is an integrated patch for layerscape kvm support.
+
+Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+---
+ arch/arm/include/asm/kvm_mmu.h | 3 +-
+ arch/arm/kvm/mmu.c | 56 ++++++++++++++++++++++++++++++--
+ arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
+ virt/kvm/arm/vgic/vgic-its.c | 24 +++++++++++---
+ virt/kvm/arm/vgic/vgic-v2.c | 3 +-
+ 5 files changed, 88 insertions(+), 12 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+- phys_addr_t pa, unsigned long size, bool writable);
++ phys_addr_t pa, unsigned long size, bool writable,
++ pgprot_t prot);
+
+ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1020,9 +1020,11 @@ static int stage2_pmdp_test_and_clear_yo
+ * @guest_ipa: The IPA at which to insert the mapping
+ * @pa: The physical address of the device
+ * @size: The size of the mapping
++ * @prot: S2 page translation bits
+ */
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+- phys_addr_t pa, unsigned long size, bool writable)
++ phys_addr_t pa, unsigned long size, bool writable,
++ pgprot_t prot)
+ {
+ phys_addr_t addr, end;
+ int ret = 0;
+@@ -1033,7 +1035,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
+ pfn = __phys_to_pfn(pa);
+
+ for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
++ pte_t pte = pfn_pte(pfn, prot);
+
+ if (writable)
+ pte = kvm_s2pte_mkwrite(pte);
+@@ -1057,6 +1059,30 @@ out:
+ return ret;
+ }
+
++#ifdef CONFIG_ARM64
++static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
++{
++ switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
++ case PTE_ATTRINDX(MT_DEVICE_nGnRE):
++ case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
++ case PTE_ATTRINDX(MT_DEVICE_GRE):
++ return PAGE_S2_DEVICE;
++ case PTE_ATTRINDX(MT_NORMAL_NC):
++ case PTE_ATTRINDX(MT_NORMAL):
++ return (pgprot_val(prot) & PTE_SHARED)
++ ? PAGE_S2
++ : PAGE_S2_NS;
++ }
++
++ return PAGE_S2_DEVICE;
++}
++#else
++static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
++{
++ return PAGE_S2_DEVICE;
++}
++#endif
++
+ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
+ {
+ kvm_pfn_t pfn = *pfnp;
+@@ -1308,6 +1334,19 @@ static int user_mem_abort(struct kvm_vcp
+ hugetlb = true;
+ gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+ } else {
++ if (!is_vm_hugetlb_page(vma)) {
++ pte_t *pte;
++ spinlock_t *ptl;
++ pgprot_t prot;
++
++ pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
++ prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
++ pte_unmap_unlock(pte, ptl);
++#ifdef CONFIG_ARM64
++ if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
++ mem_type = PAGE_S2_NS;
++#endif
++ }
+ /*
+ * Pages belonging to memslots that don't have the same
+ * alignment for userspace and IPA cannot be mapped using
+@@ -1345,6 +1384,11 @@ static int user_mem_abort(struct kvm_vcp
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+
++#ifdef CONFIG_ARM64
++ if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
++ flags |= KVM_S2PTE_FLAG_IS_IOMAP;
++ } else
++#endif
+ if (kvm_is_device_pfn(pfn)) {
+ mem_type = PAGE_S2_DEVICE;
+ flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+@@ -1882,6 +1926,9 @@ int kvm_arch_prepare_memory_region(struc
+ gpa_t gpa = mem->guest_phys_addr +
+ (vm_start - mem->userspace_addr);
+ phys_addr_t pa;
++ pgprot_t prot;
++ pte_t *pte;
++ spinlock_t *ptl;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;
+@@ -1891,10 +1938,13 @@ int kvm_arch_prepare_memory_region(struc
+ ret = -EINVAL;
+ goto out;
+ }
++ pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
++ prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
++ pte_unmap_unlock(pte, ptl);
+
+ ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+ vm_end - vm_start,
+- writable);
++ writable, prot);
+ if (ret)
+ break;
+ }
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
+ int kvm_alloc_stage2_pgd(struct kvm *kvm);
+ void kvm_free_stage2_pgd(struct kvm *kvm);
+ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+- phys_addr_t pa, unsigned long size, bool writable);
++ phys_addr_t pa, unsigned long size, bool writable,
++ pgprot_t prot);
+
+ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+@@ -274,8 +275,15 @@ static inline void __coherent_cache_gues
+
+ static inline void __kvm_flush_dcache_pte(pte_t pte)
+ {
+- struct page *page = pte_page(pte);
+- kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
++ if (pfn_valid(pte_pfn(pte))) {
++ struct page *page = pte_page(pte);
++ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
++ } else {
++ void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
++
++ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
++ iounmap(va);
++ }
+ }
+
+ static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -176,6 +176,8 @@ static struct its_itte *find_itte(struct
+
+ #define GIC_LPI_OFFSET 8192
+
++#define VITS_TYPER_DEVBITS 17
++
+ /*
+ * Finds and returns a collection in the ITS collection table.
+ * Must be called with the its_lock mutex held.
+@@ -375,7 +377,7 @@ static unsigned long vgic_mmio_read_its_
+ * To avoid memory waste in the guest, we keep the number of IDBits and
+ * DevBits low - as least for the time being.
+ */
+- reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
++ reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
+ reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
+
+ return extract_bytes(reg, addr & 7, len);
+@@ -601,16 +603,30 @@ static int vgic_its_cmd_handle_movi(stru
+ * Check whether an ID can be stored into the corresponding guest table.
+ * For a direct table this is pretty easy, but gets a bit nasty for
+ * indirect tables. We check whether the resulting guest physical address
+- * is actually valid (covered by a memslot and guest accessbible).
++ * is actually valid (covered by a memslot and guest accessible).
+ * For this we have to read the respective first level entry.
+ */
+-static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
++static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id)
+ {
+ int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
++ u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
+ int index;
+- u64 indirect_ptr;
+ gfn_t gfn;
+
++ switch (type) {
++ case GITS_BASER_TYPE_DEVICE:
++ if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
++ return false;
++ break;
++ case GITS_BASER_TYPE_COLLECTION:
++ /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
++ if (id >= BIT_ULL(16))
++ return false;
++ break;
++ default:
++ return false;
++ }
++
+ if (!(baser & GITS_BASER_INDIRECT)) {
+ phys_addr_t addr;
+
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -290,7 +290,8 @@ int vgic_v2_map_resources(struct kvm *kv
+ if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ kvm_vgic_global_state.vcpu_base,
+- KVM_VGIC_V2_CPU_SIZE, true);
++ KVM_VGIC_V2_CPU_SIZE, true,
++ PAGE_S2_DEVICE);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ goto out;
+++ /dev/null
-From f27ef8941ca29b2d10428754be51e8ee06bb1263 Mon Sep 17 00:00:00 2001
-From: Mathew McBride <matt@traverse.com.au>
-Date: Mon, 7 Aug 2017 10:19:48 +1000
-Subject: [PATCH] Recognize when an RGMII Link is set as fixed (in the device
- tree) and set up the MAC accordingly
-
----
- drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 1
- drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 13 ++++++++++
- 2 files changed, 14 insertions(+)
-
---- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-+++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
-@@ -386,6 +386,7 @@ static int __cold mac_probe(struct platf
- mac_dev->fixed_link->duplex = phy->duplex;
- mac_dev->fixed_link->pause = phy->pause;
- mac_dev->fixed_link->asym_pause = phy->asym_pause;
-+ printk(KERN_INFO "Setting up fixed link, speed %d duplex %d\n", mac_dev->fixed_link->speed, mac_dev->fixed_link->duplex);
- }
-
- _errno = mac_dev->init(mac_dev);
---- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-+++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
-@@ -36,6 +36,8 @@
-
- @Description FM mEMAC driver
- *//***************************************************************************/
-+#include <../../../../sdk_dpaa/mac.h>
-+#include <linux/phy_fixed.h>
-
- #include "std_ext.h"
- #include "string_ext.h"
-@@ -48,6 +50,8 @@
- #include "memac.h"
-
-
-+static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex);
-+
- /*****************************************************************************/
- /* Internal routines */
- /*****************************************************************************/
-@@ -276,11 +280,20 @@ static t_Error MemacEnable(t_Handle h_Me
- {
- t_Memac *p_Memac = (t_Memac *)h_Memac;
-
-+ struct mac_device *mac_dev = (struct mac_device *)p_Memac->h_App;
-+
- SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
- SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
-
- fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
-
-+ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_RGMII) {
-+ if (mac_dev->fixed_link) {
-+ printk(KERN_INFO "This is a fixed-link, forcing speed %d duplex %d\n",mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ MemacAdjustLink(h_Memac,mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
-+ }
-+ }
-+
- return E_OK;
- }
-
--- /dev/null
+From f27ef8941ca29b2d10428754be51e8ee06bb1263 Mon Sep 17 00:00:00 2001
+From: Mathew McBride <matt@traverse.com.au>
+Date: Mon, 7 Aug 2017 10:19:48 +1000
+Subject: [PATCH] Recognize when an RGMII Link is set as fixed (in the device
+ tree) and set up the MAC accordingly
+
+---
+ drivers/net/ethernet/freescale/sdk_dpaa/mac.c | 1
+ drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c | 13 ++++++++++
+ 2 files changed, 14 insertions(+)
+
+--- a/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
++++ b/drivers/net/ethernet/freescale/sdk_dpaa/mac.c
+@@ -386,6 +386,7 @@ static int __cold mac_probe(struct platf
+ mac_dev->fixed_link->duplex = phy->duplex;
+ mac_dev->fixed_link->pause = phy->pause;
+ mac_dev->fixed_link->asym_pause = phy->asym_pause;
++ printk(KERN_INFO "Setting up fixed link, speed %d duplex %d\n", mac_dev->fixed_link->speed, mac_dev->fixed_link->duplex);
+ }
+
+ _errno = mac_dev->init(mac_dev);
+--- a/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
++++ b/drivers/net/ethernet/freescale/sdk_fman/Peripherals/FM/MAC/memac.c
+@@ -36,6 +36,8 @@
+
+ @Description FM mEMAC driver
+ *//***************************************************************************/
++#include <../../../../sdk_dpaa/mac.h>
++#include <linux/phy_fixed.h>
+
+ #include "std_ext.h"
+ #include "string_ext.h"
+@@ -48,6 +50,8 @@
+ #include "memac.h"
+
+
++static t_Error MemacAdjustLink(t_Handle h_Memac, e_EnetSpeed speed, bool fullDuplex);
++
+ /*****************************************************************************/
+ /* Internal routines */
+ /*****************************************************************************/
+@@ -276,11 +280,20 @@ static t_Error MemacEnable(t_Handle h_Me
+ {
+ t_Memac *p_Memac = (t_Memac *)h_Memac;
+
++ struct mac_device *mac_dev = (struct mac_device *)p_Memac->h_App;
++
+ SANITY_CHECK_RETURN_ERROR(p_Memac, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(!p_Memac->p_MemacDriverParam, E_INVALID_STATE);
+
+ fman_memac_enable(p_Memac->p_MemMap, (mode & e_COMM_MODE_RX), (mode & e_COMM_MODE_TX));
+
++ if (ENET_INTERFACE_FROM_MODE(p_Memac->enetMode) == e_ENET_IF_RGMII) {
++ if (mac_dev->fixed_link) {
++ printk(KERN_INFO "This is a fixed-link, forcing speed %d duplex %d\n",mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
++ MemacAdjustLink(h_Memac,mac_dev->fixed_link->speed,mac_dev->fixed_link->duplex);
++ }
++ }
++
+ return E_OK;
+ }
+