--- /dev/null
+From be59072c6eeb7535bf9a339fb9d5a8bfae17ac22 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 14 Aug 2023 12:40:23 +0200
+Subject: [PATCH] dt-bindings: clock: qcom: ipq4019: add missing networking
+ resets
+
+Add bindings for the missing networking resets found in IPQ4019 GCC.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230814104119.96858-1-robert.marko@sartura.hr
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ include/dt-bindings/clock/qcom,gcc-ipq4019.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
++++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+@@ -165,5 +165,11 @@
+ #define GCC_QDSS_BCR 69
+ #define GCC_MPM_BCR 70
+ #define GCC_SPDM_BCR 71
++#define ESS_MAC1_ARES 72
++#define ESS_MAC2_ARES 73
++#define ESS_MAC3_ARES 74
++#define ESS_MAC4_ARES 75
++#define ESS_MAC5_ARES 76
++#define ESS_PSGMII_ARES 77
+
+ #endif
--- /dev/null
+From 20014461691efc9e274c3870357152db7f091820 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 14 Aug 2023 12:40:24 +0200
+Subject: [PATCH] clk: qcom: gcc-ipq4019: add missing networking resets
+
+IPQ4019 has more networking-related resets that will be required for future
+wired networking support, so let's add them.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Link: https://lore.kernel.org/r/20230814104119.96858-2-robert.marko@sartura.hr
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/gcc-ipq4019.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/clk/qcom/gcc-ipq4019.c
++++ b/drivers/clk/qcom/gcc-ipq4019.c
+@@ -1707,6 +1707,12 @@ static const struct qcom_reset_map gcc_i
+ [GCC_TCSR_BCR] = {0x22000, 0},
+ [GCC_MPM_BCR] = {0x24000, 0},
+ [GCC_SPDM_BCR] = {0x25000, 0},
++ [ESS_MAC1_ARES] = {0x1200C, 0},
++ [ESS_MAC2_ARES] = {0x1200C, 1},
++ [ESS_MAC3_ARES] = {0x1200C, 2},
++ [ESS_MAC4_ARES] = {0x1200C, 3},
++ [ESS_MAC5_ARES] = {0x1200C, 4},
++ [ESS_PSGMII_ARES] = {0x1200C, 5},
+ };
+
+ static const struct regmap_config gcc_ipq4019_regmap_config = {
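
The two patches above only expose and map the new reset lines; a consumer
driver then picks them up through the reset framework. Below is a minimal
consumer sketch, not part of the patches themselves: the "psgmii" reset name
and the device tree node it assumes are hypothetical examples.

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Assumes a consumer node with:
 *   resets = <&gcc ESS_PSGMII_ARES>;
 *   reset-names = "psgmii";
 * The reset map above ties ESS_PSGMII_ARES to bit 5 of GCC register 0x1200C.
 */
static int example_psgmii_reset(struct platform_device *pdev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get(&pdev->dev, "psgmii");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Pulse the reset line and give the hardware time to settle */
	reset_control_assert(rst);
	usleep_range(100, 200);
	reset_control_deassert(rst);

	return 0;
}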
--- /dev/null
+From 9a0e95e34e9c0a713ddfd48c3a88a20d2bdfd514 Mon Sep 17 00:00:00 2001
+From: Gabor Juhos <j4g8y7@gmail.com>
+Date: Fri, 11 Aug 2023 13:10:07 +0200
+Subject: [PATCH] net: phy: Introduce PSGMII PHY interface mode
+
+The PSGMII interface is similar to QSGMII. The main difference
+is that the PSGMII interface combines five SGMII lines into a
+single link while in QSGMII only four lines are combined.
+
+Similarly to QSGMII, this interface mode might also need
+special handling within the MAC driver.
+
+It is commonly used by Qualcomm with their QCA807x PHY series and
+modern WiSoCs.
+
+Add definitions for the PHY layer to allow expressing this type
+of connection between the MAC and PHY.
+
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ Documentation/networking/phy.rst | 4 ++++
+ drivers/net/phy/phy-core.c | 2 ++
+ drivers/net/phy/phylink.c | 3 +++
+ include/linux/phy.h | 4 ++++
+ 4 files changed, 13 insertions(+)
+
+--- a/Documentation/networking/phy.rst
++++ b/Documentation/networking/phy.rst
+@@ -323,6 +323,10 @@ Some of the interface modes are describe
+ contrast with the 1000BASE-X phy mode used for Clause 38 and 39 PMDs, this
+ interface mode has different autonegotiation and only supports full duplex.
+
++``PHY_INTERFACE_MODE_PSGMII``
++ This is the Penta SGMII mode; it is similar to QSGMII but combines 5
++ SGMII lines into a single link, compared to 4 on QSGMII.
++
+ Pause frames / flow control
+ ===========================
+
+--- a/drivers/net/phy/phy-core.c
++++ b/drivers/net/phy/phy-core.c
+@@ -140,6 +140,8 @@ int phy_interface_num_ports(phy_interfac
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return 4;
++ case PHY_INTERFACE_MODE_PSGMII:
++ return 5;
+ case PHY_INTERFACE_MODE_MAX:
+ WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
+ return 0;
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -187,6 +187,7 @@ static int phylink_interface_max_speed(p
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
++ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+@@ -448,6 +449,7 @@ unsigned long phylink_get_capabilities(p
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
++ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+@@ -814,6 +816,7 @@ static int phylink_parse_mode(struct phy
+
+ switch (pl->link_config.interface) {
+ case PHY_INTERFACE_MODE_SGMII:
++ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -104,6 +104,7 @@ extern const int phy_10gbit_features_arr
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
++ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
+@@ -141,6 +142,7 @@ typedef enum {
+ PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_XLGMII,
+ PHY_INTERFACE_MODE_MOCA,
++ PHY_INTERFACE_MODE_PSGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
+@@ -248,6 +250,8 @@ static inline const char *phy_modes(phy_
+ return "xlgmii";
+ case PHY_INTERFACE_MODE_MOCA:
+ return "moca";
++ case PHY_INTERFACE_MODE_PSGMII:
++ return "psgmii";
+ case PHY_INTERFACE_MODE_QSGMII:
+ return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
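
With the new mode in place, a MAC driver that uses phylink can advertise
PSGMII the same way it advertises SGMII or QSGMII. The following is a hedged
sketch, not taken from any of the patches; example_setup_phylink_caps() is a
hypothetical helper.

#include <linux/phy.h>
#include <linux/phylink.h>

static void example_setup_phylink_caps(struct phylink_config *config)
{
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	__set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII, config->supported_interfaces);
	/* The new mode introduced by the patch above */
	__set_bit(PHY_INTERFACE_MODE_PSGMII, config->supported_interfaces);
}

Since phy_modes(PHY_INTERFACE_MODE_PSGMII) now returns "psgmii", the mode can
also be selected from the device tree with phy-mode = "psgmii".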
+++ /dev/null
-From 53cac0823f86c39eb4b00e2c9a7b2483a4182008 Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Mon, 14 Aug 2023 12:37:58 +0200
-Subject: [PATCH 1/2] dt-bindings: clock: qcom: ipq4019: add missing networking
- resets
-
-Add bindings for the missing networking resets found in IPQ4019 GCC.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- include/dt-bindings/clock/qcom,gcc-ipq4019.h | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
-+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
-@@ -165,5 +165,11 @@
- #define GCC_QDSS_BCR 69
- #define GCC_MPM_BCR 70
- #define GCC_SPDM_BCR 71
-+#define ESS_MAC1_ARES 72
-+#define ESS_MAC2_ARES 73
-+#define ESS_MAC3_ARES 74
-+#define ESS_MAC4_ARES 75
-+#define ESS_MAC5_ARES 76
-+#define ESS_PSGMII_ARES 77
-
- #endif
+++ /dev/null
-From 6038ba75e2aa8e57d4eaf20a90c8061c43b1117f Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Mon, 14 Aug 2023 12:39:04 +0200
-Subject: [PATCH 2/2] clk: qcom: gcc-ipq4019: add missing networking resets
-
-IPQ4019 has more networking related resets that will be required for future
-wired networking support, so lets add them.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- drivers/clk/qcom/gcc-ipq4019.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/drivers/clk/qcom/gcc-ipq4019.c
-+++ b/drivers/clk/qcom/gcc-ipq4019.c
-@@ -1707,6 +1707,12 @@ static const struct qcom_reset_map gcc_i
- [GCC_TCSR_BCR] = {0x22000, 0},
- [GCC_MPM_BCR] = {0x24000, 0},
- [GCC_SPDM_BCR] = {0x25000, 0},
-+ [ESS_MAC1_ARES] = {0x1200C, 0},
-+ [ESS_MAC2_ARES] = {0x1200C, 1},
-+ [ESS_MAC3_ARES] = {0x1200C, 2},
-+ [ESS_MAC4_ARES] = {0x1200C, 3},
-+ [ESS_MAC5_ARES] = {0x1200C, 4},
-+ [ESS_PSGMII_ARES] = {0x1200C, 5},
- };
-
- static const struct regmap_config gcc_ipq4019_regmap_config = {
--- /dev/null
+From 76e25c1f46456416ba5358be8a0677f1ab8196b6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:48 +0100
+Subject: [PATCH] net: ipqess: introduce the Qualcomm IPQESS driver
+
+The Qualcomm IPQESS controller is a simple 1G Ethernet controller found
+on the IPQ4019 chip. This controller has one specificity: the IPQ4019
+platform that includes it also has an internal switch, based on the
+QCA8K IP.
+
+It is connected to that switch through an internal link and doesn't
+directly expose any external interface, hence it only supports
+PHY_INTERFACE_MODE_INTERNAL for now.
+
+It has 16 RX and TX queues, with a very basic RSS fanout configured at
+init time.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ MAINTAINERS | 7 +
+ drivers/net/ethernet/qualcomm/Kconfig | 11 +
+ drivers/net/ethernet/qualcomm/Makefile | 2 +
+ drivers/net/ethernet/qualcomm/ipqess/Makefile | 8 +
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 1246 +++++++++++++++++
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.h | 518 +++++++
+ .../ethernet/qualcomm/ipqess/ipqess_ethtool.c | 164 +++
+ 7 files changed, 1956 insertions(+)
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/Makefile
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+ create mode 100644 drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -17064,6 +17064,13 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/qualcomm/emac/
+
++QUALCOMM IPQESS ETHERNET DRIVER
++M: Maxime Chevallier <maxime.chevallier@bootlin.com>
++L: netdev@vger.kernel.org
++S: Maintained
++F: Documentation/devicetree/bindings/net/qcom,ipq4019-ess-edma.yaml
++F: drivers/net/ethernet/qualcomm/ipqess/
++
+ QUALCOMM ETHQOS ETHERNET DRIVER
+ M: Vinod Koul <vkoul@kernel.org>
+ R: Bhupesh Sharma <bhupesh.sharma@linaro.org>
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -60,6 +60,17 @@ config QCOM_EMAC
+ low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
+ Precision Clock Synchronization Protocol.
+
++config QCOM_IPQ4019_ESS_EDMA
++ tristate "Qualcomm Atheros IPQ4019 ESS EDMA support"
++ depends on (OF && ARCH_QCOM) || COMPILE_TEST
++ select PHYLINK
++ help
++ This driver supports the Qualcomm Atheros IPQ40xx built-in
++ ESS EDMA ethernet controller.
++
++ To compile this driver as a module, choose M here: the
++ module will be called ipqess.
++
+ source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+
+ endif # NET_VENDOR_QUALCOMM
+--- a/drivers/net/ethernet/qualcomm/Makefile
++++ b/drivers/net/ethernet/qualcomm/Makefile
+@@ -11,4 +11,6 @@ qcauart-objs := qca_uart.o
+
+ obj-y += emac/
+
++obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipqess/
++
+ obj-$(CONFIG_RMNET) += rmnet/
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/Makefile
+@@ -0,0 +1,8 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# Makefile for the IPQ ESS driver
++#
++
++obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipq_ess.o
++
++ipq_ess-objs := ipqess.o ipqess_ethtool.o
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -0,0 +1,1246 @@
++// SPDX-License-Identifier: GPL-2.0 OR ISC
++/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
++ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#include <linux/bitfield.h>
++#include <linux/clk.h>
++#include <linux/if_vlan.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_mdio.h>
++#include <linux/of_net.h>
++#include <linux/phylink.h>
++#include <linux/platform_device.h>
++#include <linux/reset.h>
++#include <linux/skbuff.h>
++#include <linux/vmalloc.h>
++#include <net/checksum.h>
++#include <net/ip6_checksum.h>
++
++#include "ipqess.h"
++
++#define IPQESS_RRD_SIZE 16
++#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
++#define IPQESS_TX_DMA_BUF_LEN 0x3fff
++
++static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
++{
++ writel(val, ess->hw_addr + reg);
++}
++
++static u32 ipqess_r32(struct ipqess *ess, u16 reg)
++{
++ return readl(ess->hw_addr + reg);
++}
++
++static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
++{
++ u32 _val = ipqess_r32(ess, reg);
++
++ _val &= ~mask;
++ _val |= val;
++
++ ipqess_w32(ess, reg, _val);
++}
++
++void ipqess_update_hw_stats(struct ipqess *ess)
++{
++ u32 *p;
++ u32 stat;
++ int i;
++
++ lockdep_assert_held(&ess->stats_lock);
++
++ p = (u32 *)&ess->ipqess_stats;
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
++ *p += stat;
++ p++;
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
++ *p += stat;
++ p++;
++ }
++}
++
++static int ipqess_tx_ring_alloc(struct ipqess *ess)
++{
++ struct device *dev = &ess->pdev->dev;
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
++ size_t size;
++ u32 idx;
++
++ tx_ring->ess = ess;
++ tx_ring->ring_id = i;
++ tx_ring->idx = i * 4;
++ tx_ring->count = IPQESS_TX_RING_SIZE;
++ tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
++
++ size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
++ tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
++ if (!tx_ring->buf)
++ return -ENOMEM;
++
++ size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
++ tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
++ GFP_KERNEL);
++ if (!tx_ring->hw_desc)
++ return -ENOMEM;
++
++ ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
++ (u32)tx_ring->dma);
++
++ idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
++ idx &= 0xffff;
++ tx_ring->head = idx;
++ tx_ring->tail = idx;
++
++ ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
++ idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
++ ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
++ }
++
++ return 0;
++}
++
++static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
++{
++ int len = 0;
++
++ if (buf->flags & IPQESS_DESC_SINGLE)
++ dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
++ else if (buf->flags & IPQESS_DESC_PAGE)
++ dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
++
++ if (buf->flags & IPQESS_DESC_LAST) {
++ len = buf->skb->len;
++ dev_kfree_skb_any(buf->skb);
++ }
++
++ buf->flags = 0;
++
++ return len;
++}
++
++static void ipqess_tx_ring_free(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ if (ess->tx_ring[i].hw_desc)
++ continue;
++
++ for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
++ struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
++
++ ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
++ }
++
++ ess->tx_ring[i].buf = NULL;
++ }
++}
++
++static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
++ struct ipqess_rx_ring *rx_ring)
++{
++ memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
++
++ buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
++ IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
++ dev_kfree_skb_any(buf->skb);
++ buf->skb = NULL;
++ return -EFAULT;
++ }
++
++ buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
++ rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
++ rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
++
++ ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
++ (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
++ IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
++
++ return 0;
++}
++
++/* locking is handled by the caller */
++static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
++{
++ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
++
++ buf->skb = napi_alloc_skb(&rx_ring->napi_rx, IPQESS_RX_HEAD_BUFF_SIZE);
++ if (!buf->skb)
++ return -ENOMEM;
++
++ return ipqess_rx_buf_prepare(buf, rx_ring);
++}
++
++static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
++{
++ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
++
++ buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
++ IPQESS_RX_HEAD_BUFF_SIZE);
++
++ if (!buf->skb)
++ return -ENOMEM;
++
++ return ipqess_rx_buf_prepare(buf, rx_ring);
++}
++
++static void ipqess_refill_work(struct work_struct *work)
++{
++ struct ipqess_rx_ring_refill *rx_refill = container_of(work,
++ struct ipqess_rx_ring_refill, refill_work);
++ struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
++ int refill = 0;
++
++ /* don't let this loop by accident. */
++ while (atomic_dec_and_test(&rx_ring->refill_count)) {
++ napi_disable(&rx_ring->napi_rx);
++ if (ipqess_rx_buf_alloc(rx_ring)) {
++ refill++;
++ dev_dbg(rx_ring->ppdev,
++ "Not all buffers were reallocated");
++ }
++ napi_enable(&rx_ring->napi_rx);
++ }
++
++ if (atomic_add_return(refill, &rx_ring->refill_count))
++ schedule_work(&rx_refill->refill_work);
++}
++
++static int ipqess_rx_ring_alloc(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ ess->rx_ring[i].ess = ess;
++ ess->rx_ring[i].ppdev = &ess->pdev->dev;
++ ess->rx_ring[i].ring_id = i;
++ ess->rx_ring[i].idx = i * 2;
++
++ ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
++ sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
++ GFP_KERNEL);
++
++ if (!ess->rx_ring[i].buf)
++ return -ENOMEM;
++
++ ess->rx_ring[i].hw_desc =
++ dmam_alloc_coherent(&ess->pdev->dev,
++ sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
++ &ess->rx_ring[i].dma, GFP_KERNEL);
++
++ if (!ess->rx_ring[i].hw_desc)
++ return -ENOMEM;
++
++ for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
++ if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
++ return -ENOMEM;
++
++ ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
++ INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
++
++ ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
++ (u32)(ess->rx_ring[i].dma));
++ }
++
++ ipqess_w32(ess, IPQESS_REG_RX_DESC0,
++ (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
++ (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
++
++ return 0;
++}
++
++static void ipqess_rx_ring_free(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int j;
++
++ cancel_work_sync(&ess->rx_refill[i].refill_work);
++ atomic_set(&ess->rx_ring[i].refill_count, 0);
++
++ for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
++ dma_unmap_single(&ess->pdev->dev,
++ ess->rx_ring[i].buf[j].dma,
++ ess->rx_ring[i].buf[j].length,
++ DMA_FROM_DEVICE);
++ dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
++ }
++ }
++}
++
++static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ spin_lock(&ess->stats_lock);
++ ipqess_update_hw_stats(ess);
++ spin_unlock(&ess->stats_lock);
++
++ return &ess->stats;
++}
++
++static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
++{
++ u32 length = 0, num_desc, tail, rx_ring_tail;
++ int done = 0;
++
++ rx_ring_tail = rx_ring->tail;
++
++ tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
++ tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
++ tail &= IPQESS_RFD_CONS_IDX_MASK;
++
++ while (done < budget) {
++ struct ipqess_rx_desc *rd;
++ struct sk_buff *skb;
++
++ if (rx_ring_tail == tail)
++ break;
++
++ dma_unmap_single(rx_ring->ppdev,
++ rx_ring->buf[rx_ring_tail].dma,
++ rx_ring->buf[rx_ring_tail].length,
++ DMA_FROM_DEVICE);
++
++ skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
++ rd = (struct ipqess_rx_desc *)skb->data;
++ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
++
++ /* Check if RRD is valid */
++ if (!(rd->rrd7 & cpu_to_le16(IPQESS_RRD_DESC_VALID))) {
++ num_desc = 1;
++ dev_kfree_skb_any(skb);
++ goto skip;
++ }
++
++ num_desc = le16_to_cpu(rd->rrd1) & IPQESS_RRD_NUM_RFD_MASK;
++ length = le16_to_cpu(rd->rrd6) & IPQESS_RRD_PKT_SIZE_MASK;
++
++ skb_reserve(skb, IPQESS_RRD_SIZE);
++ if (num_desc > 1) {
++ struct sk_buff *skb_prev = NULL;
++ int size_remaining;
++ int i;
++
++ skb->data_len = 0;
++ skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
++ skb->len = length;
++ skb->truesize = length;
++ size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
++
++ for (i = 1; i < num_desc; i++) {
++ struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
++
++ dma_unmap_single(rx_ring->ppdev,
++ rx_ring->buf[rx_ring_tail].dma,
++ rx_ring->buf[rx_ring_tail].length,
++ DMA_FROM_DEVICE);
++
++ skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
++ if (skb_prev)
++ skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
++ else
++ skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
++ skb_prev = rx_ring->buf[rx_ring_tail].skb;
++ rx_ring->buf[rx_ring_tail].skb->next = NULL;
++
++ skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
++ size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
++
++ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
++ }
++
++ } else {
++ skb_put(skb, length);
++ }
++
++ skb->dev = rx_ring->ess->netdev;
++ skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
++ skb_record_rx_queue(skb, rx_ring->ring_id);
++
++ if (rd->rrd6 & cpu_to_le16(IPQESS_RRD_CSUM_FAIL_MASK))
++ skb_checksum_none_assert(skb);
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ if (rd->rrd7 & cpu_to_le16(IPQESS_RRD_CVLAN))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
++ le16_to_cpu(rd->rrd4));
++ else if (rd->rrd1 & cpu_to_le16(IPQESS_RRD_SVLAN))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
++ le16_to_cpu(rd->rrd4));
++
++ napi_gro_receive(&rx_ring->napi_rx, skb);
++
++ rx_ring->ess->stats.rx_packets++;
++ rx_ring->ess->stats.rx_bytes += length;
++
++ done++;
++skip:
++
++ num_desc += atomic_xchg(&rx_ring->refill_count, 0);
++ while (num_desc) {
++ if (ipqess_rx_buf_alloc_napi(rx_ring)) {
++ num_desc = atomic_add_return(num_desc,
++ &rx_ring->refill_count);
++ if (num_desc >= DIV_ROUND_UP(IPQESS_RX_RING_SIZE * 4, 7))
++ schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
++ break;
++ }
++ num_desc--;
++ }
++ }
++
++ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
++ rx_ring_tail);
++ rx_ring->tail = rx_ring_tail;
++
++ return done;
++}
++
++static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
++{
++ int total = 0, ret;
++ int done = 0;
++ u32 tail;
++
++ tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
++ tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
++ tail &= IPQESS_TPD_CONS_IDX_MASK;
++
++ do {
++ ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
++ &tx_ring->buf[tx_ring->tail]);
++ tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++
++ total += ret;
++ } while ((++done < budget) && (tx_ring->tail != tail));
++
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
++ tx_ring->tail);
++
++ if (netif_tx_queue_stopped(tx_ring->nq)) {
++ netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
++ tx_ring->idx);
++ netif_tx_wake_queue(tx_ring->nq);
++ }
++
++ netdev_tx_completed_queue(tx_ring->nq, done, total);
++
++ return done;
++}
++
++static int ipqess_tx_napi(struct napi_struct *napi, int budget)
++{
++ struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
++ napi_tx);
++ int work_done = 0;
++ u32 tx_status;
++
++ tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
++ tx_status &= BIT(tx_ring->idx);
++
++ work_done = ipqess_tx_complete(tx_ring, budget);
++
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
++
++ if (likely(work_done < budget)) {
++ if (napi_complete_done(napi, work_done))
++ ipqess_w32(tx_ring->ess,
++ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
++ }
++
++ return work_done;
++}
++
++static int ipqess_rx_napi(struct napi_struct *napi, int budget)
++{
++ struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
++ napi_rx);
++ struct ipqess *ess = rx_ring->ess;
++ u32 rx_mask = BIT(rx_ring->idx);
++ int remaining_budget = budget;
++ int rx_done;
++ u32 status;
++
++ do {
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
++ rx_done = ipqess_rx_poll(rx_ring, remaining_budget);
++ remaining_budget -= rx_done;
++
++ status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
++ } while (remaining_budget > 0 && (status & rx_mask));
++
++ if (remaining_budget <= 0)
++ return budget;
++
++ if (napi_complete_done(napi, budget - remaining_budget))
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
++
++ return budget - remaining_budget;
++}
++
++static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
++{
++ struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
++
++ if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
++ __napi_schedule(&tx_ring->napi_tx);
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
++ 0x0);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
++{
++ struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;
++
++ if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
++ __napi_schedule(&rx_ring->napi_rx);
++ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
++ 0x0);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static void ipqess_irq_enable(struct ipqess *ess)
++{
++ int i;
++
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
++ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
++ }
++}
++
++static void ipqess_irq_disable(struct ipqess *ess)
++{
++ int i;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
++ }
++}
++
++static int __init ipqess_init(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct device_node *of_node = ess->pdev->dev.of_node;
++ int ret;
++
++ ret = of_get_ethdev_address(of_node, netdev);
++ if (ret)
++ eth_hw_addr_random(netdev);
++
++ return phylink_of_phy_connect(ess->phylink, of_node, 0);
++}
++
++static void ipqess_uninit(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ phylink_disconnect_phy(ess->phylink);
++}
++
++static int ipqess_open(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ int i, err;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int qid;
++
++ qid = ess->tx_ring[i].idx;
++ err = devm_request_irq(&netdev->dev, ess->tx_irq[qid],
++ ipqess_interrupt_tx, 0,
++ ess->tx_irq_names[qid],
++ &ess->tx_ring[i]);
++ if (err)
++ return err;
++
++ qid = ess->rx_ring[i].idx;
++ err = devm_request_irq(&netdev->dev, ess->rx_irq[qid],
++ ipqess_interrupt_rx, 0,
++ ess->rx_irq_names[qid],
++ &ess->rx_ring[i]);
++ if (err)
++ return err;
++
++ napi_enable(&ess->tx_ring[i].napi_tx);
++ napi_enable(&ess->rx_ring[i].napi_rx);
++ }
++
++ ipqess_irq_enable(ess);
++ phylink_start(ess->phylink);
++ netif_tx_start_all_queues(netdev);
++
++ return 0;
++}
++
++static int ipqess_stop(struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ int i;
++
++ netif_tx_stop_all_queues(netdev);
++ phylink_stop(ess->phylink);
++ ipqess_irq_disable(ess);
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ napi_disable(&ess->tx_ring[i].napi_tx);
++ napi_disable(&ess->rx_ring[i].napi_rx);
++ }
++
++ return 0;
++}
++
++static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_mii_ioctl(ess->phylink, ifr, cmd);
++}
++
++static u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
++{
++ u16 count = 0;
++
++ if (tx_ring->tail <= tx_ring->head)
++ count = IPQESS_TX_RING_SIZE;
++
++ count += tx_ring->tail - tx_ring->head - 1;
++
++ return count;
++}
++
++static int ipqess_cal_txd_req(struct sk_buff *skb)
++{
++ int tpds;
++
++ /* one TPD for the header, and one for each fragment */
++ tpds = 1 + skb_shinfo(skb)->nr_frags;
++ if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
++ /* for LSOv2 one extra TPD is needed */
++ tpds++;
++ }
++
++ return tpds;
++}
++
++static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
++ struct ipqess_tx_desc *desc)
++{
++ return &tx_ring->buf[desc - tx_ring->hw_desc];
++}
++
++static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
++{
++ struct ipqess_tx_desc *desc;
++
++ desc = &tx_ring->hw_desc[tx_ring->head];
++ tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
++
++ return desc;
++}
++
++static void ipqess_rollback_tx(struct ipqess *eth,
++ struct ipqess_tx_desc *first_desc, int ring_id)
++{
++ struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
++ struct ipqess_tx_desc *desc = NULL;
++ struct ipqess_buf *buf;
++ u16 start_index, index;
++
++ start_index = first_desc - tx_ring->hw_desc;
++
++ index = start_index;
++ while (index != tx_ring->head) {
++ desc = &tx_ring->hw_desc[index];
++ buf = &tx_ring->buf[index];
++ ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
++ memset(desc, 0, sizeof(*desc));
++ if (++index == tx_ring->count)
++ index = 0;
++ }
++ tx_ring->head = start_index;
++}
++
++static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring,
++ struct sk_buff *skb)
++{
++ struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
++ u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
++ struct platform_device *pdev = tx_ring->ess->pdev;
++ struct ipqess_buf *buf = NULL;
++ u16 len;
++ int i;
++
++ if (skb_is_gso(skb)) {
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
++ lso_word1 |= IPQESS_TPD_IPV4_EN;
++ ip_hdr(skb)->check = 0;
++ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
++ ip_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
++ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
++ lso_word1 |= IPQESS_TPD_LSO_V2_EN;
++ ipv6_hdr(skb)->payload_len = 0;
++ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++ &ipv6_hdr(skb)->daddr,
++ 0, IPPROTO_TCP, 0);
++ }
++
++ lso_word1 |= IPQESS_TPD_LSO_EN |
++ ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) <<
++ IPQESS_TPD_MSS_SHIFT) |
++ (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
++ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
++ u8 css, cso;
++
++ cso = skb_checksum_start_offset(skb);
++ css = cso + skb->csum_offset;
++
++ word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
++ word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
++ word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
++ }
++
++ if (skb_vlan_tag_present(skb)) {
++ switch (skb->vlan_proto) {
++ case htons(ETH_P_8021Q):
++ word3 |= BIT(IPQESS_TX_INS_CVLAN);
++ word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
++ break;
++ case htons(ETH_P_8021AD):
++ word1 |= BIT(IPQESS_TX_INS_SVLAN);
++ svlan_tag = skb_vlan_tag_get(skb);
++ break;
++ default:
++ dev_err(&pdev->dev, "no ctag or stag present\n");
++ goto vlan_tag_error;
++ }
++ }
++
++ if (eth_type_vlan(skb->protocol))
++ word1 |= IPQESS_TPD_VLAN_TAGGED;
++
++ if (skb->protocol == htons(ETH_P_PPP_SES))
++ word1 |= IPQESS_TPD_PPPOE_EN;
++
++ len = skb_headlen(skb);
++
++ first_desc = ipqess_tx_desc_next(tx_ring);
++ desc = first_desc;
++ if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
++ desc->addr = cpu_to_le32(skb->len);
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word3 = cpu_to_le32(word3);
++ desc = ipqess_tx_desc_next(tx_ring);
++ }
++
++ buf = ipqess_get_tx_buffer(tx_ring, desc);
++ buf->length = len;
++ buf->dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&pdev->dev, buf->dma))
++ goto dma_error;
++
++ desc->addr = cpu_to_le32(buf->dma);
++ desc->len = cpu_to_le16(len);
++
++ buf->flags |= IPQESS_DESC_SINGLE;
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word3 = cpu_to_le32(word3);
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ len = skb_frag_size(frag);
++ desc = ipqess_tx_desc_next(tx_ring);
++ buf = ipqess_get_tx_buffer(tx_ring, desc);
++ buf->length = len;
++ buf->flags |= IPQESS_DESC_PAGE;
++ buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
++ DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&pdev->dev, buf->dma))
++ goto dma_error;
++
++ desc->addr = cpu_to_le32(buf->dma);
++ desc->len = cpu_to_le16(len);
++ desc->svlan_tag = cpu_to_le16(svlan_tag);
++ desc->word1 = cpu_to_le32(word1 | lso_word1);
++ desc->word3 = cpu_to_le32(word3);
++ }
++ desc->word1 |= cpu_to_le32(1 << IPQESS_TPD_EOP_SHIFT);
++ buf->skb = skb;
++ buf->flags |= IPQESS_DESC_LAST;
++
++ return 0;
++
++dma_error:
++ ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
++ dev_err(&pdev->dev, "TX DMA map failed\n");
++
++vlan_tag_error:
++ return -ENOMEM;
++}
++
++static void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
++{
++ /* Ensure that all TPDs have been written completely */
++ dma_wmb();
++
++ /* update software producer index */
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
++ tx_ring->head);
++}
++
++static netdev_tx_t ipqess_xmit(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct ipqess_tx_ring *tx_ring;
++ int avail;
++ int tx_num;
++ int ret;
++
++ tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
++ tx_num = ipqess_cal_txd_req(skb);
++ avail = ipqess_tx_desc_available(tx_ring);
++ if (avail < tx_num) {
++ netdev_dbg(netdev,
++ "stopping tx queue %d, avail=%d req=%d im=%x\n",
++ tx_ring->idx, avail, tx_num,
++ ipqess_r32(tx_ring->ess,
++ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
++ netif_tx_stop_queue(tx_ring->nq);
++ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
++ ipqess_kick_tx(tx_ring);
++ return NETDEV_TX_BUSY;
++ }
++
++ ret = ipqess_tx_map_and_fill(tx_ring, skb);
++ if (ret) {
++ dev_kfree_skb_any(skb);
++ ess->stats.tx_errors++;
++ goto err_out;
++ }
++
++ ess->stats.tx_packets++;
++ ess->stats.tx_bytes += skb->len;
++ netdev_tx_sent_queue(tx_ring->nq, skb->len);
++
++ if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
++ ipqess_kick_tx(tx_ring);
++
++err_out:
++ return NETDEV_TX_OK;
++}
++
++static int ipqess_set_mac_address(struct net_device *netdev, void *p)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ const char *macaddr = netdev->dev_addr;
++ int ret = eth_mac_addr(netdev, p);
++
++ if (ret)
++ return ret;
++
++ ipqess_w32(ess, IPQESS_REG_MAC_CTRL1, (macaddr[0] << 8) | macaddr[1]);
++ ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
++ (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) |
++ macaddr[5]);
++
++ return 0;
++}
++
++static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
++
++ netdev_warn(netdev, "TX timeout on queue %d\n", tr->idx);
++}
++
++static const struct net_device_ops ipqess_axi_netdev_ops = {
++ .ndo_init = ipqess_init,
++ .ndo_uninit = ipqess_uninit,
++ .ndo_open = ipqess_open,
++ .ndo_stop = ipqess_stop,
++ .ndo_do_ioctl = ipqess_do_ioctl,
++ .ndo_start_xmit = ipqess_xmit,
++ .ndo_get_stats = ipqess_get_stats,
++ .ndo_set_mac_address = ipqess_set_mac_address,
++ .ndo_tx_timeout = ipqess_tx_timeout,
++};
++
++static void ipqess_hw_stop(struct ipqess *ess)
++{
++ int i;
++
++ /* disable all RX queue IRQs */
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
++ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
++
++ /* disable all TX queue IRQs */
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
++ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
++
++ /* disable all other IRQs */
++ ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
++ ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
++
++ /* clear the IRQ status registers */
++ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
++ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
++ ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
++ ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
++ ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
++
++ /* disable RX and TX queues */
++ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
++ ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
++}
++
++static int ipqess_hw_init(struct ipqess *ess)
++{
++ int i, err;
++ u32 tmp;
++
++ ipqess_hw_stop(ess);
++
++ ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
++ IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
++ IPQESS_REG_INTR_CTRL);
++
++ /* enable IRQ delay slot */
++ ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
++ (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
++ (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
++
++ /* Set Customer and Service VLAN TPIDs */
++ ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
++ (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
++ (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
++
++ /* Configure the TX Queue bursting */
++ ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
++ (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
++ (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
++ IPQESS_TXQ_CTRL_TPD_BURST_EN);
++
++ /* Set RSS type */
++ ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
++ IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
++ IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
++ IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
++
++ /* Set RFD ring burst and threshold */
++ ipqess_w32(ess, IPQESS_REG_RX_DESC1,
++ (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
++ (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
++ (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
++
++ /* Set Rx FIFO
++ * - threshold to start to DMA data to host
++ */
++ ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
++ IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
++
++ err = ipqess_rx_ring_alloc(ess);
++ if (err)
++ return err;
++
++ err = ipqess_tx_ring_alloc(ess);
++ if (err)
++ goto err_rx_ring_free;
++
++ /* Load all of ring base addresses above into the dma engine */
++ ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT), IPQESS_REG_TX_SRAM_PART);
++
++ /* Disable TX FIFO low watermark and high watermark */
++ ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
++
++ /* Configure RSS indirection table.
++ * 128 hash will be configured in the following
++ * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
++ * and so on
++ */
++ for (i = 0; i < IPQESS_NUM_IDT; i++)
++ ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
++
++ /* Configure load balance mapping table.
++ * 4 table entry will be configured according to the
++ * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
++ * respectively.
++ */
++ ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
++
++ /* Configure Virtual queue for Tx rings */
++ ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
++ ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
++
++ /* Configure Max AXI Burst write size to 128 bytes */
++ ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
++ IPQESS_AXIW_MAXWRSIZE_VALUE);
++
++ /* Enable TX queues */
++ ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
++
++ /* Enable RX queues */
++ tmp = 0;
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
++ tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
++
++ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
++
++ return 0;
++
++err_rx_ring_free:
++
++ ipqess_rx_ring_free(ess);
++ return err;
++}
++
++static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static void ipqess_mac_link_down(struct phylink_config *config,
++ unsigned int mode,
++ phy_interface_t interface)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static void ipqess_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy, unsigned int mode,
++ phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
++{
++ /* Nothing to do, use fixed Internal mode */
++}
++
++static struct phylink_mac_ops ipqess_phylink_mac_ops = {
++ .validate = phylink_generic_validate,
++ .mac_config = ipqess_mac_config,
++ .mac_link_up = ipqess_mac_link_up,
++ .mac_link_down = ipqess_mac_link_down,
++};
++
++static void ipqess_reset(struct ipqess *ess)
++{
++ reset_control_assert(ess->ess_rst);
++
++ mdelay(10);
++
++ reset_control_deassert(ess->ess_rst);
++
++ /* Waiting for all inner tables to be flushed and reinitialized.
++ * This takes between 5 and 10 ms
++ */
++
++ mdelay(10);
++}
++
++static int ipqess_axi_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct net_device *netdev;
++ phy_interface_t phy_mode;
++ struct ipqess *ess;
++ int i, err = 0;
++
++ netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(*ess),
++ IPQESS_NETDEV_QUEUES,
++ IPQESS_NETDEV_QUEUES);
++ if (!netdev)
++ return -ENOMEM;
++
++ ess = netdev_priv(netdev);
++ ess->netdev = netdev;
++ ess->pdev = pdev;
++ spin_lock_init(&ess->stats_lock);
++ SET_NETDEV_DEV(netdev, &pdev->dev);
++ platform_set_drvdata(pdev, netdev);
++
++ ess->hw_addr = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
++ if (IS_ERR(ess->hw_addr))
++ return PTR_ERR(ess->hw_addr);
++
++ err = of_get_phy_mode(np, &phy_mode);
++ if (err) {
++ dev_err(&pdev->dev, "incorrect phy-mode\n");
++ return err;
++ }
++
++ ess->ess_clk = devm_clk_get(&pdev->dev, NULL);
++ if (!IS_ERR(ess->ess_clk))
++ clk_prepare_enable(ess->ess_clk);
++
++ ess->ess_rst = devm_reset_control_get(&pdev->dev, NULL);
++ if (IS_ERR(ess->ess_rst))
++ goto err_clk;
++
++ ipqess_reset(ess);
++
++ ess->phylink_config.dev = &netdev->dev;
++ ess->phylink_config.type = PHYLINK_NETDEV;
++ ess->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
++ MAC_100 | MAC_1000FD;
++
++ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
++ ess->phylink_config.supported_interfaces);
++
++ ess->phylink = phylink_create(&ess->phylink_config,
++ of_fwnode_handle(np), phy_mode,
++ &ipqess_phylink_mac_ops);
++ if (IS_ERR(ess->phylink)) {
++ err = PTR_ERR(ess->phylink);
++ goto err_clk;
++ }
++
++ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
++ ess->tx_irq[i] = platform_get_irq(pdev, i);
++ scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
++ "%s:txq%d", pdev->name, i);
++ }
++
++ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
++ ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
++ scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
++ "%s:rxq%d", pdev->name, i);
++ }
++
++ netdev->netdev_ops = &ipqess_axi_netdev_ops;
++ netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_RX |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_SG;
++ /* feature change is not supported yet */
++ netdev->hw_features = 0;
++ netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
++ NETIF_F_TSO |
++ NETIF_F_GRO;
++ netdev->watchdog_timeo = 5 * HZ;
++ netdev->base_addr = (u32)ess->hw_addr;
++ netdev->max_mtu = 9000;
++ netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
++
++ ipqess_set_ethtool_ops(netdev);
++
++ err = ipqess_hw_init(ess);
++ if (err)
++ goto err_phylink;
++
++ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ netif_napi_add_tx(netdev, &ess->tx_ring[i].napi_tx, ipqess_tx_napi);
++ netif_napi_add(netdev, &ess->rx_ring[i].napi_rx, ipqess_rx_napi);
++ }
++
++ err = register_netdev(netdev);
++ if (err)
++ goto err_hw_stop;
++
++ return 0;
++
++err_hw_stop:
++ ipqess_hw_stop(ess);
++
++ ipqess_tx_ring_free(ess);
++ ipqess_rx_ring_free(ess);
++err_phylink:
++ phylink_destroy(ess->phylink);
++
++err_clk:
++ clk_disable_unprepare(ess->ess_clk);
++
++ return err;
++}
++
++static int ipqess_axi_remove(struct platform_device *pdev)
++{
++ const struct net_device *netdev = platform_get_drvdata(pdev);
++ struct ipqess *ess = netdev_priv(netdev);
++
++ unregister_netdev(ess->netdev);
++ ipqess_hw_stop(ess);
++
++ ipqess_tx_ring_free(ess);
++ ipqess_rx_ring_free(ess);
++
++ phylink_destroy(ess->phylink);
++ clk_disable_unprepare(ess->ess_clk);
++
++ return 0;
++}
++
++static const struct of_device_id ipqess_of_mtable[] = {
++ {.compatible = "qcom,ipq4019-ess-edma" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
++
++static struct platform_driver ipqess_axi_driver = {
++ .driver = {
++ .name = "ipqess-edma",
++ .of_match_table = ipqess_of_mtable,
++ },
++ .probe = ipqess_axi_probe,
++ .remove = ipqess_axi_remove,
++};
++
++module_platform_driver(ipqess_axi_driver);
++
++MODULE_AUTHOR("Qualcomm Atheros Inc");
++MODULE_AUTHOR("John Crispin <john@phrozen.org>");
++MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
++MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
++MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+@@ -0,0 +1,518 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR ISC) */
++/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
++ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#ifndef _IPQESS_H_
++#define _IPQESS_H_
++
++#define IPQESS_NETDEV_QUEUES 4
++
++#define IPQESS_TPD_EOP_SHIFT 31
++
++#define IPQESS_PORT_ID_SHIFT 12
++#define IPQESS_PORT_ID_MASK 0x7
++
++/* tpd word 3 bit 18-28 */
++#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
++
++#define IPQESS_TPD_FROM_CPU_SHIFT 25
++
++#define IPQESS_RX_RING_SIZE 128
++#define IPQESS_RX_HEAD_BUFF_SIZE 1540
++#define IPQESS_TX_RING_SIZE 128
++#define IPQESS_MAX_RX_QUEUE 8
++#define IPQESS_MAX_TX_QUEUE 16
++
++/* Configurations */
++#define IPQESS_INTR_CLEAR_TYPE 0
++#define IPQESS_INTR_SW_IDX_W_TYPE 0
++#define IPQESS_FIFO_THRESH_TYPE 0
++#define IPQESS_RSS_TYPE 0
++#define IPQESS_RX_IMT 0x0020
++#define IPQESS_TX_IMT 0x0050
++#define IPQESS_TPD_BURST 5
++#define IPQESS_TXF_BURST 0x100
++#define IPQESS_RFD_BURST 8
++#define IPQESS_RFD_THR 16
++#define IPQESS_RFD_LTHR 0
++
++/* Flags used in transmit direction */
++#define IPQESS_DESC_LAST 0x1
++#define IPQESS_DESC_SINGLE 0x2
++#define IPQESS_DESC_PAGE 0x4
++
++struct ipqess_statistics {
++ u32 tx_q0_pkt;
++ u32 tx_q1_pkt;
++ u32 tx_q2_pkt;
++ u32 tx_q3_pkt;
++ u32 tx_q4_pkt;
++ u32 tx_q5_pkt;
++ u32 tx_q6_pkt;
++ u32 tx_q7_pkt;
++ u32 tx_q8_pkt;
++ u32 tx_q9_pkt;
++ u32 tx_q10_pkt;
++ u32 tx_q11_pkt;
++ u32 tx_q12_pkt;
++ u32 tx_q13_pkt;
++ u32 tx_q14_pkt;
++ u32 tx_q15_pkt;
++ u32 tx_q0_byte;
++ u32 tx_q1_byte;
++ u32 tx_q2_byte;
++ u32 tx_q3_byte;
++ u32 tx_q4_byte;
++ u32 tx_q5_byte;
++ u32 tx_q6_byte;
++ u32 tx_q7_byte;
++ u32 tx_q8_byte;
++ u32 tx_q9_byte;
++ u32 tx_q10_byte;
++ u32 tx_q11_byte;
++ u32 tx_q12_byte;
++ u32 tx_q13_byte;
++ u32 tx_q14_byte;
++ u32 tx_q15_byte;
++ u32 rx_q0_pkt;
++ u32 rx_q1_pkt;
++ u32 rx_q2_pkt;
++ u32 rx_q3_pkt;
++ u32 rx_q4_pkt;
++ u32 rx_q5_pkt;
++ u32 rx_q6_pkt;
++ u32 rx_q7_pkt;
++ u32 rx_q0_byte;
++ u32 rx_q1_byte;
++ u32 rx_q2_byte;
++ u32 rx_q3_byte;
++ u32 rx_q4_byte;
++ u32 rx_q5_byte;
++ u32 rx_q6_byte;
++ u32 rx_q7_byte;
++ u32 tx_desc_error;
++};
++
++struct ipqess_tx_desc {
++ __le16 len;
++ __le16 svlan_tag;
++ __le32 word1;
++ __le32 addr;
++ __le32 word3;
++} __aligned(16) __packed;
++
++struct ipqess_rx_desc {
++ __le16 rrd0;
++ __le16 rrd1;
++ __le16 rrd2;
++ __le16 rrd3;
++ __le16 rrd4;
++ __le16 rrd5;
++ __le16 rrd6;
++ __le16 rrd7;
++} __aligned(16) __packed;
++
++struct ipqess_buf {
++ struct sk_buff *skb;
++ dma_addr_t dma;
++ u32 flags;
++ u16 length;
++};
++
++struct ipqess_tx_ring {
++ struct napi_struct napi_tx;
++ u32 idx;
++ int ring_id;
++ struct ipqess *ess;
++ struct netdev_queue *nq;
++ struct ipqess_tx_desc *hw_desc;
++ struct ipqess_buf *buf;
++ dma_addr_t dma;
++ u16 count;
++ u16 head;
++ u16 tail;
++};
++
++struct ipqess_rx_ring {
++ struct napi_struct napi_rx;
++ u32 idx;
++ int ring_id;
++ struct ipqess *ess;
++ struct device *ppdev;
++ struct ipqess_rx_desc **hw_desc;
++ struct ipqess_buf *buf;
++ dma_addr_t dma;
++ u16 head;
++ u16 tail;
++ atomic_t refill_count;
++};
++
++struct ipqess_rx_ring_refill {
++ struct ipqess_rx_ring *rx_ring;
++ struct work_struct refill_work;
++};
++
++#define IPQESS_IRQ_NAME_LEN 32
++
++struct ipqess {
++ struct net_device *netdev;
++ void __iomem *hw_addr;
++
++ struct clk *ess_clk;
++ struct reset_control *ess_rst;
++
++ struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
++
++ struct platform_device *pdev;
++ struct phylink *phylink;
++ struct phylink_config phylink_config;
++ struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
++
++ struct ipqess_statistics ipqess_stats;
++
++ /* Protects stats */
++ spinlock_t stats_lock;
++ struct net_device_stats stats;
++
++ struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
++ u32 tx_irq[IPQESS_MAX_TX_QUEUE];
++ char tx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
++ u32 rx_irq[IPQESS_MAX_RX_QUEUE];
++ char rx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
++};
++
++void ipqess_set_ethtool_ops(struct net_device *netdev);
++void ipqess_update_hw_stats(struct ipqess *ess);
++
++/* register definition */
++#define IPQESS_REG_MAS_CTRL 0x0
++#define IPQESS_REG_TIMEOUT_CTRL 0x004
++#define IPQESS_REG_DBG0 0x008
++#define IPQESS_REG_DBG1 0x00C
++#define IPQESS_REG_SW_CTRL0 0x100
++#define IPQESS_REG_SW_CTRL1 0x104
++
++/* Interrupt Status Register */
++#define IPQESS_REG_RX_ISR 0x200
++#define IPQESS_REG_TX_ISR 0x208
++#define IPQESS_REG_MISC_ISR 0x210
++#define IPQESS_REG_WOL_ISR 0x218
++
++#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << (x))
++
++#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
++#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
++#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
++#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
++#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
++
++#define IPQESS_WOL_ISR 0x00000001
++
++/* Interrupt Mask Register */
++#define IPQESS_REG_MISC_IMR 0x214
++#define IPQESS_REG_WOL_IMR 0x218
++
++#define IPQESS_RX_IMR_NORMAL_MASK 0x1
++#define IPQESS_TX_IMR_NORMAL_MASK 0x1
++#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
++#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
++
++/* Edma receive consumer index */
++#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
++
++/* Edma transmit consumer index */
++#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
++
++/* IRQ Moderator Initial Timer Register */
++#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
++#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
++#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
++#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
++
++/* Interrupt Control Register */
++#define IPQESS_REG_INTR_CTRL 0x284
++#define IPQESS_INTR_CLR_TYP_SHIFT 0
++#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
++#define IPQESS_INTR_CLEAR_TYPE_W1 0
++#define IPQESS_INTR_CLEAR_TYPE_R 1
++
++/* RX Interrupt Mask Register */
++#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
++
++/* TX Interrupt mask register */
++#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
++
++/* Load Ptr Register
++ * Software sets this bit after the initialization of the head and tail
++ */
++#define IPQESS_REG_TX_SRAM_PART 0x400
++#define IPQESS_LOAD_PTR_SHIFT 16
++
++/* TXQ Control Register */
++#define IPQESS_REG_TXQ_CTRL 0x404
++#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
++#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
++#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
++#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
++#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
++#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
++#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
++#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
++#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
++#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
++
++#define IPQESS_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
++#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
++#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
++#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
++#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
++
++/* WRR Control Register */
++#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
++#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
++#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
++#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
++
++/* Weight round robin(WRR), it takes queue as input, and computes
++ * starting bits where we need to write the weight for a particular
++ * queue
++ */
++#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
++
++/* Tx Descriptor Control Register */
++#define IPQESS_REG_TPD_RING_SIZE 0x41C
++#define IPQESS_TPD_RING_SIZE_SHIFT 0
++#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
++
++/* Transmit descriptor base address */
++#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
++
++/* TPD Index Register */
++#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
++
++#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
++#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
++#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
++#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
++#define IPQESS_TPD_PROD_IDX_SHIFT 0
++#define IPQESS_TPD_CONS_IDX_SHIFT 16
++
++/* TX Virtual Queue Mapping Control Register */
++#define IPQESS_REG_VQ_CTRL0 0x4A0
++#define IPQESS_REG_VQ_CTRL1 0x4A4
++
++/* Virtual QID shift, it takes queue as input, and computes
++ * Virtual QID position in virtual qid control register
++ */
++#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
++
++/* Virtual Queue Default Value */
++#define IPQESS_VQ_REG_VALUE 0x240240
++
++/* Tx side Port Interface Control Register */
++#define IPQESS_REG_PORT_CTRL 0x4A8
++#define IPQESS_PAD_EN_SHIFT 15
++
++/* Tx side VLAN Configuration Register */
++#define IPQESS_REG_VLAN_CFG 0x4AC
++
++#define IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT 0
++#define IPQESS_VLAN_CFG_SVLAN_TPID_MASK 0xffff
++#define IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT 16
++#define IPQESS_VLAN_CFG_CVLAN_TPID_MASK 0xffff
++
++#define IPQESS_TX_CVLAN 16
++#define IPQESS_TX_INS_CVLAN 17
++#define IPQESS_TX_CVLAN_TAG_SHIFT 0
++
++#define IPQESS_TX_SVLAN 14
++#define IPQESS_TX_INS_SVLAN 15
++#define IPQESS_TX_SVLAN_TAG_SHIFT 16
++
++/* Tx Queue Packet Statistic Register */
++#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
++
++#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
++
++/* Tx Queue Byte Statistic Register */
++#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
++
++/* Load Balance Based Ring Offset Register */
++#define IPQESS_REG_LB_RING 0x800
++#define IPQESS_LB_RING_ENTRY_MASK 0xff
++#define IPQESS_LB_RING_ID_MASK 0x7
++#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
++#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
++#define IPQESS_LB_RING_ID_OFFSET 0
++#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
++#define IPQESS_LB_REG_VALUE 0x6040200
++
++/* Load Balance Priority Mapping Register */
++#define IPQESS_REG_LB_PRI_START 0x804
++#define IPQESS_REG_LB_PRI_END 0x810
++#define IPQESS_LB_PRI_REG_INC 4
++#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
++#define IPQESS_LB_PRI_ENTRY_MASK 0xf
++
++/* RSS Priority Mapping Register */
++#define IPQESS_REG_RSS_PRI 0x820
++#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
++#define IPQESS_RSS_RING_ID_MASK 0x7
++#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
++
++/* RSS Indirection Register */
++#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
++#define IPQESS_NUM_IDT 16
++#define IPQESS_RSS_IDT_VALUE 0x64206420
++
++/* Default RSS Ring Register */
++#define IPQESS_REG_DEF_RSS 0x890
++#define IPQESS_DEF_RSS_MASK 0x7
++
++/* RSS Hash Function Type Register */
++#define IPQESS_REG_RSS_TYPE 0x894
++#define IPQESS_RSS_TYPE_NONE 0x01
++#define IPQESS_RSS_TYPE_IPV4TCP 0x02
++#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
++#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
++#define IPQESS_RSS_TYPE_IPV6UDP 0x10
++#define IPQESS_RSS_TYPE_IPV4 0x20
++#define IPQESS_RSS_TYPE_IPV6 0x40
++#define IPQESS_RSS_HASH_MODE_MASK 0x7f
++
++#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
++
++#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
++
++#define IPQESS_HASH_TYPE_START 0
++#define IPQESS_HASH_TYPE_END 5
++#define IPQESS_HASH_TYPE_SHIFT 12
++
++#define IPQESS_RFS_FLOW_ENTRIES 1024
++#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
++#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
++
++/* RFD Base Address Register */
++#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
++
++/* RFD Index Register */
++#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) /* x = queue id */
++
++#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
++#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
++#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
++#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
++#define IPQESS_RFD_PROD_IDX_SHIFT 0
++#define IPQESS_RFD_CONS_IDX_SHIFT 16
++
++/* Rx Descriptor Control Register */
++#define IPQESS_REG_RX_DESC0 0xA10
++#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
++#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
++#define IPQESS_RFD_RING_SIZE_SHIFT 0
++#define IPQESS_RX_BUF_SIZE_SHIFT 16
++
++#define IPQESS_REG_RX_DESC1 0xA14
++#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
++#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
++#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
++#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
++#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
++#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
++
++/* RXQ Control Register */
++#define IPQESS_REG_RXQ_CTRL 0xA18
++#define IPQESS_FIFO_THRESH_TYPE_SHIF 0
++#define IPQESS_FIFO_THRESH_128_BYTE 0x0
++#define IPQESS_FIFO_THRESH_64_BYTE 0x1
++#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
++#define IPQESS_RXQ_CTRL_EN_MASK GENMASK(15, 8)
++#define IPQESS_RXQ_CTRL_EN(__qid) BIT(8 + (__qid))
++
++/* AXI Burst Size Config */
++#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
++#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
++
++/* Rx Statistics Register */
++#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
++#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
++
++/* WoL Pattern Length Register */
++#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
++#define IPQESS_WOL_PT_LEN_MASK 0xFF
++#define IPQESS_WOL_PT0_LEN_SHIFT 0
++#define IPQESS_WOL_PT1_LEN_SHIFT 8
++#define IPQESS_WOL_PT2_LEN_SHIFT 16
++#define IPQESS_WOL_PT3_LEN_SHIFT 24
++
++#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
++#define IPQESS_WOL_PT4_LEN_SHIFT 0
++#define IPQESS_WOL_PT5_LEN_SHIFT 8
++#define IPQESS_WOL_PT6_LEN_SHIFT 16
++
++/* WoL Control Register */
++#define IPQESS_REG_WOL_CTRL 0xC08
++#define IPQESS_WOL_WK_EN 0x00000001
++#define IPQESS_WOL_MG_EN 0x00000002
++#define IPQESS_WOL_PT0_EN 0x00000004
++#define IPQESS_WOL_PT1_EN 0x00000008
++#define IPQESS_WOL_PT2_EN 0x00000010
++#define IPQESS_WOL_PT3_EN 0x00000020
++#define IPQESS_WOL_PT4_EN 0x00000040
++#define IPQESS_WOL_PT5_EN 0x00000080
++#define IPQESS_WOL_PT6_EN 0x00000100
++
++/* MAC Control Register */
++#define IPQESS_REG_MAC_CTRL0 0xC20
++#define IPQESS_REG_MAC_CTRL1 0xC24
++
++/* WoL Pattern Register */
++#define IPQESS_REG_WOL_PATTERN_START 0x5000
++#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
++
++/* TX descriptor fields */
++#define IPQESS_TPD_HDR_SHIFT 0
++#define IPQESS_TPD_PPPOE_EN 0x00000100
++#define IPQESS_TPD_IP_CSUM_EN 0x00000200
++#define IPQESS_TPD_TCP_CSUM_EN 0x0000400
++#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
++#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
++#define IPQESS_TPD_LSO_EN 0x00001000
++#define IPQESS_TPD_LSO_V2_EN 0x00002000
++/* The VLAN_TAGGED bit is not used in the publicly available
++ * drivers. The definition has been stolen from the Atheros
++ * 'alx' driver (drivers/net/ethernet/atheros/alx/hw.h). It
++ * seems that it has the same meaning in regard to the EDMA
++ * hardware.
++ */
++#define IPQESS_TPD_VLAN_TAGGED 0x00004000
++#define IPQESS_TPD_IPV4_EN 0x00010000
++#define IPQESS_TPD_MSS_MASK 0x1FFF
++#define IPQESS_TPD_MSS_SHIFT 18
++#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
++
++/* RRD descriptor fields */
++#define IPQESS_RRD_NUM_RFD_MASK 0x000F
++#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
++#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
++#define IPQESS_RRD_SVLAN 0x8000
++#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF
++
++#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
++#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
++#define IPQESS_RRD_CVLAN 0x0001
++#define IPQESS_RRD_DESC_VALID 0x8000
++
++#define IPQESS_RRD_PRIORITY_SHIFT 4
++#define IPQESS_RRD_PRIORITY_MASK 0x7
++#define IPQESS_RRD_PORT_TYPE_SHIFT 7
++#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
++
++#define IPQESS_RRD_PORT_ID_MASK 0x7000
++
++#endif
+--- /dev/null
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess_ethtool.c
+@@ -0,0 +1,164 @@
++// SPDX-License-Identifier: GPL-2.0 OR ISC
++/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
++ * Copyright (c) 2021 - 2022, Maxime Chevallier <maxime.chevallier@bootlin.com>
++ *
++ */
++
++#include <linux/ethtool.h>
++#include <linux/netdevice.h>
++#include <linux/string.h>
++#include <linux/phylink.h>
++
++#include "ipqess.h"
++
++struct ipqess_ethtool_stats {
++ u8 string[ETH_GSTRING_LEN];
++ u32 offset;
++};
++
++#define IPQESS_STAT(m) offsetof(struct ipqess_statistics, m)
++#define DRVINFO_LEN 32
++
++static const struct ipqess_ethtool_stats ipqess_stats[] = {
++ {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
++ {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
++ {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
++ {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
++ {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
++ {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
++ {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
++ {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
++ {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
++ {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
++ {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
++ {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
++ {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
++ {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
++ {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
++ {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
++ {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
++ {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
++ {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
++ {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
++ {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
++ {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
++ {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
++ {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
++ {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
++ {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
++ {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
++ {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
++ {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
++ {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
++ {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
++ {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
++ {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
++ {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
++ {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
++ {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
++ {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
++ {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
++ {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
++ {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
++ {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
++ {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
++ {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
++ {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
++ {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
++ {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
++ {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
++ {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
++ {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
++};
++
++static int ipqess_get_strset_count(struct net_device *netdev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(ipqess_stats);
++ default:
++ netdev_dbg(netdev, "%s: Unsupported string set", __func__);
++ return -EOPNOTSUPP;
++ }
++}
++
++static void ipqess_get_strings(struct net_device *netdev, u32 stringset,
++ u8 *data)
++{
++ u8 *p = data;
++ u32 i;
++
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
++ ethtool_sprintf(&p, ipqess_stats[i].string);
++ break;
++ }
++}
++
++static void ipqess_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats,
++ uint64_t *data)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++ u32 *essstats = (u32 *)&ess->ipqess_stats;
++ int i;
++
++ spin_lock(&ess->stats_lock);
++
++ ipqess_update_hw_stats(ess);
++
++ for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
++ data[i] = *(u32 *)(essstats + (ipqess_stats[i].offset / sizeof(u32)));
++
++ spin_unlock(&ess->stats_lock);
++}
++
++static void ipqess_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ strscpy(info->driver, "qca_ipqess", DRVINFO_LEN);
++ strscpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
++}
++
++static int ipqess_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_ethtool_ksettings_get(ess->phylink, cmd);
++}
++
++static int ipqess_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *cmd)
++{
++ struct ipqess *ess = netdev_priv(netdev);
++
++ return phylink_ethtool_ksettings_set(ess->phylink, cmd);
++}
++
++static void ipqess_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring,
++ struct kernel_ethtool_ringparam *kernel_ering,
++ struct netlink_ext_ack *extack)
++{
++ ring->tx_max_pending = IPQESS_TX_RING_SIZE;
++ ring->rx_max_pending = IPQESS_RX_RING_SIZE;
++}
++
++static const struct ethtool_ops ipqesstool_ops = {
++ .get_drvinfo = &ipqess_get_drvinfo,
++ .get_link = &ethtool_op_get_link,
++ .get_link_ksettings = &ipqess_get_link_ksettings,
++ .set_link_ksettings = &ipqess_set_link_ksettings,
++ .get_strings = &ipqess_get_strings,
++ .get_sset_count = &ipqess_get_strset_count,
++ .get_ethtool_stats = &ipqess_get_ethtool_stats,
++ .get_ringparam = ipqess_get_ringparam,
++};
++
++void ipqess_set_ethtool_ops(struct net_device *netdev)
++{
++ netdev->ethtool_ops = &ipqesstool_ops;
++}
--- /dev/null
+From a32e16b3c2fc1954ad6e09737439f60e5890278e Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:49 +0100
+Subject: [PATCH] net: dsa: add out-of-band tagging protocol
+
+This tagging protocol is designed for situations where the link between
+the MAC and the switch is such that the Destination Port, which is
+usually embedded in some part of the Ethernet Header, is sent out-of-band
+and isn't present at all in the Ethernet frame.
+
+This can happen when the MAC and Switch are tightly integrated on an
+SoC, as is the case with the Qualcomm IPQ4019 for example, where the DSA
+tag is inserted directly into the DMA descriptors. In that case,
+the MAC driver is responsible for sending the tag to the switch using
+the out-of-band medium. To do so, the MAC driver needs to know the
+destination port for that skb.
+
+Add a new tagging protocol based on SKB extensions to convey the
+information about the destination port to the MAC driver.
+
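+For illustration only (not part of this patch), a MAC driver's xmit path
+might consume the tag roughly as follows; the descriptor field and shift
+names here are hypothetical:
+
+	/* The tagger stored the destination port in the skb extension;
+	 * pull it out and encode it into the hardware TX descriptor.
+	 */
+	struct dsa_oob_tag_info *ti = skb_ext_find(skb, SKB_EXT_DSA_OOB);
+
+	if (ti)
+		txd->word |= ti->port << MAC_TXD_DST_PORT_SHIFT;
+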
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ Documentation/networking/dsa/dsa.rst | 13 +++++++-
+ MAINTAINERS | 1 +
+ include/linux/dsa/oob.h | 16 +++++++++
+ include/linux/skbuff.h | 3 ++
+ include/net/dsa.h | 2 ++
+ net/core/skbuff.c | 10 ++++++
+ net/dsa/Kconfig | 9 +++++
+ net/dsa/Makefile | 1 +
+ net/dsa/tag_oob.c | 49 ++++++++++++++++++++++++++++
+ 9 files changed, 103 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/dsa/oob.h
+ create mode 100644 net/dsa/tag_oob.c
+
+--- a/Documentation/networking/dsa/dsa.rst
++++ b/Documentation/networking/dsa/dsa.rst
+@@ -66,7 +66,8 @@ Switch tagging protocols
+ ------------------------
+
+ DSA supports many vendor-specific tagging protocols, one software-defined
+-tagging protocol, and a tag-less mode as well (``DSA_TAG_PROTO_NONE``).
++tagging protocol, a tag-less mode (``DSA_TAG_PROTO_NONE``), as well as an
++out-of-band tagging protocol (``DSA_TAG_PROTO_OOB``).
+
+ The exact format of the tag protocol is vendor specific, but in general, they
+ all contain something which:
+@@ -217,6 +218,16 @@ receive all frames regardless of the val
+ setting the ``promisc_on_master`` property of the ``struct dsa_device_ops``.
+ Note that this assumes a DSA-unaware master driver, which is the norm.
+
++Some SoCs have a tight integration between the conduit network interface and
++the embedded switch, such that the DSA tag isn't transmitted in the packet
++data, but through another medium, using so-called out-of-band tagging. In
++that case, the host MAC driver is in charge of transmitting the tag to the
++switch. An example is the IPQ4019 SoC, which transmits the tag between the
++ipqess ethernet controller and the qca8k switch using the DMA descriptor. In
++that configuration, tag-chaining is permitted, but the OOB tag will always be
++used by the top-most switch in the tree. The tagger (``DSA_TAG_PROTO_OOB``)
++uses skb extensions to transmit the tag to and from the MAC driver.
++
+ Master network devices
+ ----------------------
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -17070,6 +17070,7 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: Documentation/devicetree/bindings/net/qcom,ipq4019-ess-edma.yaml
+ F: drivers/net/ethernet/qualcomm/ipqess/
++F: net/dsa/tag_oob.c
+
+ QUALCOMM ETHQOS ETHERNET DRIVER
+ M: Vinod Koul <vkoul@kernel.org>
+--- /dev/null
++++ b/include/linux/dsa/oob.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0-only
++ * Copyright (C) 2022 Maxime Chevallier <maxime.chevallier@bootlin.com>
++ */
++
++#ifndef _NET_DSA_OOB_H
++#define _NET_DSA_OOB_H
++
++#include <linux/skbuff.h>
++
++struct dsa_oob_tag_info {
++ u16 port;
++};
++
++int dsa_oob_tag_push(struct sk_buff *skb, struct dsa_oob_tag_info *ti);
++int dsa_oob_tag_pop(struct sk_buff *skb, struct dsa_oob_tag_info *ti);
++#endif
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -4580,6 +4580,9 @@ enum skb_ext_id {
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ SKB_EXT_DSA_OOB,
++#endif
+ SKB_EXT_NUM, /* must be last */
+ };
+
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -55,6 +55,7 @@ struct phylink_link_state;
+ #define DSA_TAG_PROTO_RTL8_4T_VALUE 25
+ #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
+ #define DSA_TAG_PROTO_LAN937X_VALUE 27
++#define DSA_TAG_PROTO_OOB_VALUE 28
+
+ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
+@@ -85,6 +86,7 @@ enum dsa_tag_protocol {
+ DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
+ DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
+ DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
++ DSA_TAG_PROTO_OOB = DSA_TAG_PROTO_OOB_VALUE,
+ };
+
+ struct dsa_switch;
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -62,8 +62,12 @@
+ #include <linux/mpls.h>
+ #include <linux/kcov.h>
+ #include <linux/if.h>
++#ifdef CONFIG_NET_DSA_TAG_OOB
++#include <linux/dsa/oob.h>
++#endif
+
+ #include <net/protocol.h>
++#include <net/dsa.h>
+ #include <net/dst.h>
+ #include <net/sock.h>
+ #include <net/checksum.h>
+@@ -4510,6 +4514,9 @@ static const u8 skb_ext_type_len[] = {
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ [SKB_EXT_DSA_OOB] = SKB_EXT_CHUNKSIZEOF(struct dsa_oob_tag_info),
++#endif
+ };
+
+ static __always_inline unsigned int skb_ext_total_length(void)
+@@ -4530,6 +4537,9 @@ static __always_inline unsigned int skb_
+ #if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ skb_ext_type_len[SKB_EXT_MCTP] +
+ #endif
++#if IS_ENABLED(CONFIG_NET_DSA_TAG_OOB)
++ skb_ext_type_len[SKB_EXT_DSA_OOB] +
++#endif
+ 0;
+ }
+
+--- a/net/dsa/Kconfig
++++ b/net/dsa/Kconfig
+@@ -113,6 +113,15 @@ config NET_DSA_TAG_OCELOT_8021Q
+ this mode, less TCAM resources (VCAP IS1, IS2, ES0) are available for
+ use with tc-flower.
+
++config NET_DSA_TAG_OOB
++ select SKB_EXTENSIONS
++ tristate "Tag driver for Out-of-band tagging drivers"
++ help
++ Say Y or M if you want to enable support for pairs of embedded
++ switches and host MAC drivers which perform demultiplexing and
++ packet steering to ports using out of band metadata processed
++ by the DSA master, rather than tags present in the packets.
++
+ config NET_DSA_TAG_QCA
+ tristate "Tag driver for Qualcomm Atheros QCA8K switches"
+ help
+--- a/net/dsa/Makefile
++++ b/net/dsa/Makefile
+@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag
+ obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+ obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
+ obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
++obj-$(CONFIG_NET_DSA_TAG_OOB) += tag_oob.o
+ obj-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+ obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
+ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
+--- /dev/null
++++ b/net/dsa/tag_oob.c
+@@ -0,0 +1,49 @@
++// SPDX-License-Identifier: GPL-2.0-only
++
++/* Copyright (c) 2022, Maxime Chevallier <maxime.chevallier@bootlin.com> */
++
++#include <linux/bitfield.h>
++#include <linux/dsa/oob.h>
++#include <linux/skbuff.h>
++
++#include "dsa_priv.h"
++
++static struct sk_buff *oob_tag_xmit(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct dsa_oob_tag_info *tag_info = skb_ext_add(skb, SKB_EXT_DSA_OOB);
++ struct dsa_port *dp = dsa_slave_to_port(dev);
++
++ tag_info->port = dp->index;
++
++ return skb;
++}
++
++static struct sk_buff *oob_tag_rcv(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct dsa_oob_tag_info *tag_info = skb_ext_find(skb, SKB_EXT_DSA_OOB);
++
++ if (!tag_info)
++ return NULL;
++
++ skb->dev = dsa_master_find_slave(dev, 0, tag_info->port);
++ if (!skb->dev)
++ return NULL;
++
++ return skb;
++}
++
++static const struct dsa_device_ops oob_tag_dsa_ops = {
++ .name = "oob",
++ .proto = DSA_TAG_PROTO_OOB,
++ .xmit = oob_tag_xmit,
++ .rcv = oob_tag_rcv,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("DSA tag driver for out-of-band tagging");
++MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
++MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OOB);
++
++module_dsa_tag_driver(oob_tag_dsa_ops);
--- /dev/null
+From 4975e2b3f1d37bba04f262784cef0d5b7e0a30a4 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:50 +0100
+Subject: [PATCH] net: ipqess: Add out-of-band DSA tagging support
+
+On the IPQ4019, there's a 5-port switch connected to the CPU through the
+IPQESS Ethernet controller. The DSA tag is sent out to that switch
+through the DMA descriptor, due to how tightly the controller is
+integrated with the switch.
+
+We use the out-of-band tagging protocol: on RX, the driver gets the
+source port from the descriptor, pushes it into the skb extensions, and
+the tagger pulls it to infer the destination netdev. The reverse process
+is done on the TX side, where the driver pulls the tag from the skb and
+builds the descriptor accordingly.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+---
+ drivers/net/ethernet/qualcomm/Kconfig | 1 +
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 64 ++++++++++++++++++-
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.h | 4 ++
+ 3 files changed, 68 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qualcomm/Kconfig
++++ b/drivers/net/ethernet/qualcomm/Kconfig
+@@ -64,6 +64,7 @@ config QCOM_IPQ4019_ESS_EDMA
+ tristate "Qualcomm Atheros IPQ4019 ESS EDMA support"
+ depends on (OF && ARCH_QCOM) || COMPILE_TEST
+ select PHYLINK
++ select NET_DSA_TAG_OOB
+ help
+ This driver supports the Qualcomm Atheros IPQ40xx built-in
+ ESS EDMA ethernet controller.
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
++#include <linux/dsa/oob.h>
+ #include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+@@ -22,6 +23,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/vmalloc.h>
+ #include <net/checksum.h>
++#include <net/dsa.h>
+ #include <net/ip6_checksum.h>
+
+ #include "ipqess.h"
+@@ -327,6 +329,7 @@ static int ipqess_rx_poll(struct ipqess_
+ tail &= IPQESS_RFD_CONS_IDX_MASK;
+
+ while (done < budget) {
++ struct dsa_oob_tag_info *tag_info;
+ struct ipqess_rx_desc *rd;
+ struct sk_buff *skb;
+
+@@ -406,6 +409,12 @@ static int ipqess_rx_poll(struct ipqess_
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ le16_to_cpu(rd->rrd4));
+
++ if (likely(rx_ring->ess->dsa_ports)) {
++ tag_info = skb_ext_add(skb, SKB_EXT_DSA_OOB);
++ tag_info->port = FIELD_GET(IPQESS_RRD_PORT_ID_MASK,
++ le16_to_cpu(rd->rrd1));
++ }
++
+ napi_gro_receive(&rx_ring->napi_rx, skb);
+
+ rx_ring->ess->stats.rx_packets++;
+@@ -706,6 +715,23 @@ static void ipqess_rollback_tx(struct ip
+ tx_ring->head = start_index;
+ }
+
++static void ipqess_process_dsa_tag_sh(struct ipqess *ess, struct sk_buff *skb,
++ u32 *word3)
++{
++ struct dsa_oob_tag_info *tag_info;
++
++ if (unlikely(!ess->dsa_ports))
++ return;
++
++ tag_info = skb_ext_find(skb, SKB_EXT_DSA_OOB);
++ if (!tag_info)
++ return;
++
++ *word3 |= tag_info->port << IPQESS_TPD_PORT_BITMAP_SHIFT;
++ *word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);
++ *word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
++}
++
+ static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring,
+ struct sk_buff *skb)
+ {
+@@ -716,6 +742,8 @@ static int ipqess_tx_map_and_fill(struct
+ u16 len;
+ int i;
+
++ ipqess_process_dsa_tag_sh(tx_ring->ess, skb, &word3);
++
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ lso_word1 |= IPQESS_TPD_IPV4_EN;
+@@ -917,6 +945,33 @@ static const struct net_device_ops ipqes
+ .ndo_tx_timeout = ipqess_tx_timeout,
+ };
+
++static int ipqess_netdevice_event(struct notifier_block *nb,
++ unsigned long event, void *ptr)
++{
++ struct ipqess *ess = container_of(nb, struct ipqess, netdev_notifier);
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct netdev_notifier_changeupper_info *info;
++
++ if (dev != ess->netdev)
++ return NOTIFY_DONE;
++
++ switch (event) {
++ case NETDEV_CHANGEUPPER:
++ info = ptr;
++
++ if (!dsa_slave_dev_check(info->upper_dev))
++ return NOTIFY_DONE;
++
++ if (info->linking)
++ ess->dsa_ports++;
++ else
++ ess->dsa_ports--;
++
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
+ static void ipqess_hw_stop(struct ipqess *ess)
+ {
+ int i;
+@@ -1184,12 +1239,19 @@ static int ipqess_axi_probe(struct platf
+ netif_napi_add(netdev, &ess->rx_ring[i].napi_rx, ipqess_rx_napi);
+ }
+
+- err = register_netdev(netdev);
++ ess->netdev_notifier.notifier_call = ipqess_netdevice_event;
++ err = register_netdevice_notifier(&ess->netdev_notifier);
+ if (err)
+ goto err_hw_stop;
+
++ err = register_netdev(netdev);
++ if (err)
++ goto err_notifier_unregister;
++
+ return 0;
+
++err_notifier_unregister:
++ unregister_netdevice_notifier(&ess->netdev_notifier);
+ err_hw_stop:
+ ipqess_hw_stop(ess);
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.h
+@@ -171,6 +171,10 @@ struct ipqess {
+ struct platform_device *pdev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
++
++ struct notifier_block netdev_notifier;
++ int dsa_ports;
++
+ struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
+
+ struct ipqess_statistics ipqess_stats;
+++ /dev/null
-From 44327d7098d4f32c24ec8c528e5aff6e030956bc Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Wed, 20 Oct 2021 13:21:45 +0200
-Subject: [PATCH] arm: dts: ipq4019: add ethernet controller DT node
-
-Since IPQ40xx SoC built-in ethernet controller now has a driver,
-add its DT node so it can be used.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- arch/arm/boot/dts/qcom-ipq4019.dtsi | 48 +++++++++++++++++++++++++++++
- 1 file changed, 48 insertions(+)
-
---- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
-+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
-@@ -38,6 +38,7 @@
- spi1 = &blsp1_spi2;
- i2c0 = &blsp1_i2c3;
- i2c1 = &blsp1_i2c4;
-+ ethernet0 = &gmac;
- };
-
- cpus {
-@@ -589,6 +590,57 @@
- status = "disabled";
- };
-
-+ gmac: ethernet@c080000 {
-+ compatible = "qcom,ipq4019-ess-edma";
-+ reg = <0xc080000 0x8000>;
-+ resets = <&gcc ESS_RESET>;
-+ reset-names = "ess_rst";
-+ clocks = <&gcc GCC_ESS_CLK>;
-+ clock-names = "ess_clk";
-+ interrupts = <GIC_SPI 65 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 69 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 70 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 71 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 72 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 76 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 242 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 243 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 254 IRQ_TYPE_EDGE_RISING>,
-+ <GIC_SPI 255 IRQ_TYPE_EDGE_RISING>;
-+
-+ status = "disabled";
-+
-+ phy-mode = "internal";
-+ fixed-link {
-+ speed = <1000>;
-+ full-duplex;
-+ pause;
-+ asym-pause;
-+ };
-+ };
-+
- mdio: mdio@90000 {
- #address-cells = <1>;
- #size-cells = <0>;
--- /dev/null
+From 5f15f7f170c76220dfd36cb9037d7848d1fc4aaf Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Tue, 15 Aug 2023 14:30:50 +0200
+Subject: [PATCH] net: qualcomm: ipqess: release IRQ-s on network device stop
+
+Currently, the IPQESS driver obtains the IRQs during ndo_open, but they
+are never freed, as they are device managed.
+
+However, releasing them only when the device is removed is not enough,
+as the same network device can be stopped and started multiple times;
+on the second start the IRQ request fails with -EBUSY, because the IRQs
+have already been requested and are not of the shared type:
+[ 34.480769] ipqess-edma c080000.ethernet eth0: Link is Down
+[ 34.488070] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.488131] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.494527] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.502892] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.508137] qca8k-ipq4019 c000000.switch lan1: failed to open master eth0
+[ 34.518966] br-lan: port 1(lan1) entered blocking state
+[ 34.525165] br-lan: port 1(lan1) entered disabled state
+[ 34.530633] device lan1 entered promiscuous mode
+[ 34.548598] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.548660] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.553111] qca8k-ipq4019 c000000.switch lan2: failed to open master eth0
+[ 34.563841] br-lan: port 2(lan2) entered blocking state
+[ 34.570083] br-lan: port 2(lan2) entered disabled state
+[ 34.575530] device lan2 entered promiscuous mode
+[ 34.587067] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.587132] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.591579] qca8k-ipq4019 c000000.switch lan3: failed to open master eth0
+[ 34.602451] br-lan: port 3(lan3) entered blocking state
+[ 34.608496] br-lan: port 3(lan3) entered disabled state
+[ 34.614084] device lan3 entered promiscuous mode
+[ 34.626405] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.626468] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.630871] qca8k-ipq4019 c000000.switch lan4: failed to open master eth0
+[ 34.641689] br-lan: port 4(lan4) entered blocking state
+[ 34.647834] br-lan: port 4(lan4) entered disabled state
+[ 34.653455] device lan4 entered promiscuous mode
+[ 34.667282] ipqess-edma c080000.ethernet eth0: ipqess_open
+[ 34.667364] genirq: Flags mismatch irq 37. 00000001 (c080000.ethernet:txq0) vs. 00000001 (c080000.ethernet:txq0)
+[ 34.671830] qca8k-ipq4019 c000000.switch wan: failed to open master eth0
+
+So, let's free the IRQs in ndo_stop, after stopping NAPI and disabling the HW IRQs.
+
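+The IRQs are requested in the open path roughly as follows (simplified
+sketch; the per-queue name string is a placeholder, the handler and ring
+pointers follow the driver), which is why a second ndo_open without a
+matching free fails with -EBUSY:
+
+	/* devm_request_irq() ties the IRQ lifetime to the device, not to
+	 * the open/stop cycle, so requesting the same IRQ again on
+	 * re-open fails.
+	 */
+	err = devm_request_irq(&netdev->dev, ess->tx_irq[qid],
+			       ipqess_interrupt_tx, 0,
+			       "ipqess:txq", &ess->tx_ring[i]);
+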
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -636,9 +636,22 @@ static int ipqess_stop(struct net_device
+ netif_tx_stop_all_queues(netdev);
+ phylink_stop(ess->phylink);
+ ipqess_irq_disable(ess);
++
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
++ int qid;
++
+ napi_disable(&ess->tx_ring[i].napi_tx);
+ napi_disable(&ess->rx_ring[i].napi_rx);
++
++ qid = ess->tx_ring[i].idx;
++ devm_free_irq(&netdev->dev,
++ ess->tx_irq[qid],
++ &ess->tx_ring[i]);
++
++ qid = ess->rx_ring[i].idx;
++ devm_free_irq(&netdev->dev,
++ ess->rx_irq[qid],
++ &ess->rx_ring[i]);
+ }
+
+ return 0;
--- /dev/null
+From 9fa4a57a65e270e4d579cace4de5c438f46c7d12 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robimarko@gmail.com>
+Date: Tue, 15 Aug 2023 14:38:44 +0200
+Subject: [PATCH] net: qualcomm: ipqess: enable threaded NAPI by default
+
+Threaded NAPI provides a nice performance boost, so let's enable it by
+default.
+
+We do, however, need to move the __napi_schedule() call after the HW IRQ
+has been cleared in order to avoid concurrency issues: otherwise the
+threaded NAPI poller could run, complete, and re-enable the interrupt
+before the hard IRQ handler masks it, leaving the interrupt disabled
+with no poll scheduled.
+
+Signed-off-by: Robert Marko <robimarko@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -530,9 +530,9 @@ static irqreturn_t ipqess_interrupt_tx(i
+ struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *)priv;
+
+ if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
+- __napi_schedule(&tx_ring->napi_tx);
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
+ 0x0);
++ __napi_schedule(&tx_ring->napi_tx);
+ }
+
+ return IRQ_HANDLED;
+@@ -543,9 +543,9 @@ static irqreturn_t ipqess_interrupt_rx(i
+ struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *)priv;
+
+ if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
+- __napi_schedule(&rx_ring->napi_rx);
+ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
+ 0x0);
++ __napi_schedule(&rx_ring->napi_rx);
+ }
+
+ return IRQ_HANDLED;
+@@ -1261,6 +1261,8 @@ static int ipqess_axi_probe(struct platf
+ if (err)
+ goto err_notifier_unregister;
+
++ dev_set_threaded(netdev, true);
++
+ return 0;
+
+ err_notifier_unregister:
--- /dev/null
+From 5b71dbb867680887d47954ce1cc145cb747cbce6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 4 Nov 2022 18:41:51 +0100
+Subject: [PATCH] ARM: dts: qcom: ipq4019: Add description for the IPQESS
+ Ethernet controller
+
+The Qualcomm IPQ4019 includes an internal 5-port switch, which is
+connected to the CPU through the internal IPQESS Ethernet controller.
+
+Add a description for this internal interface, which is connected to a
+modified version of the QCA8K Ethernet switch.
+
+This Ethernet controller only supports a specific internal interface
+mode for connection to the switch.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 48 +++++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -591,6 +591,54 @@
+ status = "disabled";
+ };
+
++ gmac: ethernet@c080000 {
++ compatible = "qcom,ipq4019-ess-edma";
++ reg = <0xc080000 0x8000>;
++ resets = <&gcc ESS_RESET>;
++ reset-names = "ess";
++ clocks = <&gcc GCC_ESS_CLK>;
++ clock-names = "ess";
++ interrupts = <GIC_SPI 65 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 69 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 70 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 71 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 72 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 76 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 240 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 242 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 243 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 244 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 245 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 246 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 248 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 249 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 251 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 252 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 253 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 254 IRQ_TYPE_EDGE_RISING>,
++ <GIC_SPI 255 IRQ_TYPE_EDGE_RISING>;
++ phy-mode = "internal";
++ status = "disabled";
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ };
++ };
++
+ mdio: mdio@90000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+++ /dev/null
-From 8cbdb2526c3d7ba2e0c8c771773595f195135f54 Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Mon, 1 Nov 2021 18:15:04 +0100
-Subject: [PATCH] arm: dts: ipq4019: add switch node
-
-Since the built-in IPQ40xx switch now has a driver, add the required node
-for it to work.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- arch/arm/boot/dts/qcom-ipq4019.dtsi | 76 +++++++++++++++++++++++++++++
- 1 file changed, 76 insertions(+)
-
---- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
-+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
-@@ -591,6 +591,82 @@
- status = "disabled";
- };
-
-+ switch: switch@c000000 {
-+ compatible = "qca,ipq4019-qca8337n";
-+ reg = <0xc000000 0x80000>, <0x98000 0x800>;
-+ reg-names = "base", "psgmii_phy";
-+ resets = <&gcc ESS_PSGMII_ARES>;
-+ reset-names = "psgmii_rst";
-+ mdio = <&mdio>;
-+ psgmii-ethphy = <&psgmiiphy>;
-+
-+ status = "disabled";
-+
-+ ports {
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+
-+ port@0 { /* MAC0 */
-+ reg = <0>;
-+ label = "cpu";
-+ ethernet = <&gmac>;
-+ phy-mode = "internal";
-+
-+ fixed-link {
-+ speed = <1000>;
-+ full-duplex;
-+ pause;
-+ asym-pause;
-+ };
-+ };
-+
-+ swport1: port@1 { /* MAC1 */
-+ reg = <1>;
-+ label = "lan1";
-+ phy-handle = <&ethphy0>;
-+ phy-mode = "psgmii";
-+
-+ status = "disabled";
-+ };
-+
-+ swport2: port@2 { /* MAC2 */
-+ reg = <2>;
-+ label = "lan2";
-+ phy-handle = <&ethphy1>;
-+ phy-mode = "psgmii";
-+
-+ status = "disabled";
-+ };
-+
-+ swport3: port@3 { /* MAC3 */
-+ reg = <3>;
-+ label = "lan3";
-+ phy-handle = <&ethphy2>;
-+ phy-mode = "psgmii";
-+
-+ status = "disabled";
-+ };
-+
-+ swport4: port@4 { /* MAC4 */
-+ reg = <4>;
-+ label = "lan4";
-+ phy-handle = <&ethphy3>;
-+ phy-mode = "psgmii";
-+
-+ status = "disabled";
-+ };
-+
-+ swport5: port@5 { /* MAC5 */
-+ reg = <5>;
-+ label = "wan";
-+ phy-handle = <&ethphy4>;
-+ phy-mode = "psgmii";
-+
-+ status = "disabled";
-+ };
-+ };
-+ };
-+
- gmac: ethernet@c080000 {
- compatible = "qcom,ipq4019-ess-edma";
- reg = <0xc080000 0x8000>;
--- /dev/null
+From a38126870488398932e017dd9d76174b4aadbbbb Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Sat, 10 Sep 2022 15:46:09 +0200
+Subject: [PATCH] net: dsa: qca8k: add IPQ4019 built-in switch support
+
+Qualcomm IPQ40xx SoCs have a variant of the QCA8337N switch built in.
+
+It shares most of its design with its external counterpart; however, it
+is modified for the SoC.
+Namely, it doesn't have a second CPU port (Port 6), so it has 6 ports
+instead of 7.
+It also has no built-in PHYs but rather requires external PSGMII-based
+companion PHYs (QCA8072 and QCA8075), which it first needs to calibrate
+before using them.
+The PSGMII side has an SoC built-in PHY that is used to connect to those
+PHYs; unfortunately it requires some magic values, as the datasheet
+documents neither the bits being set nor the register at all.
+
+Since it is built in, it is MMIO mapped like other peripherals and
+doesn't have its own MDIO bus but depends on the SoC-provided one.
+
+The CPU connection is at Port 0 and uses an internal connection rather
+than traditional RGMII/SGMII.
+
+It also doesn't use in-band tagging like other qca8k switches, so an
+out-of-band tagger is used.
+
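+As the switch is MMIO based, the driver maps its register window and
+accesses it through a regmap rather than an MDIO bus. A minimal sketch of
+that part of probe (simplified, error handling omitted; the "base"
+reg-name comes from the DT binding) could look like:
+
+	void __iomem *base;
+
+	/* Map the switch register window and wrap it in an MMIO regmap */
+	base = devm_platform_ioremap_resource_byname(pdev, "base");
+	priv->regmap = devm_regmap_init_mmio(priv->dev, base,
+					     &qca8k_ipq4019_regmap_config);
+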
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ drivers/net/dsa/qca/Kconfig | 8 +
+ drivers/net/dsa/qca/Makefile | 1 +
+ drivers/net/dsa/qca/qca8k-common.c | 6 +-
+ drivers/net/dsa/qca/qca8k-ipq4019.c | 948 ++++++++++++++++++++++++++++
+ drivers/net/dsa/qca/qca8k.h | 56 ++
+ 5 files changed, 1016 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/net/dsa/qca/qca8k-ipq4019.c
+
+--- a/drivers/net/dsa/qca/Kconfig
++++ b/drivers/net/dsa/qca/Kconfig
+@@ -23,3 +23,11 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
+ help
+ This enabled support for LEDs present on the Qualcomm Atheros
+ QCA8K Ethernet switch chips.
++
++config NET_DSA_QCA8K_IPQ4019
++ tristate "Qualcomm Atheros IPQ4019 Ethernet switch support"
++ select NET_DSA_TAG_OOB
++ select REGMAP_MMIO
++ help
++ This enables support for the switch built into Qualcomm Atheros
++ IPQ4019 SoCs.
+--- a/drivers/net/dsa/qca/Makefile
++++ b/drivers/net/dsa/qca/Makefile
+@@ -5,3 +5,4 @@ qca8k-y += qca8k-common.o qca8k-8xxx.
+ ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+ qca8k-y += qca8k-leds.o
+ endif
++obj-$(CONFIG_NET_DSA_QCA8K_IPQ4019) += qca8k-ipq4019.o qca8k-common.o
+--- a/drivers/net/dsa/qca/qca8k-common.c
++++ b/drivers/net/dsa/qca/qca8k-common.c
+@@ -449,7 +449,7 @@ static int qca8k_vlan_del(struct qca8k_p
+
+ /* Check if we're the last member to be removed */
+ del = true;
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < priv->ds->num_ports; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+@@ -642,7 +642,7 @@ int qca8k_port_bridge_join(struct dsa_sw
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+@@ -674,7 +674,7 @@ void qca8k_port_bridge_leave(struct dsa_
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
++ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+--- /dev/null
++++ b/drivers/net/dsa/qca/qca8k-ipq4019.c
+@@ -0,0 +1,948 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
++ * Copyright (C) 2011-2012, 2020-2021 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2016 John Crispin <john@phrozen.org>
++ * Copyright (c) 2022 Robert Marko <robert.marko@sartura.hr>
++ */
++
++#include <linux/module.h>
++#include <linux/phy.h>
++#include <linux/netdevice.h>
++#include <linux/bitfield.h>
++#include <linux/regmap.h>
++#include <net/dsa.h>
++#include <linux/of_net.h>
++#include <linux/of_mdio.h>
++#include <linux/of_platform.h>
++#include <linux/mdio.h>
++#include <linux/phylink.h>
++
++#include "qca8k.h"
++
++static struct regmap_config qca8k_ipq4019_regmap_config = {
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = 0x16ac, /* end MIB - Port6 range */
++ .rd_table = &qca8k_readable_table,
++};
++
++static struct regmap_config qca8k_ipq4019_psgmii_phy_regmap_config = {
++ .name = "psgmii-phy",
++ .reg_bits = 32,
++ .val_bits = 32,
++ .reg_stride = 4,
++ .max_register = 0x7fc,
++};
++
++static enum dsa_tag_protocol
++qca8k_ipq4019_get_tag_protocol(struct dsa_switch *ds, int port,
++ enum dsa_tag_protocol mp)
++{
++ return DSA_TAG_PROTO_OOB;
++}
++
++static struct phylink_pcs *
++qca8k_ipq4019_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
++ phy_interface_t interface)
++{
++ struct qca8k_priv *priv = ds->priv;
++ struct phylink_pcs *pcs = NULL;
++
++ switch (interface) {
++ case PHY_INTERFACE_MODE_PSGMII:
++ switch (port) {
++ case 0:
++ pcs = &priv->pcs_port_0.pcs;
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++
++ return pcs;
++}
++
++static int qca8k_ipq4019_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
++ phy_interface_t interface,
++ const unsigned long *advertising,
++ bool permit_pause_to_mac)
++{
++ return 0;
++}
++
++static void qca8k_ipq4019_pcs_an_restart(struct phylink_pcs *pcs)
++{
++}
++
++static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
++{
++ return container_of(pcs, struct qca8k_pcs, pcs);
++}
++
++static void qca8k_ipq4019_pcs_get_state(struct phylink_pcs *pcs,
++ struct phylink_link_state *state)
++{
++ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
++ int port = pcs_to_qca8k_pcs(pcs)->port;
++ u32 reg;
++ int ret;
++
++ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
++ if (ret < 0) {
++ state->link = false;
++ return;
++ }
++
++ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
++ state->an_complete = state->link;
++ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
++ DUPLEX_HALF;
++
++ switch (reg & QCA8K_PORT_STATUS_SPEED) {
++ case QCA8K_PORT_STATUS_SPEED_10:
++ state->speed = SPEED_10;
++ break;
++ case QCA8K_PORT_STATUS_SPEED_100:
++ state->speed = SPEED_100;
++ break;
++ case QCA8K_PORT_STATUS_SPEED_1000:
++ state->speed = SPEED_1000;
++ break;
++ default:
++ state->speed = SPEED_UNKNOWN;
++ break;
++ }
++
++ if (reg & QCA8K_PORT_STATUS_RXFLOW)
++ state->pause |= MLO_PAUSE_RX;
++ if (reg & QCA8K_PORT_STATUS_TXFLOW)
++ state->pause |= MLO_PAUSE_TX;
++}
++
++static const struct phylink_pcs_ops qca8k_pcs_ops = {
++ .pcs_get_state = qca8k_ipq4019_pcs_get_state,
++ .pcs_config = qca8k_ipq4019_pcs_config,
++ .pcs_an_restart = qca8k_ipq4019_pcs_an_restart,
++};
++
++static void qca8k_ipq4019_setup_pcs(struct qca8k_priv *priv,
++ struct qca8k_pcs *qpcs,
++ int port)
++{
++ qpcs->pcs.ops = &qca8k_pcs_ops;
++
++ /* We don't have interrupts for link changes, so we need to poll */
++ qpcs->pcs.poll = true;
++ qpcs->priv = priv;
++ qpcs->port = port;
++}
++
++static void qca8k_ipq4019_phylink_get_caps(struct dsa_switch *ds, int port,
++ struct phylink_config *config)
++{
++ switch (port) {
++ case 0: /* CPU port */
++ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
++ config->supported_interfaces);
++ break;
++
++ case 1:
++ case 2:
++ case 3:
++ __set_bit(PHY_INTERFACE_MODE_PSGMII,
++ config->supported_interfaces);
++ break;
++ case 4:
++ case 5:
++ phy_interface_set_rgmii(config->supported_interfaces);
++ __set_bit(PHY_INTERFACE_MODE_PSGMII,
++ config->supported_interfaces);
++ break;
++ }
++
++ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
++ MAC_10 | MAC_100 | MAC_1000FD;
++
++ config->legacy_pre_march2020 = false;
++}
++
++static void
++qca8k_phylink_ipq4019_mac_link_down(struct dsa_switch *ds, int port,
++ unsigned int mode,
++ phy_interface_t interface)
++{
++ struct qca8k_priv *priv = ds->priv;
++
++ qca8k_port_set_status(priv, port, 0);
++}
++
++static void
++qca8k_phylink_ipq4019_mac_link_up(struct dsa_switch *ds, int port,
++ unsigned int mode, phy_interface_t interface,
++ struct phy_device *phydev, int speed,
++ int duplex, bool tx_pause, bool rx_pause)
++{
++ struct qca8k_priv *priv = ds->priv;
++ u32 reg;
++
++ if (phylink_autoneg_inband(mode)) {
++ reg = QCA8K_PORT_STATUS_LINK_AUTO;
++ } else {
++ switch (speed) {
++ case SPEED_10:
++ reg = QCA8K_PORT_STATUS_SPEED_10;
++ break;
++ case SPEED_100:
++ reg = QCA8K_PORT_STATUS_SPEED_100;
++ break;
++ case SPEED_1000:
++ reg = QCA8K_PORT_STATUS_SPEED_1000;
++ break;
++ default:
++ reg = QCA8K_PORT_STATUS_LINK_AUTO;
++ break;
++ }
++
++ if (duplex == DUPLEX_FULL)
++ reg |= QCA8K_PORT_STATUS_DUPLEX;
++
++ if (rx_pause || dsa_is_cpu_port(ds, port))
++ reg |= QCA8K_PORT_STATUS_RXFLOW;
++
++ if (tx_pause || dsa_is_cpu_port(ds, port))
++ reg |= QCA8K_PORT_STATUS_TXFLOW;
++ }
++
++ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
++
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
++}
++
++static int psgmii_vco_calibrate(struct qca8k_priv *priv)
++{
++ int val, ret;
++
++ if (!priv->psgmii_ethphy) {
++ dev_err(priv->dev, "PSGMII eth PHY missing, calibration failed!\n");
++ return -ENODEV;
++ }
++
++ /* Fix PSGMII RX 20bit */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
++ /* Reset PHY PSGMII */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x1b);
++ /* Release PHY PSGMII reset */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
++
++ /* Poll for VCO PLL calibration finish - Malibu(QCA8075) */
++ ret = phy_read_mmd_poll_timeout(priv->psgmii_ethphy,
++ MDIO_MMD_PMAPMD,
++ 0x28, val,
++ (val & BIT(0)),
++ 10000, 1000000,
++ false);
++ if (ret) {
++ dev_err(priv->dev, "QCA807x PSGMII VCO calibration PLL not ready\n");
++ return ret;
++ }
++ mdelay(50);
++
++ /* Freeze PSGMII RX CDR */
++ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x2230);
++
++ /* Start PSGMIIPHY VCO PLL calibration */
++ ret = regmap_set_bits(priv->psgmii,
++ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1,
++ PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART);
++
++ /* Poll for PSGMIIPHY PLL calibration finish - Dakota(IPQ40xx) */
++ ret = regmap_read_poll_timeout(priv->psgmii,
++ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2,
++ val, val & PSGMIIPHY_REG_PLL_VCO_CALIB_READY,
++ 10000, 1000000);
++ if (ret) {
++ dev_err(priv->dev, "IPQ PSGMIIPHY VCO calibration PLL not ready\n");
++ return ret;
++ }
++ mdelay(50);
++
++ /* Release PSGMII RX CDR */
++ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x3230);
++ /* Release PSGMII RX 20bit */
++ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5f);
++ mdelay(200);
++
++ return ret;
++}
++
++static void
++qca8k_switch_port_loopback_on_off(struct qca8k_priv *priv, int port, int on)
++{
++ u32 val = QCA8K_PORT_LOOKUP_LOOPBACK_EN;
++
++ if (on == 0)
++ val = 0;
++
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LOOPBACK_EN, val);
++}
++
++static int
++qca8k_wait_for_phy_link_state(struct phy_device *phy, int need_status)
++{
++ int a;
++ u16 status;
++
++ for (a = 0; a < 100; a++) {
++ status = phy_read(phy, MII_QCA8075_SSTATUS);
++ status &= QCA8075_PHY_SPEC_STATUS_LINK;
++ status = !!status;
++ if (status == need_status)
++ return 0;
++ mdelay(8);
++ }
++
++ return -1;
++}
++
++static void
++qca8k_phy_loopback_on_off(struct qca8k_priv *priv, struct phy_device *phy,
++ int sw_port, int on)
++{
++ if (on) {
++ phy_write(phy, MII_BMCR, BMCR_ANENABLE | BMCR_RESET);
++ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
++ qca8k_wait_for_phy_link_state(phy, 0);
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
++ phy_write(phy, MII_BMCR,
++ BMCR_SPEED1000 |
++ BMCR_FULLDPLX |
++ BMCR_LOOPBACK);
++ qca8k_wait_for_phy_link_state(phy, 1);
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port),
++ QCA8K_PORT_STATUS_SPEED_1000 |
++ QCA8K_PORT_STATUS_TXMAC |
++ QCA8K_PORT_STATUS_RXMAC |
++ QCA8K_PORT_STATUS_DUPLEX);
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
++ QCA8K_PORT_LOOKUP_STATE_FORWARD,
++ QCA8K_PORT_LOOKUP_STATE_FORWARD);
++ } else { /* off */
++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
++ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
++ QCA8K_PORT_LOOKUP_STATE_DISABLED,
++ QCA8K_PORT_LOOKUP_STATE_DISABLED);
++ phy_write(phy, MII_BMCR, BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_RESET);
++ /* turn off the power of the phys - so that unused
++ ports do not raise links */
++ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
++ }
++}
++
++static void
++qca8k_phy_pkt_gen_prep(struct qca8k_priv *priv, struct phy_device *phy,
++ int pkts_num, int on)
++{
++ if (on) {
++ /* enable CRC checker and packets counters */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT,
++ QCA8075_MMD7_CNT_FRAME_CHK_EN | QCA8075_MMD7_CNT_SELFCLR);
++ qca8k_wait_for_phy_link_state(phy, 1);
++ /* packet number */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, pkts_num);
++ /* pkt size - 1504 bytes + 20 bytes */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_SIZE, 1504);
++ } else { /* off */
++ /* packet number */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, 0);
++ /* disable CRC checker and packet counter */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
++ /* disable traffic gen */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL, 0);
++ }
++}
++
++static void
++qca8k_wait_for_phy_pkt_gen_fin(struct qca8k_priv *priv, struct phy_device *phy)
++{
++ int val;
++ /* wait for all traffic end: 4096(pkt num)*1524(size)*8ns(125MHz)=49938us */
++ phy_read_mmd_poll_timeout(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
++ val, !(val & QCA8075_MMD7_PKT_GEN_INPROGR),
++ 50000, 1000000, true);
++}
++
++static void
++qca8k_start_phy_pkt_gen(struct phy_device *phy)
++{
++ /* start traffic gen */
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
++ QCA8075_MMD7_PKT_GEN_START | QCA8075_MMD7_PKT_GEN_INPROGR);
++}
++
++static int
++qca8k_start_all_phys_pkt_gens(struct qca8k_priv *priv)
++{
++ struct phy_device *phy;
++ phy = phy_device_create(priv->bus, QCA8075_MDIO_BRDCST_PHY_ADDR,
++ 0, 0, NULL);
++ if (!phy) {
++ dev_err(priv->dev, "unable to create mdio broadcast PHY(0x%x)\n",
++ QCA8075_MDIO_BRDCST_PHY_ADDR);
++ return -ENODEV;
++ }
++
++ qca8k_start_phy_pkt_gen(phy);
++
++ phy_device_free(phy);
++ return 0;
++}
++
++static int
++qca8k_get_phy_pkt_gen_test_result(struct phy_device *phy, int pkts_num)
++{
++ u32 tx_ok, tx_error;
++ u32 rx_ok, rx_error;
++ u32 tx_ok_high16;
++ u32 rx_ok_high16;
++ u32 tx_all_ok, rx_all_ok;
++
++ /* check counters */
++ tx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_LO);
++ tx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_HI);
++ tx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_ERR_CNT);
++ rx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_LO);
++ rx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_HI);
++ rx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_ERR_CNT);
++ tx_all_ok = tx_ok + (tx_ok_high16 << 16);
++ rx_all_ok = rx_ok + (rx_ok_high16 << 16);
++
++ if (tx_all_ok < pkts_num)
++ return -1;
++ if(rx_all_ok < pkts_num)
++ return -2;
++ if(tx_error)
++ return -3;
++ if(rx_error)
++ return -4;
++ return 0; /* test is ok */
++}
++
++static
++void qca8k_phy_broadcast_write_on_off(struct qca8k_priv *priv,
++ struct phy_device *phy, int on)
++{
++ u32 val;
++
++ val = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE);
++
++ if (on == 0)
++ val &= ~QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
++ else
++ val |= QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
++
++ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE, val);
++}
++
++static int
++qca8k_test_dsa_port_for_errors(struct qca8k_priv *priv, struct phy_device *phy,
++ int port, int test_phase)
++{
++ int res = 0;
++ const int test_pkts_num = QCA8075_PKT_GEN_PKTS_COUNT;
++
++ if (test_phase == 1) { /* start test preps */
++ qca8k_phy_loopback_on_off(priv, phy, port, 1);
++ qca8k_switch_port_loopback_on_off(priv, port, 1);
++ qca8k_phy_broadcast_write_on_off(priv, phy, 1);
++ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 1);
++ } else if (test_phase == 2) {
++ /* wait for test results, collect it and cleanup */
++ qca8k_wait_for_phy_pkt_gen_fin(priv, phy);
++ res = qca8k_get_phy_pkt_gen_test_result(phy, test_pkts_num);
++ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 0);
++ qca8k_phy_broadcast_write_on_off(priv, phy, 0);
++ qca8k_switch_port_loopback_on_off(priv, port, 0);
++ qca8k_phy_loopback_on_off(priv, phy, port, 0);
++ }
++
++ return res;
++}
++
++static int
++qca8k_do_dsa_sw_ports_self_test(struct qca8k_priv *priv, int parallel_test)
++{
++ struct device_node *dn = priv->dev->of_node;
++ struct device_node *ports, *port;
++ struct device_node *phy_dn;
++ struct phy_device *phy;
++ int reg, err = 0, test_phase;
++ u32 tests_result = 0;
++
++ ports = of_get_child_by_name(dn, "ports");
++ if (!ports) {
++ dev_err(priv->dev, "no ports child node found\n");
++ return -EINVAL;
++ }
++
++ for (test_phase = 1; test_phase <= 2; test_phase++) {
++ if (parallel_test && test_phase == 2) {
++ err = qca8k_start_all_phys_pkt_gens(priv);
++ if (err)
++ goto error;
++ }
++ for_each_available_child_of_node(ports, port) {
++ err = of_property_read_u32(port, "reg", &reg);
++ if (err)
++ goto error;
++ if (reg >= QCA8K_NUM_PORTS) {
++ err = -EINVAL;
++ goto error;
++ }
++ phy_dn = of_parse_phandle(port, "phy-handle", 0);
++ if (phy_dn) {
++ phy = of_phy_find_device(phy_dn);
++ of_node_put(phy_dn);
++ if (phy) {
++ int result;
++ result = qca8k_test_dsa_port_for_errors(priv,
++ phy, reg, test_phase);
++ if (!parallel_test && test_phase == 1)
++ qca8k_start_phy_pkt_gen(phy);
++ put_device(&phy->mdio.dev);
++ if (test_phase == 2) {
++ tests_result <<= 1;
++ if (result)
++ tests_result |= 1;
++ }
++ }
++ }
++ }
++ }
++
++end:
++ of_node_put(ports);
++ qca8k_fdb_flush(priv);
++ return tests_result;
++error:
++ tests_result |= 0xf000;
++ goto end;
++}
++
++static int
++psgmii_vco_calibrate_and_test(struct dsa_switch *ds)
++{
++ int ret, a, test_result;
++ struct qca8k_priv *priv = ds->priv;
++
++ for (a = 0; a <= QCA8K_PSGMII_CALB_NUM; a++) {
++ ret = psgmii_vco_calibrate(priv);
++ if (ret)
++ return ret;
++ /* first we run serial test */
++ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 0);
++ /* and if it is ok then we run the test in parallel */
++ if (!test_result)
++ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 1);
++ if (!test_result) {
++ if (a > 0) {
++ dev_warn(priv->dev, "PSGMII work was stabilized after %d "
++ "calibration retries !\n", a);
++ }
++ return 0;
++ } else {
++ schedule();
++ if (a > 0 && a % 10 == 0) {
++ dev_err(priv->dev, "PSGMII work is unstable !!! "
++ "Let's try to wait a bit ... %d\n", a);
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(msecs_to_jiffies(a * 100));
++ }
++ }
++ }
++
++ panic("PSGMII work is unstable !!! "
++ "Repeated recalibration attempts did not help(0x%x) !\n",
++ test_result);
++
++ return -EFAULT;
++}
++
++static int
++ipq4019_psgmii_configure(struct dsa_switch *ds)
++{
++ struct qca8k_priv *priv = ds->priv;
++ int ret;
++
++ if (!priv->psgmii_calibrated) {
++ dev_info(ds->dev, "PSGMII calibration!\n");
++ ret = psgmii_vco_calibrate_and_test(ds);
++
++ ret = regmap_clear_bits(priv->psgmii, PSGMIIPHY_MODE_CONTROL,
++ PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M);
++ ret = regmap_write(priv->psgmii, PSGMIIPHY_TX_CONTROL,
++ PSGMIIPHY_TX_CONTROL_MAGIC_VALUE);
++
++ priv->psgmii_calibrated = true;
++
++ return ret;
++ }
++
++ return 0;
++}
++
++static void
++qca8k_phylink_ipq4019_mac_config(struct dsa_switch *ds, int port,
++ unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ struct qca8k_priv *priv = ds->priv;
++
++ switch (port) {
++ case 0:
++ /* CPU port, no configuration needed */
++ return;
++ case 1:
++ case 2:
++ case 3:
++ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
++ if (ipq4019_psgmii_configure(ds))
++ dev_err(ds->dev, "PSGMII configuration failed!\n");
++ return;
++ case 4:
++ case 5:
++ if (state->interface == PHY_INTERFACE_MODE_RGMII ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
++ state->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
++ regmap_set_bits(priv->regmap,
++ QCA8K_IPQ4019_REG_RGMII_CTRL,
++ QCA8K_IPQ4019_RGMII_CTRL_CLK);
++ }
++
++ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
++ if (ipq4019_psgmii_configure(ds))
++ dev_err(ds->dev, "PSGMII configuration failed!\n");
++ return;
++ default:
++ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
++ return;
++ }
++}
++
++static int
++qca8k_ipq4019_setup_port(struct dsa_switch *ds, int port)
++{
++ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
++ int ret;
++
++ /* CPU port gets connected to all user ports of the switch */
++ if (dsa_is_cpu_port(ds, port)) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
++ if (ret)
++ return ret;
++
++ /* Disable CPU ARP Auto-learning by default */
++ ret = regmap_clear_bits(priv->regmap,
++ QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LEARN);
++ if (ret)
++ return ret;
++ }
++
++ /* Individual user ports get connected to CPU port only */
++ if (dsa_is_user_port(ds, port)) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_MEMBER,
++ BIT(QCA8K_IPQ4019_CPU_PORT));
++ if (ret)
++ return ret;
++
++ /* Enable ARP Auto-learning by default */
++ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
++ QCA8K_PORT_LOOKUP_LEARN);
++ if (ret)
++ return ret;
++
++ /* For port based vlans to work we need to set the
++ * default egress vid
++ */
++ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
++ QCA8K_EGREES_VLAN_PORT_MASK(port),
++ QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
++ if (ret)
++ return ret;
++
++ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
++ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
++ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int
++qca8k_ipq4019_setup(struct dsa_switch *ds)
++{
++ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
++ int ret, i;
++
++ /* Make sure that port 0 is the cpu port */
++ if (!dsa_is_cpu_port(ds, QCA8K_IPQ4019_CPU_PORT)) {
++ dev_err(priv->dev, "port %d is not the CPU port",
++ QCA8K_IPQ4019_CPU_PORT);
++ return -EINVAL;
++ }
++
++ qca8k_ipq4019_setup_pcs(priv, &priv->pcs_port_0, 0);
++
++ /* Enable CPU Port */
++ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
++ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
++ if (ret) {
++ dev_err(priv->dev, "failed enabling CPU port");
++ return ret;
++ }
++
++ /* Enable MIB counters */
++ ret = qca8k_mib_init(priv);
++ if (ret)
++ dev_warn(priv->dev, "MIB init failed");
++
++ /* Disable forwarding by default on all ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
++ QCA8K_PORT_LOOKUP_MEMBER, 0);
++ if (ret)
++ return ret;
++ }
++
++ /* Enable QCA header mode on the CPU port */
++ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_IPQ4019_CPU_PORT),
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
++ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
++ if (ret) {
++ dev_err(priv->dev, "failed enabling QCA header mode");
++ return ret;
++ }
++
++ /* Disable MAC by default on all ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ if (dsa_is_user_port(ds, i))
++ qca8k_port_set_status(priv, i, 0);
++ }
++
++ /* Forward all unknown frames to CPU port for Linux processing */
++ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)) |
++ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(QCA8K_IPQ4019_CPU_PORT)));
++ if (ret)
++ return ret;
++
++ /* Setup connection between CPU port & user ports */
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++) {
++ ret = qca8k_ipq4019_setup_port(ds, i);
++ if (ret)
++ return ret;
++ }
++
++	/* Set up the port MTUs to match the power-on defaults */
++ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
++ if (ret)
++		dev_warn(priv->dev, "failed setting MTU");
++
++ /* Flush the FDB table */
++ qca8k_fdb_flush(priv);
++
++	/* Set the min and max ageing values supported */
++ ds->ageing_time_min = 7000;
++ ds->ageing_time_max = 458745000;
++
++ /* Set max number of LAGs supported */
++ ds->num_lag_ids = QCA8K_NUM_LAGS;
++
++	/* CPU port HW learning doesn't work correctly, so let DSA handle it */
++ ds->assisted_learning_on_cpu_port = true;
++
++ return 0;
++}
++
++static const struct dsa_switch_ops qca8k_ipq4019_switch_ops = {
++ .get_tag_protocol = qca8k_ipq4019_get_tag_protocol,
++ .setup = qca8k_ipq4019_setup,
++ .get_strings = qca8k_get_strings,
++ .get_ethtool_stats = qca8k_get_ethtool_stats,
++ .get_sset_count = qca8k_get_sset_count,
++ .set_ageing_time = qca8k_set_ageing_time,
++ .get_mac_eee = qca8k_get_mac_eee,
++ .set_mac_eee = qca8k_set_mac_eee,
++ .port_enable = qca8k_port_enable,
++ .port_disable = qca8k_port_disable,
++ .port_change_mtu = qca8k_port_change_mtu,
++ .port_max_mtu = qca8k_port_max_mtu,
++ .port_stp_state_set = qca8k_port_stp_state_set,
++ .port_bridge_join = qca8k_port_bridge_join,
++ .port_bridge_leave = qca8k_port_bridge_leave,
++ .port_fast_age = qca8k_port_fast_age,
++ .port_fdb_add = qca8k_port_fdb_add,
++ .port_fdb_del = qca8k_port_fdb_del,
++ .port_fdb_dump = qca8k_port_fdb_dump,
++ .port_mdb_add = qca8k_port_mdb_add,
++ .port_mdb_del = qca8k_port_mdb_del,
++ .port_mirror_add = qca8k_port_mirror_add,
++ .port_mirror_del = qca8k_port_mirror_del,
++ .port_vlan_filtering = qca8k_port_vlan_filtering,
++ .port_vlan_add = qca8k_port_vlan_add,
++ .port_vlan_del = qca8k_port_vlan_del,
++ .phylink_mac_select_pcs = qca8k_ipq4019_phylink_mac_select_pcs,
++ .phylink_get_caps = qca8k_ipq4019_phylink_get_caps,
++ .phylink_mac_config = qca8k_phylink_ipq4019_mac_config,
++ .phylink_mac_link_down = qca8k_phylink_ipq4019_mac_link_down,
++ .phylink_mac_link_up = qca8k_phylink_ipq4019_mac_link_up,
++ .port_lag_join = qca8k_port_lag_join,
++ .port_lag_leave = qca8k_port_lag_leave,
++};
++
++static const struct qca8k_match_data ipq4019 = {
++ .id = QCA8K_ID_IPQ4019,
++ .mib_count = QCA8K_QCA833X_MIB_COUNT,
++};
++
++static int
++qca8k_ipq4019_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct qca8k_priv *priv;
++ void __iomem *base, *psgmii;
++ struct device_node *np = dev->of_node, *mdio_np, *psgmii_ethphy_np;
++ int ret;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ priv->dev = dev;
++ priv->info = &ipq4019;
++
++ /* Start by setting up the register mapping */
++ base = devm_platform_ioremap_resource_byname(pdev, "base");
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ priv->regmap = devm_regmap_init_mmio(dev, base,
++ &qca8k_ipq4019_regmap_config);
++ if (IS_ERR(priv->regmap)) {
++ ret = PTR_ERR(priv->regmap);
++ dev_err(dev, "base regmap initialization failed, %d\n", ret);
++ return ret;
++ }
++
++ psgmii = devm_platform_ioremap_resource_byname(pdev, "psgmii_phy");
++ if (IS_ERR(psgmii))
++ return PTR_ERR(psgmii);
++
++ priv->psgmii = devm_regmap_init_mmio(dev, psgmii,
++ &qca8k_ipq4019_psgmii_phy_regmap_config);
++ if (IS_ERR(priv->psgmii)) {
++ ret = PTR_ERR(priv->psgmii);
++ dev_err(dev, "PSGMII regmap initialization failed, %d\n", ret);
++ return ret;
++ }
++
++ mdio_np = of_parse_phandle(np, "mdio", 0);
++ if (!mdio_np) {
++ dev_err(dev, "unable to get MDIO bus phandle\n");
++ return -EINVAL;
++ }
++
++ priv->bus = of_mdio_find_bus(mdio_np);
++ of_node_put(mdio_np);
++ if (!priv->bus) {
++ dev_err(dev, "unable to find MDIO bus\n");
++ return -EPROBE_DEFER;
++ }
++
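++	/* The psgmii-ethphy phandle is optional; probing continues without it */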
++ psgmii_ethphy_np = of_parse_phandle(np, "psgmii-ethphy", 0);
++	if (!psgmii_ethphy_np)
++		dev_dbg(dev, "unable to get PSGMII eth PHY phandle\n");
++
++ if (psgmii_ethphy_np) {
++ priv->psgmii_ethphy = of_phy_find_device(psgmii_ethphy_np);
++ of_node_put(psgmii_ethphy_np);
++ if (!priv->psgmii_ethphy) {
++ dev_err(dev, "unable to get PSGMII eth PHY\n");
++ return -ENODEV;
++ }
++ }
++
++ /* Check the detected switch id */
++ ret = qca8k_read_switch_id(priv);
++ if (ret)
++ return ret;
++
++ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
++ if (!priv->ds)
++ return -ENOMEM;
++
++ priv->ds->dev = dev;
++ priv->ds->num_ports = QCA8K_IPQ4019_NUM_PORTS;
++ priv->ds->priv = priv;
++ priv->ds->ops = &qca8k_ipq4019_switch_ops;
++ mutex_init(&priv->reg_mutex);
++ platform_set_drvdata(pdev, priv);
++
++ return dsa_register_switch(priv->ds);
++}
++
++static int
++qca8k_ipq4019_remove(struct platform_device *pdev)
++{
++ struct qca8k_priv *priv = dev_get_drvdata(&pdev->dev);
++ int i;
++
++ if (!priv)
++ return 0;
++
++ for (i = 0; i < QCA8K_IPQ4019_NUM_PORTS; i++)
++ qca8k_port_set_status(priv, i, 0);
++
++ dsa_unregister_switch(priv->ds);
++
++ platform_set_drvdata(pdev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id qca8k_ipq4019_of_match[] = {
++ { .compatible = "qca,ipq4019-qca8337n", },
++ { /* sentinel */ },
++};
++
++static struct platform_driver qca8k_ipq4019_driver = {
++ .probe = qca8k_ipq4019_probe,
++ .remove = qca8k_ipq4019_remove,
++ .driver = {
++ .name = "qca8k-ipq4019",
++ .of_match_table = qca8k_ipq4019_of_match,
++ },
++};
++
++module_platform_driver(qca8k_ipq4019_driver);
++
++MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
++MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>, Robert Marko <robert.marko@sartura.hr>");
++MODULE_DESCRIPTION("Qualcomm IPQ4019 built-in switch driver");
++MODULE_LICENSE("GPL");
+--- a/drivers/net/dsa/qca/qca8k.h
++++ b/drivers/net/dsa/qca/qca8k.h
+@@ -19,7 +19,10 @@
+ #define QCA8K_ETHERNET_TIMEOUT 5
+
+ #define QCA8K_NUM_PORTS 7
++#define QCA8K_IPQ4019_NUM_PORTS 6
+ #define QCA8K_NUM_CPU_PORTS 2
++#define QCA8K_IPQ4019_NUM_CPU_PORTS 1
++#define QCA8K_IPQ4019_CPU_PORT 0
+ #define QCA8K_MAX_MTU 9000
+ #define QCA8K_NUM_LAGS 4
+ #define QCA8K_NUM_PORTS_FOR_LAG 4
+@@ -28,6 +31,7 @@
+ #define QCA8K_ID_QCA8327 0x12
+ #define PHY_ID_QCA8337 0x004dd036
+ #define QCA8K_ID_QCA8337 0x13
++#define QCA8K_ID_IPQ4019 0x14
+
+ #define QCA8K_QCA832X_MIB_COUNT 39
+ #define QCA8K_QCA833X_MIB_COUNT 41
+@@ -263,6 +267,7 @@
+ #define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+ #define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+ #define QCA8K_PORT_LOOKUP_LEARN BIT(20)
++#define QCA8K_PORT_LOOKUP_LOOPBACK_EN BIT(21)
+ #define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+ #define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+@@ -339,6 +344,53 @@
+ #define MII_ATH_MMD_ADDR 0x0d
+ #define MII_ATH_MMD_DATA 0x0e
+
++/* IPQ4019 PSGMII PHY registers */
++#define QCA8K_IPQ4019_REG_RGMII_CTRL 0x004
++#define QCA8K_IPQ4019_RGMII_CTRL_RGMII_RXC GENMASK(1, 0)
++#define QCA8K_IPQ4019_RGMII_CTRL_RGMII_TXC GENMASK(9, 8)
++/* Some kind of CLK selection
++ * 0: gcc_ess_dly2ns
++ * 1: gcc_ess_clk
++ */
++#define QCA8K_IPQ4019_RGMII_CTRL_CLK BIT(10)
++#define QCA8K_IPQ4019_RGMII_CTRL_DELAY_RMII0 GENMASK(17, 16)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII0_REF_CLK BIT(18)
++#define QCA8K_IPQ4019_RGMII_CTRL_DELAY_RMII1 GENMASK(20, 19)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII1_REF_CLK BIT(21)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII0_MASTER_EN BIT(24)
++#define QCA8K_IPQ4019_RGMII_CTRL_INVERT_RMII1_MASTER_EN BIT(25)
++
++#define PSGMIIPHY_MODE_CONTROL 0x1b4
++#define PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M BIT(0)
++#define PSGMIIPHY_TX_CONTROL 0x288
++#define PSGMIIPHY_TX_CONTROL_MAGIC_VALUE 0x8380
++#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1 0x9c
++#define PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART BIT(14)
++#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2 0xa0
++#define PSGMIIPHY_REG_PLL_VCO_CALIB_READY BIT(0)
++
++#define QCA8K_PSGMII_CALB_NUM 100
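++/* QCA8075 PHY status and MMD7 packet generator/frame checker registers
++ * used by the PSGMII calibration self-test
++ */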
++#define MII_QCA8075_SSTATUS 0x11
++#define QCA8075_PHY_SPEC_STATUS_LINK BIT(10)
++#define QCA8075_MMD7_CRC_AND_PKTS_COUNT 0x8029
++#define QCA8075_MMD7_PKT_GEN_PKT_NUMB 0x8021
++#define QCA8075_MMD7_PKT_GEN_PKT_SIZE 0x8062
++#define QCA8075_MMD7_PKT_GEN_CTRL 0x8020
++#define QCA8075_MMD7_CNT_SELFCLR BIT(1)
++#define QCA8075_MMD7_CNT_FRAME_CHK_EN BIT(0)
++#define QCA8075_MMD7_PKT_GEN_START BIT(13)
++#define QCA8075_MMD7_PKT_GEN_INPROGR BIT(15)
++#define QCA8075_MMD7_IG_FRAME_RECV_CNT_HI 0x802a
++#define QCA8075_MMD7_IG_FRAME_RECV_CNT_LO 0x802b
++#define QCA8075_MMD7_IG_FRAME_ERR_CNT 0x802c
++#define QCA8075_MMD7_EG_FRAME_RECV_CNT_HI 0x802d
++#define QCA8075_MMD7_EG_FRAME_RECV_CNT_LO 0x802e
++#define QCA8075_MMD7_EG_FRAME_ERR_CNT 0x802f
++#define QCA8075_MMD7_MDIO_BRDCST_WRITE 0x8028
++#define QCA8075_MMD7_MDIO_BRDCST_WRITE_EN BIT(15)
++#define QCA8075_MDIO_BRDCST_PHY_ADDR 0x1f
++#define QCA8075_PKT_GEN_PKTS_COUNT 4096
++
+ enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+@@ -467,6 +519,10 @@ struct qca8k_priv {
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
+ struct qca8k_led ports_led[QCA8K_LED_COUNT];
++ /* IPQ4019 specific */
++ struct regmap *psgmii;
++ struct phy_device *psgmii_ethphy;
++ bool psgmii_calibrated;
+ };
+
+ struct qca8k_mib_desc {
--- /dev/null
+From 19c507c3fe4a6fc60317dcae2c55de452aecb7d5 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Mon, 1 Nov 2021 18:15:04 +0100
+Subject: [PATCH] arm: dts: ipq4019: add switch node
+
+Since the built-in IPQ40xx switch now has a driver, add the required node
+for it to work.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 76 +++++++++++++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -591,6 +591,82 @@
+ status = "disabled";
+ };
+
++ switch: switch@c000000 {
++ compatible = "qca,ipq4019-qca8337n";
++ reg = <0xc000000 0x80000>, <0x98000 0x800>;
++ reg-names = "base", "psgmii_phy";
++ resets = <&gcc ESS_PSGMII_ARES>;
++ reset-names = "psgmii_rst";
++ mdio = <&mdio>;
++ psgmii-ethphy = <&psgmiiphy>;
++
++ status = "disabled";
++
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
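++				/* MAC0 is the CPU connection to the EDMA (gmac);
++				 * MAC1-MAC5 connect to the ethphy0-ethphy4 PHYs.
++				 */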
++ port@0 { /* MAC0 */
++ reg = <0>;
++ label = "cpu";
++ ethernet = <&gmac>;
++ phy-mode = "internal";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ asym-pause;
++ };
++ };
++
++ swport1: port@1 { /* MAC1 */
++ reg = <1>;
++ label = "lan1";
++ phy-handle = <ðphy0>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport2: port@2 { /* MAC2 */
++ reg = <2>;
++ label = "lan2";
++ phy-handle = <ðphy1>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport3: port@3 { /* MAC3 */
++ reg = <3>;
++ label = "lan3";
++ phy-handle = <ðphy2>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport4: port@4 { /* MAC4 */
++ reg = <4>;
++ label = "lan4";
++ phy-handle = <ðphy3>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++
++ swport5: port@5 { /* MAC5 */
++ reg = <5>;
++ label = "wan";
++ phy-handle = <ðphy4>;
++ phy-mode = "psgmii";
++
++ status = "disabled";
++ };
++ };
++ };
++
+ gmac: ethernet@c080000 {
+ compatible = "qcom,ipq4019-ess-edma";
+ reg = <0xc080000 0x8000>;
+++ /dev/null
-From dfba344da3657ee4d91e7ebd1217e93423d03624 Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Thu, 1 Oct 2020 15:05:35 +0200
-Subject: [PATCH] dt-bindings: net: add QCA807x PHY
-
-Add DT bindings for Qualcomm QCA807x PHY series.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- include/dt-bindings/net/qcom-qca807x.h | 45 ++++++++++++++++++++++++++
- 1 file changed, 45 insertions(+)
- create mode 100644 include/dt-bindings/net/qcom-qca807x.h
-
---- /dev/null
-+++ b/include/dt-bindings/net/qcom-qca807x.h
-@@ -0,0 +1,45 @@
-+/* SPDX-License-Identifier: GPL-2.0-or-later */
-+/*
-+ * Device Tree constants for the Qualcomm QCA807X PHYs
-+ */
-+
-+#ifndef _DT_BINDINGS_QCOM_QCA807X_H
-+#define _DT_BINDINGS_QCOM_QCA807X_H
-+
-+#define PSGMII_QSGMII_TX_DRIVER_140MV 0
-+#define PSGMII_QSGMII_TX_DRIVER_160MV 1
-+#define PSGMII_QSGMII_TX_DRIVER_180MV 2
-+#define PSGMII_QSGMII_TX_DRIVER_200MV 3
-+#define PSGMII_QSGMII_TX_DRIVER_220MV 4
-+#define PSGMII_QSGMII_TX_DRIVER_240MV 5
-+#define PSGMII_QSGMII_TX_DRIVER_260MV 6
-+#define PSGMII_QSGMII_TX_DRIVER_280MV 7
-+#define PSGMII_QSGMII_TX_DRIVER_300MV 8
-+#define PSGMII_QSGMII_TX_DRIVER_320MV 9
-+#define PSGMII_QSGMII_TX_DRIVER_400MV 10
-+#define PSGMII_QSGMII_TX_DRIVER_500MV 11
-+/* Default value */
-+#define PSGMII_QSGMII_TX_DRIVER_600MV 12
-+
-+/* Full amplitude, full bias current */
-+#define QCA807X_CONTROL_DAC_FULL_VOLT_BIAS 0
-+/* Amplitude follow DSP (amplitude is adjusted based on cable length), half bias current */
-+#define QCA807X_CONTROL_DAC_DSP_VOLT_HALF_BIAS 1
-+/* Full amplitude, bias current follow DSP (bias current is adjusted based on cable length) */
-+#define QCA807X_CONTROL_DAC_FULL_VOLT_DSP_BIAS 2
-+/* Both amplitude and bias current follow DSP */
-+#define QCA807X_CONTROL_DAC_DSP_VOLT_BIAS 3
-+/* Full amplitude, half bias current */
-+#define QCA807X_CONTROL_DAC_FULL_VOLT_HALF_BIAS 4
-+/* Amplitude follow DSP setting; 1/4 bias current when cable<10m,
-+ * otherwise half bias current
-+ */
-+#define QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS 5
-+/* Full amplitude; same bias current setting with “010” and “011”,
-+ * but half more bias is reduced when cable <10m
-+ */
-+#define QCA807X_CONTROL_DAC_FULL_VOLT_HALF_BIAS_SHORT 6
-+/* Amplitude follow DSP; same bias current setting with “110”, default value */
-+#define QCA807X_CONTROL_DAC_DSP_VOLT_HALF_BIAS_SHORT 7
-+
-+#endif
--- /dev/null
+From 96eb388c082bd0086b128d82def9daaab1617951 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Thu, 1 Oct 2020 15:05:35 +0200
+Subject: [PATCH] dt-bindings: net: add QCA807x PHY
+
+Add DT bindings for Qualcomm QCA807x PHY series.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ include/dt-bindings/net/qcom-qca807x.h | 45 ++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+ create mode 100644 include/dt-bindings/net/qcom-qca807x.h
+
+--- /dev/null
++++ b/include/dt-bindings/net/qcom-qca807x.h
+@@ -0,0 +1,45 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Device Tree constants for the Qualcomm QCA807X PHYs
++ */
++
++#ifndef _DT_BINDINGS_QCOM_QCA807X_H
++#define _DT_BINDINGS_QCOM_QCA807X_H
++
++#define PSGMII_QSGMII_TX_DRIVER_140MV 0
++#define PSGMII_QSGMII_TX_DRIVER_160MV 1
++#define PSGMII_QSGMII_TX_DRIVER_180MV 2
++#define PSGMII_QSGMII_TX_DRIVER_200MV 3
++#define PSGMII_QSGMII_TX_DRIVER_220MV 4
++#define PSGMII_QSGMII_TX_DRIVER_240MV 5
++#define PSGMII_QSGMII_TX_DRIVER_260MV 6
++#define PSGMII_QSGMII_TX_DRIVER_280MV 7
++#define PSGMII_QSGMII_TX_DRIVER_300MV 8
++#define PSGMII_QSGMII_TX_DRIVER_320MV 9
++#define PSGMII_QSGMII_TX_DRIVER_400MV 10
++#define PSGMII_QSGMII_TX_DRIVER_500MV 11
++/* Default value */
++#define PSGMII_QSGMII_TX_DRIVER_600MV 12
++
++/* Full amplitude, full bias current */
++#define QCA807X_CONTROL_DAC_FULL_VOLT_BIAS 0
++/* Amplitude follow DSP (amplitude is adjusted based on cable length), half bias current */
++#define QCA807X_CONTROL_DAC_DSP_VOLT_HALF_BIAS 1
++/* Full amplitude, bias current follow DSP (bias current is adjusted based on cable length) */
++#define QCA807X_CONTROL_DAC_FULL_VOLT_DSP_BIAS 2
++/* Both amplitude and bias current follow DSP */
++#define QCA807X_CONTROL_DAC_DSP_VOLT_BIAS 3
++/* Full amplitude, half bias current */
++#define QCA807X_CONTROL_DAC_FULL_VOLT_HALF_BIAS 4
++/* Amplitude follow DSP setting; 1/4 bias current when cable<10m,
++ * otherwise half bias current
++ */
++#define QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS 5
++/* Full amplitude; same bias current setting with “010” and “011”,
++ * but half more bias is reduced when cable <10m
++ */
++#define QCA807X_CONTROL_DAC_FULL_VOLT_HALF_BIAS_SHORT 6
++/* Amplitude follow DSP; same bias current setting with “110”, default value */
++#define QCA807X_CONTROL_DAC_DSP_VOLT_HALF_BIAS_SHORT 7
++
++#endif
+++ /dev/null
-From 95179775debad6b0bdab241d4a305135fc5e67dc Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Fri, 9 Sep 2022 23:44:42 +0200
-Subject: [PATCH] net: phy: Add Qualcom QCA807x driver
-
-This adds driver for the Qualcomm QCA8072 and QCA8075 PHY-s.
-
-They are 2 or 5 port IEEE 802.3 clause 22 compliant 10BASE-Te, 100BASE-TX and 1000BASE-T PHY-s.
-
-They feature 2 SerDes, one for PSGMII or QSGMII connection with MAC, while second one is SGMII for connection to MAC or fiber.
-
-Both models have a combo port that supports 1000BASE-X and 100BASE-FX fiber.
-
-Each PHY inside of QCA807x series has 4 digitally controlled output only pins that natively drive LED-s.
-But some vendors used these to driver generic LED-s controlled by userspace,
-so lets enable registering each PHY as GPIO controller and add driver for it.
-
-These are commonly used in Qualcomm IPQ40xx, IPQ60xx and IPQ807x boards.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- drivers/net/phy/Kconfig | 7 +++++++
- drivers/net/phy/Makefile | 1 +
- 2 files changed, 8 insertions(+)
-
---- a/drivers/net/phy/Kconfig
-+++ b/drivers/net/phy/Kconfig
-@@ -362,6 +362,13 @@ config AT803X_PHY
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
-
-+config QCA807X_PHY
-+ tristate "Qualcomm QCA807x PHYs"
-+ depends on OF_MDIO
-+ help
-+ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
-+ control PHY.
-+
- config QSEMI_PHY
- tristate "Quality Semiconductor PHYs"
- help
---- a/drivers/net/phy/Makefile
-+++ b/drivers/net/phy/Makefile
-@@ -94,6 +94,7 @@ obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm
- obj-$(CONFIG_NATIONAL_PHY) += national.o
- obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
- obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
-+obj-$(CONFIG_QCA807X_PHY) += qca807x.o
- obj-$(CONFIG_QSEMI_PHY) += qsemi.o
- obj-$(CONFIG_REALTEK_PHY) += realtek.o
- obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
+++ /dev/null
-From 03ebcd291974e4ca5d77026111f63309bc708326 Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Fri, 2 Oct 2020 10:43:26 +0200
-Subject: [PATCH] arm: dts: ipq4019: QCA807x properties
-
-This adds necessary DT properties for QCA807x PHY-s to IPQ4019 DTSI.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- arch/arm/boot/dts/qcom-ipq4019.dtsi | 17 +++++++++++++++++
- 1 file changed, 17 insertions(+)
-
---- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
-+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
-@@ -8,6 +8,7 @@
- #include <dt-bindings/clock/qcom,gcc-ipq4019.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/interrupt-controller/irq.h>
-+#include <dt-bindings/net/qcom-qca807x.h>
-
- / {
- #address-cells = <1>;
-@@ -724,22 +725,38 @@
-
- ethphy0: ethernet-phy@0 {
- reg = <0>;
-+
-+ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
- };
-
- ethphy1: ethernet-phy@1 {
- reg = <1>;
-+
-+ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
- };
-
- ethphy2: ethernet-phy@2 {
- reg = <2>;
-+
-+ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
- };
-
- ethphy3: ethernet-phy@3 {
- reg = <3>;
-+
-+ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
- };
-
- ethphy4: ethernet-phy@4 {
- reg = <4>;
-+
-+ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
-+ };
-+
-+ psgmiiphy: psgmii-phy@5 {
-+ reg = <5>;
-+
-+ qcom,tx-driver-strength = <PSGMII_QSGMII_TX_DRIVER_300MV>;
- };
- };
-
--- /dev/null
+From 876bb5b69c1e083be526c0ea261982d5eb78556f Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Fri, 9 Sep 2022 23:44:42 +0200
+Subject: [PATCH] net: phy: Add Qualcomm QCA807x driver
+
+This adds a driver for the Qualcomm QCA8072 and QCA8075 PHYs.
+
+They are 2- or 5-port IEEE 802.3 Clause 22 compliant 10BASE-Te, 100BASE-TX and 1000BASE-T PHYs.
+
+They feature two SerDes interfaces: one for the PSGMII or QSGMII connection to the MAC, while the second one is SGMII for connection to a MAC or fiber.
+
+Both models have a combo port that supports 1000BASE-X and 100BASE-FX fiber.
+
+Each PHY inside the QCA807x series has 4 digitally controlled output-only pins that natively drive LEDs.
+But some vendors used these to drive generic LEDs controlled by userspace,
+so let's enable registering each PHY as a GPIO controller and add a driver for it.
+
+These are commonly used in Qualcomm IPQ40xx, IPQ60xx and IPQ807x boards.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ drivers/net/phy/Kconfig | 7 +++++++
+ drivers/net/phy/Makefile | 1 +
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -369,6 +369,13 @@ config AT803X_PHY
+ Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
+ QCA8337(Internal qca8k PHY) model
+
++config QCA807X_PHY
++ tristate "Qualcomm QCA807x PHYs"
++ depends on OF_MDIO
++ help
++ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
++ control PHY.
++
+ config QSEMI_PHY
+ tristate "Quality Semiconductor PHYs"
+ help
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -94,6 +94,7 @@ obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm
+ obj-$(CONFIG_NATIONAL_PHY) += national.o
+ obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
+ obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
++obj-$(CONFIG_QCA807X_PHY) += qca807x.o
+ obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+ obj-$(CONFIG_REALTEK_PHY) += realtek.o
+ obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
--- /dev/null
+From 79b38b9f85da868ca59b66715c20aa55104b640b Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Fri, 2 Oct 2020 10:43:26 +0200
+Subject: [PATCH] arm: dts: ipq4019: QCA807x properties
+
+This adds necessary DT properties for QCA807x PHY-s to IPQ4019 DTSI.
+
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+---
+ arch/arm/boot/dts/qcom-ipq4019.dtsi | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -8,6 +8,7 @@
+ #include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
++#include <dt-bindings/net/qcom-qca807x.h>
+
+ / {
+ #address-cells = <1>;
+@@ -724,22 +725,38 @@
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
++
++ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
++
++ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
++
++ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
+ };
+
+ ethphy3: ethernet-phy@3 {
+ reg = <3>;
++
++ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
+ };
+
+ ethphy4: ethernet-phy@4 {
+ reg = <4>;
++
++ qcom,control-dac = <QCA807X_CONTROL_DAC_DSP_VOLT_QUARTER_BIAS>;
++ };
++
++ psgmiiphy: psgmii-phy@5 {
++ reg = <5>;
++
++ qcom,tx-driver-strength = <PSGMII_QSGMII_TX_DRIVER_300MV>;
+ };
+ };
+
--- /dev/null
+From d0055b03d9c8d48ad2b971821989b09ba95c39f8 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <ansuelsmth@gmail.com>
+Date: Sun, 17 Sep 2023 20:18:31 +0200
+Subject: [PATCH] net: qualcomm: ipqess: fix TX timeout errors
+
+Currently, the logic to handle NAPI TX completion is flawed and, under
+sustained load, causes TX timeout errors with the queue no longer able
+to handle any new packets.
+
+There are 2 main causes of this:
+- incrementing the packet done value wrongly
+- handling 2 times the tx_ring tail
+
+ipqess_tx_unmap_and_free may return 2 kinds of values:
+- 0: we are handling first and middle descriptor for the packet
+- packet len: we are at the last descriptor for the packet
+
+The done value was wrongly incremented also for the first and intermediate
+descriptors of a packet, causing panics and TX timeouts by communicating
+to the kernel an inconsistent number of handled packets that did not match
+the expected one.
+
+The tx_ring tail was handled twice per ipqess_tx_complete run, again
+resulting in the done value being incremented wrongly and also in problems
+with idx handling, as descriptors for some packets were actually skipped.
+
+Rework the loop logic to fix these 2 problems and also add some comments
+to make the ipqess_tx_unmap_and_free return value easier to understand.
+
+Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
+---
+ drivers/net/ethernet/qualcomm/ipqess/ipqess.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
++++ b/drivers/net/ethernet/qualcomm/ipqess/ipqess.c
+@@ -453,13 +453,22 @@ static int ipqess_tx_complete(struct ipq
+ tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
+ tail &= IPQESS_TPD_CONS_IDX_MASK;
+
+- do {
++ while ((tx_ring->tail != tail) && (done < budget)) {
+ ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
+ &tx_ring->buf[tx_ring->tail]);
+- tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++		/* ipqess_tx_unmap_and_free may return 2 kinds of values:
++		 * - 0: we are handling a first or middle descriptor of the packet
++		 * - packet len: we are at the last descriptor of the packet
++		 * Increment the total bytes handled and the packets done count
++		 * only when handling the last descriptor of the packet.
++		 */
++ if (ret) {
++ total += ret;
++ done++;
++ }
+
+- total += ret;
+- } while ((++done < budget) && (tx_ring->tail != tail));
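++		/* The tail advances once per descriptor, regardless of whether
++		 * it was the last descriptor of a packet.
++		 */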
++ tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
++	}
+
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
+ tx_ring->tail);