--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012, 2020-2021 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ * Copyright (c) 2021 Robert Marko <robert.marko@sartura.hr>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#include "qca8k-ipq4019.h"
+
+/* Build one qca8k_mib_desc initializer: _s = counter width in 32-bit words,
+ * _o = byte offset within a port's MIB counter window, _n = ethtool name.
+ */
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+/* Hardware MIB counter layout (AR8327-compatible); offsets are relative to
+ * each port's MIB base, see QCA8K_PORT_MIB_COUNTER() users below.
+ */
+static const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXunicast"),
+};
+
+/* Read a 32-bit switch register; returns 0 on success or a negative errno. */
+static int
+qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+/* Write a 32-bit switch register; returns 0 on success or a negative errno. */
+static int
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+/* Read-modify-write: update only the bits of @mask with @write_val. */
+static int
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+/* Set the bits of @val in @reg, leaving all other bits untouched. */
+static int
+qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_set_bits(priv->regmap, reg, val);
+}
+
+/* Clear the bits of @val in @reg, leaving all other bits untouched. */
+static int
+qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_clear_bits(priv->regmap, reg, val);
+}
+
+/* Register windows that may be read through the switch regmap. */
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+/* Access table restricting regmap reads to the ranges listed above. */
+static const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+/* Regmap description of the memory-mapped switch register space.
+ * NOTE(review): not const — presumably so the core can adjust it at
+ * registration time; confirm whether it could be constified.
+ */
+static struct regmap_config qca8k_ipq4019_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .rd_table = &qca8k_readable_table,
+};
+
+/* Regmap description of the separate PSGMII PHY register block. */
+static struct regmap_config qca8k_ipq4019_psgmii_phy_regmap_config = {
+ .name = "psgmii-phy",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x7fc,
+};
+
+/* Poll @reg until the bits in @mask clear, up to QCA8K_BUSY_WAIT_TIMEOUT.
+ * Returns 0 on success or a negative error (-ETIMEDOUT on expiry).
+ */
+static int
+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val,
+ !(val & mask),
+ 0,
+ QCA8K_BUSY_WAIT_TIMEOUT);
+}
+
+/* Read the ATU_DATA0..3 register snapshot and unpack it into @fdb.
+ * Bit positions (vid 83:72, aging 67:64, portmask 54:48, mac 47:0) follow
+ * the per-field comments below. Returns 0 or a negative read error.
+ */
+static int
+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[4], val;
+ int i, ret;
+
+ /* load the ARL table into an array */
+ for (i = 0; i < 4; i++) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
+ if (ret < 0)
+ return ret;
+
+ reg[i] = val;
+ }
+
+ /* vid - 83:72 */
+ fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+ /* aging - 67:64 */
+ fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+ /* mac - 47:0 */
+ fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
+ fdb->mac[1] = reg[1] & 0xff;
+ fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
+ fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
+ fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
+ fdb->mac[5] = reg[0] & 0xff;
+
+ return 0;
+}
+
+/* Pack an FDB entry into the ATU_DATA0..2 registers (inverse of
+ * qca8k_fdb_read); the actual table command is issued separately by
+ * qca8k_fdb_access().
+ */
+static void
+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
+ u8 aging)
+{
+ u32 reg[3] = { 0 };
+ int i;
+
+ /* vid - 83:72 */
+ reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ /* aging - 67:64 */
+ reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ /* mac - 47:0 */
+ reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
+ reg[1] |= mac[1];
+ reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
+ reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
+ reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
+ reg[0] |= mac[5];
+
+ /* load the array into the ARL table */
+ for (i = 0; i < 3; i++)
+ qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+}
+
+/* Issue an ARL (FDB) table command and wait for it to complete.
+ * @port >= 0 restricts the operation to that port via PORT_EN.
+ * Returns 0 on success, -ENOMEM when a LOAD hits a full table, or a
+ * negative error from the register access / busy-wait.
+ */
+static int
+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ /* Return a real errno instead of a bare -1 so callers can
+ * propagate it; matches qca8k_vlan_access() below.
+ */
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Advance the ARL iterator: write the current cursor entry, issue NEXT
+ * (optionally scoped to @port) and read the following entry back into @fdb.
+ */
+static int
+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+/* Add (LOAD) one FDB entry under the register mutex. */
+static int
+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
+ u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+/* Delete (PURGE) one FDB entry under the register mutex; aging 0 marks
+ * the entry invalid before the purge command.
+ */
+static int
+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+/* Flush the entire FDB table; the result is intentionally ignored. */
+static void
+qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+/* Issue a VTU (VLAN table) command for @vid and wait for completion.
+ * Returns 0 on success, -ENOMEM when a LOAD hits a full table, or a
+ * negative error from the register access / busy-wait.
+ */
+static int
+qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Add @port as a member of @vid, tagged or untagged on egress.
+ * Reads the current VTU entry, updates only this port's egress mode and
+ * loads the entry back. Returns 0 or a negative error.
+ */
+static int
+qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+/* Remove @port from @vid. If no other port remains a member, the whole
+ * VTU entry is purged; otherwise the updated entry is loaded back.
+ * Returns 0 or a negative error.
+ */
+static int
+qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ /* Use the named field mask instead of a magic '3' (see
+ * qca8k_vlan_add() above).
+ */
+ reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
+ QCA8K_VTU_FUNC0_EG_MODE_S(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
+ mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+/* Reset the MIB counters, keep CPU-port counting enabled and switch the
+ * MIB module on. Returns 0 or the first failing register access error.
+ */
+static int
+qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+/* Enable or disable a port's TX/RX MACs; non-CPU ports additionally get
+ * link auto-negotiation toggled.
+ */
+static void
+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Port 0 is internally connected to the CPU
+ * TODO: Probably check for RGMII as well if it doesn't work
+ * in RGMII mode.
+ */
+ if (port > QCA8K_CPU_PORT)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+/* Per-port initial configuration: the CPU port is made a member of all
+ * user ports (with HW learning off), each user port is isolated to the
+ * CPU port (with HW learning on) and gets the default egress VID/PVID.
+ * Returns 0 or the first failing register access error.
+ */
+static int
+qca8k_setup_port(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int ret;
+
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, port)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+
+ /* Disable CPU ARP Auto-learning by default */
+ ret = qca8k_reg_clear(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, port)) {
+ /* Two ports share one EGRESS_VLAN register, 16 bits each */
+ int shift = 16 * (port % 2);
+
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(QCA8K_CPU_PORT));
+ if (ret)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+ ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ 0xfff << shift,
+ QCA8K_PORT_VID_DEF << shift);
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Main DSA setup callback: bring up the CPU port, MIB counters, QCA tag
+ * header, port isolation, default MTUs and FDB state. Returns 0 or the
+ * first fatal error; MIB/MTU failures are only warned about.
+ */
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int ret, i;
+
+ /* Make sure that port 0 is the cpu port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ dev_err(priv->dev, "port 0 is not the CPU port");
+ return -EINVAL;
+ }
+
+ /* Enable CPU Port */
+ ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "MIB init failed");
+
+ /* Enable QCA header mode on the cpu port */
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+
+ /* Disable forwarding by default on all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable MAC by default on all ports */
+ for (i = 1; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ /* Forward all unknown frames to CPU port for Linux processing */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ ret = qca8k_setup_port(ds, i);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup our port MTUs to match power on defaults */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ /* Set per port MTU to 1500 as the MTU change function
+ * will add the overhead and if its set to 1518 then it
+ * will apply the overhead again and we will end up with
+ * MTU of 1536 instead of 1518
+ */
+ priv->port_mtu[i] = ETH_DATA_LEN;
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ ds->pcs_poll = true;
+
+ /* CPU port HW learning doesn't work correctly, so let DSA handle it */
+ ds->assisted_learning_on_cpu_port = true;
+
+ return 0;
+}
+
+/* Run the PSGMII VCO PLL calibration dance between the QCA8075 PHY
+ * (Malibu) and the IPQ40xx PSGMII block (Dakota). Returns 0 on success
+ * or a negative error.
+ * NOTE(review): the intermediate phy_write()/regmap results overwrite
+ * 'ret' without being checked — presumably best-effort magic-sequence
+ * writes; confirm whether failures here should abort.
+ */
+static int psgmii_vco_calibrate(struct qca8k_priv *priv)
+{
+ int val, ret;
+
+ if (!priv->psgmii_ethphy) {
+ dev_err(priv->dev, "PSGMII eth PHY missing, calibration failed!\n");
+ return -ENODEV;
+ }
+
+ /* Fix PSGMII RX 20bit */
+ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
+ /* Reset PHY PSGMII */
+ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x1b);
+ /* Release PHY PSGMII reset */
+ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
+
+ /* Poll for VCO PLL calibration finish - Malibu(QCA8075) */
+ ret = phy_read_mmd_poll_timeout(priv->psgmii_ethphy,
+ MDIO_MMD_PMAPMD,
+ 0x28, val,
+ (val & BIT(0)),
+ 10000, 1000000,
+ false);
+ if (ret) {
+ dev_err(priv->dev, "QCA807x PSGMII VCO calibration PLL not ready\n");
+ return ret;
+ }
+ mdelay(50);
+
+ /* Freeze PSGMII RX CDR */
+ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x2230);
+
+ /* Start PSGMIIPHY VCO PLL calibration */
+ ret = regmap_set_bits(priv->psgmii,
+ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1,
+ PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART);
+
+ /* Poll for PSGMIIPHY PLL calibration finish - Dakota(IPQ40xx) */
+ ret = regmap_read_poll_timeout(priv->psgmii,
+ PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2,
+ val, val & PSGMIIPHY_REG_PLL_VCO_CALIB_READY,
+ 10000, 1000000);
+ if (ret) {
+ dev_err(priv->dev, "IPQ PSGMIIPHY VCO calibration PLL not ready\n");
+ return ret;
+ }
+ mdelay(50);
+
+ /* Release PSGMII RX CDR */
+ ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x3230);
+ /* Release PSGMII RX 20bit */
+ ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5f);
+ mdelay(200);
+
+ return ret;
+}
+
+/* Toggle the switch-side MAC loopback bit in a port's lookup control. */
+static void
+qca8k_switch_port_loopback_on_off(struct qca8k_priv *priv, int port, int on)
+{
+ u32 lb_bit = on ? QCA8K_PORT_LOOKUP_LOOPBACK : 0;
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_LOOPBACK, lb_bit);
+}
+
+/* Busy-poll the PHY specific-status register until the link bit matches
+ * @need_status (0 = down, 1 = up); gives up after ~800 ms.
+ * Returns 0 on match, -ETIMEDOUT otherwise (was a bare -1).
+ */
+static int
+qca8k_wait_for_phy_link_state(struct phy_device *phy, int need_status)
+{
+ int a;
+ u16 status;
+
+ for (a = 0; a < 100; a++) {
+ status = phy_read(phy, MII_QCA8075_SSTATUS);
+ status &= QCA8075_PHY_SPEC_STATUS_LINK;
+ status = !!status;
+ if (status == need_status)
+ return 0;
+ mdelay(8);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/* Put a PHY and the matching switch port into (or out of) 1000/full
+ * PHY-level loopback for the self test, syncing the switch MAC and
+ * lookup state with the forced link.
+ */
+static void
+qca8k_phy_loopback_on_off(struct qca8k_priv *priv, struct phy_device *phy,
+ int sw_port, int on)
+{
+ if (on) {
+ phy_write(phy, MII_BMCR, BMCR_ANENABLE | BMCR_RESET);
+ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
+ qca8k_wait_for_phy_link_state(phy, 0);
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
+ phy_write(phy, MII_BMCR,
+ BMCR_SPEED1000 |
+ BMCR_FULLDPLX |
+ BMCR_LOOPBACK);
+ qca8k_wait_for_phy_link_state(phy, 1);
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port),
+ QCA8K_PORT_STATUS_SPEED_1000 |
+ QCA8K_PORT_STATUS_TXMAC |
+ QCA8K_PORT_STATUS_RXMAC |
+ QCA8K_PORT_STATUS_DUPLEX);
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
+ QCA8K_PORT_LOOKUP_STATE_FORWARD,
+ QCA8K_PORT_LOOKUP_STATE_FORWARD);
+ } else { /* off */
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
+ QCA8K_PORT_LOOKUP_STATE_DISABLED,
+ QCA8K_PORT_LOOKUP_STATE_DISABLED);
+ phy_write(phy, MII_BMCR, BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_RESET);
+ /* turn off the power of the phys - so that unused
+ * ports do not raise links
+ */
+ phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
+ }
+}
+
+/* Arm (on=1) or disarm (on=0) the QCA8075 built-in packet generator:
+ * CRC checker, frame counters, packet count and packet size.
+ */
+static void
+qca8k_phy_pkt_gen_prep(struct qca8k_priv *priv, struct phy_device *phy,
+ int pkts_num, int on)
+{
+ if (on) {
+ /* enable CRC checker and packets counters */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT,
+ QCA8075_MMD7_CNT_FRAME_CHK_EN | QCA8075_MMD7_CNT_SELFCLR);
+ qca8k_wait_for_phy_link_state(phy, 1);
+ /* packet number */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, pkts_num);
+ /* pkt size - 1504 bytes + 20 bytes */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_SIZE, 1504);
+ } else { /* off */
+ /* packet number */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, 0);
+ /* disable CRC checker and packet counter */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
+ /* disable traffic gen */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL, 0);
+ }
+}
+
+/* Block until the PHY packet generator reports completion (or times out
+ * after one second); the result is intentionally ignored.
+ */
+static void
+qca8k_wait_for_phy_pkt_gen_fin(struct qca8k_priv *priv, struct phy_device *phy)
+{
+ int val;
+ /* wait for all traffic end: 4096(pkt num)*1524(size)*8ns(125MHz)=49938us */
+ phy_read_mmd_poll_timeout(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
+ val, !(val & QCA8075_MMD7_PKT_GEN_INPROGR),
+ 50000, 1000000, true);
+}
+
+/* Kick off the PHY's internal traffic generator. */
+static void
+qca8k_start_phy_pkt_gen(struct phy_device *phy)
+{
+ /* start traffic gen */
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
+ QCA8075_MMD7_PKT_GEN_START | QCA8075_MMD7_PKT_GEN_INPROGR);
+}
+
+/* Start the packet generators on all PHYs at once by writing through a
+ * temporary phy_device bound to the QCA8075 MDIO broadcast address.
+ * Returns 0 on success or -ENODEV if the stub PHY cannot be created.
+ */
+static int
+qca8k_start_all_phys_pkt_gens(struct qca8k_priv *priv)
+{
+ struct phy_device *phy;
+
+ phy = phy_device_create(priv->bus, QCA8075_MDIO_BRDCST_PHY_ADDR,
+ 0, 0, NULL);
+ if (!phy) {
+ dev_err(priv->dev, "unable to create mdio broadcast PHY(0x%x)\n",
+ QCA8075_MDIO_BRDCST_PHY_ADDR);
+ return -ENODEV;
+ }
+
+ qca8k_start_phy_pkt_gen(phy);
+
+ phy_device_free(phy);
+ return 0;
+}
+
+/* Collect the PHY generator counters and judge the run.
+ * Returns 0 when all @pkts_num frames were sent and received without
+ * errors; negative distinct codes identify which counter failed
+ * (-1 tx short, -2 rx short, -3 tx errors, -4 rx errors).
+ */
+static int
+qca8k_get_phy_pkt_gen_test_result(struct phy_device *phy, int pkts_num)
+{
+ u32 tx_ok, tx_error;
+ u32 rx_ok, rx_error;
+ u32 tx_ok_high16;
+ u32 rx_ok_high16;
+ u32 tx_all_ok, rx_all_ok;
+
+ /* check counters */
+ tx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_LO);
+ tx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_HI);
+ tx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_ERR_CNT);
+ rx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_LO);
+ rx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_HI);
+ rx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_ERR_CNT);
+ tx_all_ok = tx_ok + (tx_ok_high16 << 16);
+ rx_all_ok = rx_ok + (rx_ok_high16 << 16);
+
+ /* Explicit cast keeps the comparison unsigned on both sides. */
+ if (tx_all_ok < (u32)pkts_num)
+ return -1;
+ if (rx_all_ok < (u32)pkts_num)
+ return -2;
+ if (tx_error)
+ return -3;
+ if (rx_error)
+ return -4;
+ return 0; /* test is ok */
+}
+
+/* Enable or disable the PHY's acceptance of MDIO broadcast writes via
+ * the MMD7 broadcast-write control bit (read-modify-write).
+ */
+static void
+qca8k_phy_broadcast_write_on_off(struct qca8k_priv *priv,
+ struct phy_device *phy, int on)
+{
+ u32 ctrl;
+
+ ctrl = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE);
+
+ if (on)
+ ctrl |= QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
+ else
+ ctrl &= ~QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
+
+ phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE, ctrl);
+}
+
+/* One port's share of the two-phase self test.
+ * Phase 1 arms loopbacks and the packet generator; phase 2 waits for
+ * traffic to finish, reads the result and tears everything down.
+ * Returns 0 on pass, a negative code from
+ * qca8k_get_phy_pkt_gen_test_result() on failure.
+ */
+static int
+qca8k_test_dsa_port_for_errors(struct qca8k_priv *priv, struct phy_device *phy,
+ int port, int test_phase)
+{
+ int res = 0;
+ const int test_pkts_num = QCA8075_PKT_GEN_PKTS_COUNT;
+
+ if (test_phase == 1) { /* start test preps */
+ qca8k_phy_loopback_on_off(priv, phy, port, 1);
+ qca8k_switch_port_loopback_on_off(priv, port, 1);
+ qca8k_phy_broadcast_write_on_off(priv, phy, 1);
+ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 1);
+ } else if (test_phase == 2) {
+ /* wait for test results, collect it and cleanup */
+ qca8k_wait_for_phy_pkt_gen_fin(priv, phy);
+ res = qca8k_get_phy_pkt_gen_test_result(phy, test_pkts_num);
+ qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 0);
+ qca8k_phy_broadcast_write_on_off(priv, phy, 0);
+ qca8k_switch_port_loopback_on_off(priv, port, 0);
+ qca8k_phy_loopback_on_off(priv, phy, port, 0);
+ }
+
+ return res;
+}
+
+/* Run the PSGMII data-path self test over every user port listed in DT.
+ * @parallel_test: 0 = each generator started serially, 1 = all at once
+ * through the MDIO broadcast address.
+ * Returns 0 when all ports pass; otherwise a per-port failure bitmap,
+ * with 0xf000 or-ed in when the test could not even run.
+ */
+static int
+qca8k_do_dsa_sw_ports_self_test(struct qca8k_priv *priv, int parallel_test)
+{
+ struct device_node *dn = priv->dev->of_node;
+ struct device_node *ports, *port;
+ struct device_node *phy_dn;
+ struct phy_device *phy;
+ int reg, err = 0, test_phase;
+ u32 tests_result = 0;
+
+ ports = of_get_child_by_name(dn, "ports");
+ if (!ports) {
+ dev_err(priv->dev, "no ports child node found\n");
+ return -EINVAL;
+ }
+
+ for (test_phase = 1; test_phase <= 2; test_phase++) {
+ if (parallel_test && test_phase == 2) {
+ err = qca8k_start_all_phys_pkt_gens(priv);
+ if (err)
+ goto error;
+ }
+ for_each_available_child_of_node(ports, port) {
+ err = of_property_read_u32(port, "reg", &reg);
+ if (err) {
+ /* Drop the iterator's reference before
+ * leaving the loop early.
+ */
+ of_node_put(port);
+ goto error;
+ }
+ if (reg >= QCA8K_NUM_PORTS) {
+ err = -EINVAL;
+ of_node_put(port);
+ goto error;
+ }
+ phy_dn = of_parse_phandle(port, "phy-handle", 0);
+ if (phy_dn) {
+ phy = of_phy_find_device(phy_dn);
+ of_node_put(phy_dn);
+ if (phy) {
+ int result;
+
+ result = qca8k_test_dsa_port_for_errors(priv,
+ phy, reg, test_phase);
+ if (!parallel_test && test_phase == 1)
+ qca8k_start_phy_pkt_gen(phy);
+ put_device(&phy->mdio.dev);
+ if (test_phase == 2) {
+ tests_result <<= 1;
+ if (result)
+ tests_result |= 1;
+ }
+ }
+ }
+ }
+ }
+
+end:
+ of_node_put(ports);
+ qca8k_fdb_flush(priv);
+ return tests_result;
+error:
+ tests_result |= 0xf000;
+ goto end;
+}
+
+/* Calibrate the PSGMII link and verify it with the port self test,
+ * retrying up to QCA8K_PSGMII_CALB_NUM times (serial test first, then
+ * parallel). Returns 0 once a calibration passes both tests, a negative
+ * calibration error, or panics if the link never stabilizes — the
+ * switch would be unusable anyway.
+ */
+static int
+psgmii_vco_calibrate_and_test(struct dsa_switch *ds)
+{
+ int ret, a, test_result;
+ struct qca8k_priv *priv = ds->priv;
+
+ for (a = 0; a <= QCA8K_PSGMII_CALB_NUM; a++) {
+ ret = psgmii_vco_calibrate(priv);
+ if (ret)
+ return ret;
+ /* first we run serial test */
+ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 0);
+ /* and if it is ok then we run the test in parallel */
+ if (!test_result)
+ test_result = qca8k_do_dsa_sw_ports_self_test(priv, 1);
+ if (!test_result) {
+ if (a > 0) {
+ dev_warn(priv->dev, "PSGMII work was stabilized after %d "
+ "calibration retries !\n", a);
+ }
+ return 0;
+ } else {
+ /* yield, and back off progressively every 10 tries */
+ schedule();
+ if (a > 0 && a % 10 == 0) {
+ dev_err(priv->dev, "PSGMII work is unstable !!! "
+ "Let's try to wait a bit ... %d\n", a);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(a * 100));
+ }
+ }
+ }
+
+ panic("PSGMII work is unstable !!! "
+ "Repeated recalibration attempts did not help(0x%x) !\n",
+ test_result);
+
+ /* not reached — panic() does not return */
+ return -EFAULT;
+}
+
+/* One-time PSGMII bring-up: calibrate/test the link, then apply the
+ * PSGMII PHY mode and TX tuning values. Returns 0 or the first error;
+ * previously the calibration and mode-control results were silently
+ * overwritten by later register writes.
+ */
+static int
+ipq4019_psgmii_configure(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (!priv->psgmii_calibrated) {
+ ret = psgmii_vco_calibrate_and_test(ds);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(priv->psgmii, PSGMIIPHY_MODE_CONTROL,
+ PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->psgmii, PSGMIIPHY_TX_CONTROL,
+ PSGMIIPHY_TX_CONTROL_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ /* Only mark calibrated once everything above succeeded, so a
+ * failed attempt is retried on the next call.
+ */
+ priv->psgmii_calibrated = true;
+ }
+
+ return 0;
+}
+
+/* phylink mac_config hook: lazily configure PSGMII on first use for the
+ * PSGMII-capable ports and enable the RGMII clock on ports 4/5 when an
+ * RGMII variant is requested. Port 0 (CPU) needs no MAC configuration.
+ */
+static void
+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ switch (port) {
+ case 0:
+ /* CPU port, no configuration needed */
+ return;
+ case 1:
+ case 2:
+ case 3:
+ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
+ if (ipq4019_psgmii_configure(ds))
+ dev_err(ds->dev, "PSGMII configuration failed!\n");
+ return;
+ case 4:
+ case 5:
+ if (state->interface == PHY_INTERFACE_MODE_RGMII ||
+ state->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ state->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ state->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ qca8k_reg_set(priv, QCA8K_REG_RGMII_CTRL, QCA8K_RGMII_CTRL_CLK);
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_PSGMII)
+ if (ipq4019_psgmii_configure(ds))
+ dev_err(ds->dev, "PSGMII configuration failed!\n");
+ return;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+ return;
+ }
+}
+
+/* phylink validate hook: reject unsupported port/interface combinations
+ * and narrow the advertised link modes. The CPU port (0) is fixed at
+ * 1000/full internal; user ports pass through whatever the PHY reports.
+ */
+static void
+qca8k_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0: /* CPU port */
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ /* Only PSGMII mode is supported */
+ if (state->interface != PHY_INTERFACE_MODE_PSGMII)
+ goto unsupported;
+ break;
+ case 4:
+ case 5:
+ /* PSGMII and RGMII modes are supported */
+ if (state->interface != PHY_INTERFACE_MODE_PSGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID)
+ goto unsupported;
+ break;
+ default:
+unsupported:
+ dev_warn(ds->dev, "interface '%s' (%d) on port %d is not supported\n",
+ phy_modes(state->interface), state->interface, port);
+ linkmode_zero(supported);
+ return;
+ }
+
+ if (port == 0) {
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, 1000baseT_Full);
+
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+ } else {
+ /* Simply copy what PHYs tell us */
+ linkmode_copy(state->advertising, supported);
+ }
+}
+
+/* phylink mac_link_state hook: decode the PORT_STATUS register into
+ * link/duplex/speed/pause state. Returns 1 on success (state filled in)
+ * or a negative read error.
+ */
+static int
+qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0)
+ return ret;
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->pause = MLO_PAUSE_NONE;
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 1;
+}
+
+/* phylink mac_link_down hook: shut off the port's TX/RX MACs. */
+static void
+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+}
+
+/* phylink mac_link_up hook: program the port's speed, duplex and flow
+ * control. In-band autoneg modes let the hardware track the link; the
+ * CPU port always gets flow control enabled. Finally enables the MACs.
+ */
+static void
+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+
+ if (phylink_autoneg_inband(mode)) {
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ reg = QCA8K_PORT_STATUS_SPEED_10;
+ break;
+ case SPEED_100:
+ reg = QCA8K_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_1000:
+ reg = QCA8K_PORT_STATUS_SPEED_1000;
+ break;
+ default:
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+ if (rx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+ if (tx_pause || dsa_is_cpu_port(ds, port))
+ reg |= QCA8K_PORT_STATUS_TXFLOW;
+ }
+
+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+}
+
+/* ethtool get_strings hook: emit the MIB counter names, one per
+ * ETH_GSTRING_LEN slot. Only the statistics string set is supported.
+ */
+static void
+qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+/* ethtool get_ethtool_stats hook: read each MIB counter for @port;
+ * 2-word counters combine a low and a high 32-bit register.
+ * NOTE(review): on a read error the entry is skipped via 'continue',
+ * leaving data[i] with whatever the caller pre-filled — confirm the
+ * ethtool core zeroes the buffer first.
+ */
+static void
+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+/* ethtool get_sset_count hook: number of MIB counters for the stats
+ * string set, zero for any other set.
+ */
+static int
+qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ return sset == ETH_SS_STATS ? ARRAY_SIZE(ar8327_mib) : 0;
+}
+
+/* ethtool set_mac_eee hook: toggle the per-port LPI enable bit in the
+ * global EEE control register under the register mutex.
+ * Returns 0 or a negative register access error.
+ */
+static int
+qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+/* ethtool .get_mac_eee: no MAC-level EEE state to report; the PHY
+ * handles EEE advertisement, so this is intentionally a no-op.
+ */
+static int
+qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+/* Map the bridge STP state to the hardware port lookup state and
+ * program it. Unknown states fall back to FORWARD.
+ */
+static void
+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+/* Join @port to bridge @br: add @port to the port-VLAN member mask of
+ * every other member port, then set this port's own member mask to the
+ * CPU port plus all current members.
+ */
+static int
+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev != br)
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = qca8k_reg_set(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+ /* Add all other ports to this ports portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+/* Remove @port from bridge @br: clear it from every remaining member's
+ * port-VLAN mask and isolate it so it can only talk to the CPU port.
+ */
+static void
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev != br)
+ continue;
+ /* Remove this port from the portvlan mask of the other
+ * ports in the bridge
+ */
+ qca8k_reg_clear(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+/* DSA .port_fast_age: flush all dynamic FDB entries learned on @port. */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+/* DSA .set_ageing_time: program the ATU aging interval.
+ * The hardware field counts in 7-second units; a value of 0 would
+ * disable learning entirely, so clamp to a minimum of 1 (7 s).
+ */
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s step */
+ val = secs / 7;
+
+ /* Handle case with 0 as val to NOT disable
+ * learning
+ */
+ if (!val)
+ val = 1;
+
+ return qca8k_rmw(priv, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
+/* DSA .port_enable: bring the port MACs up and, for user ports,
+ * advertise asymmetric pause support on the attached PHY.
+ */
+static int
+qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_sts[port].enabled = 1;
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+/* DSA .port_disable: shut the port MACs down and record the state. */
+static void
+qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_sts[port].enabled = 0;
+}
+
+/* DSA .port_change_mtu: the switch has a single global frame-size
+ * register, so program it with the largest MTU configured on any port,
+ * plus Ethernet header and FCS overhead.
+ */
+static int
+qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i, mtu = 0;
+
+ priv->port_mtu[port] = new_mtu;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ if (priv->port_mtu[i] > mtu)
+ mtu = priv->port_mtu[i];
+
+ /* Include L2 header / FCS length */
+ return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
+}
+
+/* DSA .port_max_mtu: all ports share the same hardware maximum. */
+static int
+qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+/* Helper: install a static FDB entry for @addr on the ports in
+ * @port_mask, defaulting VID 0 to the port default VLAN.
+ */
+static int
+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+/* DSA .port_fdb_add: static FDB entry targeting a single port. */
+static int
+qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+/* DSA .port_fdb_del: remove an FDB entry; VID 0 maps to the port
+ * default VLAN, mirroring qca8k_port_fdb_insert().
+ */
+static int
+qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+/* DSA .port_fdb_dump: walk the hardware FDB for @port and report each
+ * entry via @cb. Iteration stops at the first unused (aging == 0)
+ * entry, a callback error, or after QCA8K_NUM_FDB_RECORDS entries.
+ * NOTE(review): a non-zero @cb result ends the walk but is not
+ * propagated — the function always returns 0; confirm this is intended.
+ */
+static int
+qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+/* DSA .port_vlan_filtering: switch the port's 802.1Q mode between
+ * SECURE (filtering on) and NONE (filtering off).
+ */
+static int
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (vlan_filtering) {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return 0;
+}
+
+/* DSA .port_vlan_add: add @port to the VLAN table entry, then, if the
+ * VLAN is the PVID, program the port's default CVID/SVID for untagged
+ * ingress. The EGRESS_VLAN register packs two ports per 32-bit word,
+ * hence the 16-bit shift selected by port parity.
+ */
+static int
+qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret = 0;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ int shift = 16 * (port % 2);
+
+ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ 0xfff << shift, vlan->vid << shift);
+ qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+ return 0;
+}
+
+/* DSA .port_vlan_del: remove @port from the VLAN table entry. */
+static int
+qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret = 0;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+/* DSA .get_tag_protocol: this switch uses the IPQ4019 tagger. */
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_IPQ4019;
+}
+
+/* DSA operations implemented by this driver. */
+static const struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
+ .get_mac_eee = qca8k_get_mac_eee,
+ .set_mac_eee = qca8k_set_mac_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_change_mtu = qca8k_port_change_mtu,
+ .port_max_mtu = qca8k_port_max_mtu,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
+ .phylink_validate = qca8k_phylink_validate,
+ .phylink_mac_link_state = qca8k_phylink_mac_link_state,
+ .phylink_mac_config = qca8k_phylink_mac_config,
+ .phylink_mac_link_down = qca8k_phylink_mac_link_down,
+ .phylink_mac_link_up = qca8k_phylink_mac_link_up,
+};
+
+/* Platform probe: map the switch ("base") and PSGMII PHY register
+ * windows as MMIO regmaps, resolve the MDIO bus and optional PSGMII
+ * Ethernet PHY from DT phandles, then register the DSA switch.
+ * Returns -EPROBE_DEFER if the MDIO bus has not been registered yet.
+ */
+static int
+qca8k_ipq4019_probe(struct platform_device *pdev)
+{
+ struct qca8k_priv *priv;
+ void __iomem *base, *psgmii;
+ struct device_node *np = pdev->dev.of_node, *mdio_np, *psgmii_ethphy_np;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "base");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(priv->dev, base,
+ &qca8k_ipq4019_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(priv->dev, "base regmap initialization failed, %d\n", ret);
+ return ret;
+ }
+
+ psgmii = devm_platform_ioremap_resource_byname(pdev, "psgmii_phy");
+ if (IS_ERR(psgmii))
+ return PTR_ERR(psgmii);
+
+ priv->psgmii = devm_regmap_init_mmio(priv->dev, psgmii,
+ &qca8k_ipq4019_psgmii_phy_regmap_config);
+ if (IS_ERR(priv->psgmii)) {
+ ret = PTR_ERR(priv->psgmii);
+ dev_err(priv->dev, "PSGMII regmap initialization failed, %d\n", ret);
+ return ret;
+ }
+
+ mdio_np = of_parse_phandle(np, "mdio", 0);
+ if (!mdio_np) {
+ dev_err(&pdev->dev, "unable to get MDIO bus phandle\n");
+ /* of_node_put(NULL) is a safe no-op */
+ of_node_put(mdio_np);
+ return -EINVAL;
+ }
+
+ priv->bus = of_mdio_find_bus(mdio_np);
+ of_node_put(mdio_np);
+ if (!priv->bus) {
+ /* MDIO bus may simply not be registered yet */
+ dev_err(&pdev->dev, "unable to find MDIO bus\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* The PSGMII attached PHY is optional */
+ psgmii_ethphy_np = of_parse_phandle(np, "psgmii-ethphy", 0);
+ if (!psgmii_ethphy_np) {
+ dev_dbg(&pdev->dev, "unable to get PSGMII eth PHY phandle\n");
+ of_node_put(psgmii_ethphy_np);
+ }
+
+ if (psgmii_ethphy_np) {
+ priv->psgmii_ethphy = of_phy_find_device(psgmii_ethphy_np);
+ of_node_put(psgmii_ethphy_np);
+ if (!priv->psgmii_ethphy) {
+ dev_err(&pdev->dev, "unable to get PSGMII eth PHY\n");
+ return -ENODEV;
+ }
+ }
+
+ priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = priv->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
+ priv->ds->priv = priv;
+ /* Keep a mutable copy of the ops so they could be patched per-device */
+ priv->ops = qca8k_switch_ops;
+ priv->ds->ops = &priv->ops;
+
+ mutex_init(&priv->reg_mutex);
+ platform_set_drvdata(pdev, priv);
+
+ return dsa_register_switch(priv->ds);
+}
+
+/* Platform remove: disable every port MAC, then unregister the switch.
+ * All memory is devm-managed, so no explicit frees are needed.
+ */
+static int
+qca8k_ipq4019_remove(struct platform_device *pdev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ if (!priv)
+ return 0;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+/* Device-tree match table. MODULE_DEVICE_TABLE exports it so udev can
+ * autoload the module when the compatible string is present.
+ */
+static const struct of_device_id qca8k_ipq4019_of_match[] = {
+ { .compatible = "qca,ipq4019-qca8337n" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, qca8k_ipq4019_of_match);
+
+/* Platform driver glue for the IPQ4019 built-in switch. */
+static struct platform_driver qca8k_ipq4019_driver = {
+ .probe = qca8k_ipq4019_probe,
+ .remove = qca8k_ipq4019_remove,
+ .driver = {
+ .name = "qca8k-ipq4019",
+ .of_match_table = qca8k_ipq4019_of_match,
+ },
+};
+
+module_platform_driver(qca8k_ipq4019_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>, Robert Marko <robert.marko@sartura.hr>");
+MODULE_DESCRIPTION("Qualcomm IPQ4019 built-in switch driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/regmap.h>
+
+#define QCA8K_NUM_PORTS 6
+#define QCA8K_CPU_PORT 0
+#define QCA8K_MAX_MTU 9000
+
+#define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_PORT_VID_DEF 1
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
+#define QCA8K_REG_RGMII_CTRL 0x004
+#define QCA8K_RGMII_CTRL_RGMII_RXC GENMASK(1, 0)
+#define QCA8K_RGMII_CTRL_RGMII_TXC GENMASK(9, 8)
+/* Some kind of CLK selection
+ * 0: gcc_ess_dly2ns
+ * 1: gcc_ess_clk
+ */
+#define QCA8K_RGMII_CTRL_CLK BIT(10)
+#define QCA8K_RGMII_CTRL_DELAY_RMII0 GENMASK(17, 16)
+#define QCA8K_RGMII_CTRL_INVERT_RMII0_REF_CLK BIT(18)
+#define QCA8K_RGMII_CTRL_DELAY_RMII1 GENMASK(20, 19)
+#define QCA8K_RGMII_CTRL_INVERT_RMII1_REF_CLK BIT(21)
+#define QCA8K_RGMII_CTRL_INVERT_RMII0_MASTER_EN BIT(24)
+#define QCA8K_RGMII_CTRL_INVERT_RMII1_MASTER_EN BIT(25)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_MAX_FRAME_SIZE 0x78
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
+#define QCA8K_PORT_STATUS_SPEED_10 0
+#define QCA8K_PORT_STATUS_SPEED_100 0x1
+#define QCA8K_PORT_STATUS_SPEED_1000 0x2
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_RX_S 2
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_TX_S 0
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+#define QCA8K_REG_SGMII_CTRL 0x0e0
+#define QCA8K_SGMII_EN_PLL BIT(1)
+#define QCA8K_SGMII_EN_RX BIT(2)
+#define QCA8K_SGMII_EN_TX BIT(3)
+#define QCA8K_SGMII_EN_SD BIT(4)
+#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
+#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
+#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
+#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
+#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
+#define QCA8K_PORT_VLAN_SVID(x) x
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_S 24
+#define QCA8K_ATU_ADDR3_S 16
+#define QCA8K_ATU_ADDR4_S 8
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_M 0x7f
+#define QCA8K_ATU_PORT_S 16
+#define QCA8K_ATU_ADDR0_S 8
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_M 0xfff
+#define QCA8K_ATU_VID_S 8
+#define QCA8K_ATU_STATUS_M 0xf
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_M 0xf
+#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_REG_VTU_FUNC0 0x610
+#define QCA8K_VTU_FUNC0_VALID BIT(20)
+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
+#define QCA8K_REG_VTU_FUNC1 0x614
+#define QCA8K_VTU_FUNC1_BUSY BIT(31)
+#define QCA8K_VTU_FUNC1_VID_S 16
+#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
+#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_LOOPBACK BIT(21)
+
+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
+
+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
+
+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
+/* Pkt edit registers */
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* IPQ4019 PSGMII PHY registers */
+#define PSGMIIPHY_MODE_CONTROL 0x1b4
+#define PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M BIT(0)
+#define PSGMIIPHY_TX_CONTROL 0x288
+#define PSGMIIPHY_TX_CONTROL_MAGIC_VALUE 0x8380
+#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1 0x9c
+#define PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART BIT(14)
+#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2 0xa0
+#define PSGMIIPHY_REG_PLL_VCO_CALIB_READY BIT(0)
+
+#define QCA8K_PSGMII_CALB_NUM 100
+#define MII_QCA8075_SSTATUS 0x11
+#define QCA8075_PHY_SPEC_STATUS_LINK BIT(10)
+#define QCA8075_MMD7_CRC_AND_PKTS_COUNT 0x8029
+#define QCA8075_MMD7_PKT_GEN_PKT_NUMB 0x8021
+#define QCA8075_MMD7_PKT_GEN_PKT_SIZE 0x8062
+#define QCA8075_MMD7_PKT_GEN_CTRL 0x8020
+#define QCA8075_MMD7_CNT_SELFCLR BIT(1)
+#define QCA8075_MMD7_CNT_FRAME_CHK_EN BIT(0)
+#define QCA8075_MMD7_PKT_GEN_START BIT(13)
+#define QCA8075_MMD7_PKT_GEN_INPROGR BIT(15)
+#define QCA8075_MMD7_IG_FRAME_RECV_CNT_HI 0x802a
+#define QCA8075_MMD7_IG_FRAME_RECV_CNT_LO 0x802b
+#define QCA8075_MMD7_IG_FRAME_ERR_CNT 0x802c
+#define QCA8075_MMD7_EG_FRAME_RECV_CNT_HI 0x802d
+#define QCA8075_MMD7_EG_FRAME_RECV_CNT_LO 0x802e
+#define QCA8075_MMD7_EG_FRAME_ERR_CNT 0x802f
+#define QCA8075_MMD7_MDIO_BRDCST_WRITE 0x8028
+#define QCA8075_MMD7_MDIO_BRDCST_WRITE_EN BIT(15)
+#define QCA8075_MDIO_BRDCST_PHY_ADDR 0x1f
+#define QCA8075_PKT_GEN_PKTS_COUNT 4096
+
+/* Encodings for the PORT_STATUS speed field. */
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+/* ATU_FUNC command opcodes for FDB operations. */
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+/* VTU_FUNC command opcodes for VLAN table operations. */
+enum qca8k_vlan_cmd {
+ QCA8K_VLAN_FLUSH = 1,
+ QCA8K_VLAN_LOAD = 2,
+ QCA8K_VLAN_PURGE = 3,
+ QCA8K_VLAN_REMOVE_PORT = 4,
+ QCA8K_VLAN_NEXT = 5,
+ QCA8K_VLAN_READ = 6,
+};
+
+/* Per-port software state; non-zero when the port MACs are enabled. */
+struct ar8xxx_port_status {
+ int enabled;
+};
+
+/* Driver private state, shared between the DSA callbacks. */
+struct qca8k_priv {
+ struct regmap *regmap; /* switch register window */
+ struct mii_bus *bus; /* MDIO bus for the attached PHYs */
+ struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
+ struct dsa_switch *ds;
+ struct mutex reg_mutex; /* serializes multi-register sequences */
+ struct device *dev;
+ struct dsa_switch_ops ops; /* per-device copy of qca8k_switch_ops */
+ unsigned int port_mtu[QCA8K_NUM_PORTS];
+
+ /* IPQ4019 specific */
+ struct regmap *psgmii; /* PSGMII PHY register window */
+ bool psgmii_calibrated;
+ struct phy_device *psgmii_ethphy;
+};
+
+/* One MIB counter: register word count (1 or 2), offset, ethtool name. */
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+/* Decoded FDB (ATU) entry as read back from the hardware. */
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging; /* 0 = unused; QCA8K_ATU_STATUS_STATIC = static entry */
+ u8 mac[6];
+};
+
+#endif /* __QCA8K_H */
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the IPQ ESS driver
+#
+
+obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipq_ess.o
+
+ipq_ess-objs := ipqess.o ipqess_ethtool.o
--- /dev/null
+// SPDX-License-Identifier: (GPL-2.0 OR ISC)
+/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
+ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/dsa/ipq4019.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <net/checksum.h>
+#include <net/dsa.h>
+#include <net/ip6_checksum.h>
+
+#include "ipqess.h"
+
+#define IPQESS_RRD_SIZE 16
+#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define IPQESS_TX_DMA_BUF_LEN 0x3fff
+
+/* Write a 32-bit ESS register. */
+static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
+{
+ writel(val, ess->hw_addr + reg);
+}
+
+/* Read a 32-bit ESS register. */
+static u32 ipqess_r32(struct ipqess *ess, u16 reg)
+{
+ return readl(ess->hw_addr + reg);
+}
+
+/* Read-modify-write: clear @mask bits of @reg, then set @val bits. */
+static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
+{
+ u32 _val = ipqess_r32(ess, reg);
+ _val &= ~mask;
+ _val |= val;
+ ipqess_w32(ess, reg, _val);
+}
+
+/* Accumulate the hardware TX/RX packet and byte queue counters into the
+ * software stats structure. Walks ipqessstats as a flat u32 array, so
+ * the field order there must match the register read order below.
+ * Caller must hold stats_lock.
+ */
+void ipqess_update_hw_stats(struct ipqess *ess)
+{
+ uint32_t *p;
+ u32 stat;
+ int i;
+
+ lockdep_assert_held(&ess->stats_lock);
+
+ p = (uint32_t *)&(ess->ipqessstats);
+ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
+ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
+ *p += stat;
+ p++;
+ }
+
+ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
+ stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
+ *p += stat;
+ p++;
+ }
+
+ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
+ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
+ *p += stat;
+ p++;
+ }
+
+ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
+ stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
+ *p += stat;
+ p++;
+ }
+}
+
+/* Allocate and initialize every TX ring: software buffer array,
+ * DMA-coherent descriptor ring, and hardware producer/consumer indices
+ * synchronized to the current consumer position. Allocations are
+ * devm/dmam-managed, so error paths need no explicit cleanup.
+ */
+static int ipqess_tx_ring_alloc(struct ipqess *ess)
+{
+ struct device *dev = &ess->pdev->dev;
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
+ size_t size;
+ u32 idx;
+
+ tx_ring->ess = ess;
+ tx_ring->ring_id = i;
+ /* hardware TX queues are stride-4 per netdev queue */
+ tx_ring->idx = i * 4;
+ tx_ring->count = IPQESS_TX_RING_SIZE;
+ tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
+
+ size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
+ tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!tx_ring->buf) {
+ netdev_err(ess->netdev, "buffer alloc of tx ring failed");
+ return -ENOMEM;
+ }
+
+ size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
+ tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tx_ring->hw_desc) {
+ netdev_err(ess->netdev, "descriptor allocation for tx ring failed");
+ return -ENOMEM;
+ }
+
+ ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
+ (u32)tx_ring->dma);
+
+ idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
+ idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
+ idx &= 0xffff;
+ tx_ring->head = tx_ring->tail = idx;
+
+ ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
+ idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
+ ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
+ ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
+ }
+
+ return 0;
+}
+
+/* Unmap one TX buffer's DMA mapping and, on the last fragment of a
+ * packet, free the skb. Returns the skb length freed (0 otherwise),
+ * which callers use for byte accounting.
+ */
+static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
+{
+ int len = 0;
+
+ if (buf->flags & IPQESS_DESC_SINGLE)
+ dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
+ else if (buf->flags & IPQESS_DESC_PAGE)
+ dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
+
+ if (buf->flags & IPQESS_DESC_LAST) {
+ len = buf->skb->len;
+ dev_kfree_skb_any(buf->skb);
+ }
+
+ buf->flags = 0;
+
+ return len;
+}
+
+/* Release all TX ring buffers.
+ * NOTE(review): the "continue" below skips rings whose hw_desc IS
+ * allocated — i.e. the fully initialized rings — which looks inverted
+ * (one would expect "if (!ess->tx_ring[i].hw_desc)"). Confirm against
+ * the intended teardown semantics before changing.
+ */
+static void ipqess_tx_ring_free(struct ipqess *ess)
+{
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ int j;
+
+ if (ess->tx_ring[i].hw_desc)
+ continue;
+
+ for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
+ struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
+
+ ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
+ }
+
+ ess->tx_ring[i].buf = NULL;
+ }
+}
+
+/* Zero the descriptor header area of a fresh RX skb, DMA-map it, store
+ * it in the RX fill ring and advance the hardware producer index.
+ * Returns 0 on success, -EFAULT on a DMA mapping failure (the skb is
+ * freed and buf->skb cleared).
+ */
+static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
+ struct ipqess_rx_ring *rx_ring)
+{
+ /* Clean the HW DESC header, otherwise we might end up
+ * with a spurious desc because of random garbage */
+ memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
+
+ buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
+ IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
+ /* dma_addr_t may be 64-bit; %pad is the correct specifier */
+ dev_err_once(rx_ring->ppdev,
+ "IPQESS DMA mapping failed for linear address %pad",
+ &buf->dma);
+ dev_kfree_skb_any(buf->skb);
+ buf->skb = NULL;
+ return -EFAULT;
+ }
+
+ buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
+ rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
+ rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
+
+ ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
+ (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
+ IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
+
+ return 0;
+}
+
+/* Allocate an RX buffer from NAPI (softirq) context and publish it.
+ * Locking is handled by the caller.
+ */
+static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
+{
+ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
+
+ buf->skb = napi_alloc_skb(&rx_ring->napi_rx,
+ IPQESS_RX_HEAD_BUFF_SIZE);
+ if (!buf->skb)
+ return -ENOMEM;
+
+ return ipqess_rx_buf_prepare(buf, rx_ring);
+}
+
+/* Allocate an RX buffer from process context (setup / refill worker)
+ * and publish it to the fill ring.
+ */
+static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
+{
+ struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
+
+ buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
+ IPQESS_RX_HEAD_BUFF_SIZE);
+ if (!buf->skb)
+ return -ENOMEM;
+
+ return ipqess_rx_buf_prepare(buf, rx_ring);
+}
+
+/* Deferred RX refill: retry buffer allocations that failed in NAPI
+ * context. NAPI is paused around each attempt to avoid racing with the
+ * poll loop; any still-failed allocations are re-counted and the work
+ * is rescheduled.
+ */
+static void ipqess_refill_work(struct work_struct *work)
+{
+ struct ipqess_rx_ring_refill *rx_refill = container_of(work,
+ struct ipqess_rx_ring_refill, refill_work);
+ struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
+ int refill = 0;
+
+ /* don't let this loop by accident. */
+ while (atomic_dec_and_test(&rx_ring->refill_count)) {
+ napi_disable(&rx_ring->napi_rx);
+ if (ipqess_rx_buf_alloc(rx_ring)) {
+ refill++;
+ dev_dbg(rx_ring->ppdev,
+ "Not all buffers were reallocated");
+ }
+ napi_enable(&rx_ring->napi_rx);
+ }
+
+ if (atomic_add_return(refill, &rx_ring->refill_count))
+ schedule_work(&rx_refill->refill_work);
+}
+
+
+/* Allocate and initialize every RX ring: software buffer array,
+ * DMA-coherent descriptor ring, one skb per slot, and the refill
+ * worker. Finally program the global RX buffer/ring-size register.
+ * Allocations are devm/dmam-managed, so error paths need no cleanup.
+ */
+static int ipqess_rx_ring_alloc(struct ipqess *ess)
+{
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ int j;
+
+ ess->rx_ring[i].ess = ess;
+ ess->rx_ring[i].ppdev = &ess->pdev->dev;
+ ess->rx_ring[i].ring_id = i;
+ /* hardware RX queues are stride-2 per netdev queue */
+ ess->rx_ring[i].idx = i * 2;
+
+ ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
+ sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
+ GFP_KERNEL);
+ if (!ess->rx_ring[i].buf)
+ return -ENOMEM;
+
+ ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
+ sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
+ &ess->rx_ring[i].dma, GFP_KERNEL);
+ if (!ess->rx_ring[i].hw_desc)
+ return -ENOMEM;
+
+ for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
+ if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
+ return -ENOMEM;
+
+ ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
+ INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
+
+ ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
+ (u32)(ess->rx_ring[i].dma));
+ }
+
+ ipqess_w32(ess, IPQESS_REG_RX_DESC0,
+ (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
+ (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
+
+ return 0;
+}
+
+/* Tear down every RX ring: stop pending refill work, then unmap and
+ * free each slot's skb. Descriptor memory itself is dmam-managed.
+ */
+static void ipqess_rx_ring_free(struct ipqess *ess)
+{
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ int j;
+
+ atomic_set(&ess->rx_ring[i].refill_count, 0);
+ cancel_work_sync(&ess->rx_refill[i].refill_work);
+
+ for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
+ dma_unmap_single(&ess->pdev->dev,
+ ess->rx_ring[i].buf[j].dma,
+ ess->rx_ring[i].buf[j].length,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
+ }
+ }
+}
+
+/* netdev .ndo_get_stats: refresh the software counters from hardware
+ * under stats_lock, then return them.
+ */
+static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+
+ spin_lock(&ess->stats_lock);
+ ipqess_update_hw_stats(ess);
+ spin_unlock(&ess->stats_lock);
+
+ return &ess->stats;
+}
+
+/* Reap completed RX descriptors from @rx_ring, up to @budget packets.
+ *
+ * Each received frame begins with an RRD (receive return descriptor)
+ * that the hardware writes at the head of the buffer; frames spanning
+ * several descriptors are reassembled into a frag_list. Consumed slots
+ * are refilled inline; when in-NAPI allocation fails the remainder is
+ * pushed to the refill worker. Returns the number of packets handed to
+ * GRO.
+ */
+static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
+{
+ u32 length = 0, num_desc, tail, rx_ring_tail;
+ int done = 0;
+
+ rx_ring_tail = rx_ring->tail;
+
+ /* hardware consumer index */
+ tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
+ tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
+ tail &= IPQESS_RFD_CONS_IDX_MASK;
+
+ while (done < budget) {
+ struct sk_buff *skb;
+ struct ipqess_rx_desc *rd;
+
+ if (rx_ring_tail == tail)
+ break;
+
+ dma_unmap_single(rx_ring->ppdev,
+ rx_ring->buf[rx_ring_tail].dma,
+ rx_ring->buf[rx_ring_tail].length,
+ DMA_FROM_DEVICE);
+
+ skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
+ rd = (struct ipqess_rx_desc *)skb->data;
+ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
+
+ /* Check if RRD is valid */
+ if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
+ num_desc = 1;
+ dev_kfree_skb_any(skb);
+ goto skip;
+ }
+
+ num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
+ length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;
+
+ /* strip the RRD header off the data */
+ skb_reserve(skb, IPQESS_RRD_SIZE);
+ if (num_desc > 1) {
+ /* can we use build_skb here ? */
+ struct sk_buff *skb_prev = NULL;
+ int size_remaining;
+ int i;
+
+ /* NOTE(review): truesize is set to the packet length,
+ * not the real buffer footprint - verify intent
+ */
+ skb->data_len = 0;
+ skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
+ skb->len = skb->truesize = length;
+ size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
+
+ /* chain the remaining descriptors into a frag_list */
+ for (i = 1; i < num_desc; i++) {
+ /* TODO: use build_skb ? */
+ struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
+
+ dma_unmap_single(rx_ring->ppdev,
+ rx_ring->buf[rx_ring_tail].dma,
+ rx_ring->buf[rx_ring_tail].length,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
+ if (skb_prev)
+ skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
+ else
+ skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
+ skb_prev = rx_ring->buf[rx_ring_tail].skb;
+ rx_ring->buf[rx_ring_tail].skb->next = NULL;
+
+ skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
+ size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
+
+ rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
+ }
+
+ } else {
+ skb_put(skb, length);
+ }
+
+ skb->dev = rx_ring->ess->netdev;
+ skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
+ skb_record_rx_queue(skb, rx_ring->ring_id);
+
+ if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* pass any hardware-stripped VLAN tag up the stack */
+ if (rd->rrd7 & IPQESS_RRD_CVLAN) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
+ } else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
+ }
+ napi_gro_receive(&rx_ring->napi_rx, skb);
+
+ /* TODO: do we need to have these here ? */
+ rx_ring->ess->stats.rx_packets++;
+ rx_ring->ess->stats.rx_bytes += length;
+
+ done++;
+skip:
+
+ /* refill the slots just consumed plus any backlog left by the
+ * refill worker; on allocation failure push the remainder back
+ * and, once the backlog grows large, kick the worker
+ */
+ num_desc += atomic_xchg(&rx_ring->refill_count, 0);
+ while (num_desc) {
+ if (ipqess_rx_buf_alloc_napi(rx_ring)) {
+ num_desc = atomic_add_return(num_desc,
+ &rx_ring->refill_count);
+ if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
+ schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
+ break;
+ }
+ num_desc--;
+ }
+ }
+
+ /* publish the new software consumer index */
+ ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
+ rx_ring_tail);
+ rx_ring->tail = rx_ring_tail;
+
+ return done;
+}
+
+/* Reap completed TX descriptors up to @budget packets, freeing their
+ * buffers, and publish the new software consumer index. Returns the
+ * number of completed packets.
+ *
+ * NOTE(review): the queue is woken whenever it was stopped, without
+ * re-checking descriptor availability - confirm this cannot ping-pong
+ * with the stop in ipqess_xmit().
+ */
+static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
+{
+ u32 tail;
+ int done = 0;
+ int total = 0, ret;
+
+ /* hardware consumer index */
+ tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
+ tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
+ tail &= IPQESS_TPD_CONS_IDX_MASK;
+
+ while ((tx_ring->tail != tail) && (done < budget)) {
+ //pr_info("freeing txq:%d tail:%d tailbuf:%p\n", tx_ring->idx, tx_ring->tail, &tx_ring->buf[tx_ring->tail]);
+ ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
+ &tx_ring->buf[tx_ring->tail]);
+ tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
+ /* a non-zero return marks a completed skb; its value is fed
+ * to netdev_tx_completed_queue() below as the byte count
+ */
+ if (ret) {
+ total += ret;
+ done++;
+ }
+ }
+
+ ipqess_w32(tx_ring->ess,
+ IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
+ tx_ring->tail);
+
+ if (netif_tx_queue_stopped(tx_ring->nq)) {
+ netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
+ tx_ring->idx);
+ netif_tx_wake_queue(tx_ring->nq);
+ }
+
+ netdev_tx_completed_queue(tx_ring->nq, done, total);
+
+ return done;
+}
+
+/* NAPI poll handler for one TX ring: reap completions, ack the ring's
+ * ISR bit and, once under budget, re-enable the ring's interrupt.
+ */
+static int ipqess_tx_napi(struct napi_struct *napi, int budget)
+{
+ struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
+ napi_tx);
+ u32 tx_status;
+ int work_done = 0;
+
+ tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
+ tx_status &= BIT(tx_ring->idx);
+
+ work_done = ipqess_tx_complete(tx_ring, budget);
+
+ /* ack (clear) the serviced status bit */
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
+
+ if (likely(work_done < budget)) {
+ if (napi_complete_done(napi, work_done))
+ ipqess_w32(tx_ring->ess,
+ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
+ }
+
+ return work_done;
+}
+
+/* NAPI poll handler for one RX ring. The ring's ISR bit is cleared
+ * before each poll pass so packets arriving mid-pass set it again and
+ * trigger another pass; the interrupt is only re-enabled once the ring
+ * drains within budget.
+ */
+static int ipqess_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
+ napi_rx);
+ struct ipqess *ess = rx_ring->ess;
+ int remain_budget = budget;
+ int rx_done;
+ u32 rx_mask = BIT(rx_ring->idx);
+ u32 status;
+
+poll_again:
+ ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
+ rx_done = ipqess_rx_poll(rx_ring, remain_budget);
+
+ /* budget exhausted: stay scheduled */
+ if (rx_done == remain_budget)
+ return budget;
+
+ status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
+ if (status & rx_mask) {
+ remain_budget -= rx_done;
+ goto poll_again;
+ }
+
+ if (napi_complete_done(napi, rx_done + budget - remain_budget))
+ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
+
+ return rx_done + budget - remain_budget;
+}
+
+/* TX hard-IRQ handler: mask this ring's interrupt and hand off to NAPI. */
+static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
+{
+ struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *) priv;
+
+ if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
+ ipqess_w32(tx_ring->ess,
+ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
+ 0x0);
+ __napi_schedule(&tx_ring->napi_tx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* RX hard-IRQ handler: mask this ring's interrupt and hand off to NAPI. */
+static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
+{
+ struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *) priv;
+
+ if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
+ ipqess_w32(rx_ring->ess,
+ IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
+ 0x0);
+ __napi_schedule(&rx_ring->napi_rx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Clear any stale IRQ status, then unmask the per-queue RX/TX IRQs. */
+static void ipqess_irq_enable(struct ipqess *ess)
+{
+ int i;
+
+ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
+ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
+ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
+ }
+}
+
+/* Mask every per-queue RX/TX interrupt used by the driver. */
+static void ipqess_irq_disable(struct ipqess *ess)
+{
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
+ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
+ }
+}
+
+/* .ndo_init: connect the netdev to its PHY via phylink.
+ *
+ * Must NOT be marked __init: this is an ndo callback invoked from
+ * register_netdev(), which runs after the .init.text section has been
+ * discarded - keeping __init here is a section mismatch that can end
+ * up calling into freed memory.
+ */
+static int ipqess_init(struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ struct device_node *of_node = ess->pdev->dev.of_node;
+
+ return phylink_of_phy_connect(ess->phylink, of_node, 0);
+}
+
+/* .ndo_uninit: detach the PHY that ipqess_init() connected. */
+static void ipqess_uninit(struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+
+ phylink_disconnect_phy(ess->phylink);
+}
+
+/* .ndo_open: enable NAPI, unmask IRQs, start phylink, start TX queues. */
+static int ipqess_open(struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ napi_enable(&ess->tx_ring[i].napi_tx);
+ napi_enable(&ess->rx_ring[i].napi_rx);
+ }
+ ipqess_irq_enable(ess);
+ phylink_start(ess->phylink);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
+/* .ndo_stop: exact reverse of ipqess_open(), in the opposite order. */
+static int ipqess_stop(struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ int i;
+
+ netif_tx_stop_all_queues(netdev);
+ phylink_stop(ess->phylink);
+ ipqess_irq_disable(ess);
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ napi_disable(&ess->tx_ring[i].napi_tx);
+ napi_disable(&ess->rx_ring[i].napi_rx);
+ }
+
+ return 0;
+}
+
+/* .ndo_do_ioctl: forward the MII ioctls to phylink; everything else is
+ * unsupported.
+ */
+static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+
+ if (cmd == SIOCGMIIPHY || cmd == SIOCGMIIREG || cmd == SIOCSMIIREG)
+ return phylink_mii_ioctl(ess->phylink, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+
+/* Number of free TPD slots in @tx_ring. One slot is always kept unused
+ * so that head == tail unambiguously means "ring empty".
+ */
+static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
+{
+ u16 avail;
+
+ if (tx_ring->tail <= tx_ring->head)
+ avail = IPQESS_TX_RING_SIZE + tx_ring->tail - tx_ring->head - 1;
+ else
+ avail = tx_ring->tail - tx_ring->head - 1;
+
+ return avail;
+}
+
+/* Number of TPDs needed for @skb: one for the linear head, one per page
+ * fragment, plus one extra leading TPD in the LSOv2 (TSO over IPv6)
+ * case.
+ */
+static inline int ipqess_cal_txd_req(struct sk_buff *skb)
+{
+ int tpds;
+
+ /* one TPD for the header, and one for each fragments */
+ tpds = 1 + skb_shinfo(skb)->nr_frags;
+ if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
+ /* for LSOv2 one extra TPD is needed */
+ tpds++;
+ }
+
+ return tpds;
+}
+
+/* Map a TX hardware descriptor back to its software buffer slot. */
+static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
+ struct ipqess_tx_desc *desc)
+{
+ return &tx_ring->buf[desc - tx_ring->hw_desc];
+}
+
+/* Claim the next TX descriptor and advance the producer index. The
+ * caller is expected to have checked availability beforehand (see
+ * ipqess_tx_desc_available()).
+ */
+static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
+{
+ struct ipqess_tx_desc *desc;
+
+ desc = &tx_ring->hw_desc[tx_ring->head];
+ tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
+
+ return desc;
+}
+
+/* Undo a partially-mapped transmit: unmap and free every buffer from
+ * @first_desc up to the current head, wipe those descriptors and rewind
+ * the producer index. Used on DMA-mapping failure.
+ */
+static void ipqess_rollback_tx(struct ipqess *eth,
+ struct ipqess_tx_desc *first_desc, int ring_id)
+{
+ struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
+ struct ipqess_buf *buf;
+ struct ipqess_tx_desc *desc = NULL;
+ u16 start_index, index;
+
+ start_index = first_desc - tx_ring->hw_desc;
+
+ index = start_index;
+ while (index != tx_ring->head) {
+ desc = &tx_ring->hw_desc[index];
+ buf = &tx_ring->buf[index];
+ ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
+ memset(desc, 0, sizeof(struct ipqess_tx_desc));
+ if (++index == tx_ring->count)
+ index = 0;
+ }
+ tx_ring->head = start_index;
+}
+
+/* If @skb carries an IPQ4019 DSA tag in its shared info, translate it
+ * into the TPD word3 destination-port / from_cpu bits.
+ *
+ * Returns true when a tag was consumed; false when the skb carries no
+ * (or a foreign) tag and the caller must fall back to the default port
+ * bitmap.
+ */
+static bool ipqess_process_dsa_tag_sh(struct sk_buff *skb, u32 *word3)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct ipq40xx_dsa_tag_data *tag_data;
+
+ if (shinfo->dsa_tag_proto != DSA_TAG_PROTO_IPQ4019)
+ return false;
+
+ tag_data = (struct ipq40xx_dsa_tag_data *)shinfo->dsa_tag_data;
+
+ /* print the pointer with %p rather than casting it to u32: the
+ * cast truncates (and warns) on 64-bit builds
+ */
+ pr_debug("SH tag @ %p, dp:%02x from_cpu:%u\n",
+ tag_data, tag_data->dp, tag_data->from_cpu);
+
+ *word3 |= tag_data->dp << IPQESS_TPD_PORT_BITMAP_SHIFT;
+ if (tag_data->from_cpu)
+ *word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);
+
+ return true;
+}
+
+/* Fill the TPD word3 destination-port information for @skb. If the
+ * netdev is a DSA master and the skb carries an IPQ4019 tag, the tag
+ * decides the destination; otherwise apply the default port bitmap
+ * (0x3e).
+ */
+static void ipqess_get_dp_info(struct ipqess *ess, struct sk_buff *skb,
+ u32 *word3)
+{
+ if (netdev_uses_dsa(ess->netdev) &&
+ ipqess_process_dsa_tag_sh(skb, word3))
+ return;
+
+ *word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
+}
+
+/* Map @skb (linear head + fragments) onto TX descriptors and fill in
+ * the offload words (LSO, checksum, VLAN, destination ports).
+ *
+ * Returns 0 on success. On DMA-mapping failure every descriptor
+ * claimed so far is rolled back and -ENOMEM is returned; the caller
+ * still owns (and frees) the skb.
+ */
+static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
+{
+ struct ipqess_buf *buf = NULL;
+ struct platform_device *pdev = tx_ring->ess->pdev;
+ struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
+ u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
+ u16 len;
+ int i;
+
+ ipqess_get_dp_info(tx_ring->ess, skb, &word3);
+
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+ lso_word1 |= IPQESS_TPD_IPV4_EN;
+ /* seed the TCP pseudo-header checksum for LSO */
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ lso_word1 |= IPQESS_TPD_LSO_V2_EN;
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ }
+
+ lso_word1 |= IPQESS_TPD_LSO_EN |
+ ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
+ (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ /* custom checksum offload: start and store offsets in
+ * 16-bit words
+ */
+ u8 css, cso;
+
+ cso = skb_checksum_start_offset(skb);
+ css = cso + skb->csum_offset;
+
+ word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
+ word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
+ word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ switch (skb->vlan_proto) {
+ case htons(ETH_P_8021Q):
+ word3 |= BIT(IPQESS_TX_INS_CVLAN);
+ word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
+ break;
+ case htons(ETH_P_8021AD):
+ word1 |= BIT(IPQESS_TX_INS_SVLAN);
+ svlan_tag = skb_vlan_tag_get(skb);
+ break;
+ default:
+ dev_err(&pdev->dev, "no ctag or stag present\n");
+ goto vlan_tag_error;
+ }
+ }
+
+ if (eth_type_vlan(skb->protocol))
+ word1 |= IPQESS_TPD_VLAN_TAGGED;
+
+ if (skb->protocol == htons(ETH_P_PPP_SES))
+ word1 |= IPQESS_TPD_PPPOE_EN;
+
+ len = skb_headlen(skb);
+
+ first_desc = desc = ipqess_tx_desc_next(tx_ring);
+ if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
+ /* LSOv2 carries the total packet length in an extra leading
+ * TPD. The addr field is 32 bits wide: use cpu_to_le32 here;
+ * the previous cpu_to_le16 truncated lengths >= 64 KiB, which
+ * are exactly what LSOv2 exists to handle.
+ */
+ desc->addr = cpu_to_le32(skb->len);
+ desc->word1 = word1 | lso_word1;
+ desc->svlan_tag = svlan_tag;
+ desc->word3 = word3;
+ desc = ipqess_tx_desc_next(tx_ring);
+ }
+
+ /* map the linear head */
+ buf = ipqess_get_tx_buffer(tx_ring, desc);
+ buf->length = len;
+ buf->dma = dma_map_single(&pdev->dev,
+ skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buf->dma))
+ goto dma_error;
+
+ desc->addr = cpu_to_le32(buf->dma);
+ desc->len = cpu_to_le16(len);
+
+ buf->flags |= IPQESS_DESC_SINGLE;
+ desc->word1 = word1 | lso_word1;
+ desc->svlan_tag = svlan_tag;
+ desc->word3 = word3;
+
+ /* one descriptor per page fragment */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ desc = ipqess_tx_desc_next(tx_ring);
+ buf = ipqess_get_tx_buffer(tx_ring, desc);
+ buf->length = len;
+ buf->flags |= IPQESS_DESC_PAGE;
+ buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buf->dma))
+ goto dma_error;
+
+ desc->addr = cpu_to_le32(buf->dma);
+ desc->len = cpu_to_le16(len);
+ desc->svlan_tag = svlan_tag;
+ desc->word1 = word1 | lso_word1;
+ desc->word3 = word3;
+ }
+ /* mark end-of-packet and remember the skb on the last slot */
+ desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;
+ buf->skb = skb;
+ buf->flags |= IPQESS_DESC_LAST;
+
+ return 0;
+
+dma_error:
+ ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+vlan_tag_error:
+ return -ENOMEM;
+}
+
+/* Publish the TX producer index to hardware. The dma_wmb() orders the
+ * descriptor writes before the doorbell so the DMA engine never sees a
+ * half-written TPD.
+ */
+static inline void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
+{
+ /* Ensure that all TPDs has been written completely */
+ dma_wmb();
+
+ /* update software producer index */
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
+ tx_ring->head);
+}
+
+/* .ndo_start_xmit: map @skb onto the queue-mapped TX ring and ring the
+ * doorbell (deferred while xmit_more is set).
+ *
+ * NOTE(review): the stats counters are updated here without taking
+ * stats_lock - verify this race with ipqess_get_stats() is acceptable.
+ */
+static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ struct ipqess_tx_ring *tx_ring;
+ int avail;
+ int tx_num;
+ int ret;
+
+ tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
+ tx_num = ipqess_cal_txd_req(skb);
+ avail = ipqess_tx_desc_available(tx_ring);
+ if (avail < tx_num) {
+ netdev_dbg(netdev,
+ "stopping tx queue %d, avail=%d req=%d im=%x\n",
+ tx_ring->idx, avail, tx_num,
+ ipqess_r32(tx_ring->ess,
+ IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
+ netif_tx_stop_queue(tx_ring->nq);
+ /* unmask the completion IRQ so the queue gets woken again */
+ ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
+ ipqess_kick_tx(tx_ring);
+ return NETDEV_TX_BUSY;
+ }
+
+ ret = ipqess_tx_map_and_fill(tx_ring, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ ess->stats.tx_errors++;
+ goto err_out;
+ }
+
+ ess->stats.tx_packets++;
+ ess->stats.tx_bytes += skb->len;
+ netdev_tx_sent_queue(tx_ring->nq, skb->len);
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
+ ipqess_kick_tx(tx_ring);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+/* .ndo_set_mac_address: store the new MAC in the netdev, then program
+ * it into the MAC_CTRL registers (bytes 0-1 into CTRL1, 2-5 into CTRL0).
+ */
+static int ipqess_set_mac_address(struct net_device *netdev, void *p)
+{
+ int ret = eth_mac_addr(netdev, p);
+ struct ipqess *ess = netdev_priv(netdev);
+ /* must be an unsigned byte pointer: with plain (possibly signed)
+ * char, address bytes >= 0x80 would sign-extend in the shifts
+ * below and corrupt the register values
+ */
+ const u8 *macaddr = netdev->dev_addr;
+
+ if (ret)
+ return ret;
+
+ ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
+ (macaddr[0] << 8) | macaddr[1]);
+ ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
+ (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5]);
+
+ return 0;
+}
+
+/* .ndo_tx_timeout: report which hardware queue appears stuck. */
+static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
+
+ netdev_warn(netdev, "hardware queue %d appears to be stuck\n",
+ tr->idx);
+
+ /* TODO: dump hardware queue */
+}
+
+/* net_device_ops hooks wired up in ipqess_axi_probe() */
+static const struct net_device_ops ipqess_axi_netdev_ops = {
+ .ndo_init = ipqess_init,
+ .ndo_uninit = ipqess_uninit,
+ .ndo_open = ipqess_open,
+ .ndo_stop = ipqess_stop,
+ .ndo_do_ioctl = ipqess_do_ioctl,
+ .ndo_start_xmit = ipqess_xmit,
+ .ndo_get_stats = ipqess_get_stats,
+ .ndo_set_mac_address = ipqess_set_mac_address,
+ .ndo_tx_timeout = ipqess_tx_timeout,
+};
+
+/* Quiesce the DMA engine: mask and ack every interrupt source, then
+ * disable all RX and TX queues.
+ */
+static void ipqess_hw_stop(struct ipqess *ess)
+{
+ int i;
+
+ /* disable all RX queue IRQs */
+ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
+ ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
+
+ /* disable all TX queue IRQs */
+ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
+ ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
+
+ /* disable all other IRQs */
+ ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
+ ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
+
+ /* clear the IRQ status registers */
+ ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
+ ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
+ ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
+ ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
+ ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
+
+ /* disable RX and TX queues */
+ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
+ ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
+}
+
+/* One-time hardware bring-up: program interrupt, VLAN, burst, RSS and
+ * FIFO parameters, allocate the descriptor rings and enable the queues.
+ */
+static int ipqess_hw_init(struct ipqess *ess)
+{
+ u32 tmp;
+ int i, err;
+
+ ipqess_hw_stop(ess);
+
+ ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
+ IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
+ IPQESS_REG_INTR_CTRL);
+
+ /* enable IRQ delay slot */
+ ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
+ (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
+ (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
+
+ /* Set Customer and Service VLAN TPIDs */
+ ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
+ (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
+ (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
+
+ /* Configure the TX Queue bursting */
+ ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
+ (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
+ (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
+ IPQESS_TXQ_CTRL_TPD_BURST_EN);
+
+ /* Set RSS type */
+ ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
+ IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
+ IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
+ IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
+
+ /* Set RFD ring burst and threshold */
+ ipqess_w32(ess, IPQESS_REG_RX_DESC1,
+ (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
+ (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
+ (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
+
+ /* Set Rx FIFO
+ * - threshold to start to DMA data to host
+ */
+ ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
+ IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
+
+ err = ipqess_rx_ring_alloc(ess);
+ if (err)
+ return err;
+
+ /* NOTE(review): on failure here the RX rings allocated above are
+ * not torn down in this function - confirm the caller's error path
+ * covers that
+ */
+ err = ipqess_tx_ring_alloc(ess);
+ if (err)
+ return err;
+
+ /* Load all of ring base addresses above into the dma engine */
+ ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
+ IPQESS_REG_TX_SRAM_PART);
+
+ /* Disable TX FIFO low watermark and high watermark */
+ ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
+
+ /* Configure RSS indirection table.
+ * 128 hash will be configured in the following
+ * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
+ * and so on
+ */
+ for (i = 0; i < IPQESS_NUM_IDT; i++)
+ ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
+
+ /* Configure load balance mapping table.
+ * 4 table entry will be configured according to the
+ * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
+ * respectively.
+ */
+ ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
+
+ /* Configure Virtual queue for Tx rings */
+ ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
+ ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
+
+ /* Configure Max AXI Burst write size to 128 bytes*/
+ ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
+ IPQESS_AXIW_MAXWRSIZE_VALUE);
+
+ /* Enable TX queues */
+ ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
+
+ /* Enable RX queues */
+ tmp = 0;
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
+ tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
+
+ ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
+
+ return 0;
+}
+
+/* phylink .validate: only PHY_INTERFACE_MODE_INTERNAL is accepted;
+ * within it, gigabit full duplex plus (asym) pause are advertised.
+ */
+static void ipqess_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct ipqess *ess = container_of(config, struct ipqess, phylink_config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL) {
+ dev_err(&ess->pdev->dev, "unsupported interface mode: %d\n",
+ state->interface);
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+/* phylink .mac_config callback - not implemented yet */
+static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* TODO */
+}
+
+/* phylink .mac_link_down callback - not implemented yet */
+static void ipqess_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* TODO */
+}
+
+/* phylink .mac_link_up callback - not implemented yet */
+static void ipqess_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ /* TODO */
+}
+
+/* MAC-level phylink callbacks; const since phylink only reads this
+ * table (phylink_create() takes a const pointer).
+ */
+static const struct phylink_mac_ops ipqess_phylink_mac_ops = {
+ .validate = ipqess_validate,
+ .mac_config = ipqess_mac_config,
+ .mac_link_up = ipqess_mac_link_up,
+ .mac_link_down = ipqess_mac_link_down,
+};
+
+/* Tear down everything ipqess_axi_probe() set up.
+ *
+ * NOTE(review): this is also reached from probe error paths that run
+ * before register_netdev()/ring allocation succeed, yet it calls
+ * unregister_netdev() and the ring-free helpers unconditionally -
+ * verify those paths cannot hit an unregistered netdev or unallocated
+ * rings.
+ */
+static void ipqess_cleanup(struct ipqess *ess)
+{
+ ipqess_hw_stop(ess);
+ unregister_netdev(ess->netdev);
+
+ ipqess_tx_ring_free(ess);
+ ipqess_rx_ring_free(ess);
+
+ if (!IS_ERR_OR_NULL(ess->phylink))
+ phylink_destroy(ess->phylink);
+}
+
+/* Hard-reset the ESS block and wait for it to come back up. */
+static void ess_reset(struct ipqess *ess)
+{
+ reset_control_assert(ess->ess_rst);
+
+ mdelay(10);
+
+ reset_control_deassert(ess->ess_rst);
+
+ /* Waiting for all inner tables to be flushed and reinitialized.
+ * This takes between 5 and 10ms.
+ */
+ mdelay(10);
+}
+
+/* Platform-device probe: allocate the netdev, map the register space,
+ * set up clock/reset and phylink, register the netdev, initialize the
+ * hardware and hook up the per-queue IRQs and NAPI contexts.
+ *
+ * NOTE(review): err_out calls ipqess_cleanup(), which unregisters the
+ * netdev and frees rings even for failures occurring before
+ * register_netdev()/ipqess_hw_init() - verify those early error paths.
+ */
+static int ipqess_axi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ipqess *ess;
+ struct net_device *netdev;
+ struct resource *res;
+ int i, err = 0;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct ipqess),
+ IPQESS_NETDEV_QUEUES,
+ IPQESS_NETDEV_QUEUES);
+ if (!netdev)
+ return -ENOMEM;
+
+ ess = netdev_priv(netdev);
+ ess->netdev = netdev;
+ ess->pdev = pdev;
+ spin_lock_init(&ess->stats_lock);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ platform_set_drvdata(pdev, netdev);
+
+ err = of_get_mac_address(np, netdev->dev_addr);
+ if (err == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* no usable MAC in the device tree: fall back to a random one */
+ if (err) {
+
+ random_ether_addr(netdev->dev_addr);
+ dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
+ netdev->dev_addr);
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ess->hw_addr)) {
+ err = PTR_ERR(ess->hw_addr);
+ goto err_out;
+ }
+
+ ess->ess_clk = of_clk_get_by_name(np, "ess_clk");
+ if (IS_ERR(ess->ess_clk)) {
+ dev_err(&pdev->dev, "Failed to get ess_clk\n");
+ return PTR_ERR(ess->ess_clk);
+ }
+
+ ess->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
+ if (IS_ERR(ess->ess_rst)) {
+ dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
+ return PTR_ERR(ess->ess_rst);
+ }
+
+ clk_prepare_enable(ess->ess_clk);
+
+ ess_reset(ess);
+
+ ess->phylink_config.dev = &netdev->dev;
+ ess->phylink_config.type = PHYLINK_NETDEV;
+ ess->phylink_config.pcs_poll = true;
+
+ ess->phylink = phylink_create(&ess->phylink_config,
+ of_fwnode_handle(np),
+ PHY_INTERFACE_MODE_INTERNAL,
+ &ipqess_phylink_mac_ops);
+ if (IS_ERR(ess->phylink)) {
+ err = PTR_ERR(ess->phylink);
+ goto err_out;
+ }
+
+ /* one platform IRQ per hardware TX queue, then one per RX queue */
+ for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
+ ess->tx_irq[i] = platform_get_irq(pdev, i);
+ scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
+ "%s:txq%d", pdev->name, i);
+ }
+
+ for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
+ ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
+ scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
+ "%s:rxq%d", pdev->name, i);
+ }
+
+/* force-disable the TSO6 feature bits used in the flags below */
+#undef NETIF_F_TSO6
+#define NETIF_F_TSO6 0
+
+ netdev->netdev_ops = &ipqess_axi_netdev_ops;
+ netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO | NETIF_F_SG;
+ /* feature change is not supported yet */
+ netdev->hw_features = 0;
+ netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+ netdev->watchdog_timeo = 5 * HZ;
+ netdev->base_addr = (u32) ess->hw_addr;
+ netdev->max_mtu = 9000;
+ netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
+
+ ipqess_set_ethtool_ops(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_out;
+
+ err = ipqess_hw_init(ess);
+ if (err)
+ goto err_out;
+
+ dev_set_threaded(netdev, true);
+
+ for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
+ int qid;
+
+ netif_tx_napi_add(netdev, &ess->tx_ring[i].napi_tx,
+ ipqess_tx_napi, 64);
+ netif_napi_add(netdev,
+ &ess->rx_ring[i].napi_rx,
+ ipqess_rx_napi, 64);
+
+ qid = ess->tx_ring[i].idx;
+ err = devm_request_irq(&ess->netdev->dev, ess->tx_irq[qid],
+ ipqess_interrupt_tx, 0, ess->tx_irq_names[qid],
+ &ess->tx_ring[i]);
+ if (err)
+ goto err_out;
+
+ qid = ess->rx_ring[i].idx;
+ err = devm_request_irq(&ess->netdev->dev, ess->rx_irq[qid],
+ ipqess_interrupt_rx, 0, ess->rx_irq_names[qid],
+ &ess->rx_ring[i]);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ ipqess_cleanup(ess);
+ return err;
+}
+
+/* Platform-device removal: full teardown via ipqess_cleanup(). */
+static int ipqess_axi_remove(struct platform_device *pdev)
+{
+ const struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ipqess *ess = netdev_priv(netdev);
+
+ ipqess_cleanup(ess);
+
+ return 0;
+}
+
+/* device-tree match table */
+static const struct of_device_id ipqess_of_mtable[] = {
+ {.compatible = "qcom,ipq4019-ess-edma" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
+
+/* platform driver glue */
+static struct platform_driver ipqess_axi_driver = {
+ .driver = {
+ .name = "ipqess-edma",
+ .of_match_table = ipqess_of_mtable,
+ },
+ .probe = ipqess_axi_probe,
+ .remove = ipqess_axi_remove,
+};
+
+module_platform_driver(ipqess_axi_driver);
+
+MODULE_AUTHOR("Qualcomm Atheros Inc");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
+MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+// SPDX-License-Identifier: (GPL-2.0 OR ISC)
+/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
+ * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _IPQESS_H_
+#define _IPQESS_H_
+
+/* number of RX/TX queue pairs exposed to the network stack */
+#define IPQESS_NETDEV_QUEUES 4
+
+#define IPQESS_TPD_EOP_SHIFT 31
+
+#define IPQESS_PORT_ID_SHIFT 12
+#define IPQESS_PORT_ID_MASK 0x7
+
+/* tpd word 3 bit 18-28 */
+#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
+
+#define IPQESS_TPD_FROM_CPU_SHIFT 25
+
+/* ring geometry and hardware queue counts */
+#define IPQESS_RX_RING_SIZE 128
+#define IPQESS_RX_HEAD_BUFF_SIZE 1540
+#define IPQESS_TX_RING_SIZE 128
+#define IPQESS_MAX_RX_QUEUE 8
+#define IPQESS_MAX_TX_QUEUE 16
+
+
+/* Configurations */
+#define IPQESS_INTR_CLEAR_TYPE 0
+#define IPQESS_INTR_SW_IDX_W_TYPE 0
+#define IPQESS_FIFO_THRESH_TYPE 0
+#define IPQESS_RSS_TYPE 0
+/* interrupt moderation timer initial values (RX/TX) */
+#define IPQESS_RX_IMT 0x0020
+#define IPQESS_TX_IMT 0x0050
+#define IPQESS_TPD_BURST 5
+#define IPQESS_TXF_BURST 0x100
+#define IPQESS_RFD_BURST 8
+#define IPQESS_RFD_THR 16
+#define IPQESS_RFD_LTHR 0
+
+/* Flags used in transmit direction */
+#define IPQESS_DESC_LAST 0x1
+#define IPQESS_DESC_SINGLE 0x2
+#define IPQESS_DESC_PAGE 0x4
+
+/* Raw per-queue TX/RX packet and byte counters (filled in by
+ * ipqess_update_hw_stats()).
+ */
+struct ipqesstool_statistics {
+ u32 tx_q0_pkt;
+ u32 tx_q1_pkt;
+ u32 tx_q2_pkt;
+ u32 tx_q3_pkt;
+ u32 tx_q4_pkt;
+ u32 tx_q5_pkt;
+ u32 tx_q6_pkt;
+ u32 tx_q7_pkt;
+ u32 tx_q8_pkt;
+ u32 tx_q9_pkt;
+ u32 tx_q10_pkt;
+ u32 tx_q11_pkt;
+ u32 tx_q12_pkt;
+ u32 tx_q13_pkt;
+ u32 tx_q14_pkt;
+ u32 tx_q15_pkt;
+ u32 tx_q0_byte;
+ u32 tx_q1_byte;
+ u32 tx_q2_byte;
+ u32 tx_q3_byte;
+ u32 tx_q4_byte;
+ u32 tx_q5_byte;
+ u32 tx_q6_byte;
+ u32 tx_q7_byte;
+ u32 tx_q8_byte;
+ u32 tx_q9_byte;
+ u32 tx_q10_byte;
+ u32 tx_q11_byte;
+ u32 tx_q12_byte;
+ u32 tx_q13_byte;
+ u32 tx_q14_byte;
+ u32 tx_q15_byte;
+ u32 rx_q0_pkt;
+ u32 rx_q1_pkt;
+ u32 rx_q2_pkt;
+ u32 rx_q3_pkt;
+ u32 rx_q4_pkt;
+ u32 rx_q5_pkt;
+ u32 rx_q6_pkt;
+ u32 rx_q7_pkt;
+ u32 rx_q0_byte;
+ u32 rx_q1_byte;
+ u32 rx_q2_byte;
+ u32 rx_q3_byte;
+ u32 rx_q4_byte;
+ u32 rx_q5_byte;
+ u32 rx_q6_byte;
+ u32 rx_q7_byte;
+ u32 tx_desc_error;
+};
+
+/* TPD: transmit packet descriptor as consumed by the DMA engine
+ * (little-endian fields, 16-byte aligned).
+ */
+struct ipqess_tx_desc {
+ __le16 len;
+ __le16 svlan_tag;
+ __le32 word1;
+ __le32 addr;
+ __le32 word3;
+} __aligned(16) __packed;
+
+/* RRD: receive return descriptor written by hardware at the head of
+ * each RX buffer.
+ * NOTE(review): fields are plain u16, not __le16 - fine on the
+ * little-endian IPQ4019 but worth annotating for sparse; verify.
+ */
+struct ipqess_rx_desc {
+ u16 rrd0;
+ u16 rrd1;
+ u16 rrd2;
+ u16 rrd3;
+ u16 rrd4;
+ u16 rrd5;
+ u16 rrd6;
+ u16 rrd7;
+} __aligned(16) __packed;
+
+/* Per-descriptor software state: the skb and its DMA mapping. */
+struct ipqess_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 flags; /* IPQESS_DESC_* */
+ u16 length; /* mapped length in bytes */
+};
+
+/* Software state for one TX (TPD) ring. */
+struct ipqess_tx_ring {
+ struct napi_struct napi_tx;
+ u32 idx; /* hardware queue index */
+ int ring_id;
+ struct ipqess *ess;
+ struct netdev_queue *nq;
+ struct ipqess_tx_desc *hw_desc;
+ struct ipqess_buf *buf;
+ dma_addr_t dma;
+ u16 count; /* number of descriptors */
+ u16 head; /* producer index */
+ u16 tail; /* consumer index */
+};
+
+/* Software state for one RX (RFD) ring. */
+struct ipqess_rx_ring {
+ struct napi_struct napi_rx;
+ u32 idx; /* hardware queue index */
+ int ring_id;
+ struct ipqess *ess;
+ struct device *ppdev; /* device used for DMA (un)mapping */
+ /* NOTE(review): declared as pointer-to-pointer, yet
+ * ipqess_rx_ring_alloc() stores a dmam_alloc_coherent()
+ * descriptor block here - looks like it should be
+ * struct ipqess_rx_desc *; verify
+ */
+ struct ipqess_rx_desc **hw_desc;
+ struct ipqess_buf *buf;
+ dma_addr_t dma; /* DMA base address of the descriptor block */
+ u16 head;
+ u16 tail; /* software consumer index */
+ atomic_t refill_count; /* buffers pending refill */
+};
+
+/* Deferred-refill context: worker that replenishes a ring's RX buffers
+ * when in-NAPI allocation failed.
+ */
+struct ipqess_rx_ring_refill {
+ struct ipqess_rx_ring *rx_ring;
+ struct work_struct refill_work;
+};
+
+#define IPQESS_IRQ_NAME_LEN 32
+
+/* Per-device driver state, stored as the netdev's private data. */
+struct ipqess {
+ struct net_device *netdev;
+ void __iomem *hw_addr;
+ struct clk *ess_clk;
+ struct reset_control *ess_rst;
+
+ struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
+
+ struct platform_device *pdev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
+
+ struct ipqesstool_statistics ipqessstats;
+ spinlock_t stats_lock; /* guards ipqessstats/stats updates */
+ struct net_device_stats stats;
+
+ struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
+ u32 tx_irq[IPQESS_MAX_TX_QUEUE];
+ char tx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
+ u32 rx_irq[IPQESS_MAX_RX_QUEUE];
+ /* was dimensioned IPQESS_MAX_TX_QUEUE - a copy/paste slip; the RX
+ * name array must match rx_irq[], which uses the RX queue count
+ */
+ char rx_irq_names[IPQESS_MAX_RX_QUEUE][IPQESS_IRQ_NAME_LEN];
+};
+
+/* Compile-time check that each RX ring has a matching refill slot. */
+static inline void build_test(void)
+{
+ struct ipqess *ess;
+ BUILD_BUG_ON(ARRAY_SIZE(ess->rx_ring) != ARRAY_SIZE(ess->rx_refill));
+}
+
+/* defined elsewhere in the driver (ethtool/statistics support) */
+void ipqess_set_ethtool_ops(struct net_device *netdev);
+void ipqess_update_hw_stats(struct ipqess *ess);
+
+/* register definition */
+#define IPQESS_REG_MAS_CTRL 0x0
+#define IPQESS_REG_TIMEOUT_CTRL 0x004
+#define IPQESS_REG_DBG0 0x008
+#define IPQESS_REG_DBG1 0x00C
+#define IPQESS_REG_SW_CTRL0 0x100
+#define IPQESS_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define IPQESS_REG_RX_ISR 0x200
+#define IPQESS_REG_TX_ISR 0x208
+#define IPQESS_REG_MISC_ISR 0x210
+#define IPQESS_REG_WOL_ISR 0x218
+
+/* parenthesize the macro argument: the former (1 << x) would expand
+ * incorrectly for an expression argument such as a + b
+ */
+#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << (x))
+
+#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
+#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
+#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
+#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define IPQESS_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define IPQESS_REG_MISC_IMR 0x214
+/* NOTE(review): IPQESS_REG_WOL_IMR shares offset 0x218 with
+ * IPQESS_REG_WOL_ISR above - verify against the datasheet
+ */
+#define IPQESS_REG_WOL_IMR 0x218
+
+#define IPQESS_RX_IMR_NORMAL_MASK 0x1
+#define IPQESS_TX_IMR_NORMAL_MASK 0x1
+#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
+
+/* Edma receive consumer index */
+#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
+
+/* Edma transmit consumer index */
+#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
+
+/* IRQ Moderator Initial Timer Register */
+#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define IPQESS_REG_INTR_CTRL 0x284
+#define IPQESS_INTR_CLR_TYP_SHIFT 0
+#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
+#define IPQESS_INTR_CLEAR_TYPE_W1 0
+#define IPQESS_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after the initialization of the head and tail
+ */
+#define IPQESS_REG_TX_SRAM_PART 0x400
+#define IPQESS_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define IPQESS_REG_TXQ_CTRL 0x404
+#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
+#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
+#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
+#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
+#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
+#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
+#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
+#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define IPQESS_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
+#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
+#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
+#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
+#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
+#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
+#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
+#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weight round robin(WRR), it takes queue as input, and computes
+ * starting bits where we need to write the weight for a particular
+ * queue
+ */
+#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
+
+/* Tx Descriptor Control Register */
+#define IPQESS_REG_TPD_RING_SIZE 0x41C
+#define IPQESS_TPD_RING_SIZE_SHIFT 0
+#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
+#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
+#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
+#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
+#define IPQESS_TPD_PROD_IDX_SHIFT 0
+#define IPQESS_TPD_CONS_IDX_SHIFT 16
+
+/* TX Virtual Queue Mapping Control Register */
+#define IPQESS_REG_VQ_CTRL0 0x4A0
+#define IPQESS_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift, it takes queue as input, and computes
+ * Virtual QID position in virtual qid control register
+ */
+#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
+
+/* Virtual Queue Default Value */
+#define IPQESS_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define IPQESS_REG_PORT_CTRL 0x4A8
+#define IPQESS_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define IPQESS_REG_VLAN_CFG 0x4AC
+
+#define IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT 0
+#define IPQESS_VLAN_CFG_SVLAN_TPID_MASK 0xffff
+#define IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT 16
+#define IPQESS_VLAN_CFG_CVLAN_TPID_MASK 0xffff
+
+#define IPQESS_TX_CVLAN 16
+#define IPQESS_TX_INS_CVLAN 17
+#define IPQESS_TX_CVLAN_TAG_SHIFT 0
+
+#define IPQESS_TX_SVLAN 14
+#define IPQESS_TX_INS_SVLAN 15
+#define IPQESS_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define IPQESS_REG_LB_RING 0x800
+#define IPQESS_LB_RING_ENTRY_MASK 0xff
+#define IPQESS_LB_RING_ID_MASK 0x7
+#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
+#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
+#define IPQESS_LB_RING_ID_OFFSET 0
+#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
+#define IPQESS_LB_REG_VALUE 0x6040200
+
+/* Load Balance Priority Mapping Register */
+#define IPQESS_REG_LB_PRI_START 0x804
+#define IPQESS_REG_LB_PRI_END 0x810
+#define IPQESS_LB_PRI_REG_INC 4
+#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
+#define IPQESS_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define IPQESS_REG_RSS_PRI 0x820
+#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
+#define IPQESS_RSS_RING_ID_MASK 0x7
+#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
+#define IPQESS_NUM_IDT 16
+#define IPQESS_RSS_IDT_VALUE 0x64206420
+
+/* Default RSS Ring Register */
+#define IPQESS_REG_DEF_RSS 0x890
+#define IPQESS_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define IPQESS_REG_RSS_TYPE 0x894
+#define IPQESS_RSS_TYPE_NONE 0x01
+#define IPQESS_RSS_TYPE_IPV4TCP 0x02
+#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
+#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
+#define IPQESS_RSS_TYPE_IPV6UDP 0x10
+#define IPQESS_RSS_TYPE_IPV4 0x20
+#define IPQESS_RSS_TYPE_IPV6 0x40
+#define IPQESS_RSS_HASH_MODE_MASK 0x7f
+
+#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
+
+#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
+
+#define IPQESS_HASH_TYPE_START 0
+#define IPQESS_HASH_TYPE_END 5
+#define IPQESS_HASH_TYPE_SHIFT 12
+
+#define IPQESS_RFS_FLOW_ENTRIES 1024
+#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
+#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
+
+/* RFD Base Address Register */
+#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
+
+/* RFD Index Register */
+#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) /* x = queue id */
+
+#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
+#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
+#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
+#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
+#define IPQESS_RFD_PROD_IDX_SHIFT 0
+#define IPQESS_RFD_CONS_IDX_SHIFT 16
+
+/* Rx Descriptor Control Register */
+#define IPQESS_REG_RX_DESC0 0xA10
+#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
+#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
+#define IPQESS_RFD_RING_SIZE_SHIFT 0
+#define IPQESS_RX_BUF_SIZE_SHIFT 16
+
+#define IPQESS_REG_RX_DESC1 0xA14
+#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
+#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
+#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define IPQESS_REG_RXQ_CTRL 0xA18
+#define IPQESS_FIFO_THRESH_TYPE_SHIF 0
+#define IPQESS_FIFO_THRESH_128_BYTE 0x0
+#define IPQESS_FIFO_THRESH_64_BYTE 0x1
+#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
+#define IPQESS_RXQ_CTRL_EN_MASK GENMASK(15, 8)
+#define IPQESS_RXQ_CTRL_EN(__qid) BIT(8 + (__qid))
+
+/* AXI Burst Size Config */
+#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
+#define IPQESS_WOL_PT_LEN_MASK 0xFF
+#define IPQESS_WOL_PT0_LEN_SHIFT 0
+#define IPQESS_WOL_PT1_LEN_SHIFT 8
+#define IPQESS_WOL_PT2_LEN_SHIFT 16
+#define IPQESS_WOL_PT3_LEN_SHIFT 24
+
+#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
+#define IPQESS_WOL_PT4_LEN_SHIFT 0
+#define IPQESS_WOL_PT5_LEN_SHIFT 8
+#define IPQESS_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define IPQESS_REG_WOL_CTRL 0xC08
+#define IPQESS_WOL_WK_EN 0x00000001
+#define IPQESS_WOL_MG_EN 0x00000002
+#define IPQESS_WOL_PT0_EN 0x00000004
+#define IPQESS_WOL_PT1_EN 0x00000008
+#define IPQESS_WOL_PT2_EN 0x00000010
+#define IPQESS_WOL_PT3_EN 0x00000020
+#define IPQESS_WOL_PT4_EN 0x00000040
+#define IPQESS_WOL_PT5_EN 0x00000080
+#define IPQESS_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define IPQESS_REG_MAC_CTRL0 0xC20
+#define IPQESS_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define IPQESS_REG_WOL_PATTERN_START 0x5000
+#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
+
+
+/* TX descriptor fields */
+#define IPQESS_TPD_HDR_SHIFT 0
+#define IPQESS_TPD_PPPOE_EN 0x00000100
+#define IPQESS_TPD_IP_CSUM_EN 0x00000200
+#define IPQESS_TPD_TCP_CSUM_EN 0x0000400
+#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
+#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define IPQESS_TPD_LSO_EN 0x00001000
+#define IPQESS_TPD_LSO_V2_EN 0x00002000
+/* The VLAN_TAGGED bit is not used in the publicly available
+ * drivers. The definition has been stolen from the Atheros
+ * 'alx' driver (drivers/net/ethernet/atheros/alx/hw.h). It
+ * seems that it has the same meaning in regard to the EDMA
+ * hardware.
+ */
+#define IPQESS_TPD_VLAN_TAGGED 0x00004000
+#define IPQESS_TPD_IPV4_EN 0x00010000
+#define IPQESS_TPD_MSS_MASK 0x1FFF
+#define IPQESS_TPD_MSS_SHIFT 18
+#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
+
+/* RRD descriptor fields */
+#define IPQESS_RRD_NUM_RFD_MASK 0x000F
+#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
+#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
+#define IPQESS_RRD_SVLAN 0x8000
+/* Drop the stray trailing ';' — it would be pasted into any expression
+ * using this mask and break compilation there.
+ */
+#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF
+
+/* (duplicate IPQESS_RRD_PKT_SIZE_MASK definition removed; see above) */
+#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
+#define IPQESS_RRD_CVLAN 0x0001
+#define IPQESS_RRD_DESC_VALID 0x8000
+
+#define IPQESS_RRD_PRIORITY_SHIFT 4
+#define IPQESS_RRD_PRIORITY_MASK 0x7
+#define IPQESS_RRD_PORT_TYPE_SHIFT 7
+#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
+
+#endif
--- /dev/null
+// SPDX-License-Identifier: (GPL-2.0 OR ISC)
+/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/phylink.h>
+
+#include "ipqess.h"
+
+/* Maps one ethtool stat name to its byte offset inside
+ * struct ipqesstool_statistics (computed via IPQESS_STAT/offsetof below).
+ */
+struct ipqesstool_stats {
+ uint8_t string[ETH_GSTRING_LEN]; /* name reported to `ethtool -S` */
+ uint32_t offset; /* byte offset into struct ipqesstool_statistics */
+};
+
+#define IPQESS_STAT(m) offsetof(struct ipqesstool_statistics, m)
+#define DRVINFO_LEN 32
+
+static const struct ipqesstool_stats ipqess_stats[] = {
+ {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
+ {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
+ {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
+ {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
+ {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
+ {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
+ {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
+ {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
+ {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
+ {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
+ {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
+ {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
+ {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
+ {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
+ {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
+ {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
+ {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
+ {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
+ {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
+ {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
+ {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
+ {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
+ {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
+ {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
+ {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
+ {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
+ {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
+ {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
+ {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
+ {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
+ {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
+ {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
+ {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
+ {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
+ {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
+ {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
+ {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
+ {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
+ {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
+ {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
+ {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
+ {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
+ {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
+ {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
+ {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
+ {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
+ {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
+ {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
+ {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
+};
+
+/* ethtool .get_sset_count: return how many strings the given set holds.
+ * Only ETH_SS_STATS is supported; anything else is -EOPNOTSUPP.
+ */
+static int ipqess_get_strset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+  return ARRAY_SIZE(ipqess_stats);
+
+ netdev_dbg(netdev, "%s: Invalid string set", __func__);
+ return -EOPNOTSUPP;
+}
+
+/* ethtool .get_strings: copy the statistic names into @data, one
+ * ETH_GSTRING_LEN-sized slot per entry of ipqess_stats[].
+ */
+static void ipqess_get_strings(struct net_device *netdev, uint32_t stringset,
+          uint8_t *data)
+{
+ uint32_t idx;
+
+ if (stringset != ETH_SS_STATS)
+  return;
+
+ for (idx = 0; idx < ARRAY_SIZE(ipqess_stats); idx++) {
+  size_t len = strlen(ipqess_stats[idx].string) + 1;
+
+  /* copy name incl. NUL, capped at the slot size */
+  memcpy(data + idx * ETH_GSTRING_LEN, ipqess_stats[idx].string,
+         min((size_t)ETH_GSTRING_LEN, len));
+ }
+}
+
+/* ethtool .get_ethtool_stats: refresh the HW MIB counters, then copy them
+ * out widened to u64.  The stats struct is read as a flat u32 array, so
+ * this relies on struct ipqesstool_statistics containing only u32 fields
+ * laid out to match the offsets in ipqess_stats[] — NOTE(review): confirm
+ * against the struct definition in ipqess.h.
+ */
+static void ipqess_get_ethtool_stats(struct net_device *netdev,
+         struct ethtool_stats *stats,
+         uint64_t *data)
+{
+ struct ipqess *ess = netdev_priv(netdev);
+ u32 *essstats = (u32 *)&ess->ipqessstats;
+ int i;
+
+ /* serialize against concurrent stat updates */
+ spin_lock(&ess->stats_lock);
+
+ ipqess_update_hw_stats(ess);
+
+ for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
+  data[i] = *(u32 *)(essstats + (ipqess_stats[i].offset / sizeof(u32)));
+
+ spin_unlock(&ess->stats_lock);
+}
+
+/* ethtool .get_drvinfo: report driver name and bus identification.
+ * NOTE(review): strlcpy is deprecated in recent kernels in favour of
+ * strscpy — confirm the minimum kernel version this patch targets.
+ */
+static void ipqess_get_drvinfo(struct net_device *dev,
+          struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "qca_ipqess", DRVINFO_LEN);
+ strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
+}
+
+/* ethtool .get_link_ksettings: delegate entirely to phylink. */
+static int ipqess_get_settings(struct net_device *netdev,
+          struct ethtool_link_ksettings *cmd)
+{
+ struct ipqess *priv = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+/* ethtool .set_link_ksettings: delegate entirely to phylink. */
+static int ipqess_set_settings(struct net_device *netdev,
+          const struct ethtool_link_ksettings *cmd)
+{
+ struct ipqess *priv = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+/* ethtool .get_ringparam: the TX/RX rings are fixed-size, so current and
+ * maximum pending are both the compile-time ring sizes.
+ */
+static void ipqess_get_ringparam(struct net_device *netdev,
+     struct ethtool_ringparam *ring)
+{
+ ring->tx_max_pending = IPQESS_TX_RING_SIZE;
+ ring->rx_max_pending = IPQESS_RX_RING_SIZE;
+ /* Also report the current sizes; leaving these at zero makes
+  * `ethtool -g` show a misleading current configuration.
+  */
+ ring->tx_pending = IPQESS_TX_RING_SIZE;
+ ring->rx_pending = IPQESS_RX_RING_SIZE;
+}
+
+/* ethtool callbacks for the IPQESS netdev. */
+static const struct ethtool_ops ipqesstool_ops = {
+ .get_drvinfo = &ipqess_get_drvinfo,
+ /* was mojibake "ðtool_op_get_link" — restore the mangled
+  * "&ethtool_op_get_link" (the "&eth" got eaten as an HTML entity)
+  */
+ .get_link = &ethtool_op_get_link,
+ .get_link_ksettings = &ipqess_get_settings,
+ .set_link_ksettings = &ipqess_set_settings,
+ .get_strings = &ipqess_get_strings,
+ .get_sset_count = &ipqess_get_strset_count,
+ .get_ethtool_stats = &ipqess_get_ethtool_stats,
+ .get_ringparam = &ipqess_get_ringparam, /* '&' added for consistency */
+};
+
+/* Attach the ipqess ethtool operations to @netdev. */
+void ipqess_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ipqesstool_ops;
+}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012, 2020-2021 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2016 John Crispin <john@phrozen.org>
- * Copyright (c) 2021 Robert Marko <robert.marko@sartura.hr>
- */
-
-#include <linux/bitfield.h>
-#include <linux/version.h>
-#include <linux/etherdevice.h>
-#include <linux/if_bridge.h>
-#include <linux/mdio.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/phylink.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
-
-#include "qca8k-ipq4019.h"
-
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
- MIB_DESC(1, 0xa8, "RXUnicast"),
- MIB_DESC(1, 0xac, "TXunicast"),
-};
-
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
- return regmap_read(priv->regmap, reg, val);
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_write(priv->regmap, reg, val);
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
- return regmap_update_bits(priv->regmap, reg, mask, write_val);
-}
-
-static int
-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_set_bits(priv->regmap, reg, val);
-}
-
-static int
-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return regmap_clear_bits(priv->regmap, reg, val);
-}
-
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
-static struct regmap_config qca8k_ipq4019_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x16ac, /* end MIB - Port6 range */
- .rd_table = &qca8k_readable_table,
-};
-
-static struct regmap_config qca8k_ipq4019_psgmii_phy_regmap_config = {
- .name = "psgmii-phy",
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x7fc,
-};
-
-static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
- u32 val;
-
- return regmap_read_poll_timeout(priv->regmap, reg, val,
- !(val & mask),
- 0,
- QCA8K_BUSY_WAIT_TIMEOUT);
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[4], val;
- int i, ret;
-
- /* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
-
- reg[i] = val;
- }
-
- /* vid - 83:72 */
- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
- /* aging - 67:64 */
- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
- /* mac - 47:0 */
- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
- fdb->mac[1] = reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
-
- return 0;
-}
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
- int i;
-
- /* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
- /* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
- /* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
-
- /* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
-}
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
- u32 reg;
- int ret;
-
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
- }
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -1;
- }
-
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
-
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /*
- We do the right thing with VLAN 0 and treat it as untagged while
- preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
-
- if ((reg & mask) != mask) {
- del = false;
- break;
- }
- }
-
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
- } else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
- }
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 is internally connected to the CPU
- * TODO: Probably check for RGMII as well if it doesnt work
- * in RGMII mode.
- */
- if (port > QCA8K_CPU_PORT)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
- else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static int
-qca8k_setup_port(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret;
-
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, port)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
-
- /* Disable CPU ARP Auto-learning by default */
- ret = qca8k_reg_clear(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, port)) {
- int shift = 16 * (port % 2);
-
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port based vlans to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i;
-
- /* Make sure that port 0 is the cpu port */
- if (!dsa_is_cpu_port(ds, 0)) {
- dev_err(priv->dev, "port 0 is not the CPU port");
- return -EINVAL;
- }
-
- /* Enable CPU Port */
- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "MIB init failed");
-
- /* Enable QCA header mode on the cpu port */
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
-
- /* Disable forwarding by default on all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
- }
-
- /* Disable MAC by default on all ports */
- for (i = 1; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
-
- /* Forward all unknown frames to CPU port for Linux processing */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(QCA8K_CPU_PORT) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- ret = qca8k_setup_port(ds, i);
- if (ret)
- return ret;
- }
-
- /* Setup our port MTUs to match power on defaults */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- /* Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- /* CPU port HW learning doesnt work correctly, so let DSA handle it */
- ds->assisted_learning_on_cpu_port = true;
-
- return 0;
-}
-
-static int psgmii_vco_calibrate(struct qca8k_priv *priv)
-{
- int val, ret;
-
- if (!priv->psgmii_ethphy) {
- dev_err(priv->dev, "PSGMII eth PHY missing, calibration failed!\n");
- return -ENODEV;
- }
-
- /* Fix PSGMII RX 20bit */
- ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
- /* Reset PHY PSGMII */
- ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x1b);
- /* Release PHY PSGMII reset */
- ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5b);
-
- /* Poll for VCO PLL calibration finish - Malibu(QCA8075) */
- ret = phy_read_mmd_poll_timeout(priv->psgmii_ethphy,
- MDIO_MMD_PMAPMD,
- 0x28, val,
- (val & BIT(0)),
- 10000, 1000000,
- false);
- if (ret) {
- dev_err(priv->dev, "QCA807x PSGMII VCO calibration PLL not ready\n");
- return ret;
- }
- mdelay(50);
-
- /* Freeze PSGMII RX CDR */
- ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x2230);
-
- /* Start PSGMIIPHY VCO PLL calibration */
- ret = regmap_set_bits(priv->psgmii,
- PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1,
- PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART);
-
- /* Poll for PSGMIIPHY PLL calibration finish - Dakota(IPQ40xx) */
- ret = regmap_read_poll_timeout(priv->psgmii,
- PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2,
- val, val & PSGMIIPHY_REG_PLL_VCO_CALIB_READY,
- 10000, 1000000);
- if (ret) {
- dev_err(priv->dev, "IPQ PSGMIIPHY VCO calibration PLL not ready\n");
- return ret;
- }
- mdelay(50);
-
- /* Release PSGMII RX CDR */
- ret = phy_write(priv->psgmii_ethphy, MII_RESV2, 0x3230);
- /* Release PSGMII RX 20bit */
- ret = phy_write(priv->psgmii_ethphy, MII_BMCR, 0x5f);
- mdelay(200);
-
- return ret;
-}
-
-static void
-qca8k_switch_port_loopback_on_off(struct qca8k_priv *priv, int port, int on)
-{
- u32 val = QCA8K_PORT_LOOKUP_LOOPBACK;
-
- if (on == 0)
- val = 0;
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_LOOPBACK, val);
-}
-
-static int
-qca8k_wait_for_phy_link_state(struct phy_device *phy, int need_status)
-{
- int a;
- u16 status;
-
- for (a = 0; a < 100; a++) {
- status = phy_read(phy, MII_QCA8075_SSTATUS);
- status &= QCA8075_PHY_SPEC_STATUS_LINK;
- status = !!status;
- if (status == need_status)
- return 0;
- mdelay(8);
- }
-
- return -1;
-}
-
-static void
-qca8k_phy_loopback_on_off(struct qca8k_priv *priv, struct phy_device *phy,
- int sw_port, int on)
-{
- if (on) {
- phy_write(phy, MII_BMCR, BMCR_ANENABLE | BMCR_RESET);
- phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
- qca8k_wait_for_phy_link_state(phy, 0);
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
- phy_write(phy, MII_BMCR,
- BMCR_SPEED1000 |
- BMCR_FULLDPLX |
- BMCR_LOOPBACK);
- qca8k_wait_for_phy_link_state(phy, 1);
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port),
- QCA8K_PORT_STATUS_SPEED_1000 |
- QCA8K_PORT_STATUS_TXMAC |
- QCA8K_PORT_STATUS_RXMAC |
- QCA8K_PORT_STATUS_DUPLEX);
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
- QCA8K_PORT_LOOKUP_STATE_FORWARD,
- QCA8K_PORT_LOOKUP_STATE_FORWARD);
- } else { /* off */
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(sw_port), 0);
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(sw_port),
- QCA8K_PORT_LOOKUP_STATE_DISABLED,
- QCA8K_PORT_LOOKUP_STATE_DISABLED);
- phy_write(phy, MII_BMCR, BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_RESET);
- /* turn off the power of the phys - so that unused
- ports do not raise links */
- phy_modify(phy, MII_BMCR, BMCR_PDOWN, BMCR_PDOWN);
- }
-}
-
-static void
-qca8k_phy_pkt_gen_prep(struct qca8k_priv *priv, struct phy_device *phy,
- int pkts_num, int on)
-{
- if (on) {
- /* enable CRC checker and packets counters */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT,
- QCA8075_MMD7_CNT_FRAME_CHK_EN | QCA8075_MMD7_CNT_SELFCLR);
- qca8k_wait_for_phy_link_state(phy, 1);
- /* packet number */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, pkts_num);
- /* pkt size - 1504 bytes + 20 bytes */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_SIZE, 1504);
- } else { /* off */
- /* packet number */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_PKT_NUMB, 0);
- /* disable CRC checker and packet counter */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_CRC_AND_PKTS_COUNT, 0);
- /* disable traffic gen */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL, 0);
- }
-}
-
-static void
-qca8k_wait_for_phy_pkt_gen_fin(struct qca8k_priv *priv, struct phy_device *phy)
-{
- int val;
- /* wait for all traffic end: 4096(pkt num)*1524(size)*8ns(125MHz)=49938us */
- phy_read_mmd_poll_timeout(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
- val, !(val & QCA8075_MMD7_PKT_GEN_INPROGR),
- 50000, 1000000, true);
-}
-
-static void
-qca8k_start_phy_pkt_gen(struct phy_device *phy)
-{
- /* start traffic gen */
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_PKT_GEN_CTRL,
- QCA8075_MMD7_PKT_GEN_START | QCA8075_MMD7_PKT_GEN_INPROGR);
-}
-
-static int
-qca8k_start_all_phys_pkt_gens(struct qca8k_priv *priv)
-{
- struct phy_device *phy;
- phy = phy_device_create(priv->bus, QCA8075_MDIO_BRDCST_PHY_ADDR,
- 0, 0, NULL);
- if (!phy) {
- dev_err(priv->dev, "unable to create mdio broadcast PHY(0x%x)\n",
- QCA8075_MDIO_BRDCST_PHY_ADDR);
- return -ENODEV;
- }
-
- qca8k_start_phy_pkt_gen(phy);
-
- phy_device_free(phy);
- return 0;
-}
-
-static int
-qca8k_get_phy_pkt_gen_test_result(struct phy_device *phy, int pkts_num)
-{
- u32 tx_ok, tx_error;
- u32 rx_ok, rx_error;
- u32 tx_ok_high16;
- u32 rx_ok_high16;
- u32 tx_all_ok, rx_all_ok;
-
- /* check counters */
- tx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_LO);
- tx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_RECV_CNT_HI);
- tx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_EG_FRAME_ERR_CNT);
- rx_ok = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_LO);
- rx_ok_high16 = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_RECV_CNT_HI);
- rx_error = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_IG_FRAME_ERR_CNT);
- tx_all_ok = tx_ok + (tx_ok_high16 << 16);
- rx_all_ok = rx_ok + (rx_ok_high16 << 16);
-
- if (tx_all_ok < pkts_num)
- return -1;
- if(rx_all_ok < pkts_num)
- return -2;
- if(tx_error)
- return -3;
- if(rx_error)
- return -4;
- return 0; /* test is ok */
-}
-
-static
-void qca8k_phy_broadcast_write_on_off(struct qca8k_priv *priv,
- struct phy_device *phy, int on)
-{
- u32 val;
-
- val = phy_read_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE);
-
- if (on == 0)
- val &= ~QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
- else
- val |= QCA8075_MMD7_MDIO_BRDCST_WRITE_EN;
-
- phy_write_mmd(phy, MDIO_MMD_AN, QCA8075_MMD7_MDIO_BRDCST_WRITE, val);
-}
-
-static int
-qca8k_test_dsa_port_for_errors(struct qca8k_priv *priv, struct phy_device *phy,
- int port, int test_phase)
-{
- int res = 0;
- const int test_pkts_num = QCA8075_PKT_GEN_PKTS_COUNT;
-
- if (test_phase == 1) { /* start test preps */
- qca8k_phy_loopback_on_off(priv, phy, port, 1);
- qca8k_switch_port_loopback_on_off(priv, port, 1);
- qca8k_phy_broadcast_write_on_off(priv, phy, 1);
- qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 1);
- } else if (test_phase == 2) {
- /* wait for test results, collect it and cleanup */
- qca8k_wait_for_phy_pkt_gen_fin(priv, phy);
- res = qca8k_get_phy_pkt_gen_test_result(phy, test_pkts_num);
- qca8k_phy_pkt_gen_prep(priv, phy, test_pkts_num, 0);
- qca8k_phy_broadcast_write_on_off(priv, phy, 0);
- qca8k_switch_port_loopback_on_off(priv, port, 0);
- qca8k_phy_loopback_on_off(priv, phy, port, 0);
- }
-
- return res;
-}
-
-static int
-qca8k_do_dsa_sw_ports_self_test(struct qca8k_priv *priv, int parallel_test)
-{
- struct device_node *dn = priv->dev->of_node;
- struct device_node *ports, *port;
- struct device_node *phy_dn;
- struct phy_device *phy;
- int reg, err = 0, test_phase;
- u32 tests_result = 0;
-
- ports = of_get_child_by_name(dn, "ports");
- if (!ports) {
- dev_err(priv->dev, "no ports child node found\n");
- return -EINVAL;
- }
-
- for (test_phase = 1; test_phase <= 2; test_phase++) {
- if (parallel_test && test_phase == 2) {
- err = qca8k_start_all_phys_pkt_gens(priv);
- if (err)
- goto error;
- }
- for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", ®);
- if (err)
- goto error;
- if (reg >= QCA8K_NUM_PORTS) {
- err = -EINVAL;
- goto error;
- }
- phy_dn = of_parse_phandle(port, "phy-handle", 0);
- if (phy_dn) {
- phy = of_phy_find_device(phy_dn);
- of_node_put(phy_dn);
- if (phy) {
- int result;
- result = qca8k_test_dsa_port_for_errors(priv,
- phy, reg, test_phase);
- if (!parallel_test && test_phase == 1)
- qca8k_start_phy_pkt_gen(phy);
- put_device(&phy->mdio.dev);
- if (test_phase == 2) {
- tests_result <<= 1;
- if (result)
- tests_result |= 1;
- }
- }
- }
- }
- }
-
-end:
- of_node_put(ports);
- qca8k_fdb_flush(priv);
- return tests_result;
-error:
- tests_result |= 0xf000;
- goto end;
-}
-
-static int
-psgmii_vco_calibrate_and_test(struct dsa_switch *ds)
-{
- int ret, a, test_result;
- struct qca8k_priv *priv = ds->priv;
-
- for (a = 0; a <= QCA8K_PSGMII_CALB_NUM; a++) {
- ret = psgmii_vco_calibrate(priv);
- if (ret)
- return ret;
- /* first we run serial test */
- test_result = qca8k_do_dsa_sw_ports_self_test(priv, 0);
- /* and if it is ok then we run the test in parallel */
- if (!test_result)
- test_result = qca8k_do_dsa_sw_ports_self_test(priv, 1);
- if (!test_result) {
- if (a > 0) {
- dev_warn(priv->dev, "PSGMII work was stabilized after %d "
- "calibration retries !\n", a);
- }
- return 0;
- } else {
- schedule();
- if (a > 0 && a % 10 == 0) {
- dev_err(priv->dev, "PSGMII work is unstable !!! "
- "Let's try to wait a bit ... %d\n", a);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(a * 100));
- }
- }
- }
-
- panic("PSGMII work is unstable !!! "
- "Repeated recalibration attempts did not help(0x%x) !\n",
- test_result);
-
- return -EFAULT;
-}
-
-static int
-ipq4019_psgmii_configure(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- if (!priv->psgmii_calibrated) {
- ret = psgmii_vco_calibrate_and_test(ds);
-
- ret = regmap_clear_bits(priv->psgmii, PSGMIIPHY_MODE_CONTROL,
- PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M);
- ret = regmap_write(priv->psgmii, PSGMIIPHY_TX_CONTROL,
- PSGMIIPHY_TX_CONTROL_MAGIC_VALUE);
-
- priv->psgmii_calibrated = true;
-
- return ret;
- }
-
- return 0;
-}
-
-static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
-
- switch (port) {
- case 0:
- /* CPU port, no configuration needed */
- return;
- case 1:
- case 2:
- case 3:
- if (state->interface == PHY_INTERFACE_MODE_PSGMII)
- if (ipq4019_psgmii_configure(ds))
- dev_err(ds->dev, "PSGMII configuration failed!\n");
- return;
- case 4:
- case 5:
- if (state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- state->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- state->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- qca8k_reg_set(priv, QCA8K_REG_RGMII_CTRL, QCA8K_RGMII_CTRL_CLK);
- }
-
- if (state->interface == PHY_INTERFACE_MODE_PSGMII)
- if (ipq4019_psgmii_configure(ds))
- dev_err(ds->dev, "PSGMII configuration failed!\n");
- return;
- default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
- return;
- }
-}
-
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (port) {
- case 0: /* CPU port */
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 1:
- case 2:
- case 3:
- /* Only PSGMII mode is supported */
- if (state->interface != PHY_INTERFACE_MODE_PSGMII)
- goto unsupported;
- break;
- case 4:
- case 5:
- /* PSGMII and RGMII modes are supported */
- if (state->interface != PHY_INTERFACE_MODE_PSGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID)
- goto unsupported;
- break;
- default:
-unsupported:
- dev_warn(ds->dev, "interface '%s' (%d) on port %d is not supported\n",
- phy_modes(state->interface), state->interface, port);
- linkmode_zero(supported);
- return;
- }
-
- if (port == 0) {
- phylink_set_port_modes(mask);
-
- phylink_set(mask, 1000baseT_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
- } else {
- /* Simply copy what PHYs tell us */
- linkmode_copy(state->advertising, supported);
- }
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), ®);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
-
- return 1;
-}
-
-static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- struct qca8k_priv *priv = ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
-}
-
-static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
-
- if (phylink_autoneg_inband(mode)) {
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- } else {
- switch (speed) {
- case SPEED_10:
- reg = QCA8K_PORT_STATUS_SPEED_10;
- break;
- case SPEED_100:
- reg = QCA8K_PORT_STATUS_SPEED_100;
- break;
- case SPEED_1000:
- reg = QCA8K_PORT_STATUS_SPEED_1000;
- break;
- default:
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= QCA8K_PORT_STATUS_DUPLEX;
-
- if (rx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_RXFLOW;
-
- if (tx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_TXFLOW;
- }
-
- reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
-}
-
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
-}
-
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
-
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
-
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
- }
-}
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return 0;
-
- return ARRAY_SIZE(ar8327_mib);
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, ®);
- if (ret < 0)
- goto exit;
-
- if (eee->eee_enabled)
- reg |= lpi_en;
- else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
-}
-
-static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- qca8k_reg_clear(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
-
-void qca8k_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
- mutex_unlock(&priv->reg_mutex);
-}
-
-int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
-{
- struct qca8k_priv *priv = ds->priv;
- unsigned int secs = msecs / 1000;
- u32 val;
-
- /* AGE_TIME reg is set in 7s step */
- val = secs / 7;
-
- /* Handle case with 0 as val to NOT disable
- * learning
- */
- if (!val)
- val = 1;
-
- return qca8k_rmw(priv, QCA8K_REG_ATU_CTRL,
- QCA8K_ATU_AGE_TIME_MASK,
- QCA8K_ATU_AGE_TIME(val));
-}
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
-
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
-
- return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
-
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
-
- /* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
-}
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
-{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
-
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
- }
- mutex_unlock(&priv->reg_mutex);
-
- return 0;
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
-
- if (vlan_filtering) {
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
- return 0;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret = 0;
-
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
- if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
- return ret;
- }
-
- if (pvid) {
- int shift = 16 * (port % 2);
-
- qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
- qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
- }
- return 0;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret = 0;
-
- ret = qca8k_vlan_del(priv, port, vlan->vid);
- if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
- return ret;
-}
-
-static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_IPQ4019;
-}
-
-static const struct dsa_switch_ops qca8k_switch_ops = {
- .get_tag_protocol = qca8k_get_tag_protocol,
- .setup = qca8k_setup,
- .get_strings = qca8k_get_strings,
- .get_ethtool_stats = qca8k_get_ethtool_stats,
- .get_sset_count = qca8k_get_sset_count,
- .set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
- .set_mac_eee = qca8k_set_mac_eee,
- .port_enable = qca8k_port_enable,
- .port_disable = qca8k_port_disable,
- .port_change_mtu = qca8k_port_change_mtu,
- .port_max_mtu = qca8k_port_max_mtu,
- .port_stp_state_set = qca8k_port_stp_state_set,
- .port_bridge_join = qca8k_port_bridge_join,
- .port_bridge_leave = qca8k_port_bridge_leave,
- .port_fast_age = qca8k_port_fast_age,
- .port_fdb_add = qca8k_port_fdb_add,
- .port_fdb_del = qca8k_port_fdb_del,
- .port_fdb_dump = qca8k_port_fdb_dump,
- .port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_add = qca8k_port_vlan_add,
- .port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
-};
-
-static int
-qca8k_ipq4019_probe(struct platform_device *pdev)
-{
- struct qca8k_priv *priv;
- void __iomem *base, *psgmii;
- struct device_node *np = pdev->dev.of_node, *mdio_np, *psgmii_ethphy_np;
- int ret;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->dev = &pdev->dev;
-
- base = devm_platform_ioremap_resource_byname(pdev, "base");
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- priv->regmap = devm_regmap_init_mmio(priv->dev, base,
- &qca8k_ipq4019_regmap_config);
- if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
- dev_err(priv->dev, "base regmap initialization failed, %d\n", ret);
- return ret;
- }
-
- psgmii = devm_platform_ioremap_resource_byname(pdev, "psgmii_phy");
- if (IS_ERR(psgmii))
- return PTR_ERR(psgmii);
-
- priv->psgmii = devm_regmap_init_mmio(priv->dev, psgmii,
- &qca8k_ipq4019_psgmii_phy_regmap_config);
- if (IS_ERR(priv->psgmii)) {
- ret = PTR_ERR(priv->psgmii);
- dev_err(priv->dev, "PSGMII regmap initialization failed, %d\n", ret);
- return ret;
- }
-
- mdio_np = of_parse_phandle(np, "mdio", 0);
- if (!mdio_np) {
- dev_err(&pdev->dev, "unable to get MDIO bus phandle\n");
- of_node_put(mdio_np);
- return -EINVAL;
- }
-
- priv->bus = of_mdio_find_bus(mdio_np);
- of_node_put(mdio_np);
- if (!priv->bus) {
- dev_err(&pdev->dev, "unable to find MDIO bus\n");
- return -EPROBE_DEFER;
- }
-
- psgmii_ethphy_np = of_parse_phandle(np, "psgmii-ethphy", 0);
- if (!psgmii_ethphy_np) {
- dev_dbg(&pdev->dev, "unable to get PSGMII eth PHY phandle\n");
- of_node_put(psgmii_ethphy_np);
- }
-
- if (psgmii_ethphy_np) {
- priv->psgmii_ethphy = of_phy_find_device(psgmii_ethphy_np);
- of_node_put(psgmii_ethphy_np);
- if (!priv->psgmii_ethphy) {
- dev_err(&pdev->dev, "unable to get PSGMII eth PHY\n");
- return -ENODEV;
- }
- }
-
- priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = priv->dev;
- priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
-
- mutex_init(&priv->reg_mutex);
- platform_set_drvdata(pdev, priv);
-
- return dsa_register_switch(priv->ds);
-}
-
-static int
-qca8k_ipq4019_remove(struct platform_device *pdev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(&pdev->dev);
- int i;
-
- if (!priv)
- return 0;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
-
- dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
-}
-
-static const struct of_device_id qca8k_ipq4019_of_match[] = {
- { .compatible = "qca,ipq4019-qca8337n" },
- { /* sentinel */ },
-};
-
-static struct platform_driver qca8k_ipq4019_driver = {
- .probe = qca8k_ipq4019_probe,
- .remove = qca8k_ipq4019_remove,
- .driver = {
- .name = "qca8k-ipq4019",
- .of_match_table = qca8k_ipq4019_of_match,
- },
-};
-
-module_platform_driver(qca8k_ipq4019_driver);
-
-MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
-MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>, Robert Marko <robert.marko@sartura.hr>");
-MODULE_DESCRIPTION("Qualcomm IPQ4019 built-in switch driver");
-MODULE_LICENSE("GPL v2");
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __QCA8K_H
-#define __QCA8K_H
-
-#include <linux/regmap.h>
-
-#define QCA8K_NUM_PORTS 6
-#define QCA8K_CPU_PORT 0
-#define QCA8K_MAX_MTU 9000
-
-#define QCA8K_BUSY_WAIT_TIMEOUT 2000
-
-#define QCA8K_NUM_FDB_RECORDS 2048
-
-#define QCA8K_PORT_VID_DEF 1
-
-/* Global control registers */
-#define QCA8K_REG_MASK_CTRL 0x000
-#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
-#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
-#define QCA8K_REG_RGMII_CTRL 0x004
-#define QCA8K_RGMII_CTRL_RGMII_RXC GENMASK(1, 0)
-#define QCA8K_RGMII_CTRL_RGMII_TXC GENMASK(9, 8)
-/* Some kind of CLK selection
- * 0: gcc_ess_dly2ns
- * 1: gcc_ess_clk
- */
-#define QCA8K_RGMII_CTRL_CLK BIT(10)
-#define QCA8K_RGMII_CTRL_DELAY_RMII0 GENMASK(17, 16)
-#define QCA8K_RGMII_CTRL_INVERT_RMII0_REF_CLK BIT(18)
-#define QCA8K_RGMII_CTRL_DELAY_RMII1 GENMASK(20, 19)
-#define QCA8K_RGMII_CTRL_INVERT_RMII1_REF_CLK BIT(21)
-#define QCA8K_RGMII_CTRL_INVERT_RMII0_MASTER_EN BIT(24)
-#define QCA8K_RGMII_CTRL_INVERT_RMII1_MASTER_EN BIT(25)
-#define QCA8K_REG_MODULE_EN 0x030
-#define QCA8K_MODULE_EN_MIB BIT(0)
-#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
-#define QCA8K_MIB_CPU_KEEP BIT(20)
-#define QCA8K_MIB_BUSY BIT(17)
-#define QCA8K_GOL_MAC_ADDR0 0x60
-#define QCA8K_GOL_MAC_ADDR1 0x64
-#define QCA8K_MAX_FRAME_SIZE 0x78
-#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
-#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
-#define QCA8K_PORT_STATUS_SPEED_10 0
-#define QCA8K_PORT_STATUS_SPEED_100 0x1
-#define QCA8K_PORT_STATUS_SPEED_1000 0x2
-#define QCA8K_PORT_STATUS_TXMAC BIT(2)
-#define QCA8K_PORT_STATUS_RXMAC BIT(3)
-#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
-#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
-#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
-#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
-#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
-#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
-#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
-#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
-#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
-#define QCA8K_PORT_HDR_CTRL_RX_S 2
-#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
-#define QCA8K_PORT_HDR_CTRL_TX_S 0
-#define QCA8K_PORT_HDR_CTRL_ALL 2
-#define QCA8K_PORT_HDR_CTRL_MGMT 1
-#define QCA8K_PORT_HDR_CTRL_NONE 0
-#define QCA8K_REG_SGMII_CTRL 0x0e0
-#define QCA8K_SGMII_EN_PLL BIT(1)
-#define QCA8K_SGMII_EN_RX BIT(2)
-#define QCA8K_SGMII_EN_TX BIT(3)
-#define QCA8K_SGMII_EN_SD BIT(4)
-#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
-
-/* EEE control registers */
-#define QCA8K_REG_EEE_CTRL 0x100
-#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
-
-/* ACL registers */
-#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
-#define QCA8K_PORT_VLAN_SVID(x) x
-#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
-#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
-#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
-
-/* Lookup registers */
-#define QCA8K_REG_ATU_DATA0 0x600
-#define QCA8K_ATU_ADDR2_S 24
-#define QCA8K_ATU_ADDR3_S 16
-#define QCA8K_ATU_ADDR4_S 8
-#define QCA8K_REG_ATU_DATA1 0x604
-#define QCA8K_ATU_PORT_M 0x7f
-#define QCA8K_ATU_PORT_S 16
-#define QCA8K_ATU_ADDR0_S 8
-#define QCA8K_REG_ATU_DATA2 0x608
-#define QCA8K_ATU_VID_M 0xfff
-#define QCA8K_ATU_VID_S 8
-#define QCA8K_ATU_STATUS_M 0xf
-#define QCA8K_ATU_STATUS_STATIC 0xf
-#define QCA8K_REG_ATU_FUNC 0x60c
-#define QCA8K_ATU_FUNC_BUSY BIT(31)
-#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
-#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
-#define QCA8K_ATU_FUNC_FULL BIT(12)
-#define QCA8K_ATU_FUNC_PORT_M 0xf
-#define QCA8K_ATU_FUNC_PORT_S 8
-#define QCA8K_REG_VTU_FUNC0 0x610
-#define QCA8K_VTU_FUNC0_VALID BIT(20)
-#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
-#define QCA8K_REG_VTU_FUNC1 0x614
-#define QCA8K_VTU_FUNC1_BUSY BIT(31)
-#define QCA8K_VTU_FUNC1_VID_S 16
-#define QCA8K_VTU_FUNC1_FULL BIT(4)
-#define QCA8K_REG_ATU_CTRL 0x618
-#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
-#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
-#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
-#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
-#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
-#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
-#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
-#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
-#define QCA8K_PORT_LOOKUP_LOOPBACK BIT(21)
-
-#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
-
-#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
-
-#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
-#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
-#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
-#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
-
-/* Pkt edit registers */
-#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
-
-/* L3 registers */
-#define QCA8K_HROUTER_CONTROL 0xe00
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
-#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
-#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
-#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
-#define QCA8K_HNAT_CONTROL 0xe38
-
-/* MIB registers */
-#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
-
-/* IPQ4019 PSGMII PHY registers */
-#define PSGMIIPHY_MODE_CONTROL 0x1b4
-#define PSGMIIPHY_MODE_ATHR_CSCO_MODE_25M BIT(0)
-#define PSGMIIPHY_TX_CONTROL 0x288
-#define PSGMIIPHY_TX_CONTROL_MAGIC_VALUE 0x8380
-#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_1 0x9c
-#define PSGMIIPHY_REG_PLL_VCO_CALIB_RESTART BIT(14)
-#define PSGMIIPHY_VCO_CALIBRATION_CONTROL_REGISTER_2 0xa0
-#define PSGMIIPHY_REG_PLL_VCO_CALIB_READY BIT(0)
-
-#define QCA8K_PSGMII_CALB_NUM 100
-#define MII_QCA8075_SSTATUS 0x11
-#define QCA8075_PHY_SPEC_STATUS_LINK BIT(10)
-#define QCA8075_MMD7_CRC_AND_PKTS_COUNT 0x8029
-#define QCA8075_MMD7_PKT_GEN_PKT_NUMB 0x8021
-#define QCA8075_MMD7_PKT_GEN_PKT_SIZE 0x8062
-#define QCA8075_MMD7_PKT_GEN_CTRL 0x8020
-#define QCA8075_MMD7_CNT_SELFCLR BIT(1)
-#define QCA8075_MMD7_CNT_FRAME_CHK_EN BIT(0)
-#define QCA8075_MMD7_PKT_GEN_START BIT(13)
-#define QCA8075_MMD7_PKT_GEN_INPROGR BIT(15)
-#define QCA8075_MMD7_IG_FRAME_RECV_CNT_HI 0x802a
-#define QCA8075_MMD7_IG_FRAME_RECV_CNT_LO 0x802b
-#define QCA8075_MMD7_IG_FRAME_ERR_CNT 0x802c
-#define QCA8075_MMD7_EG_FRAME_RECV_CNT_HI 0x802d
-#define QCA8075_MMD7_EG_FRAME_RECV_CNT_LO 0x802e
-#define QCA8075_MMD7_EG_FRAME_ERR_CNT 0x802f
-#define QCA8075_MMD7_MDIO_BRDCST_WRITE 0x8028
-#define QCA8075_MMD7_MDIO_BRDCST_WRITE_EN BIT(15)
-#define QCA8075_MDIO_BRDCST_PHY_ADDR 0x1f
-#define QCA8075_PKT_GEN_PKTS_COUNT 4096
-
-enum {
- QCA8K_PORT_SPEED_10M = 0,
- QCA8K_PORT_SPEED_100M = 1,
- QCA8K_PORT_SPEED_1000M = 2,
- QCA8K_PORT_SPEED_ERR = 3,
-};
-
-enum qca8k_fdb_cmd {
- QCA8K_FDB_FLUSH = 1,
- QCA8K_FDB_LOAD = 2,
- QCA8K_FDB_PURGE = 3,
- QCA8K_FDB_FLUSH_PORT = 5,
- QCA8K_FDB_NEXT = 6,
- QCA8K_FDB_SEARCH = 7,
-};
-
-enum qca8k_vlan_cmd {
- QCA8K_VLAN_FLUSH = 1,
- QCA8K_VLAN_LOAD = 2,
- QCA8K_VLAN_PURGE = 3,
- QCA8K_VLAN_REMOVE_PORT = 4,
- QCA8K_VLAN_NEXT = 5,
- QCA8K_VLAN_READ = 6,
-};
-
-struct ar8xxx_port_status {
- int enabled;
-};
-
-struct qca8k_priv {
- struct regmap *regmap;
- struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
- struct dsa_switch *ds;
- struct mutex reg_mutex;
- struct device *dev;
- struct dsa_switch_ops ops;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
-
- /* IPQ4019 specific */
- struct regmap *psgmii;
- bool psgmii_calibrated;
- struct phy_device *psgmii_ethphy;
-};
-
-struct qca8k_mib_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-struct qca8k_fdb {
- u16 vid;
- u8 port_mask;
- u8 aging;
- u8 mac[6];
-};
-
-#endif /* __QCA8K_H */
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the IPQ ESS driver
-#
-
-obj-$(CONFIG_QCOM_IPQ4019_ESS_EDMA) += ipq_ess.o
-
-ipq_ess-objs := ipqess.o ipqess_ethtool.o
+++ /dev/null
-// SPDX-License-Identifier: (GPL-2.0 OR ISC)
-/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
- * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
- * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
- * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all copies.
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/dsa/ipq4019.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/skbuff.h>
-#include <linux/vmalloc.h>
-#include <net/checksum.h>
-#include <net/dsa.h>
-#include <net/ip6_checksum.h>
-
-#include "ipqess.h"
-
-#define IPQESS_RRD_SIZE 16
-#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
-#define IPQESS_TX_DMA_BUF_LEN 0x3fff
-
-static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
-{
- writel(val, ess->hw_addr + reg);
-}
-
-static u32 ipqess_r32(struct ipqess *ess, u16 reg)
-{
- return readl(ess->hw_addr + reg);
-}
-
-static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
-{
- u32 _val = ipqess_r32(ess, reg);
- _val &= ~mask;
- _val |= val;
- ipqess_w32(ess, reg, _val);
-}
-
-void ipqess_update_hw_stats(struct ipqess *ess)
-{
- uint32_t *p;
- u32 stat;
- int i;
-
- lockdep_assert_held(&ess->stats_lock);
-
- p = (uint32_t *)&(ess->ipqessstats);
- for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
- stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
- *p += stat;
- p++;
- }
-
- for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
- stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
- *p += stat;
- p++;
- }
-
- for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
- stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
- *p += stat;
- p++;
- }
-
- for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
- stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
- *p += stat;
- p++;
- }
-}
-
-static int ipqess_tx_ring_alloc(struct ipqess *ess)
-{
- struct device *dev = &ess->pdev->dev;
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
- size_t size;
- u32 idx;
-
- tx_ring->ess = ess;
- tx_ring->ring_id = i;
- tx_ring->idx = i * 4;
- tx_ring->count = IPQESS_TX_RING_SIZE;
- tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);
-
- size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
- tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
- if (!tx_ring->buf) {
- netdev_err(ess->netdev, "buffer alloc of tx ring failed");
- return -ENOMEM;
- }
-
- size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
- tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
- if (!tx_ring->hw_desc) {
- netdev_err(ess->netdev, "descriptor allocation for tx ring failed");
- return -ENOMEM;
- }
-
- ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
- (u32)tx_ring->dma);
-
- idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
- idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
- idx &= 0xffff;
- tx_ring->head = tx_ring->tail = idx;
-
- ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
- idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
- ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
- ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
- }
-
- return 0;
-}
-
-static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
-{
- int len = 0;
-
- if (buf->flags & IPQESS_DESC_SINGLE)
- dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
- else if (buf->flags & IPQESS_DESC_PAGE)
- dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);
-
- if (buf->flags & IPQESS_DESC_LAST) {
- len = buf->skb->len;
- dev_kfree_skb_any(buf->skb);
- }
-
- buf->flags = 0;
-
- return len;
-}
-
-static void ipqess_tx_ring_free(struct ipqess *ess)
-{
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- int j;
-
- if (ess->tx_ring[i].hw_desc)
- continue;
-
- for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
- struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];
-
- ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
- }
-
- ess->tx_ring[i].buf = NULL;
- }
-}
-
-static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
- struct ipqess_rx_ring *rx_ring)
-{
- /* Clean the HW DESC header, otherwise we might end up
- * with a spurious desc because of random garbage */
- memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));
-
- buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
- IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
- dev_err_once(rx_ring->ppdev,
- "IPQESS DMA mapping failed for linear address %x",
- buf->dma);
- dev_kfree_skb_any(buf->skb);
- buf->skb = NULL;
- return -EFAULT;
- }
-
- buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
- rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
- rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;
-
- ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
- (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
- IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
-
- return 0;
-}
-
-/* locking is handled by the caller */
-static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
-{
- struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
-
- buf->skb = napi_alloc_skb(&rx_ring->napi_rx,
- IPQESS_RX_HEAD_BUFF_SIZE);
- if (!buf->skb)
- return -ENOMEM;
-
- return ipqess_rx_buf_prepare(buf, rx_ring);
-}
-
-static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
-{
- struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];
-
- buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
- IPQESS_RX_HEAD_BUFF_SIZE);
- if (!buf->skb)
- return -ENOMEM;
-
- return ipqess_rx_buf_prepare(buf, rx_ring);
-}
-
-static void ipqess_refill_work(struct work_struct *work)
-{
- struct ipqess_rx_ring_refill *rx_refill = container_of(work,
- struct ipqess_rx_ring_refill, refill_work);
- struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
- int refill = 0;
-
- /* don't let this loop by accident. */
- while (atomic_dec_and_test(&rx_ring->refill_count)) {
- napi_disable(&rx_ring->napi_rx);
- if (ipqess_rx_buf_alloc(rx_ring)) {
- refill++;
- dev_dbg(rx_ring->ppdev,
- "Not all buffers were reallocated");
- }
- napi_enable(&rx_ring->napi_rx);
- }
-
- if (atomic_add_return(refill, &rx_ring->refill_count))
- schedule_work(&rx_refill->refill_work);
-}
-
-
-static int ipqess_rx_ring_alloc(struct ipqess *ess)
-{
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- int j;
-
- ess->rx_ring[i].ess = ess;
- ess->rx_ring[i].ppdev = &ess->pdev->dev;
- ess->rx_ring[i].ring_id = i;
- ess->rx_ring[i].idx = i * 2;
-
- ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
- sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
- GFP_KERNEL);
- if (!ess->rx_ring[i].buf)
- return -ENOMEM;
-
- ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
- sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
- &ess->rx_ring[i].dma, GFP_KERNEL);
- if (!ess->rx_ring[i].hw_desc)
- return -ENOMEM;
-
- for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
- if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
- return -ENOMEM;
-
- ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
- INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);
-
- ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
- (u32)(ess->rx_ring[i].dma));
- }
-
- ipqess_w32(ess, IPQESS_REG_RX_DESC0,
- (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
- (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));
-
- return 0;
-}
-
-static void ipqess_rx_ring_free(struct ipqess *ess)
-{
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- int j;
-
- atomic_set(&ess->rx_ring[i].refill_count, 0);
- cancel_work_sync(&ess->rx_refill[i].refill_work);
-
- for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
- dma_unmap_single(&ess->pdev->dev,
- ess->rx_ring[i].buf[j].dma,
- ess->rx_ring[i].buf[j].length,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
- }
- }
-}
-
-static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
-
- spin_lock(&ess->stats_lock);
- ipqess_update_hw_stats(ess);
- spin_unlock(&ess->stats_lock);
-
- return &ess->stats;
-}
-
-static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
-{
- u32 length = 0, num_desc, tail, rx_ring_tail;
- int done = 0;
-
- rx_ring_tail = rx_ring->tail;
-
- tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
- tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
- tail &= IPQESS_RFD_CONS_IDX_MASK;
-
- while (done < budget) {
- struct sk_buff *skb;
- struct ipqess_rx_desc *rd;
-
- if (rx_ring_tail == tail)
- break;
-
- dma_unmap_single(rx_ring->ppdev,
- rx_ring->buf[rx_ring_tail].dma,
- rx_ring->buf[rx_ring_tail].length,
- DMA_FROM_DEVICE);
-
- skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
- rd = (struct ipqess_rx_desc *)skb->data;
- rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
-
- /* Check if RRD is valid */
- if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
- num_desc = 1;
- dev_kfree_skb_any(skb);
- goto skip;
- }
-
- num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
- length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;
-
- skb_reserve(skb, IPQESS_RRD_SIZE);
- if (num_desc > 1) {
- /* can we use build_skb here ? */
- struct sk_buff *skb_prev = NULL;
- int size_remaining;
- int i;
-
- skb->data_len = 0;
- skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
- skb->len = skb->truesize = length;
- size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
-
- for (i = 1; i < num_desc; i++) {
- /* TODO: use build_skb ? */
- struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;
-
- dma_unmap_single(rx_ring->ppdev,
- rx_ring->buf[rx_ring_tail].dma,
- rx_ring->buf[rx_ring_tail].length,
- DMA_FROM_DEVICE);
-
- skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
- if (skb_prev)
- skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
- else
- skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
- skb_prev = rx_ring->buf[rx_ring_tail].skb;
- rx_ring->buf[rx_ring_tail].skb->next = NULL;
-
- skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
- size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;
-
- rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
- }
-
- } else {
- skb_put(skb, length);
- }
-
- skb->dev = rx_ring->ess->netdev;
- skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
- skb_record_rx_queue(skb, rx_ring->ring_id);
-
- if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
- skb_checksum_none_assert(skb);
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- if (rd->rrd7 & IPQESS_RRD_CVLAN) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
- } else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
- }
- napi_gro_receive(&rx_ring->napi_rx, skb);
-
- /* TODO: do we need to have these here ? */
- rx_ring->ess->stats.rx_packets++;
- rx_ring->ess->stats.rx_bytes += length;
-
- done++;
-skip:
-
- num_desc += atomic_xchg(&rx_ring->refill_count, 0);
- while (num_desc) {
- if (ipqess_rx_buf_alloc_napi(rx_ring)) {
- num_desc = atomic_add_return(num_desc,
- &rx_ring->refill_count);
- if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
- schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
- break;
- }
- num_desc--;
- }
- }
-
- ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
- rx_ring_tail);
- rx_ring->tail = rx_ring_tail;
-
- return done;
-}
-
-static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
-{
- u32 tail;
- int done = 0;
- int total = 0, ret;
-
- tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
- tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
- tail &= IPQESS_TPD_CONS_IDX_MASK;
-
- while ((tx_ring->tail != tail) && (done < budget)) {
- //pr_info("freeing txq:%d tail:%d tailbuf:%p\n", tx_ring->idx, tx_ring->tail, &tx_ring->buf[tx_ring->tail]);
- ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
- &tx_ring->buf[tx_ring->tail]);
- tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
- if (ret) {
- total += ret;
- done++;
- }
- }
-
- ipqess_w32(tx_ring->ess,
- IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
- tx_ring->tail);
-
- if (netif_tx_queue_stopped(tx_ring->nq)) {
- netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
- tx_ring->idx);
- netif_tx_wake_queue(tx_ring->nq);
- }
-
- netdev_tx_completed_queue(tx_ring->nq, done, total);
-
- return done;
-}
-
-static int ipqess_tx_napi(struct napi_struct *napi, int budget)
-{
- struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
- napi_tx);
- u32 tx_status;
- int work_done = 0;
-
- tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
- tx_status &= BIT(tx_ring->idx);
-
- work_done = ipqess_tx_complete(tx_ring, budget);
-
- ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);
-
- if (likely(work_done < budget)) {
- if (napi_complete_done(napi, work_done))
- ipqess_w32(tx_ring->ess,
- IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
- }
-
- return work_done;
-}
-
-static int ipqess_rx_napi(struct napi_struct *napi, int budget)
-{
- struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
- napi_rx);
- struct ipqess *ess = rx_ring->ess;
- int remain_budget = budget;
- int rx_done;
- u32 rx_mask = BIT(rx_ring->idx);
- u32 status;
-
-poll_again:
- ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
- rx_done = ipqess_rx_poll(rx_ring, remain_budget);
-
- if (rx_done == remain_budget)
- return budget;
-
- status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
- if (status & rx_mask) {
- remain_budget -= rx_done;
- goto poll_again;
- }
-
- if (napi_complete_done(napi, rx_done + budget - remain_budget))
- ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);
-
- return rx_done + budget - remain_budget;
-}
-
-static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
-{
- struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *) priv;
-
- if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
- ipqess_w32(tx_ring->ess,
- IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
- 0x0);
- __napi_schedule(&tx_ring->napi_tx);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
-{
- struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *) priv;
-
- if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
- ipqess_w32(rx_ring->ess,
- IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
- 0x0);
- __napi_schedule(&rx_ring->napi_rx);
- }
-
- return IRQ_HANDLED;
-}
-
-static void ipqess_irq_enable(struct ipqess *ess)
-{
- int i;
-
- ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
- ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
- ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
- }
-}
-
-static void ipqess_irq_disable(struct ipqess *ess)
-{
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
- ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
- }
-}
-
-static int __init ipqess_init(struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
- struct device_node *of_node = ess->pdev->dev.of_node;
- return phylink_of_phy_connect(ess->phylink, of_node, 0);
-}
-
-static void ipqess_uninit(struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
-
- phylink_disconnect_phy(ess->phylink);
-}
-
-static int ipqess_open(struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
- int i;
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- napi_enable(&ess->tx_ring[i].napi_tx);
- napi_enable(&ess->rx_ring[i].napi_rx);
- }
- ipqess_irq_enable(ess);
- phylink_start(ess->phylink);
- netif_tx_start_all_queues(netdev);
-
- return 0;
-}
-
-static int ipqess_stop(struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
- int i;
-
- netif_tx_stop_all_queues(netdev);
- phylink_stop(ess->phylink);
- ipqess_irq_disable(ess);
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- napi_disable(&ess->tx_ring[i].napi_tx);
- napi_disable(&ess->rx_ring[i].napi_rx);
- }
-
- return 0;
-}
-
-static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ipqess *ess = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phylink_mii_ioctl(ess->phylink, ifr, cmd);
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-
-static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
-{
- u16 count = 0;
-
- if (tx_ring->tail <= tx_ring->head)
- count = IPQESS_TX_RING_SIZE;
-
- count += tx_ring->tail - tx_ring->head - 1;
-
- return count;
-}
-
-static inline int ipqess_cal_txd_req(struct sk_buff *skb)
-{
- int tpds;
-
- /* one TPD for the header, and one for each fragments */
- tpds = 1 + skb_shinfo(skb)->nr_frags;
- if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
- /* for LSOv2 one extra TPD is needed */
- tpds++;
- }
-
- return tpds;
-}
-
-static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
- struct ipqess_tx_desc *desc)
-{
- return &tx_ring->buf[desc - tx_ring->hw_desc];
-}
-
-static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
-{
- struct ipqess_tx_desc *desc;
-
- desc = &tx_ring->hw_desc[tx_ring->head];
- tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);
-
- return desc;
-}
-
-static void ipqess_rollback_tx(struct ipqess *eth,
- struct ipqess_tx_desc *first_desc, int ring_id)
-{
- struct ipqess_tx_ring *tx_ring = ð->tx_ring[ring_id];
- struct ipqess_buf *buf;
- struct ipqess_tx_desc *desc = NULL;
- u16 start_index, index;
-
- start_index = first_desc - tx_ring->hw_desc;
-
- index = start_index;
- while (index != tx_ring->head) {
- desc = &tx_ring->hw_desc[index];
- buf = &tx_ring->buf[index];
- ipqess_tx_unmap_and_free(ð->pdev->dev, buf);
- memset(desc, 0, sizeof(struct ipqess_tx_desc));
- if (++index == tx_ring->count)
- index = 0;
- }
- tx_ring->head = start_index;
-}
-
-static bool ipqess_process_dsa_tag_sh(struct sk_buff *skb, u32 *word3)
-{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ipq40xx_dsa_tag_data *tag_data;
-
- if (shinfo->dsa_tag_proto != DSA_TAG_PROTO_IPQ4019)
- return false;
-
- tag_data = (struct ipq40xx_dsa_tag_data *)shinfo->dsa_tag_data;
-
- pr_debug("SH tag @ %08x, dp:%02x from_cpu:%u\n",
- (u32)tag_data, tag_data->dp, tag_data->from_cpu);
-
- *word3 |= tag_data->dp << IPQESS_TPD_PORT_BITMAP_SHIFT;
- if (tag_data->from_cpu)
- *word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);
-
- return true;
-}
-
-static void ipqess_get_dp_info(struct ipqess *ess, struct sk_buff *skb,
- u32 *word3)
-{
- if (netdev_uses_dsa(ess->netdev)) {
-
- if (ipqess_process_dsa_tag_sh(skb, word3))
- return;
- }
-
- *word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
-}
-
-static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
-{
- struct ipqess_buf *buf = NULL;
- struct platform_device *pdev = tx_ring->ess->pdev;
- struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
- u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
- u16 len;
- int i;
-
- ipqess_get_dp_info(tx_ring->ess, skb, &word3);
-
- if (skb_is_gso(skb)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- lso_word1 |= IPQESS_TPD_IPV4_EN;
- ip_hdr(skb)->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- lso_word1 |= IPQESS_TPD_LSO_V2_EN;
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
- }
-
- lso_word1 |= IPQESS_TPD_LSO_EN |
- ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
- (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
- } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- u8 css, cso;
- cso = skb_checksum_start_offset(skb);
- css = cso + skb->csum_offset;
-
- word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
- word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
- word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
- }
-
- if (skb_vlan_tag_present(skb)) {
- switch (skb->vlan_proto) {
- case htons(ETH_P_8021Q):
- word3 |= BIT(IPQESS_TX_INS_CVLAN);
- word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
- break;
- case htons(ETH_P_8021AD):
- word1 |= BIT(IPQESS_TX_INS_SVLAN);
- svlan_tag = skb_vlan_tag_get(skb);
- break;
- default:
- dev_err(&pdev->dev, "no ctag or stag present\n");
- goto vlan_tag_error;
- }
- }
-
- if (eth_type_vlan(skb->protocol))
- word1 |= IPQESS_TPD_VLAN_TAGGED;
-
- if (skb->protocol == htons(ETH_P_PPP_SES))
- word1 |= IPQESS_TPD_PPPOE_EN;
-
- len = skb_headlen(skb);
-
- first_desc = desc = ipqess_tx_desc_next(tx_ring);
- if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
- desc->addr = cpu_to_le16(skb->len);
- desc->word1 = word1 | lso_word1;
- desc->svlan_tag = svlan_tag;
- desc->word3 = word3;
- desc = ipqess_tx_desc_next(tx_ring);
- }
-
- buf = ipqess_get_tx_buffer(tx_ring, desc);
- buf->length = len;
- buf->dma = dma_map_single(&pdev->dev,
- skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buf->dma))
- goto dma_error;
-
- desc->addr = cpu_to_le32(buf->dma);
- desc->len = cpu_to_le16(len);
-
- buf->flags |= IPQESS_DESC_SINGLE;
- desc->word1 = word1 | lso_word1;
- desc->svlan_tag = svlan_tag;
- desc->word3 = word3;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = skb_frag_size(frag);
- desc = ipqess_tx_desc_next(tx_ring);
- buf = ipqess_get_tx_buffer(tx_ring, desc);
- buf->length = len;
- buf->flags |= IPQESS_DESC_PAGE;
- buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buf->dma))
- goto dma_error;
-
- desc->addr = cpu_to_le32(buf->dma);
- desc->len = cpu_to_le16(len);
- desc->svlan_tag = svlan_tag;
- desc->word1 = word1 | lso_word1;
- desc->word3 = word3;
- }
- desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;
- buf->skb = skb;
- buf->flags |= IPQESS_DESC_LAST;
-
- return 0;
-
-dma_error:
- ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
- dev_err(&pdev->dev, "TX DMA map failed\n");
-
-vlan_tag_error:
- return -ENOMEM;
-}
-
-static inline void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
-{
- /* Ensure that all TPDs has been written completely */
- dma_wmb();
-
- /* update software producer index */
- ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
- tx_ring->head);
-}
-
-static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct ipqess *ess = netdev_priv(netdev);
- struct ipqess_tx_ring *tx_ring;
- int avail;
- int tx_num;
- int ret;
-
- tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
- tx_num = ipqess_cal_txd_req(skb);
- avail = ipqess_tx_desc_available(tx_ring);
- if (avail < tx_num) {
- netdev_dbg(netdev,
- "stopping tx queue %d, avail=%d req=%d im=%x\n",
- tx_ring->idx, avail, tx_num,
- ipqess_r32(tx_ring->ess,
- IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
- netif_tx_stop_queue(tx_ring->nq);
- ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
- ipqess_kick_tx(tx_ring);
- return NETDEV_TX_BUSY;
- }
-
- ret = ipqess_tx_map_and_fill(tx_ring, skb);
- if (ret) {
- dev_kfree_skb_any(skb);
- ess->stats.tx_errors++;
- goto err_out;
- }
-
- ess->stats.tx_packets++;
- ess->stats.tx_bytes += skb->len;
- netdev_tx_sent_queue(tx_ring->nq, skb->len);
-
- if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
- ipqess_kick_tx(tx_ring);
-
-err_out:
- return NETDEV_TX_OK;
-}
-
-static int ipqess_set_mac_address(struct net_device *netdev, void *p)
-{
- int ret = eth_mac_addr(netdev, p);
- struct ipqess *ess = netdev_priv(netdev);
- const char *macaddr = netdev->dev_addr;
-
- if (ret)
- return ret;
-
-// spin_lock_bh(&mac->hw->page_lock);
- ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
- (macaddr[0] << 8) | macaddr[1]);
- ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
- (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5]);
-// spin_unlock_bh(&mac->hw->page_lock);
-
- return 0;
-}
-
-static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
-{
- struct ipqess *ess = netdev_priv(netdev);
- struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];
-
- netdev_warn(netdev, "hardware queue %d is in stuck?\n",
- tr->idx);
-
- /* TODO: dump hardware queue */
-}
-
-static const struct net_device_ops ipqess_axi_netdev_ops = {
- .ndo_init = ipqess_init,
- .ndo_uninit = ipqess_uninit,
- .ndo_open = ipqess_open,
- .ndo_stop = ipqess_stop,
- .ndo_do_ioctl = ipqess_do_ioctl,
- .ndo_start_xmit = ipqess_xmit,
- .ndo_get_stats = ipqess_get_stats,
- .ndo_set_mac_address = ipqess_set_mac_address,
- .ndo_tx_timeout = ipqess_tx_timeout,
-};
-
-static void ipqess_hw_stop(struct ipqess *ess)
-{
- int i;
-
- /* disable all RX queue IRQs */
- for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
- ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);
-
- /* disable all TX queue IRQs */
- for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
- ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);
-
- /* disable all other IRQs */
- ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
- ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);
-
- /* clear the IRQ status registers */
- ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
- ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
- ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
- ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
- ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);
-
- /* disable RX and TX queues */
- ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
- ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
-}
-
-static int ipqess_hw_init(struct ipqess *ess)
-{
- u32 tmp;
- int i, err;
-
- ipqess_hw_stop(ess);
-
- ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
- IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
- IPQESS_REG_INTR_CTRL);
-
- /* enable IRQ delay slot */
- ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
- (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
- (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));
-
- /* Set Customer and Service VLAN TPIDs */
- ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
- (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
- (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));
-
- /* Configure the TX Queue bursting */
- ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
- (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
- (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
- IPQESS_TXQ_CTRL_TPD_BURST_EN);
-
- /* Set RSS type */
- ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
- IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
- IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
- IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);
-
- /* Set RFD ring burst and threshold */
- ipqess_w32(ess, IPQESS_REG_RX_DESC1,
- (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
- (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
- (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));
-
- /* Set Rx FIFO
- * - threshold to start to DMA data to host
- */
- ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
- IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);
-
- err = ipqess_rx_ring_alloc(ess);
- if (err)
- return err;
-
- err = ipqess_tx_ring_alloc(ess);
- if (err)
- return err;
-
- /* Load all of ring base addresses above into the dma engine */
- ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
- IPQESS_REG_TX_SRAM_PART);
-
- /* Disable TX FIFO low watermark and high watermark */
- ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);
-
- /* Configure RSS indirection table.
- * 128 hash will be configured in the following
- * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
- * and so on
- */
- for (i = 0; i < IPQESS_NUM_IDT; i++)
- ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);
-
- /* Configure load balance mapping table.
- * 4 table entry will be configured according to the
- * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
- * respectively.
- */
- ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);
-
- /* Configure Virtual queue for Tx rings */
- ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
- ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);
-
- /* Configure Max AXI Burst write size to 128 bytes*/
- ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
- IPQESS_AXIW_MAXWRSIZE_VALUE);
-
- /* Enable TX queues */
- ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);
-
- /* Enable RX queues */
- tmp = 0;
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
- tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);
-
- ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);
-
- return 0;
-}
-
-static void ipqess_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ipqess *ess = container_of(config, struct ipqess, phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL) {
- dev_err(&ess->pdev->dev, "unsupported interface mode: %d\n",
- state->interface);
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
-{
- /* TODO */
-}
-
-static void ipqess_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- /* TODO */
-}
-
-static void ipqess_mac_link_up(struct phylink_config *config,
- struct phy_device *phy, unsigned int mode,
- phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
-{
- /* TODO */
-}
-
-static struct phylink_mac_ops ipqess_phylink_mac_ops = {
- .validate = ipqess_validate,
- .mac_config = ipqess_mac_config,
- .mac_link_up = ipqess_mac_link_up,
- .mac_link_down = ipqess_mac_link_down,
-};
-
-static void ipqess_cleanup(struct ipqess *ess)
-{
- ipqess_hw_stop(ess);
- unregister_netdev(ess->netdev);
-
- ipqess_tx_ring_free(ess);
- ipqess_rx_ring_free(ess);
-
- if (!IS_ERR_OR_NULL(ess->phylink))
- phylink_destroy(ess->phylink);
-}
-
-static void ess_reset(struct ipqess *ess)
-{
- reset_control_assert(ess->ess_rst);
-
- mdelay(10);
-
- reset_control_deassert(ess->ess_rst);
-
- /* Waiting for all inner tables to be flushed and reinitialized.
- * This takes between 5 and 10ms.
- */
- mdelay(10);
-}
-
-static int ipqess_axi_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct ipqess *ess;
- struct net_device *netdev;
- struct resource *res;
- int i, err = 0;
-
- netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct ipqess),
- IPQESS_NETDEV_QUEUES,
- IPQESS_NETDEV_QUEUES);
- if (!netdev)
- return -ENOMEM;
-
- ess = netdev_priv(netdev);
- ess->netdev = netdev;
- ess->pdev = pdev;
- spin_lock_init(&ess->stats_lock);
- SET_NETDEV_DEV(netdev, &pdev->dev);
- platform_set_drvdata(pdev, netdev);
-
- err = of_get_mac_address(np, netdev->dev_addr);
- if (err == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (err) {
-
- random_ether_addr(netdev->dev_addr);
- dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
- netdev->dev_addr);
- netdev->addr_assign_type = NET_ADDR_RANDOM;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ess->hw_addr)) {
- err = PTR_ERR(ess->hw_addr);
- goto err_out;
- }
-
- ess->ess_clk = of_clk_get_by_name(np, "ess_clk");
- if (IS_ERR(ess->ess_clk)) {
- dev_err(&pdev->dev, "Failed to get ess_clk\n");
- return PTR_ERR(ess->ess_clk);
- }
-
- ess->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
- if (IS_ERR(ess->ess_rst)) {
- dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
- return PTR_ERR(ess->ess_rst);
- }
-
- clk_prepare_enable(ess->ess_clk);
-
- ess_reset(ess);
-
- ess->phylink_config.dev = &netdev->dev;
- ess->phylink_config.type = PHYLINK_NETDEV;
- ess->phylink_config.pcs_poll = true;
-
- ess->phylink = phylink_create(&ess->phylink_config,
- of_fwnode_handle(np),
- PHY_INTERFACE_MODE_INTERNAL,
- &ipqess_phylink_mac_ops);
- if (IS_ERR(ess->phylink)) {
- err = PTR_ERR(ess->phylink);
- goto err_out;
- }
-
- for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
- ess->tx_irq[i] = platform_get_irq(pdev, i);
- scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
- "%s:txq%d", pdev->name, i);
- }
-
- for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
- ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
- scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
- "%s:rxq%d", pdev->name, i);
- }
-
-#undef NETIF_F_TSO6
-#define NETIF_F_TSO6 0
-
- netdev->netdev_ops = &ipqess_axi_netdev_ops;
- netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GRO | NETIF_F_SG;
- /* feature change is not supported yet */
- netdev->hw_features = 0;
- netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GRO;
- netdev->watchdog_timeo = 5 * HZ;
- netdev->base_addr = (u32) ess->hw_addr;
- netdev->max_mtu = 9000;
- netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;
-
- ipqess_set_ethtool_ops(netdev);
-
- err = register_netdev(netdev);
- if (err)
- goto err_out;
-
- err = ipqess_hw_init(ess);
- if (err)
- goto err_out;
-
- dev_set_threaded(netdev, true);
-
- for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
- int qid;
-
- netif_tx_napi_add(netdev, &ess->tx_ring[i].napi_tx,
- ipqess_tx_napi, 64);
- netif_napi_add(netdev,
- &ess->rx_ring[i].napi_rx,
- ipqess_rx_napi, 64);
-
- qid = ess->tx_ring[i].idx;
- err = devm_request_irq(&ess->netdev->dev, ess->tx_irq[qid],
- ipqess_interrupt_tx, 0, ess->tx_irq_names[qid],
- &ess->tx_ring[i]);
- if (err)
- goto err_out;
-
- qid = ess->rx_ring[i].idx;
- err = devm_request_irq(&ess->netdev->dev, ess->rx_irq[qid],
- ipqess_interrupt_rx, 0, ess->rx_irq_names[qid],
- &ess->rx_ring[i]);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- ipqess_cleanup(ess);
- return err;
-}
-
-static int ipqess_axi_remove(struct platform_device *pdev)
-{
- const struct net_device *netdev = platform_get_drvdata(pdev);
- struct ipqess *ess = netdev_priv(netdev);
-
- ipqess_cleanup(ess);
-
- return 0;
-}
-
-static const struct of_device_id ipqess_of_mtable[] = {
- {.compatible = "qcom,ipq4019-ess-edma" },
- {}
-};
-MODULE_DEVICE_TABLE(of, ipqess_of_mtable);
-
-static struct platform_driver ipqess_axi_driver = {
- .driver = {
- .name = "ipqess-edma",
- .of_match_table = ipqess_of_mtable,
- },
- .probe = ipqess_axi_probe,
- .remove = ipqess_axi_remove,
-};
-
-module_platform_driver(ipqess_axi_driver);
-
-MODULE_AUTHOR("Qualcomm Atheros Inc");
-MODULE_AUTHOR("John Crispin <john@phrozen.org>");
-MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
-MODULE_AUTHOR("Gabor Juhos <j4g8y7@gmail.com>");
-MODULE_LICENSE("GPL");
+++ /dev/null
-// SPDX-License-Identifier: (GPL-2.0 OR ISC)
-/* Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
- * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
- * Copyright (c) 2018 - 2019, Christian Lamparter <chunkeey@gmail.com>
- * Copyright (c) 2020 - 2021, Gabor Juhos <j4g8y7@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all copies.
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _IPQESS_H_
-#define _IPQESS_H_
-
-#define IPQESS_NETDEV_QUEUES 4
-
-#define IPQESS_TPD_EOP_SHIFT 31
-
-#define IPQESS_PORT_ID_SHIFT 12
-#define IPQESS_PORT_ID_MASK 0x7
-
-/* tpd word 3 bit 18-28 */
-#define IPQESS_TPD_PORT_BITMAP_SHIFT 18
-
-#define IPQESS_TPD_FROM_CPU_SHIFT 25
-
-#define IPQESS_RX_RING_SIZE 128
-#define IPQESS_RX_HEAD_BUFF_SIZE 1540
-#define IPQESS_TX_RING_SIZE 128
-#define IPQESS_MAX_RX_QUEUE 8
-#define IPQESS_MAX_TX_QUEUE 16
-
-
-/* Configurations */
-#define IPQESS_INTR_CLEAR_TYPE 0
-#define IPQESS_INTR_SW_IDX_W_TYPE 0
-#define IPQESS_FIFO_THRESH_TYPE 0
-#define IPQESS_RSS_TYPE 0
-#define IPQESS_RX_IMT 0x0020
-#define IPQESS_TX_IMT 0x0050
-#define IPQESS_TPD_BURST 5
-#define IPQESS_TXF_BURST 0x100
-#define IPQESS_RFD_BURST 8
-#define IPQESS_RFD_THR 16
-#define IPQESS_RFD_LTHR 0
-
-/* Flags used in transmit direction */
-#define IPQESS_DESC_LAST 0x1
-#define IPQESS_DESC_SINGLE 0x2
-#define IPQESS_DESC_PAGE 0x4
-
-struct ipqesstool_statistics {
- u32 tx_q0_pkt;
- u32 tx_q1_pkt;
- u32 tx_q2_pkt;
- u32 tx_q3_pkt;
- u32 tx_q4_pkt;
- u32 tx_q5_pkt;
- u32 tx_q6_pkt;
- u32 tx_q7_pkt;
- u32 tx_q8_pkt;
- u32 tx_q9_pkt;
- u32 tx_q10_pkt;
- u32 tx_q11_pkt;
- u32 tx_q12_pkt;
- u32 tx_q13_pkt;
- u32 tx_q14_pkt;
- u32 tx_q15_pkt;
- u32 tx_q0_byte;
- u32 tx_q1_byte;
- u32 tx_q2_byte;
- u32 tx_q3_byte;
- u32 tx_q4_byte;
- u32 tx_q5_byte;
- u32 tx_q6_byte;
- u32 tx_q7_byte;
- u32 tx_q8_byte;
- u32 tx_q9_byte;
- u32 tx_q10_byte;
- u32 tx_q11_byte;
- u32 tx_q12_byte;
- u32 tx_q13_byte;
- u32 tx_q14_byte;
- u32 tx_q15_byte;
- u32 rx_q0_pkt;
- u32 rx_q1_pkt;
- u32 rx_q2_pkt;
- u32 rx_q3_pkt;
- u32 rx_q4_pkt;
- u32 rx_q5_pkt;
- u32 rx_q6_pkt;
- u32 rx_q7_pkt;
- u32 rx_q0_byte;
- u32 rx_q1_byte;
- u32 rx_q2_byte;
- u32 rx_q3_byte;
- u32 rx_q4_byte;
- u32 rx_q5_byte;
- u32 rx_q6_byte;
- u32 rx_q7_byte;
- u32 tx_desc_error;
-};
-
-struct ipqess_tx_desc {
- __le16 len;
- __le16 svlan_tag;
- __le32 word1;
- __le32 addr;
- __le32 word3;
-} __aligned(16) __packed;
-
-struct ipqess_rx_desc {
- u16 rrd0;
- u16 rrd1;
- u16 rrd2;
- u16 rrd3;
- u16 rrd4;
- u16 rrd5;
- u16 rrd6;
- u16 rrd7;
-} __aligned(16) __packed;
-
-struct ipqess_buf {
- struct sk_buff *skb;
- dma_addr_t dma;
- u32 flags;
- u16 length;
-};
-
-struct ipqess_tx_ring {
- struct napi_struct napi_tx;
- u32 idx;
- int ring_id;
- struct ipqess *ess;
- struct netdev_queue *nq;
- struct ipqess_tx_desc *hw_desc;
- struct ipqess_buf *buf;
- dma_addr_t dma;
- u16 count;
- u16 head;
- u16 tail;
-};
-
-struct ipqess_rx_ring {
- struct napi_struct napi_rx;
- u32 idx;
- int ring_id;
- struct ipqess *ess;
- struct device *ppdev;
- struct ipqess_rx_desc **hw_desc;
- struct ipqess_buf *buf;
- dma_addr_t dma;
- u16 head;
- u16 tail;
- atomic_t refill_count;
-};
-
-struct ipqess_rx_ring_refill {
- struct ipqess_rx_ring *rx_ring;
- struct work_struct refill_work;
-};
-
-#define IPQESS_IRQ_NAME_LEN 32
-
-struct ipqess {
- struct net_device *netdev;
- void __iomem *hw_addr;
- struct clk *ess_clk;
- struct reset_control *ess_rst;
-
- struct ipqess_rx_ring rx_ring[IPQESS_NETDEV_QUEUES];
-
- struct platform_device *pdev;
- struct phylink *phylink;
- struct phylink_config phylink_config;
- struct ipqess_tx_ring tx_ring[IPQESS_NETDEV_QUEUES];
-
- struct ipqesstool_statistics ipqessstats;
- spinlock_t stats_lock;
- struct net_device_stats stats;
-
- struct ipqess_rx_ring_refill rx_refill[IPQESS_NETDEV_QUEUES];
- u32 tx_irq[IPQESS_MAX_TX_QUEUE];
- char tx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
- u32 rx_irq[IPQESS_MAX_RX_QUEUE];
- char rx_irq_names[IPQESS_MAX_TX_QUEUE][IPQESS_IRQ_NAME_LEN];
-};
-
-static inline void build_test(void)
-{
- struct ipqess *ess;
- BUILD_BUG_ON(ARRAY_SIZE(ess->rx_ring) != ARRAY_SIZE(ess->rx_refill));
-}
-
-void ipqess_set_ethtool_ops(struct net_device *netdev);
-void ipqess_update_hw_stats(struct ipqess *ess);
-
-/* register definition */
-#define IPQESS_REG_MAS_CTRL 0x0
-#define IPQESS_REG_TIMEOUT_CTRL 0x004
-#define IPQESS_REG_DBG0 0x008
-#define IPQESS_REG_DBG1 0x00C
-#define IPQESS_REG_SW_CTRL0 0x100
-#define IPQESS_REG_SW_CTRL1 0x104
-
-/* Interrupt Status Register */
-#define IPQESS_REG_RX_ISR 0x200
-#define IPQESS_REG_TX_ISR 0x208
-#define IPQESS_REG_MISC_ISR 0x210
-#define IPQESS_REG_WOL_ISR 0x218
-
-#define IPQESS_MISC_ISR_RX_URG_Q(x) (1 << x)
-
-#define IPQESS_MISC_ISR_AXIR_TIMEOUT 0x00000100
-#define IPQESS_MISC_ISR_AXIR_ERR 0x00000200
-#define IPQESS_MISC_ISR_TXF_DEAD 0x00000400
-#define IPQESS_MISC_ISR_AXIW_ERR 0x00000800
-#define IPQESS_MISC_ISR_AXIW_TIMEOUT 0x00001000
-
-#define IPQESS_WOL_ISR 0x00000001
-
-/* Interrupt Mask Register */
-#define IPQESS_REG_MISC_IMR 0x214
-#define IPQESS_REG_WOL_IMR 0x218
-
-#define IPQESS_RX_IMR_NORMAL_MASK 0x1
-#define IPQESS_TX_IMR_NORMAL_MASK 0x1
-#define IPQESS_MISC_IMR_NORMAL_MASK 0x80001FFF
-#define IPQESS_WOL_IMR_NORMAL_MASK 0x1
-
-/* Edma receive consumer index */
-#define IPQESS_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
-
-/* Edma transmit consumer index */
-#define IPQESS_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
-
-/* IRQ Moderator Initial Timer Register */
-#define IPQESS_REG_IRQ_MODRT_TIMER_INIT 0x280
-#define IPQESS_IRQ_MODRT_TIMER_MASK 0xFFFF
-#define IPQESS_IRQ_MODRT_RX_TIMER_SHIFT 0
-#define IPQESS_IRQ_MODRT_TX_TIMER_SHIFT 16
-
-/* Interrupt Control Register */
-#define IPQESS_REG_INTR_CTRL 0x284
-#define IPQESS_INTR_CLR_TYP_SHIFT 0
-#define IPQESS_INTR_SW_IDX_W_TYP_SHIFT 1
-#define IPQESS_INTR_CLEAR_TYPE_W1 0
-#define IPQESS_INTR_CLEAR_TYPE_R 1
-
-/* RX Interrupt Mask Register */
-#define IPQESS_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
-
-/* TX Interrupt mask register */
-#define IPQESS_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
-
-/* Load Ptr Register
- * Software sets this bit after the initialization of the head and tail
- */
-#define IPQESS_REG_TX_SRAM_PART 0x400
-#define IPQESS_LOAD_PTR_SHIFT 16
-
-/* TXQ Control Register */
-#define IPQESS_REG_TXQ_CTRL 0x404
-#define IPQESS_TXQ_CTRL_IP_OPTION_EN 0x10
-#define IPQESS_TXQ_CTRL_TXQ_EN 0x20
-#define IPQESS_TXQ_CTRL_ENH_MODE 0x40
-#define IPQESS_TXQ_CTRL_LS_8023_EN 0x80
-#define IPQESS_TXQ_CTRL_TPD_BURST_EN 0x100
-#define IPQESS_TXQ_CTRL_LSO_BREAK_EN 0x200
-#define IPQESS_TXQ_NUM_TPD_BURST_MASK 0xF
-#define IPQESS_TXQ_TXF_BURST_NUM_MASK 0xFFFF
-#define IPQESS_TXQ_NUM_TPD_BURST_SHIFT 0
-#define IPQESS_TXQ_TXF_BURST_NUM_SHIFT 16
-
-#define IPQESS_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */
-#define IPQESS_TXF_WATER_MARK_MASK 0x0FFF
-#define IPQESS_TXF_LOW_WATER_MARK_SHIFT 0
-#define IPQESS_TXF_HIGH_WATER_MARK_SHIFT 16
-#define IPQESS_TXQ_CTRL_BURST_MODE_EN 0x80000000
-
-/* WRR Control Register */
-#define IPQESS_REG_WRR_CTRL_Q0_Q3 0x40c
-#define IPQESS_REG_WRR_CTRL_Q4_Q7 0x410
-#define IPQESS_REG_WRR_CTRL_Q8_Q11 0x414
-#define IPQESS_REG_WRR_CTRL_Q12_Q15 0x418
-
-/* Weight round robin(WRR), it takes queue as input, and computes
- * starting bits where we need to write the weight for a particular
- * queue
- */
-#define IPQESS_WRR_SHIFT(x) (((x) * 5) % 20)
-
-/* Tx Descriptor Control Register */
-#define IPQESS_REG_TPD_RING_SIZE 0x41C
-#define IPQESS_TPD_RING_SIZE_SHIFT 0
-#define IPQESS_TPD_RING_SIZE_MASK 0xFFFF
-
-/* Transmit descriptor base address */
-#define IPQESS_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
-
-/* TPD Index Register */
-#define IPQESS_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
-
-#define IPQESS_TPD_PROD_IDX_BITS 0x0000FFFF
-#define IPQESS_TPD_CONS_IDX_BITS 0xFFFF0000
-#define IPQESS_TPD_PROD_IDX_MASK 0xFFFF
-#define IPQESS_TPD_CONS_IDX_MASK 0xFFFF
-#define IPQESS_TPD_PROD_IDX_SHIFT 0
-#define IPQESS_TPD_CONS_IDX_SHIFT 16
-
-/* TX Virtual Queue Mapping Control Register */
-#define IPQESS_REG_VQ_CTRL0 0x4A0
-#define IPQESS_REG_VQ_CTRL1 0x4A4
-
-/* Virtual QID shift, it takes queue as input, and computes
- * Virtual QID position in virtual qid control register
- */
-#define IPQESS_VQ_ID_SHIFT(i) (((i) * 3) % 24)
-
-/* Virtual Queue Default Value */
-#define IPQESS_VQ_REG_VALUE 0x240240
-
-/* Tx side Port Interface Control Register */
-#define IPQESS_REG_PORT_CTRL 0x4A8
-#define IPQESS_PAD_EN_SHIFT 15
-
-/* Tx side VLAN Configuration Register */
-#define IPQESS_REG_VLAN_CFG 0x4AC
-
-#define IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT 0
-#define IPQESS_VLAN_CFG_SVLAN_TPID_MASK 0xffff
-#define IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT 16
-#define IPQESS_VLAN_CFG_CVLAN_TPID_MASK 0xffff
-
-#define IPQESS_TX_CVLAN 16
-#define IPQESS_TX_INS_CVLAN 17
-#define IPQESS_TX_CVLAN_TAG_SHIFT 0
-
-#define IPQESS_TX_SVLAN 14
-#define IPQESS_TX_INS_SVLAN 15
-#define IPQESS_TX_SVLAN_TAG_SHIFT 16
-
-/* Tx Queue Packet Statistic Register */
-#define IPQESS_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
-
-#define IPQESS_TX_STAT_PKT_MASK 0xFFFFFF
-
-/* Tx Queue Byte Statistic Register */
-#define IPQESS_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
-
-/* Load Balance Based Ring Offset Register */
-#define IPQESS_REG_LB_RING 0x800
-#define IPQESS_LB_RING_ENTRY_MASK 0xff
-#define IPQESS_LB_RING_ID_MASK 0x7
-#define IPQESS_LB_RING_PROFILE_ID_MASK 0x3
-#define IPQESS_LB_RING_ENTRY_BIT_OFFSET 8
-#define IPQESS_LB_RING_ID_OFFSET 0
-#define IPQESS_LB_RING_PROFILE_ID_OFFSET 3
-#define IPQESS_LB_REG_VALUE 0x6040200
-
-/* Load Balance Priority Mapping Register */
-#define IPQESS_REG_LB_PRI_START 0x804
-#define IPQESS_REG_LB_PRI_END 0x810
-#define IPQESS_LB_PRI_REG_INC 4
-#define IPQESS_LB_PRI_ENTRY_BIT_OFFSET 4
-#define IPQESS_LB_PRI_ENTRY_MASK 0xf
-
-/* RSS Priority Mapping Register */
-#define IPQESS_REG_RSS_PRI 0x820
-#define IPQESS_RSS_PRI_ENTRY_MASK 0xf
-#define IPQESS_RSS_RING_ID_MASK 0x7
-#define IPQESS_RSS_PRI_ENTRY_BIT_OFFSET 4
-
-/* RSS Indirection Register */
-#define IPQESS_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
-#define IPQESS_NUM_IDT 16
-#define IPQESS_RSS_IDT_VALUE 0x64206420
-
-/* Default RSS Ring Register */
-#define IPQESS_REG_DEF_RSS 0x890
-#define IPQESS_DEF_RSS_MASK 0x7
-
-/* RSS Hash Function Type Register */
-#define IPQESS_REG_RSS_TYPE 0x894
-#define IPQESS_RSS_TYPE_NONE 0x01
-#define IPQESS_RSS_TYPE_IPV4TCP 0x02
-#define IPQESS_RSS_TYPE_IPV6_TCP 0x04
-#define IPQESS_RSS_TYPE_IPV4_UDP 0x08
-#define IPQESS_RSS_TYPE_IPV6UDP 0x10
-#define IPQESS_RSS_TYPE_IPV4 0x20
-#define IPQESS_RSS_TYPE_IPV6 0x40
-#define IPQESS_RSS_HASH_MODE_MASK 0x7f
-
-#define IPQESS_REG_RSS_HASH_VALUE 0x8C0
-
-#define IPQESS_REG_RSS_TYPE_RESULT 0x8C4
-
-#define IPQESS_HASH_TYPE_START 0
-#define IPQESS_HASH_TYPE_END 5
-#define IPQESS_HASH_TYPE_SHIFT 12
-
-#define IPQESS_RFS_FLOW_ENTRIES 1024
-#define IPQESS_RFS_FLOW_ENTRIES_MASK (IPQESS_RFS_FLOW_ENTRIES - 1)
-#define IPQESS_RFS_EXPIRE_COUNT_PER_CALL 128
-
-/* RFD Base Address Register */
-#define IPQESS_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
-
-/* RFD Index Register */
-#define IPQESS_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) /* x = queue id */
-
-#define IPQESS_RFD_PROD_IDX_BITS 0x00000FFF
-#define IPQESS_RFD_CONS_IDX_BITS 0x0FFF0000
-#define IPQESS_RFD_PROD_IDX_MASK 0xFFF
-#define IPQESS_RFD_CONS_IDX_MASK 0xFFF
-#define IPQESS_RFD_PROD_IDX_SHIFT 0
-#define IPQESS_RFD_CONS_IDX_SHIFT 16
-
-/* Rx Descriptor Control Register */
-#define IPQESS_REG_RX_DESC0 0xA10
-#define IPQESS_RFD_RING_SIZE_MASK 0xFFF
-#define IPQESS_RX_BUF_SIZE_MASK 0xFFFF
-#define IPQESS_RFD_RING_SIZE_SHIFT 0
-#define IPQESS_RX_BUF_SIZE_SHIFT 16
-
-#define IPQESS_REG_RX_DESC1 0xA14
-#define IPQESS_RXQ_RFD_BURST_NUM_MASK 0x3F
-#define IPQESS_RXQ_RFD_PF_THRESH_MASK 0x1F
-#define IPQESS_RXQ_RFD_LOW_THRESH_MASK 0xFFF
-#define IPQESS_RXQ_RFD_BURST_NUM_SHIFT 0
-#define IPQESS_RXQ_RFD_PF_THRESH_SHIFT 8
-#define IPQESS_RXQ_RFD_LOW_THRESH_SHIFT 16
-
-/* RXQ Control Register */
-#define IPQESS_REG_RXQ_CTRL 0xA18
-#define IPQESS_FIFO_THRESH_TYPE_SHIF 0
-#define IPQESS_FIFO_THRESH_128_BYTE 0x0
-#define IPQESS_FIFO_THRESH_64_BYTE 0x1
-#define IPQESS_RXQ_CTRL_RMV_VLAN 0x00000002
-#define IPQESS_RXQ_CTRL_EN_MASK GENMASK(15, 8)
-#define IPQESS_RXQ_CTRL_EN(__qid) BIT(8 + (__qid))
-
-/* AXI Burst Size Config */
-#define IPQESS_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
-#define IPQESS_AXIW_MAXWRSIZE_VALUE 0x0
-
-/* Rx Statistics Register */
-#define IPQESS_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
-#define IPQESS_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
-
-/* WoL Pattern Length Register */
-#define IPQESS_REG_WOL_PATTERN_LEN0 0xC00
-#define IPQESS_WOL_PT_LEN_MASK 0xFF
-#define IPQESS_WOL_PT0_LEN_SHIFT 0
-#define IPQESS_WOL_PT1_LEN_SHIFT 8
-#define IPQESS_WOL_PT2_LEN_SHIFT 16
-#define IPQESS_WOL_PT3_LEN_SHIFT 24
-
-#define IPQESS_REG_WOL_PATTERN_LEN1 0xC04
-#define IPQESS_WOL_PT4_LEN_SHIFT 0
-#define IPQESS_WOL_PT5_LEN_SHIFT 8
-#define IPQESS_WOL_PT6_LEN_SHIFT 16
-
-/* WoL Control Register */
-#define IPQESS_REG_WOL_CTRL 0xC08
-#define IPQESS_WOL_WK_EN 0x00000001
-#define IPQESS_WOL_MG_EN 0x00000002
-#define IPQESS_WOL_PT0_EN 0x00000004
-#define IPQESS_WOL_PT1_EN 0x00000008
-#define IPQESS_WOL_PT2_EN 0x00000010
-#define IPQESS_WOL_PT3_EN 0x00000020
-#define IPQESS_WOL_PT4_EN 0x00000040
-#define IPQESS_WOL_PT5_EN 0x00000080
-#define IPQESS_WOL_PT6_EN 0x00000100
-
-/* MAC Control Register */
-#define IPQESS_REG_MAC_CTRL0 0xC20
-#define IPQESS_REG_MAC_CTRL1 0xC24
-
-/* WoL Pattern Register */
-#define IPQESS_REG_WOL_PATTERN_START 0x5000
-#define IPQESS_PATTERN_PART_REG_OFFSET 0x40
-
-
-/* TX descriptor fields */
-#define IPQESS_TPD_HDR_SHIFT 0
-#define IPQESS_TPD_PPPOE_EN 0x00000100
-#define IPQESS_TPD_IP_CSUM_EN 0x00000200
-#define IPQESS_TPD_TCP_CSUM_EN 0x0000400
-#define IPQESS_TPD_UDP_CSUM_EN 0x00000800
-#define IPQESS_TPD_CUSTOM_CSUM_EN 0x00000C00
-#define IPQESS_TPD_LSO_EN 0x00001000
-#define IPQESS_TPD_LSO_V2_EN 0x00002000
-/* The VLAN_TAGGED bit is not used in the publicly available
- * drivers. The definition has been stolen from the Atheros
- * 'alx' driver (drivers/net/ethernet/atheros/alx/hw.h). It
- * seems that it has the same meaning in regard to the EDMA
- * hardware.
- */
-#define IPQESS_TPD_VLAN_TAGGED 0x00004000
-#define IPQESS_TPD_IPV4_EN 0x00010000
-#define IPQESS_TPD_MSS_MASK 0x1FFF
-#define IPQESS_TPD_MSS_SHIFT 18
-#define IPQESS_TPD_CUSTOM_CSUM_SHIFT 18
-
-/* RRD descriptor fields */
-#define IPQESS_RRD_NUM_RFD_MASK 0x000F
-#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
-#define IPQESS_RRD_SRC_PORT_NUM_MASK 0x4000
-#define IPQESS_RRD_SVLAN 0x8000
-#define IPQESS_RRD_FLOW_COOKIE_MASK 0x07FF;
-
-#define IPQESS_RRD_PKT_SIZE_MASK 0x3FFF
-#define IPQESS_RRD_CSUM_FAIL_MASK 0xC000
-#define IPQESS_RRD_CVLAN 0x0001
-#define IPQESS_RRD_DESC_VALID 0x8000
-
-#define IPQESS_RRD_PRIORITY_SHIFT 4
-#define IPQESS_RRD_PRIORITY_MASK 0x7
-#define IPQESS_RRD_PORT_TYPE_SHIFT 7
-#define IPQESS_RRD_PORT_TYPE_MASK 0x1F
-
-#endif
+++ /dev/null
-// SPDX-License-Identifier: (GPL-2.0 OR ISC)
-/* Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved.
- * Copyright (c) 2017 - 2018, John Crispin <john@phrozen.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all copies.
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/phylink.h>
-
-#include "ipqess.h"
-
-struct ipqesstool_stats {
- uint8_t string[ETH_GSTRING_LEN];
- uint32_t offset;
-};
-
-#define IPQESS_STAT(m) offsetof(struct ipqesstool_statistics, m)
-#define DRVINFO_LEN 32
-
-static const struct ipqesstool_stats ipqess_stats[] = {
- {"tx_q0_pkt", IPQESS_STAT(tx_q0_pkt)},
- {"tx_q1_pkt", IPQESS_STAT(tx_q1_pkt)},
- {"tx_q2_pkt", IPQESS_STAT(tx_q2_pkt)},
- {"tx_q3_pkt", IPQESS_STAT(tx_q3_pkt)},
- {"tx_q4_pkt", IPQESS_STAT(tx_q4_pkt)},
- {"tx_q5_pkt", IPQESS_STAT(tx_q5_pkt)},
- {"tx_q6_pkt", IPQESS_STAT(tx_q6_pkt)},
- {"tx_q7_pkt", IPQESS_STAT(tx_q7_pkt)},
- {"tx_q8_pkt", IPQESS_STAT(tx_q8_pkt)},
- {"tx_q9_pkt", IPQESS_STAT(tx_q9_pkt)},
- {"tx_q10_pkt", IPQESS_STAT(tx_q10_pkt)},
- {"tx_q11_pkt", IPQESS_STAT(tx_q11_pkt)},
- {"tx_q12_pkt", IPQESS_STAT(tx_q12_pkt)},
- {"tx_q13_pkt", IPQESS_STAT(tx_q13_pkt)},
- {"tx_q14_pkt", IPQESS_STAT(tx_q14_pkt)},
- {"tx_q15_pkt", IPQESS_STAT(tx_q15_pkt)},
- {"tx_q0_byte", IPQESS_STAT(tx_q0_byte)},
- {"tx_q1_byte", IPQESS_STAT(tx_q1_byte)},
- {"tx_q2_byte", IPQESS_STAT(tx_q2_byte)},
- {"tx_q3_byte", IPQESS_STAT(tx_q3_byte)},
- {"tx_q4_byte", IPQESS_STAT(tx_q4_byte)},
- {"tx_q5_byte", IPQESS_STAT(tx_q5_byte)},
- {"tx_q6_byte", IPQESS_STAT(tx_q6_byte)},
- {"tx_q7_byte", IPQESS_STAT(tx_q7_byte)},
- {"tx_q8_byte", IPQESS_STAT(tx_q8_byte)},
- {"tx_q9_byte", IPQESS_STAT(tx_q9_byte)},
- {"tx_q10_byte", IPQESS_STAT(tx_q10_byte)},
- {"tx_q11_byte", IPQESS_STAT(tx_q11_byte)},
- {"tx_q12_byte", IPQESS_STAT(tx_q12_byte)},
- {"tx_q13_byte", IPQESS_STAT(tx_q13_byte)},
- {"tx_q14_byte", IPQESS_STAT(tx_q14_byte)},
- {"tx_q15_byte", IPQESS_STAT(tx_q15_byte)},
- {"rx_q0_pkt", IPQESS_STAT(rx_q0_pkt)},
- {"rx_q1_pkt", IPQESS_STAT(rx_q1_pkt)},
- {"rx_q2_pkt", IPQESS_STAT(rx_q2_pkt)},
- {"rx_q3_pkt", IPQESS_STAT(rx_q3_pkt)},
- {"rx_q4_pkt", IPQESS_STAT(rx_q4_pkt)},
- {"rx_q5_pkt", IPQESS_STAT(rx_q5_pkt)},
- {"rx_q6_pkt", IPQESS_STAT(rx_q6_pkt)},
- {"rx_q7_pkt", IPQESS_STAT(rx_q7_pkt)},
- {"rx_q0_byte", IPQESS_STAT(rx_q0_byte)},
- {"rx_q1_byte", IPQESS_STAT(rx_q1_byte)},
- {"rx_q2_byte", IPQESS_STAT(rx_q2_byte)},
- {"rx_q3_byte", IPQESS_STAT(rx_q3_byte)},
- {"rx_q4_byte", IPQESS_STAT(rx_q4_byte)},
- {"rx_q5_byte", IPQESS_STAT(rx_q5_byte)},
- {"rx_q6_byte", IPQESS_STAT(rx_q6_byte)},
- {"rx_q7_byte", IPQESS_STAT(rx_q7_byte)},
- {"tx_desc_error", IPQESS_STAT(tx_desc_error)},
-};
-
-static int ipqess_get_strset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ipqess_stats);
- default:
- netdev_dbg(netdev, "%s: Invalid string set", __func__);
- return -EOPNOTSUPP;
- }
-}
-
-static void ipqess_get_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
-{
- uint8_t *p = data;
- uint32_t i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++) {
- memcpy(p, ipqess_stats[i].string,
- min((size_t)ETH_GSTRING_LEN,
- strlen(ipqess_stats[i].string) + 1));
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static void ipqess_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- uint64_t *data)
-{
- struct ipqess *ess = netdev_priv(netdev);
- u32 *essstats = (u32 *)&ess->ipqessstats;
- int i;
-
- spin_lock(&ess->stats_lock);
-
- ipqess_update_hw_stats(ess);
-
- for (i = 0; i < ARRAY_SIZE(ipqess_stats); i++)
- data[i] = *(u32 *)(essstats + (ipqess_stats[i].offset / sizeof(u32)));
-
- spin_unlock(&ess->stats_lock);
-}
-
-static void ipqess_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, "qca_ipqess", DRVINFO_LEN);
- strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN);
-}
-
-static int ipqess_get_settings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ipqess *ess = netdev_priv(netdev);
-
- return phylink_ethtool_ksettings_get(ess->phylink, cmd);
-}
-
-static int ipqess_set_settings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ipqess *ess = netdev_priv(netdev);
-
- return phylink_ethtool_ksettings_set(ess->phylink, cmd);
-}
-
-static void ipqess_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- ring->tx_max_pending = IPQESS_TX_RING_SIZE;
- ring->rx_max_pending = IPQESS_RX_RING_SIZE;
-}
-
-static const struct ethtool_ops ipqesstool_ops = {
- .get_drvinfo = &ipqess_get_drvinfo,
- .get_link = ðtool_op_get_link,
- .get_link_ksettings = &ipqess_get_settings,
- .set_link_ksettings = &ipqess_set_settings,
- .get_strings = &ipqess_get_strings,
- .get_sset_count = &ipqess_get_strset_count,
- .get_ethtool_stats = &ipqess_get_ethtool_stats,
- .get_ringparam = ipqess_get_ringparam,
-};
-
-void ipqess_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &ipqesstool_ops;
-}