The functional behaviour for certain settings is different
depending on whether local forwarding is enabled or not.
- accept_ra - BOOLEAN
+ accept_ra - INTEGER
Accept Router Advertisements; autoconfigure using them.
+ It also determines whether or not to transmit Router
+ Solicitations. If and only if the functional setting is to
+ accept Router Advertisements, Router Solicitations will be
+ transmitted.
+
Possible values are:
0 Do not accept Router Advertisements.
1 Accept Router Advertisements if forwarding is disabled.
+ 2 Overrule forwarding behaviour.
+ Accept Router Advertisements even if forwarding is enabled.
--- /dev/null
+/* bnx2x_dcb.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+/* forward declarations of dcbx related functions */
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ *buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ REG_WR(bp, addr + i, *buff);
+}
+
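+/*
+ * Enable PFC through the link code (elink): pauseable priorities are
+ * steered to COS1 (lossless) and the remaining priorities to COS0
+ * (lossy); NIG, MAC and BRB are then updated under the PHY lock.
+ */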
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+ u32 pri_bit, val = 0;
+ int i;
+
+ pfc_params.num_of_rx_cos_priority_mask =
+ bp->dcbx_port_params.ets.num_of_cos;
+
+ /* Tx COS configuration */
+ for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+ /*
+ * We configure only the pauseable bits (non-pauseable bits aren't
+ * configured at all); this is done to avoid false pauses from the
+ * network.
+ */
+ pfc_params.rx_cos_priority_mask[i] =
+ bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+ & DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /*
+ * Rx COS configuration.
+ * Change the PFC RX configuration:
+ * in RX, COS0 will always be configured as lossy and COS1 as lossless.
+ */
+ for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+ pri_bit = 1 << i;
+
+ if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ val |= 1 << (i * 4);
+ }
+
+ pfc_params.pkt_priority_to_cos = val;
+
+ /* RX COS0 */
+ pfc_params.llfc_low_priority_classes = 0;
+ /* RX COS1 */
+ pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /* BRB configuration */
+ pfc_params.cos0_pauseable = false;
+ pfc_params.cos1_pauseable = true;
+
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+ nig_params.pause_enable = 1;
+#ifdef BNX2X_SAFC
+ if (bp->flags & SAFC_TX_FLAG) {
+ u32 high = 0, low = 0;
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
+ if (bp->pri_map[i] == 1)
+ high |= (1 << i);
+ if (bp->pri_map[i] == 0)
+ low |= (1 << i);
+ }
+
+ nig_params.llfc_high_priority_classes = high;
+ nig_params.llfc_low_priority_classes = low;
+
+ nig_params.pause_enable = 0;
+ nig_params.llfc_enable = 1;
+ nig_params.llfc_out_en = 1;
+ }
+#endif /* BNX2X_SAFC */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ u8 i = 0;
+ DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+ /* PG */
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+ for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+ DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+ DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+ /* pfc */
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ features->pfc.pri_en_bitmap);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ features->pfc.pfc_caps);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ features->pfc.enabled);
+
+ DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ features->app.default_pri);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ features->app.tc_supported);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ features->app.enabled);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+ i, features->app.app_pri_tbl[i].app_id);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+ i, features->app.app_pri_tbl[i].pri_bitmap);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+ i, features->app.app_pri_tbl[i].appBitfield);
+ }
+}
+
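+/*
+ * Find the highest priority set in pri_bitmap and record it for the
+ * given traffic type, keeping the highest value seen so far.
+ */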
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+ u8 pri_bitmap,
+ u8 llfc_traf_type)
+{
+ u32 pri = MAX_PFC_PRIORITIES;
+ u32 index = MAX_PFC_PRIORITIES - 1;
+ u32 pri_mask;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ /* Choose the highest priority */
+ while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+ pri_mask = 1 << index;
+ if (GET_FLAGS(pri_bitmap, pri_mask))
+ pri = index;
+ index--;
+ }
+
+ if (pri < MAX_PFC_PRIORITIES)
+ ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+ struct dcbx_app_priority_feature *app,
+ u32 error) {
+ u8 index;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+
+ if (app->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
+
+ bp->dcbx_port_params.app.enabled = true;
+
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = 0;
+
+ if (app->default_pri < MAX_PFC_PRIORITIES)
+ ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+ for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+ struct dcbx_app_priority_entry *entry =
+ app->app_pri_tbl;
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_FCOE);
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_ISCSI);
+ }
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ bp->dcbx_port_params.app.enabled = false;
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+ }
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ u32 error) {
+ int i = 0;
+ u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+ struct pg_help_data pg_help_data;
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+
+ if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+
+
+ /* Clean up old settings of ets on COS */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
+ cos_params[i].pauseable = false;
+ cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+ cos_params[i].pri_bitmask = 0;
+ }
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
+ ets->enabled) {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ bp->dcbx_port_params.ets.enabled = true;
+
+ bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+ pg_pri_orginal_spread,
+ ets->pri_pg_tbl);
+
+ bnx2x_dcbx_get_num_pg_traf_type(bp,
+ pg_pri_orginal_spread,
+ &pg_help_data);
+
+ bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+ ets, pg_pri_orginal_spread);
+
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ bp->dcbx_port_params.ets.enabled = false;
+ ets->pri_pg_tbl[0] = 0;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+ DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+ }
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+ struct dcbx_pfc_feature *pfc, u32 error)
+{
+
+ if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
+ pfc->enabled) {
+ bp->dcbx_port_params.pfc.enabled = true;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+ ~(pfc->pri_en_bitmap);
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ bp->dcbx_port_params.pfc.enabled = false;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+ }
+}
+
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+ int i;
+ u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ /* get unmapped priorities by clearing mapped bits */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ unmapped &= ~(1 << ttp[i]);
+
+ /* find cos for nw prio and extend it with unmapped */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+ if (cos_params[i].pri_bitmask & nw_prio) {
+ /* extend the bitmask with unmapped */
+ DP(NETIF_MSG_LINK,
+ "cos %d extended with 0x%08x\n", i, unmapped);
+ cos_params[i].pri_bitmask |= unmapped;
+ break;
+ }
+ }
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+ bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+ bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+ bnx2x_dcbx_map_nw(bp);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
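+/*
+ * Read a local/remote DCBX MIB from shmem. The firmware brackets the
+ * MIB with matching prefix/suffix sequence numbers, so re-read until
+ * they agree (up to DCBX_LOCAL_MIB_MAX_TRY_READ attempts) to avoid
+ * using a torn snapshot.
+ */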
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+ u32 *base_mib_addr,
+ u32 offset,
+ int read_mib_type)
+{
+ int max_try_read = 0;
+ u32 mib_size, prefix_seq_num, suffix_seq_num;
+ struct lldp_remote_mib *remote_mib;
+ struct lldp_local_mib *local_mib;
+
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ mib_size = sizeof(struct lldp_local_mib);
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ mib_size = sizeof(struct lldp_remote_mib);
+ break;
+ default:
+ return 1; /*error*/
+ }
+
+ offset += BP_PORT(bp) * mib_size;
+
+ do {
+ bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
+
+ max_try_read++;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ local_mib = (struct lldp_local_mib *) base_mib_addr;
+ prefix_seq_num = local_mib->prefix_seq_num;
+ suffix_seq_num = local_mib->suffix_seq_num;
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+ prefix_seq_num = remote_mib->prefix_seq_num;
+ suffix_seq_num = remote_mib->suffix_seq_num;
+ break;
+ default:
+ return 1; /*error*/
+ }
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+ if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+ BNX2X_ERR("MIB could not be read\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+ if (bp->dcbx_port_params.pfc.enabled &&
+ !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ /*
+ * 1. Fills up common PFC structures if required
+ * 2. Configure NIG, MAC and BRB via the elink
+ */
+ bnx2x_pfc_set(bp);
+ else
+ bnx2x_pfc_clear(bp);
+}
+
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+
+ DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_tx_start_params *tx_params =
+ &func_params.params.tx_start;
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+
+ bnx2x_dcbx_fw_struct(bp, tx_params);
+
+ DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ int rc = 0;
+
+ if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+ BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
+ return;
+ }
+
+ /* valid COS entries */
+ if (ets->num_of_cos == 1) /* no ETS */
+ return;
+
+ /* sanity */
+ if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+ ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+ BNX2X_ERR("all COS should have at least bw_limit or strict "
+ "ets->cos_params[0].strict= %x "
+ "ets->cos_params[0].bw_tbl= %x "
+ "ets->cos_params[1].strict= %x "
+ "ets->cos_params[1].bw_tbl= %x\n",
+ ets->cos_params[0].strict,
+ ets->cos_params[0].bw_tbl,
+ ets->cos_params[1].strict,
+ ets->cos_params[1].bw_tbl);
+ return;
+ }
+ /* If we join a group and there is bw_tbl and strict then bw rules */
+ if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+ (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+ u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+ u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+ /* Do not allow 0-100 configuration
+ * since PBF does not support it
+ * force 1-99 instead
+ */
+ if (bw_tbl_0 == 0) {
+ bw_tbl_0 = 1;
+ bw_tbl_1 = 99;
+ } else if (bw_tbl_1 == 0) {
+ bw_tbl_1 = 1;
+ bw_tbl_0 = 99;
+ }
+
+ bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+ } else {
+ if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 0);
+ else if (ets->cos_params[1].strict
+ == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 1);
+ if (rc)
+ BNX2X_ERR("update_ets_params failed\n");
+ }
+}
+
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ struct bnx2x_ets_params ets_params = { 0 };
+ u8 i;
+
+ ets_params.num_of_cos = ets->num_of_cos;
+
+ for (i = 0; i < ets->num_of_cos; i++) {
+ /* COS is SP */
+ if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+ BNX2X_ERR("COS must be either BW or SP\n");
+ return;
+ }
+
+ ets_params.cos[i].state = bnx2x_cos_state_strict;
+ ets_params.cos[i].params.sp_params.pri =
+ ets->cos_params[i].strict;
+ } else { /* COS is BW */
+ if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+ BNX2X_ERR("COS must be either BW or SP\n");
+ return;
+ }
+ ets_params.cos[i].state = bnx2x_cos_state_bw;
+ ets_params.cos[i].params.bw_params.bw =
+ (u8)ets->cos_params[i].bw_tbl;
+ }
+ }
+
+ /* Configure the ETS in HW */
+ if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+ &ets_params)) {
+ BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+ }
+}
+
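+/*
+ * Re-apply the negotiated ETS configuration: ETS is first disabled in
+ * HW and, if enabled and error free, re-programmed using the E3B0 flow
+ * or the 2-COS limited flow for older chips.
+ */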
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+ if (!bp->dcbx_port_params.ets.enabled ||
+ (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ return;
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_update_ets_config(bp);
+ else
+ bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+ struct lldp_remote_mib remote_mib = {0};
+ u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+ dcbx_remote_mib_offset);
+
+ if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+ DCBX_READ_REMOTE_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Failed to read remote mib from FW\n");
+ return rc;
+ }
+
+ /* save features and flags */
+ bp->dcbx_remote_feat = remote_mib.features;
+ bp->dcbx_remote_flags = remote_mib.flags;
+ return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+ struct lldp_local_mib local_mib = {0};
+ u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+ if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+ return -EINVAL;
+ }
+
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+ DCBX_READ_LOCAL_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Failed to read local mib from FW\n");
+ return rc;
+ }
+
+ /* save features and error */
+ bp->dcbx_local_feat = local_mib.features;
+ bp->dcbx_error = local_mib.error;
+ return 0;
+}
+
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+ u8 pri;
+
+ /* Choose the highest priority */
+ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+ if (ent->pri_bitmap & (1 << pri))
+ break;
+ return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+ return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+ DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+ DCB_APP_IDTYPE_ETHTYPE;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+ int i, err = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+ /* avoid invalid user-priority */
+ if (up) {
+ struct dcb_app app;
+ app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ app.protocol = ent->app_id;
+ app.priority = delall ? 0 : up;
+ err = dcb_setapp(bp->dev, &app);
+ }
+ }
+ }
+ return err;
+}
+#endif
+
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+ u8 prio, cos;
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+ & (1 << prio)) {
+ bp->prio_to_cos[prio] = cos;
+ DP(NETIF_MSG_LINK,
+ "tx_mapping %d --> %d\n", prio, cos);
+ }
+ }
+ }
+
+ /* setup tc must be called under rtnl lock, but we can't take it here
+ * as we are handling an attention on a work queue which must be
+ * flushed at some rtnl-locked contexts (e.g. if down)
+ */
+ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
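+/*
+ * DCBX state machine driven by MCP events: on NEG_RECEIVED read the
+ * negotiated results and stop HW transmission, on TX_PAUSED program
+ * PFC and ETS and resume transmission, on TX_RELEASED acknowledge the
+ * management firmware.
+ */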
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
+ {
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+ /**
+ * Delete app tlvs from dcbnl before reading new
+ * negotiation results
+ */
+ bnx2x_dcbnl_update_applist(bp, true);
+
+ /* Read remote mib if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_remote_mib(bp))
+ return;
+#endif
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ /* mark DCBX result for PMF migration */
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
+#ifdef BCM_DCBNL
+ /**
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+#endif
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ /* reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
+ return;
+ }
+ case BNX2X_DCBX_STATE_TX_PAUSED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ bnx2x_pfc_set_pfc(bp);
+
+ bnx2x_dcbx_update_ets_params(bp);
+ bnx2x_dcbx_resume_hw_tx(bp);
+ return;
+ case BNX2X_DCBX_STATE_TX_RELEASED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ return;
+ default:
+ BNX2X_ERR("Unknown DCBX_STATE\n");
+ }
+}
+
+#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
+ BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
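+/*
+ * Read-modify-write the LLDP admin MIB in shmem: set whether DCBX
+ * negotiation is enabled and, when local settings override is
+ * requested, push the driver's admin ETS/PFC/APP parameters for the
+ * firmware to advertise.
+ */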
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_admin_mib admin_mib;
+ u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+ u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+ /*shortcuts*/
+ struct dcbx_features *af = &admin_mib.features;
+ struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+ memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+
+ /* Read the data first */
+ bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+ if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+ admin_mib.ver_cfg_flags |=
+ (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+ DCBX_CEE_VERSION_MASK;
+
+ af->ets.enabled = (u8)dp->admin_ets_enable;
+
+ af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+ /* FOR IEEE dp->admin_tc_supported_tx_enable */
+ if (dp->admin_ets_configuration_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ /* For IEEE admin_ets_recommendation_tx_enable */
+ if (dp->admin_pfc_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+
+ if (dp->admin_application_priority_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+
+ if (dp->admin_ets_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ /* For IEEE admin_ets_reco_valid */
+ if (dp->admin_pfc_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+ if (dp->admin_app_priority_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+ for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+ DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+ (u8)dp->admin_configuration_bw_precentage[i]);
+
+ DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+ }
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+ (u8)dp->admin_configuration_ets_pg[i]);
+
+ DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+ }
+
+ /*For IEEE admin_recommendation_bw_precentage
+ *For IEEE admin_recommendation_ets_pg */
+ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+ for (i = 0; i < 4; i++) {
+ if (dp->admin_priority_app_table[i].valid) {
+ struct bnx2x_admin_priority_app_table *table =
+ dp->admin_priority_app_table;
+ if ((ETH_TYPE_FCOE == table[i].app_id) &&
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+ traf_type = FCOE_APP_IDX;
+ else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+ (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+ traf_type = ISCSI_APP_IDX;
+ else
+ traf_type = other_traf_type++;
+
+ af->app.app_pri_tbl[traf_type].app_id =
+ table[i].app_id;
+
+ af->app.app_pri_tbl[traf_type].pri_bitmap =
+ (u8)(1 << table[i].priority);
+
+ af->app.app_pri_tbl[traf_type].appBitfield =
+ (DCBX_APP_ENTRY_VALID);
+
+ af->app.app_pri_tbl[traf_type].appBitfield |=
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+ DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+ }
+ }
+
+ af->app.default_pri = (u8)dp->admin_default_priority;
+
+ }
+
+ /* Write the data. */
+ bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ bp->dcb_state = dcb_on;
+ bp->dcbx_enabled = dcbx_enabled;
+ } else {
+ bp->dcb_state = false;
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+ }
+ DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+ dcb_on ? "ON" : "OFF",
+ dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+ "on-chip with negotiation" : "invalid");
+}
+
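+/*
+ * Default administrative DCBX parameters (CEE version, willing for
+ * ETS/PFC/APP, PFC enabled for priority 3 - FCoE), used when local
+ * settings overwrite the negotiated ones.
+ */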
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+ bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+ bp->dcbx_config_params.admin_ets_willing = 1;
+ bp->dcbx_config_params.admin_pfc_willing = 1;
+ bp->dcbx_config_params.overwrite_settings = 1;
+ bp->dcbx_config_params.admin_ets_enable = 1;
+ bp->dcbx_config_params.admin_pfc_enable = 1;
+ bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_reco_valid = 1;
+ bp->dcbx_config_params.admin_app_priority_willing = 1;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
+ bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
+ bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
+ bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
+ bp->dcbx_config_params.admin_default_priority =
+ bp->dcbx_config_params.admin_priority_app_table[1].priority;
+}
+
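+/*
+ * If DCB is on, this function is the PMF and shmem advertises DCBX
+ * support, write the admin MIB and ask the management FW to start
+ * negotiation.
+ */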
+void bnx2x_dcbx_init(struct bnx2x *bp)
+{
+ u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+ if (bp->dcbx_enabled <= 0)
+ return;
+
+ /* validate:
+ * chip is good for dcbx version,
+ * dcb is wanted
+ * the function is pmf
+ * shmem2 contains DCBX support fields
+ */
+ DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+ bp->dcb_state, bp->port.pmf);
+
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+ dcbx_lldp_params_offset =
+ SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+ DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ dcbx_lldp_params_offset);
+
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ /* Let HW start negotiation */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ }
+ }
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u8 pri = 0;
+ u8 cos = 0;
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+ DP(NETIF_MSG_LINK,
+ "pdev->params.dcbx_port_params.pfc."
+ "priority_non_pauseable_mask %x\n",
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+ for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pri_bitmask %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].bw_tbl %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].strict %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pauseable %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ }
+
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
+ "priority %x\n", pri,
+ pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+ }
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data)
+{
+ bool pg_found = false;
+ u32 i, traf_type, add_traf_type, add_pg;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+ /* Set to invalid */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ data[i].pg = DCBX_ILLEGAL_PG;
+
+ for (add_traf_type = 0;
+ add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+ pg_found = false;
+ if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+ add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+ for (traf_type = 0;
+ traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+ traf_type++) {
+ if (data[traf_type].pg == add_pg) {
+ if (!(data[traf_type].pg_priority &
+ (1 << ttp[add_traf_type])))
+ data[traf_type].
+ num_of_dif_pri++;
+ data[traf_type].pg_priority |=
+ (1 << ttp[add_traf_type]);
+ pg_found = true;
+ break;
+ }
+ }
+ if (false == pg_found) {
+ data[help_data->num_of_pg].pg = add_pg;
+ data[help_data->num_of_pg].pg_priority =
+ (1 << ttp[add_traf_type]);
+ data[help_data->num_of_pg].num_of_dif_pri = 1;
+ help_data->num_of_pg++;
+ }
+ }
+ DP(NETIF_MSG_LINK,
+ "add_traf_type %d pg_found %s num_of_pg %d\n",
+ add_traf_type, (false == pg_found) ? "NO" : "YES",
+ help_data->num_of_pg);
+ }
+}
+
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+ /* Only one priority, hence only one COS */
+ cos_data->data[0].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask = pri_join_mask;
+ cos_data->data[0].cos_bw = 100;
+ cos_data->num_of_cos = 1;
+}
+
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+ struct cos_entry_help_data *data,
+ u8 pg_bw)
+{
+ if (data->cos_bw == DCBX_INVALID_COS_BW)
+ data->cos_bw = pg_bw;
+ else
+ data->cos_bw += pg_bw;
+}
+
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets)
+{
+ u32 pri_tested = 0;
+ u8 i = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pausable = true;
+ cos_data->data[1].pausable = false;
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+ for (i = 0 ; i < num_of_pri ; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+
+ if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ entry = 1;
+ } else {
+ cos_data->data[0].pri_join_mask |= pri_tested;
+ entry = 0;
+ }
+ pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+ app.traffic_type_priority[i]];
+ /* There can be only one strict pg */
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+ else
+ /* If we join a group and one is strict
+ * then the bw rules */
+ cos_data->data[entry].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ if ((0 == cos_data->data[0].pri_join_mask) &&
+ (0 == cos_data->data[1].pri_join_mask))
+ BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+
+#ifndef POWER_OF_2
+#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
+#endif
+
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u32 pri_mask_without_pri = 0;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ /*debug*/
+ if (num_of_dif_pri == 1) {
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+ return;
+ }
+ /* single priority group */
+ if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable.*/
+ cos_data->data[1].pausable = false;
+
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ }
+
+ if (3 == num_of_dif_pri) {
+ if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+ pri_join_mask))) {
+ cos_data->data[0].cos_bw = 33;
+ cos_data->data[1].cos_bw = 67;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ }
+
+ } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+ /* If there are only pauseable priorities,
+ * then one/two priorities go to the first queue
+ * and one priority goes to the second queue.
+ */
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ cos_data->data[1].pausable = true;
+ cos_data->data[0].pausable = true;
+ /* All priorities except FCOE */
+ cos_data->data[0].pri_join_mask = (pri_join_mask &
+ ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+ /* Only FCOE priority.*/
+ cos_data->data[1].pri_join_mask =
+ (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+ } else
+ /* If there are only non-pauseable priorities,
+ * they will all go to the same queue.
+ */
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ } else {
+ /* priority group which is not BW limited (PG#15):*/
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* If there are both pauseable and non-pauseable
+ * priorities, the pauseable priorities go to the first
+ * queue and the non-pauseable priorities
+ * go to the second queue.
+ */
+ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ } else {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pause-able.*/
+ cos_data->data[1].pausable = false;
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, the lower priorities go
+ * to the first queue and the higher priorities go
+ * to the second queue.
+ */
+ cos_data->data[0].pausable =
+ cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ /* Remove priority tested */
+ pri_mask_without_pri =
+ (pri_join_mask & ((u8)(~pri_tested)));
+ if (pri_mask_without_pri < pri_tested)
+ break;
+ }
+
+ if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+ BNX2X_ERR("Invalid value for pri_join_mask -"
+ " could not find a priority\n");
+
+ cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+ cos_data->data[1].pri_join_mask = pri_tested;
+ /* Both queues are strict priority,
+ * and that with the highest priority
+ * gets the highest strict priority in the arbiter.
+ */
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
+
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[0].pg_priority) ||
+ IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[1].pg_priority)) {
+ /* If one PG contains both pauseable and
+ * non-pauseable priorities then ETS is disabled.
+ */
+ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+ pg_pri_orginal_spread, ets);
+ bp->dcbx_port_params.ets.enabled = false;
+ return;
+ }
+
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+ pg_help_data->data[0].pg_priority)) {
+ /* 0 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ } else {/* 1 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[0] = pg_help_data->data[1].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[1] = pg_help_data->data[0].pg;
+ }
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, each PG goes to a queue.
+ */
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ }
+
+ /* There can be only one strict pg */
+ for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
+ if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ cos_data->data[i].cos_bw =
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+ else
+ cos_data->data[i].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+}
+
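+/*
+ * Reduce the number of PG entries to required_num_of_pg by repeatedly
+ * merging the last two entries: priorities are ORed, BW is summed, and
+ * a merge involving the strict-priority PG yields a strict-priority PG.
+ */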
+static int bnx2x_dcbx_join_pgs(
+ struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ struct pg_help_data *pg_help_data,
+ u8 required_num_of_pg)
+{
+ u8 entry_joined = pg_help_data->num_of_pg - 1;
+ u8 entry_removed = entry_joined + 1;
+ u8 pg_joined = 0;
+
+ if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+ <= pg_help_data->num_of_pg) {
+
+ BNX2X_ERR("Invalid required_num_of_pg or too many PGs\n");
+ return -EINVAL;
+ }
+
+ while (required_num_of_pg < pg_help_data->num_of_pg) {
+ entry_joined = pg_help_data->num_of_pg - 2;
+ entry_removed = entry_joined + 1;
+ /* protect index */
+ entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+ pg_help_data->data[entry_joined].pg_priority |=
+ pg_help_data->data[entry_removed].pg_priority;
+
+ pg_help_data->data[entry_joined].num_of_dif_pri +=
+ pg_help_data->data[entry_removed].num_of_dif_pri;
+
+ if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+ pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+ /* Entries joined strict priority rules */
+ pg_help_data->data[entry_joined].pg =
+ DCBX_STRICT_PRI_PG;
+ else {
+ /* Entries can be joined - join their BW */
+ pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg) +
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_removed].pg);
+
+ DCBX_PG_BW_SET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg, pg_joined);
+ }
+ /* Joined the entries */
+ pg_help_data->num_of_pg--;
+ }
+
+ return 0;
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ bool b_found_strict = false;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and the
+ * non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+ bnx2x_dcbx_separate_pauseable_from_non(bp,
+ cos_data, pg_pri_orginal_spread, ets);
+ else {
+ /* If two BW-limited PG-s were combined to one queue,
+ * the BW is their sum.
+ *
+ * If there are only pauseable priorities or only non-pauseable,
+ * and there are both BW-limited and non-BW-limited PG-s,
+ * the BW-limited PG/s go to one queue and the non-BW-limited
+ * PG/s go to the second queue.
+ *
+ * If there are only pauseable priorities or only non-pauseable
+ * and all are BW limited, then two priorities go to the first
+ * queue and one priority goes to the second queue.
+ *
+ * We will join these two cases:
+ * if one is BW limited it will go to the second queue,
+ * otherwise the last priority will get it
+ */
+
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < num_of_pri; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ pg_entry = (u8)pg_pri_orginal_spread[bp->
+ dcbx_port_params.app.traffic_type_priority[i]];
+
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ entry = 0;
+
+ if (i == (num_of_pri-1) &&
+ false == b_found_strict)
+ /* last entry will be handled separately -
+ * if no priority is strict then the last
+ * entry goes to the last queue. */
+ entry = 1;
+ cos_data->data[entry].pri_join_mask |=
+ pri_tested;
+ bnx2x_dcbx_add_to_cos_bw(bp,
+ &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_entry));
+ } else {
+ b_found_strict = true;
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ /* If we join a group and one is strict
+ * then the bw rules */
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
+ }
+ }
+ }
+}
+
+
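+/*
+ * E2/E3A0 are limited to two COS: decide how the negotiated PGs are
+ * squeezed into the two queues based on how many PGs were advertised.
+ */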
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+
+ /* default E2 settings */
+ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
+ switch (help_data->num_of_pg) {
+ case 1:
+ bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+ bp,
+ help_data,
+ cos_data,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ case 2:
+ bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+
+ case 3:
+ bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ default:
+ BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ }
+}
+
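+/*
+ * Spread the strict application priorities over num_spread_of_entries
+ * COS entries, highest priority first; the last entry collects all the
+ * remaining priorities. Returns -EINVAL if there are fewer priorities
+ * than entries to fill.
+ */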
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+ u8 app_pri_bit = 0;
+
+ while (num_spread_of_entries && num_of_app_pri > 0) {
+ app_pri_bit = 1 << (num_of_app_pri - 1);
+ if (app_pri_bit & strict_app_pris) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ num_spread_of_entries--;
+ if (num_spread_of_entries == 0) {
+ /* last entry needed - put all the remaining priorities in it */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ } else {
+ strict_app_pris &= ~app_pri_bit;
+
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = app_pri_bit;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ }
+
+ strict_pri =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+ entry++;
+ }
+
+ num_of_app_pri--;
+ }
+
+ if (num_spread_of_entries)
+ return -EINVAL;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+
+ if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+ num_spread_of_entries,
+ strict_app_pris)) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ return 1;
+ }
+
+ return num_spread_of_entries;
+}
+
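+/*
+ * E3B0 flow: each BW-limited PG becomes a COS entry with its
+ * bandwidth, while the strict-priority PG (PG15) is spread over the
+ * remaining COS entries as strict priorities.
+ */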
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+
+{
+ u8 need_num_of_entries = 0;
+ u8 i = 0;
+ u8 entry = 0;
+
+ /*
+ * if the number of requested PG-s in CEE is greater than 3
+ * then the results are not determined since this is a violation
+ * of the standard.
+ */
+ if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+ if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+ DCBX_COS_MAX_NUM_E3B0)) {
+ BNX2X_ERR("Unable to reduce the number of PGs - "
+ "we will disable ETS\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+ pri_join_mask);
+ return;
+ }
+ }
+
+ for (i = 0 ; i < help_data->num_of_pg; i++) {
+ struct pg_entry_help_data *pg = &help_data->data[i];
+ if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+ data->strict = BNX2X_DCBX_STRICT_INVALID;
+ data->pri_join_mask = pg->pg_priority;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+
+ entry++;
+ } else {
+ need_num_of_entries = min_t(u8,
+ (u8)pg->num_of_dif_pri,
+ (u8)DCBX_COS_MAX_NUM_E3B0 -
+ help_data->num_of_pg + 1);
+ /*
+ * If there are still VOQ-s which have no associated PG,
+ * then associate these VOQ-s to PG15. These PG-s will
+ * be used for SP between priorities on PG15.
+ */
+ entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+ entry, need_num_of_entries, pg->pg_priority);
+ }
+ }
+
+ /* the entry will represent the number of COSes used */
+ cos_data->num_of_cos = entry;
+}
+
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+ struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg ; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* defaults */
+ cos_data.num_of_cos = 1;
+ for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+ cos_data.data[i].pri_join_mask = 0;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+ &cos_data, pri_join_mask);
+ else /* E2 + E3A0 */
+ bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+ help_data, ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
+ for (i = 0; i < cos_data.num_of_cos ; i++) {
+ struct bnx2x_dcbx_cos_params *p =
+ &bp->dcbx_port_params.ets.cos_params[i];
+
+ p->strict = cos_data.data[i].strict;
+ p->bw_tbl = cos_data.data[i].cos_bw;
+ p->pri_bitmask = cos_data.data[i].pri_join_mask;
+ p->pauseable = cos_data.data[i].pausable;
+
+ /* sanity */
+ if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+ p->strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (p->pri_bitmask == 0)
+ BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+ if (p->pauseable &&
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for "
+ "pausable COS %d\n", i);
+
+ if (!p->pauseable &&
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for "
+ "nonpausable COS %d\n", i);
+ }
+ }
+
+ if (p->pauseable)
+ DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ else
+ DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
+ "0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ }
+
+ bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl)
+{
+ int i;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+ DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ i, set_configuration_ets_pg[i]);
+ }
+}
+
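+/*
+ * Build the TX_START ramrod parameters from the negotiated results:
+ * the structure is zeroed (DCB disabled) on remote MIB error,
+ * otherwise each traffic type is mapped to its priority and COS.
+ */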
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+ u16 pri_bit = 0;
+ u8 cos = 0, pri = 0;
+ struct priority_cos *tt2cos;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+ /* to disable DCB - the structure must be zeroed */
+ if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+ return;
+
+ /*shortcut*/
+ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+ /* Fw version should be incremented each update */
+ pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+ pfc_fw_cfg->dcb_enabled = 1;
+
+ /* Fill priority parameters */
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ tt2cos[pri].priority = ttp[pri];
+ pri_bit = 1 << tt2cos[pri].priority;
+
+ /* Fill COS parameters based on COS calculated to
+ * make it more general for future use */
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+ if (bp->dcbx_port_params.ets.cos_params[cos].
+ pri_bitmask & pri_bit)
+ tt2cos[pri].cos = cos;
+ }
+
+ /* we never want the FW to add a 0 vlan tag */
+ pfc_fw_cfg->dont_add_pri_0_en = 1;
+
+ bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
+}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+ /* if we need to synchronize DCBX result from prev PMF
+ * read it from shmem and update bp accordingly
+ */
+ if (SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ }
+}
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
+ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+ /* validate dcbnl call that may change HW state:
+ * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+ */
+ return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+ return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+ return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+
+ /* first the HW mac address */
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+#ifdef BCM_CNIC
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
+#endif
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ /*
+ * bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+
+ bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ /*
+ * bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+ *up_map = *bw_pct = *prio_type = *pgid = 0;
+
+ if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+
+ *bw_pct = 0;
+
+ if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
+
+ if (setting)
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ *setting = 0;
+
+ if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
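+/*
+ * dcbnl 'setall' callback: commit the admin DCBX configuration by
+ * reloading the NIC so the new parameters are renegotiated and applied.
+ */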
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc = 0;
+
+ DP(NETIF_MSG_LINK, "SET-ALL\n");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return 1;
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return 1;
+ }
+ if (netif_running(bp->dev)) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
+ DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+ if (rc)
+ return 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ if (bp->dcb_state) {
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PFC */
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = BNX2X_DCBX_CAPS;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+
+ if (bp->dcb_state) {
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+ return -EINVAL;
+}
+
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+ if (!bp->dcb_state)
+ return 0;
+
+ return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return;
+
+ bp->dcbx_config_params.admin_pfc_tx_enable =
+ bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval, u8 up)
+{
+ app_ent->valid = 1;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+ break;
+ default:
+ break; /* never gets here */
+ }
+ app_ent->app_id = idval;
+ app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval)
+{
+ if (!app_ent->valid)
+ return false;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+ return false;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ if (app_ent->app_id != idval)
+ return false;
+
+ return true;
+}
+
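+/*
+ * Update the admin application-priority table: overwrite the priority of
+ * a matching entry if one exists, otherwise use the first free slot.
+ * Returns -EBUSY when the table is full.
+ */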
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+ int i, ff;
+
+ /* iterate over the app entries looking for idtype and idval */
+ for (i = 0, ff = -1; i < 4; i++) {
+ struct bnx2x_admin_priority_app_table *app_ent =
+ &bp->dcbx_config_params.admin_priority_app_table[i];
+ if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+ break;
+
+ if (ff < 0 && !app_ent->valid)
+ ff = i;
+ }
+ if (i < 4)
+ /* if found overwrite up */
+ bp->dcbx_config_params.
+ admin_priority_app_table[i].priority = up;
+ else if (ff >= 0)
+ /* not found use first-free */
+ bnx2x_admin_app_set_ent(
+ &bp->dcbx_config_params.admin_priority_app_table[ff],
+ idtype, idval, up);
+ else
+ /* app table is full */
+ return -EBUSY;
+
+ /* up configured, if not 0 make sure feature is enabled */
+ if (up)
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+ u16 idval, u8 up)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+ idtype, idval, up);
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return -EINVAL;
+
+ /* verify idtype */
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ case DCB_APP_IDTYPE_PORTNUM:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
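+/* dcbnl 'getdcbx' callback: report the currently active DCBX mode flags */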
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 state;
+
+ state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+ state |= DCB_CAP_DCBX_STATIC;
+
+ return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %02x\n", state);
+
+ /* set dcbx mode */
+
+ if ((state & BNX2X_DCBX_CAPS) != state) {
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
+ "capabilities\n", state);
+ return 1;
+ }
+
+ if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+ BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+ return 1;
+ }
+
+ if (state & DCB_CAP_DCBX_STATIC)
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+ else
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+ bp->dcbx_mode_uset = true;
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid %d\n", featid);
+
+ if (bp->dcb_state) {
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (bp->dcbx_local_feat.ets.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (bp->dcbx_local_feat.pfc.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+ DCBX_LOCAL_PFC_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (bp->dcbx_local_feat.app.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+ DCBX_LOCAL_APP_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+ u8 flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+
+ /* ignore the 'advertise' flag */
+ if (bnx2x_dcbnl_set_valid(bp)) {
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ bp->dcbx_config_params.admin_ets_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_ets_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ bp->dcbx_config_params.admin_pfc_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_pfc_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ /* ignore enable, always enabled */
+ bp->dcbx_config_params.admin_app_priority_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info, u16 *app_count)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "APP-INFO\n");
+
+ info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+ info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+ *app_count = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+ DCBX_APP_ENTRY_VALID)
+ (*app_count)++;
+ return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ int i, j;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "APP-TABLE\n");
+
+ for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+ table[j++].protocol = ent->app_id;
+ }
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+ int i;
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+ for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+ pg->pg_bw[i] =
+ DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+ pg->prio_pg[i] =
+ DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+ }
+ return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+ pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+ return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+ .getstate = bnx2x_dcbnl_get_state,
+ .setstate = bnx2x_dcbnl_set_state,
+ .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
+ .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
+ .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
+ .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
+ .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
+ .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
+ .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
+ .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
+ .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
+ .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
+ .setall = bnx2x_dcbnl_set_all,
+ .getcap = bnx2x_dcbnl_get_cap,
+ .getnumtcs = bnx2x_dcbnl_get_numtcs,
+ .setnumtcs = bnx2x_dcbnl_set_numtcs,
+ .getpfcstate = bnx2x_dcbnl_get_pfc_state,
+ .setpfcstate = bnx2x_dcbnl_set_pfc_state,
+ .setapp = bnx2x_dcbnl_set_app_up,
+ .getdcbx = bnx2x_dcbnl_get_dcbx,
+ .setdcbx = bnx2x_dcbnl_set_dcbx,
+ .getfeatcfg = bnx2x_dcbnl_get_featcfg,
+ .setfeatcfg = bnx2x_dcbnl_set_featcfg,
+ .peer_getappinfo = bnx2x_peer_appinfo,
+ .peer_getapptable = bnx2x_peer_apptable,
+ .cee_peer_getpg = bnx2x_cee_peer_getpg,
+ .cee_peer_getpfc = bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
--- /dev/null
+/* bnx2x_main.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h> /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT (5*HZ)
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+ "BCM57710/57711/57711E/"
+ "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+ "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+ "(0 Disable; 1 Enable (default))");
+
+int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+ " (default is as a number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+#define INT_MODE_INTx 1
+#define INT_MODE_MSI 2
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+
+
+struct workqueue_struct *bnx2x_wq;
+
+enum bnx2x_board_type {
+ BCM57710 = 0,
+ BCM57711,
+ BCM57711E,
+ BCM57712,
+ BCM57712_MF,
+ BCM57800,
+ BCM57800_MF,
+ BCM57810,
+ BCM57810_MF,
+ BCM57840,
+ BCM57840_MF
+};
+
+/* indexed by board_type, above */
+static struct {
+ char *name;
+} board_info[] __devinitdata = {
+ { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+ "Ethernet Multi Function"}
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
+#endif
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+ u32 addr, dma_addr_t mapping)
+{
+ REG_WR(bp, addr, U64_LO(mapping));
+ REG_WR(bp, addr + 4, U64_HI(mapping));
+}
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+ dma_addr_t mapping, u16 abs_fid)
+{
+ u32 addr = XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+ __storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+ struct event_ring_data *eq_data,
+ u16 pfid)
+{
+ size_t size = sizeof(struct event_ring_data);
+
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+ u16 pfid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+ REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+ u32 val;
+
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+ pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+
+ return val;
+}
+
+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
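+/*
+ * Debug-print a DMAE command; the output format depends on whether the
+ * source and destination are PCI or GRC addresses.
+ */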
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+ u32 cmd_offset;
+ int i;
+
+ cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+ for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+ REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+
+ DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+ idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
+ }
+ REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+ DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+ return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
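+/*
+ * Build a DMAE opcode from the source/destination types, the port and VN
+ * of this function, the endianness and (optionally) completion settings.
+ */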
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+ bool with_comp, u8 comp_type)
+{
+ u32 opcode = 0;
+
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+ if (with_comp)
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+ return opcode;
+}
+
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
+{
+ memset(dmae, 0, sizeof(struct dmae_command));
+
+ /* set the opcode */
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+ true, DMAE_COMP_PCI);
+
+ /* fill in the completion parameters */
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+ dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a DMAE command over the init channel and wait for completion */
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
+{
+ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+ int rc = 0;
+
+ DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+ /*
+ * Lock the DMAE channel. Disable BHs to prevent a deadlock,
+ * since this code is called both from syscall context and
+ * from the ndo_set_rx_mode() flow, which may run in BH context.
+ */
+ spin_lock_bh(&bp->dmae_lock);
+
+ /* reset completion */
+ *wb_comp = 0;
+
+ /* post the command on the channel used for initializations */
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+ /* wait for completion */
+ udelay(5);
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+ DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
+
+ if (!cnt) {
+ BNX2X_ERR("DMAE timeout!\n");
+ rc = DMAE_TIMEOUT;
+ goto unlock;
+ }
+ cnt--;
+ udelay(50);
+ }
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+ BNX2X_ERR("DMAE PCI error!\n");
+ rc = DMAE_PCI_ERROR;
+ }
+
+ DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+unlock:
+ spin_unlock_bh(&bp->dmae_lock);
+ return rc;
+}
+
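+/*
+ * DMA a buffer from host memory to a GRC address; falls back to indirect
+ * register writes when DMAE is not ready yet.
+ */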
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+ u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
+ " using indirect\n", dst_addr, len32);
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = U64_LO(dma_addr);
+ dmae.src_addr_hi = U64_HI(dma_addr);
+ dmae.dst_addr_lo = dst_addr >> 2;
+ dmae.dst_addr_hi = 0;
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
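+/*
+ * DMA a GRC region into the slowpath wb_data buffer; falls back to
+ * indirect register reads when DMAE is not ready yet.
+ */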
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (!bp->dmae_ready) {
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
+ int i;
+
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
+ " using indirect\n", src_addr, len32);
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ return;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+ /* fill in addresses and len */
+ dmae.src_addr_lo = src_addr >> 2;
+ dmae.src_addr_hi = 0;
+ dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+ dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+ dmae.len = len32;
+
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+ /* issue the command and wait for completion */
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
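+/*
+ * Write a long buffer via DMAE, splitting it into chunks no larger than
+ * the maximum length of a single DMAE write command.
+ */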
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
+{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+ int offset = 0;
+
+ while (len > dmae_wr_max) {
+ bnx2x_write_dmae(bp, phys_addr + offset,
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
+ }
+
+ bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+/* used only for slowpath so not inlined */
+static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
+{
+ u32 wb_write[2];
+
+ wb_write[0] = val_hi;
+ wb_write[1] = val_lo;
+ REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+#ifdef USE_WB_RD
+static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
+{
+ u32 wb_data[2];
+
+ REG_RD_DMAE(bp, reg, wb_data, 2);
+
+ return HILO_U64(wb_data[0], wb_data[1]);
+}
+#endif
+
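+/*
+ * Scan the assert lists of the X/T/C/U storm processors and print any
+ * firmware asserts found; returns the number of asserts.
+ */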
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+ char last_idx;
+ int i, rc = 0;
+ u32 row0, row1, row2, row3;
+
+ /* XSTORM */
+ last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* TSTORM */
+ last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* CSTORM */
+ last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ /* USTORM */
+ last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_INDEX_OFFSET);
+ if (last_idx)
+ BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+ row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i));
+ row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 4);
+ row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 8);
+ row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
+ USTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+ if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
+ " 0x%08x 0x%08x 0x%08x\n",
+ i, row3, row2, row1, row0);
+ rc++;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+ u32 addr, val;
+ u32 mark, offset;
+ __be32 data[9];
+ int word;
+ u32 trace_shmem_base;
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+ netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff));
+
+ val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+ if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+ printk("%s" "MCP PC at 0x%x\n", lvl, val);
+
+ if (BP_PATH(bp) == 0)
+ trace_shmem_base = bp->common.shmem_base;
+ else
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
+ addr = trace_shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ + ((mark + 0x3) & ~0x3) - 0x08000000;
+ printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+ printk("%s", lvl);
+ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+ for (word = 0; word < 8; word++)
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
+ data[8] = 0x0;
+ pr_cont("%s", (char *)data);
+ }
+ printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+ bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
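+/*
+ * Dump driver and firmware state (status blocks, rings when
+ * BNX2X_STOP_ON_ERROR is set, the fw trace and storm asserts) for
+ * post-mortem debugging.
+ */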
+void bnx2x_panic_dump(struct bnx2x *bp)
+{
+ int i;
+ u16 j;
+ struct hc_sp_status_block_data sp_sb_data;
+ int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+ u16 start = 0, end = 0;
+ u8 cos;
+#endif
+
+ bp->stats_state = STATS_STATE_DISABLED;
+ DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+ BNX2X_ERR("begin crash dump -----------------\n");
+
+ /* Indices */
+ /* Common */
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ bp->def_status_blk->atten_status_block.attn_bits,
+ bp->def_status_blk->atten_status_block.attn_bits_ack,
+ bp->def_status_blk->atten_status_block.status_block_id,
+ bp->def_status_blk->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ bp->def_status_blk->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+
+
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int loop;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.common.state_machine :
+ sb_data_e2.common.state_machine;
+ struct hc_index_data *hc_index_p =
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.index_data :
+ sb_data_e2.index_data;
+ u8 data_size, cos;
+ u32 *sb_data_p;
+ struct bnx2x_fp_txdata txdata;
+
+ /* Rx */
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ i, fp->rx_bd_prod, fp->rx_bd_cons,
+ fp->rx_comp_prod,
+ fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_hc_idx(0x%x)\n",
+ fp->rx_sge_prod, fp->last_max_sge,
+ le16_to_cpu(fp->fp_hc_idx));
+
+ /* Tx */
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = fp->txdata[cos];
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
+ i, txdata.tx_pkt_prod,
+ txdata.tx_pkt_cons, txdata.tx_bd_prod,
+ txdata.tx_bd_cons,
+ le16_to_cpu(*txdata.tx_cons_sb));
+ }
+
+ loop = CHIP_IS_E1x(bp) ?
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+ /* host sb data */
+
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ continue;
+#endif
+ BNX2X_ERR(" run indexes (");
+ for (j = 0; j < HC_SB_MAX_SM; j++)
+ pr_cont("0x%x%s",
+ fp->sb_running_index[j],
+ (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+ BNX2X_ERR(" indexes (");
+ for (j = 0; j < loop; j++)
+ pr_cont("0x%x%s",
+ fp->sb_index_values[j],
+ (j == loop - 1) ? ")" : " ");
+ /* fw sb data */
+ data_size = CHIP_IS_E1x(bp) ?
+ sizeof(struct hc_status_block_data_e1x) :
+ sizeof(struct hc_status_block_data_e2);
+ data_size /= sizeof(u32);
+ sb_data_p = CHIP_IS_E1x(bp) ?
+ (u32 *)&sb_data_e1x :
+ (u32 *)&sb_data_e2;
+ /* copy sb data in here */
+ for (j = 0; j < data_size; j++)
+ *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+ j * sizeof(u32));
+
+ if (!CHIP_IS_E1x(bp)) {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e2.common.p_func.pf_id,
+ sb_data_e2.common.p_func.vf_id,
+ sb_data_e2.common.p_func.vf_valid,
+ sb_data_e2.common.p_func.vnic_id,
+ sb_data_e2.common.same_igu_sb_1b,
+ sb_data_e2.common.state);
+ } else {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
+ sb_data_e1x.common.p_func.pf_id,
+ sb_data_e1x.common.p_func.vf_id,
+ sb_data_e1x.common.p_func.vf_valid,
+ sb_data_e1x.common.p_func.vnic_id,
+ sb_data_e1x.common.same_igu_sb_1b,
+ sb_data_e1x.common.state);
+ }
+
+ /* SB_SMs data */
+ for (j = 0; j < HC_SB_MAX_SM; j++) {
+ pr_cont("SM[%d] __flags (0x%x) "
+ "igu_sb_id (0x%x) igu_seg_id(0x%x) "
+ "time_to_expire (0x%x) "
+ "timer_value(0x%x)\n", j,
+ hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
+ }
+
+ /* Indices data */
+ for (j = 0; j < loop; j++) {
+ pr_cont("INDEX[%d] flags (0x%x) "
+ "timeout (0x%x)\n", j,
+ hc_index_p[j].flags,
+ hc_index_p[j].timeout);
+ }
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Rings */
+ /* Rx */
+ for_each_rx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+ end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+ for (j = start; j != end; j = RX_BD(j + 1)) {
+ u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+ struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
+ i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+ }
+
+ start = RX_SGE(fp->rx_sge_prod);
+ end = RX_SGE(fp->last_max_sge);
+ for (j = start; j != end; j = RX_SGE(j + 1)) {
+ u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+ struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+ BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
+ i, j, rx_sge[1], rx_sge[0], sw_page->page);
+ }
+
+ start = RCQ_BD(fp->rx_comp_cons - 10);
+ end = RCQ_BD(fp->rx_comp_cons + 503);
+ for (j = start; j != end; j = RCQ_BD(j + 1)) {
+ u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+ BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+ i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+ }
+ }
+
+ /* Tx */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+ end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ struct sw_tx_bd *sw_bd =
+ &txdata->tx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, "
+ "packet[%x]=[%p,%x]\n",
+ i, cos, j, sw_bd->skb,
+ sw_bd->first_bd);
+ }
+
+ start = TX_BD(txdata->tx_bd_cons - 10);
+ end = TX_BD(txdata->tx_bd_cons + 254);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
+ "[%x:%x:%x:%x]\n",
+ i, cos, j, tx_bd[0], tx_bd[1],
+ tx_bd[2], tx_bd[3]);
+ }
+ }
+ }
+#endif
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+ int pN;
+ u32 init_crd;
+ u32 crd;
+ u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ u32 lines_occup;
+ u32 lines_freed;
+};
+
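+/*
+ * Poll the PBF credit registers of port/queue pN until its tx buffer is
+ * flushed or the poll count expires.
+ */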
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+ struct pbf_pN_buf_regs *regs,
+ u32 poll_count)
+{
+ u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ u32 cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+ crd = crd_start = REG_RD(bp, regs->crd);
+ init_crd = REG_RD(bp, regs->init_crd);
+
+ DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
+ while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ crd = REG_RD(bp, regs->crd);
+ crd_freed = REG_RD(bp, regs->crd_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
+ regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+ regs->pN, crd_freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+ struct pbf_pN_cmd_regs *regs,
+ u32 poll_count)
+{
+ u32 occup, to_free, freed, freed_start;
+ u32 cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(bp, regs->lines_occup);
+ freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+ while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ occup = REG_RD(bp, regs->lines_occup);
+ freed = REG_RD(bp, regs->lines_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
+ regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+ regs->pN, freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
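+/*
+ * Poll a register until it reads the expected value or the poll count
+ * expires; returns the last value read.
+ */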
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+ u32 expected, u32 poll_count)
+{
+ u32 cur_cnt = poll_count;
+ u32 val;
+
+ while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+ udelay(FLR_WAIT_INTERAVAL);
+
+ return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
+{
+ u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+ if (val != 0) {
+ BNX2X_ERR("%s usage count=%d\n", msg, val);
+ return 1;
+ }
+ return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(bp))
+ return FLR_POLL_CNT * 2000;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ return FLR_POLL_CNT * 120;
+
+ return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 :
+ PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+ PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 :
+ PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+ PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q :
+ PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q0 :
+ PBF_REG_P0_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q0 :
+ PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q1 :
+ PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q1 :
+ PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_LB_Q :
+ PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_LB_Q :
+ PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ int i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+ bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+ bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+ u32 poll_cnt)
+{
+ struct sdm_op_gen op_gen = {0};
+
+ u32 comp_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+ int ret = 0;
+
+ if (REG_RD(bp, comp_addr)) {
+ BNX2X_ERR("Cleanup complete is not 0\n");
+ return 1;
+ }
+
+ op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+ if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+ BNX2X_ERR("FW final cleanup did not succeed\n");
+ ret = 1;
+ }
+ /* Zero the completion for the next FLR */
+ REG_WR(bp, comp_addr, 0);
+
+ return ret;
+}
+
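+/* Return true if the device still has pending PCIe transactions */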
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+ int pos;
+ u16 status;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+ "QM PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+ "Timers VNIC usage counter timed out",
+ poll_cnt))
+ return 1;
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait DMAE PF usage counter to zero */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ dmae_reg_go_c[INIT_DMAE_C(bp)],
+ "DMAE dommand register timed out",
+ poll_cnt))
+ return 1;
+
+ return 0;
+}
+
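+/* Debug helper: print the post-FLR state of the per-PF enable registers */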
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+ DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, PBF_REG_DISABLE_PF);
+ DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+ val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+ /* Re-enable PF target read access */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+ return -EBUSY;
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+ return -EBUSY;
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(bp->pdev))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ /* Debug */
+ bnx2x_hw_enable_status(bp);
+
+ /*
+ * Master enable - needed due to the WB DMAE writes performed before
+ * this register is re-initialized as part of the regular function init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
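+/*
+ * Enable interrupts through the HC block, programming it for MSI-X, MSI
+ * or INTx according to the current driver flags.
+ */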
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ if (msix) {
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0);
+ val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else if (msi) {
+ val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else {
+ val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ if (!CHIP_IS_E1(bp)) {
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ REG_WR(bp, addr, val);
+
+ val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+ }
+ }
+
+ if (CHIP_IS_E1(bp))
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
+ val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, addr, val);
+ /*
+ * Ensure that HC_CONFIG is written before leading/trailing edge config
+ */
+ mmiowb();
+ barrier();
+
+ if (!CHIP_IS_E1(bp)) {
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ }
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+ u32 val;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ if (msix) {
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+ } else if (msi) {
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ } else {
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+ val |= (IGU_PF_CONF_FUNC_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN |
+ IGU_PF_CONF_SINGLE_ISR_EN);
+ }
+
+ DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+ barrier();
+
+ /* init leading/trailing edge */
+ if (IS_MF(bp)) {
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->port.pmf)
+ /* enable nig and gpio3 attention */
+ val |= 0x1100;
+ } else
+ val = 0xffff;
+
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+ /* Make sure that interrupts are indeed enabled from here on */
+ mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_enable(bp);
+ else
+ bnx2x_igu_int_enable(bp);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /*
+ * In E1 we must use only the PCI configuration space to disable the
+ * MSI/MSI-X capability.
+ * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+ * use the mask register to prevent the HC from sending interrupts
+ * after we exit this function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+ int i, offset;
+
+ if (disable_hw)
+ /* prevent the HW from sending interrupts */
+ bnx2x_int_disable(bp);
+
+ /* make sure all ISRs are done */
+ if (msix) {
+ synchronize_irq(bp->msix_table[0].vector);
+ offset = 1;
+#ifdef BCM_CNIC
+ offset++;
+#endif
+ for_each_eth_queue(bp, i)
+ synchronize_irq(bp->msix_table[offset++].vector);
+ } else
+ synchronize_irq(bp->pdev->irq);
+
+ /* make sure sp_task is not running */
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
+ flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if the lock was acquired successfully */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return false;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
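+
+/*
+ * Note on the HW lock register layout, as used by the helpers in this
+ * file (an editorial reading of the code, not an authoritative HW spec):
+ * writing a resource bit to the "set" sub-register (control reg + 4)
+ * attempts to take that resource, reading the control register back
+ * shows which resources are currently held, and writing the bit to the
+ * control register itself releases it (see bnx2x_release_hw_lock()).
+ */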
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this function
+ * belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+ if (BP_PATH(bp))
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ else
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+ struct bnx2x *bp = fp->bp;
+ int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+ struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+
+ DP(BNX2X_MSG_SP,
+ "fp %d cid %d got ramrod #%d state is %x type is %d\n",
+ fp->index, cid, command, bp->state,
+ rr_cqe->ramrod_cqe.ramrod_type);
+
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_HALT):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_HALT;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_EMPTY;
+ break;
+
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+ command, fp->index);
+ return;
+ }
+
+ if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+ q_obj->complete_cmd(bp, q_obj, drv_cmd))
+ /* q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase bp->cq_spq_left
+ * because apparently we haven't sent this command in the
+ * first place.
+ */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#else
+ return;
+#endif
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(&bp->cq_spq_left);
+ /* push the change in bp->cq_spq_left towards the memory */
+ smp_mb__after_atomic_inc();
+
+ DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+ return;
+}
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+ u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+ bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+ start);
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+ struct bnx2x *bp = netdev_priv(dev_instance);
+ u16 status = bnx2x_ack_int(bp);
+ u16 mask;
+ int i;
+ u8 cos;
+
+ /* Return here if interrupt is shared and it's not for us */
+ if (unlikely(status == 0)) {
+ DP(NETIF_MSG_INTR, "not our interrupt!\n");
+ return IRQ_NONE;
+ }
+ DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
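+ /*
+ * Illustrative note on the status bits handled below (derived from
+ * the masks used here, not from a HW spec): bit 0 - default/slow-path
+ * status block (handled via sp_task at the end), bit 1 - CNIC status
+ * block when BCM_CNIC is defined, and the ETH fast-path status blocks
+ * from bit (1 + CNIC_PRESENT) upwards - hence the
+ * 0x2 << (fp->index + CNIC_PRESENT) mask.
+ */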
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ mask = 0x2 << (fp->index + CNIC_PRESENT);
+ if (status & mask) {
+ /* Handle Rx or Tx according to SB id */
+ prefetch(fp->rx_cons_sb);
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(&fp->sb_running_index[SM_RX_ID]);
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ status &= ~mask;
+ }
+ }
+
+#ifdef BCM_CNIC
+ mask = 0x2;
+ if (status & (mask | 0x1)) {
+ struct cnic_ops *c_ops = NULL;
+
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+
+ status &= ~mask;
+ }
+#endif
+
+ if (unlikely(status & 0x1)) {
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ status &= ~0x1;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+ int cnt;
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is not already taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EEXIST;
+ }
+
+ /* Try for 5 seconds, polling every 5ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return 0;
+
+ msleep(5);
+ }
+ DP(NETIF_MSG_HW, "Timeout\n");
+ return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5) {
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ } else {
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+ }
+
+ /* Validating that the resource is currently taken */
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (!(lock_status & resource_bit)) {
+ DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ lock_status, resource_bit);
+ return -EFAULT;
+ }
+
+ REG_WR(bp, hw_lock_control_reg, resource_bit);
+ return 0;
+}
+
+
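+/*
+ * GPIO helpers. As used below, MISC_REG_GPIO exposes per-pin value,
+ * FLOAT, CLR and SET bit groups; pins belonging to the other port are
+ * shifted by MISC_REGISTERS_GPIO_PORT_SHIFT, and the port assignment may
+ * be inverted by the swap register/strap (see the gpio_port calculation
+ * in each helper).
+ */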
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+ int value;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ /* read GPIO value */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+ /* get the requested pin value */
+ if ((gpio_reg & gpio_mask) == gpio_mask)
+ value = 1;
+ else
+ value = 0;
+
+ DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
+
+ return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
+ /* clear FLOAT and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+ gpio_num, gpio_shift);
+ /* set FLOAT */
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+ u32 gpio_reg = 0;
+ int rc = 0;
+
+ /* Any port swapping should be handled by caller. */
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == 0)
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+ /* The GPIO should be swapped if swap register is set and active */
+ int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+ REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+ int gpio_shift = gpio_num +
+ (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ u32 gpio_mask = (1 << gpio_shift);
+ u32 gpio_reg;
+
+ if (gpio_num > MISC_REGISTERS_GPIO_3) {
+ BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO int */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+ DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+ "output low\n", gpio_num, gpio_shift);
+ /* clear SET and set CLR */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+ DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+ "output high\n", gpio_num, gpio_shift);
+ /* clear CLR and set SET */
+ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+ u32 spio_mask = (1 << spio_num);
+ u32 spio_reg;
+
+ if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+ (spio_num > MISC_REGISTERS_SPIO_7)) {
+ BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+ /* read SPIO and mask except the float bits */
+ spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+ switch (mode) {
+ case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+ /* clear FLOAT and set CLR */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+ /* clear FLOAT and set SET */
+ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+ /* set FLOAT */
+ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+ break;
+
+ default:
+ break;
+ }
+
+ REG_WR(bp, MISC_REG_SPIO, spio_reg);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+ return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ switch (bp->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+
+ case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+ break;
+
+ default:
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+ ADVERTISED_Pause);
+ break;
+ }
+}
+
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+ if (!BP_NOMCP(bp)) {
+ u8 rc;
+ int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+ /*
+ * Initialize link parameters structure variables
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+ else
+ bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+ bnx2x_acquire_phy_lock(bp);
+
+ if (load_mode == LOAD_DIAG) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* do PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
+ }
+
+ rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+
+ if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ bnx2x_link_report(bp);
+ } else
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+ return rc;
+ }
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+
+ bnx2x_calc_fc_adv(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+ u8 rc = 0;
+
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+ return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+ u32 r_param = bp->link_vars.line_speed / 8;
+ u32 fair_periodic_timeout_usec;
+ u32 t_fair;
+
+ memset(&(bp->cmng.rs_vars), 0,
+ sizeof(struct rate_shaping_vars_per_port));
+ memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+ /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+ bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+ /* this is the threshold below which no timer arming will occur;
+ the 1.25 coefficient makes the threshold a little bigger than
+ the real time, to compensate for timer inaccuracy */
+ bp->cmng.rs_vars.rs_threshold =
+ (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+
+ /* resolution of fairness timer */
+ fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+ /* for 10G it is 1000usec. for 1G it is 10000usec. */
+ t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+ /* this is the threshold below which we won't arm the timer anymore */
+ bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+ /* we multiply by 1e3/8 to get bytes/msec.
+ We don't want the credit to exceed
+ t_fair*FAIR_MEM (the algorithm resolution) */
+ bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+ /* since each tick is 4 usec */
+ bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
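+
+/*
+ * Rough worked example (illustrative only): at a 10G link
+ * (line_speed == 10000) r_param = 10000 / 8 = 1250 bytes per usec, so
+ * rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4 bytes and, per
+ * the comment above, t_fair = T_FAIR_COEF / 10000 = 1000 usec.
+ */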
+
+/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
+ It's needed for further normalizing of the min_rates.
+ The stored sum is:
+ the sum of vn_min_rates,
+ or
+ 0 - if all the min_rates are 0.
+ In the latter case the fairness algorithm should be deactivated.
+ If not all min_rates are zero then those that are zero will be set to 1.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+ int all_zero = 1;
+ int vn;
+
+ bp->vn_weight_sum = 0;
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ u32 vn_cfg = bp->mf_config[vn];
+ u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+ /* Skip hidden vns */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ /* If min rate is zero - set it to 1 */
+ if (!vn_min_rate)
+ vn_min_rate = DEF_MIN_RATE;
+ else
+ all_zero = 0;
+
+ bp->vn_weight_sum += vn_min_rate;
+ }
+
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (BNX2X_IS_ETS_ENABLED(bp)) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+ } else if (all_zero) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ } else
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+{
+ struct rate_shaping_vars_per_vn m_rs_vn;
+ struct fairness_vars_per_vn m_fair_vn;
+ u32 vn_cfg = bp->mf_config[vn];
+ int func = func_by_vn(bp, vn);
+ u16 vn_min_rate, vn_max_rate;
+ int i;
+
+ /* If function is hidden - set min and max to zeroes */
+ if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+ vn_min_rate = 0;
+ vn_max_rate = 0;
+
+ } else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+ vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+ /* If fairness is enabled (not all min rates are zeroes) and
+ if current min rate is zero - set it to 1.
+ This is a requirement of the algorithm. */
+ if (bp->vn_weight_sum && (vn_min_rate == 0))
+ vn_min_rate = DEF_MIN_RATE;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
+ }
+
+ DP(NETIF_MSG_IFUP,
+ "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
+ func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+
+ memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+ memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
+
+ /* global vn counter - maximal Mbps for this vn */
+ m_rs_vn.vn_counter.rate = vn_max_rate;
+
+ /* quota - number of bytes transmitted in this period */
+ m_rs_vn.vn_counter.quota =
+ (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+ if (bp->vn_weight_sum) {
+ /* credit for each period of the fairness algorithm:
+ number of bytes in T_FAIR (the vn share the port rate).
+ vn_weight_sum should not be larger than 10000, thus
+ T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+ than zero */
+ m_fair_vn.vn_credit_delta =
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+ m_fair_vn.vn_credit_delta);
+ }
+
+ /* Store it to internal memory */
+ for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_rs_vn))[i]);
+
+ for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+ ((u32 *)(&m_fair_vn))[i]);
+}
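+
+/*
+ * Rough worked example (illustrative only): with vn_max_rate = 10000
+ * (10G expressed in Mbps) and the 100 usec rate-shaping period noted
+ * above, the quota is 10000 * 100 / 8 = 125000 bytes per period, which
+ * is 10Gb/s again - treating Mbps as approximately bits per usec.
+ */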
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+ if (CHIP_REV_IS_SLOW(bp))
+ return CMNG_FNS_NONE;
+ if (IS_MF(bp))
+ return CMNG_FNS_MINMAX;
+
+ return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+ if (BP_NOMCP(bp))
+ return; /* what should be the default value in this case */
+
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
+ */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ }
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+
+ if (cmng_type == CMNG_FNS_MINMAX) {
+ int vn;
+
+ /* clear cmng_enables */
+ bp->cmng.flags.cmng_enables = 0;
+
+ /* read mf conf from shmem */
+ if (read_cfg)
+ bnx2x_read_mf_cfg(bp);
+
+ /* Init rate shaping and fairness contexts */
+ bnx2x_init_port_minmax(bp);
+
+ /* calculate vn_weight_sum and enable fairness if it is not 0 */
+ bnx2x_calc_vn_weight_sum(bp);
+
+ /* calculate and set min-max rate for each vn */
+ if (bp->port.pmf)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
+ bnx2x_init_vn_minmax(bp, vn);
+
+ /* always enable rate shaping and fairness */
+ bp->cmng.flags.cmng_enables |=
+ CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+ if (!bp->vn_weight_sum)
+ DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+ " fairness will be disabled\n");
+ return;
+ }
+
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+ /* Make sure that we are synced with the current statistics */
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up) {
+
+ /* dropless flow control */
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
+ int port = BP_PORT(bp);
+ u32 pause_enabled = 0;
+
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+ pause_enabled);
+ }
+
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+ struct host_port_stats *pstats;
+
+ pstats = bnx2x_sp(bp, port_stats);
+ /* reset old mac stats */
+ memset(&(pstats->mac_stx[0]), 0,
+ sizeof(struct mac_stx));
+ }
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ }
+
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+
+ __bnx2x_link_report(bp);
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+ if (bp->state != BNX2X_STATE_OPEN)
+ return;
+
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* indicate link status */
+ bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ bp->port.pmf = 1;
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+ /*
+ * We need the mb() to ensure the ordering between the writing to
+ * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
+ /* enable nig attention */
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+ }
+
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 seq;
+ u32 rc = 0;
+ u32 cnt = 1;
+ u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+ mutex_lock(&bp->fw_mb_mutex);
+ seq = ++bp->fw_seq;
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
+
+ do {
+ /* let the FW do its magic ... */
+ msleep(delay);
+
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500*10ms) */
+ } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+ DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt*delay, rc, seq);
+
+ /* is this a reply to our command? */
+ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+ rc &= FW_MSG_CODE_MASK;
+ else {
+ /* FW BUG! */
+ BNX2X_ERR("FW failed to respond!\n");
+ bnx2x_fw_dump(bp);
+ rc = 0;
+ }
+ mutex_unlock(&bp->fw_mb_mutex);
+
+ return rc;
+}
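+
+/*
+ * Typical usage (illustrative), as seen later in this file:
+ * rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+ * The return value is the FW response masked with FW_MSG_CODE_MASK,
+ * or 0 if the MCP did not answer within the timeout.
+ */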
+
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ /* Statistics are not supported for CNIC Clients at the moment */
+ if (IS_FCOE_FP(fp))
+ return false;
+#endif
+ return true;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
+
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
+ }
+
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
+
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp: driver handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common to both the Tx-only and the normal connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
+{
+ unsigned long flags = 0;
+
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+ /* tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+ if (stat_counter_valid(bp, fp)) {
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ }
+
+ return flags;
+}
+
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
+
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
+
+ if (IS_FCOE_FP(fp))
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
+
+ if (!fp->disable_tpa) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ }
+
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
+ }
+
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
+
+ gen_init->cos = cos;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+ struct bnx2x_rxq_setup_params *rxq_init)
+{
+ u8 max_sge = 0;
+ u16 sge_sz = 0;
+ u16 tpa_agg_size = 0;
+
+ if (!fp->disable_tpa) {
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate SGE ring has enough to cross high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
+ tpa_agg_size = min_t(u32,
+ (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+ SGE_PAGE_SHIFT;
+ max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+ 0xffff);
+ }
+
+ /* pause - not for e1 */
+ if (!CHIP_IS_E1(bp)) {
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
+
+ pause->pri_map = 1;
+ }
+
+ /* rxq setup */
+ rxq_init->dscr_map = fp->rx_desc_mapping;
+ rxq_init->sge_map = fp->rx_sge_mapping;
+ rxq_init->rcq_map = fp->rx_comp_mapping;
+ rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+ /* This should be the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+ IP_HEADER_ALIGNMENT_PADDING;
+
+ rxq_init->cl_qzone_id = fp->cl_qzone_id;
+ rxq_init->tpa_agg_sz = tpa_agg_size;
+ rxq_init->sge_buf_sz = sge_sz;
+ rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this Queue.
+ *
+ * For PF Clients it should be the maximum available number.
+ * VF driver(s) may want to define it to a smaller value.
+ */
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
+
+ rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ rxq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
+{
+ txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ txq_init->fw_sb_id = fp->fw_sb_id;
+
+ /*
+ * set the tss leading client id for TX classification ==
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ struct event_ring_data eq_data = { {0} };
+ u16 flags;
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset IGU PF statistics: MSIX + ATTN */
+ /* PF */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ /* ATTN */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+ (CHIP_MODE_IS_4_PORT(bp) ?
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
+ }
+
+ /* function setup flags */
+ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+ /* This flag is relevant for E1x only.
+ * E2 doesn't have a TPA configuration at the function level.
+ */
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = BP_FUNC(bp);
+ func_init.spq_map = bp->spq_mapping;
+ func_init.spq_prod = bp->spq_prod_idx;
+
+ bnx2x_func_init(bp, &func_init);
+
+ memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+ /*
+ * Congestion management values depend on the link rate.
+ * There is no active link, so the initial link rate is set to 10 Gbps.
+ * When the link comes up the congestion management values are
+ * re-calculated according to the actual link rate.
+ */
+ bp->link_vars.line_speed = SPEED_10000;
+ bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+ /* Only the PMF sets the HW */
+ if (bp->port.pmf)
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+ /* init Event Queue */
+ eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+ eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+ eq_data.producer = bp->eq_prod;
+ eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+ eq_data.sb_id = DEF_SB_ID;
+ storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ bnx2x_tx_disable(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+ /* Tx queues should only be re-enabled */
+ netif_tx_wake_all_queues(bp->dev);
+
+ /*
+ * Should not call netif_carrier_on since it will be called if the link
+ * is up when checking for link state
+ */
+}
+
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+ DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+
+ if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+ /*
+ * This is the only place besides the function initialization
+ * where the bp->flags can change so it is done without any
+ * locks
+ */
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+ DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+ bp->flags |= MF_FUNC_DIS;
+
+ bnx2x_e1h_disable(bp);
+ } else {
+ DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ bp->flags &= ~MF_FUNC_DIS;
+
+ bnx2x_e1h_enable(bp);
+ }
+ dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ }
+ if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+ bnx2x_config_mf_bw(bp);
+ dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ }
+
+ /* Report results to MCP */
+ if (dcc_event)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+ else
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+}
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+ struct eth_spe *next_spe = bp->spq_prod_bd;
+
+ if (bp->spq_prod_bd == bp->spq_last_bd) {
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_prod_idx = 0;
+ DP(NETIF_MSG_TIMER, "end of spq\n");
+ } else {
+ bp->spq_prod_bd++;
+ bp->spq_prod_idx++;
+ }
+ return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+
+ /*
+ * Make sure that BD data is updated before writing the producer:
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
+
+ REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
+ mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+ return true;
+ else
+ return false;
+
+}
+
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+ u32 data_hi, u32 data_lo, int cmd_type)
+{
+ struct eth_spe *spe;
+ u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+
+ /* CID needs the port number to be encoded in it */
+ spe->hdr.conn_and_cmd_data =
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
+
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+
+ spe->hdr.type = cpu_to_le16(type);
+
+ spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+ spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * additional explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+
+
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
+ "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+ return 0;
+}
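+
+/*
+ * Note: for ETH (non-contextless) ramrods posted above, the completion
+ * arrives as a CQE on the fast-path ring and is handled by
+ * bnx2x_sp_event(), which increments bp->cq_spq_left again; contextless
+ * ramrods complete on the event queue instead (see
+ * bnx2x_is_contextless_ramrod()).
+ */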
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+ u32 j, val;
+ int rc = 0;
+
+ might_sleep();
+ for (j = 0; j < 1000; j++) {
+ val = (1UL << 31);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+ if (val & (1L << 31))
+ break;
+
+ msleep(5);
+ }
+ if (!(val & (1L << 31))) {
+ BNX2X_ERR("Cannot acquire MCP access lock register\n");
+ rc = -EBUSY;
+ }
+
+ return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX 0x0001
+#define BNX2X_DEF_SB_IDX 0x0002
+
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ u16 rc = 0;
+
+ barrier(); /* status block is written to by the chip */
+ if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+ bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+ rc |= BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ if (bp->def_idx != def_sb->sp_sb.running_index) {
+ bp->def_idx = def_sb->sp_sb.running_index;
+ rc |= BNX2X_DEF_SB_IDX;
+ }
+
+ /* Do not reorder: the index reads should complete before handling */
+ barrier();
+ return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+ int port = BP_PORT(bp);
+ u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+ u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+ NIG_REG_MASK_INTERRUPT_PORT0;
+ u32 aeu_mask;
+ u32 nig_mask = 0;
+ u32 reg_addr;
+
+ if (bp->attn_state & asserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, aeu_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
+ aeu_mask, asserted);
+ aeu_mask &= ~(asserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, aeu_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state |= asserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+ if (asserted & ATTN_HARD_WIRED_MASK) {
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+
+ bnx2x_acquire_phy_lock(bp);
+
+ /* save nig interrupt mask */
+ nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+ /* If nig_mask is not set, no need to call the update
+ * function.
+ */
+ if (nig_mask) {
+ REG_WR(bp, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(bp);
+ }
+
+ /* handle unicore attn? */
+ }
+ if (asserted & ATTN_SW_TIMER_4_FUNC)
+ DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+ if (asserted & GPIO_2_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+ if (asserted & GPIO_3_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+ if (asserted & GPIO_4_FUNC)
+ DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+ if (port == 0) {
+ if (asserted & ATTN_GENERAL_ATTN_1) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_2) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_3) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+ }
+ } else {
+ if (asserted & ATTN_GENERAL_ATTN_4) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_5) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+ }
+ if (asserted & ATTN_GENERAL_ATTN_6) {
+ DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+ }
+ }
+
+ } /* if hardwired */
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_SET);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, asserted);
+
+ /* now set back the mask */
+ if (asserted & ATTN_NIG_FOR_FUNC) {
+ REG_WR(bp, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(bp);
+ }
+}
+
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 ext_phy_config;
+ /* mark the failure */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+ ext_phy_config);
+
+ /* log the failure */
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
+}
+
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+ int port = BP_PORT(bp);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+ if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("SPIO5 hw attention\n");
+
+ /* Fan failure attention */
+ bnx2x_hw_reset_phy(&bp->link_params);
+ bnx2x_fan_failure(bp);
+ }
+
+ if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_handle_module_detect_int(&bp->link_params);
+ bnx2x_release_phy_lock(bp);
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+ val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+ BNX2X_ERR("DB hw attention 0x%x\n", val);
+ /* DORQ discard attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from DORQ\n");
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+ val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+ BNX2X_ERR("CFC hw attention 0x%x\n", val);
+ /* CFC error attention */
+ if (val & 0x2)
+ BNX2X_ERR("FATAL error from CFC\n");
+ }
+
+ if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+ BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+ /* RQ_USDMDP_FIFO_OVERFLOW */
+ if (val & 0x18000)
+ BNX2X_ERR("FATAL error from PXP\n");
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+ }
+ }
+
+ if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+ int port = BP_PORT(bp);
+ int reg_offset;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+ val = REG_RD(bp, reg_offset);
+ val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+ REG_WR(bp, reg_offset, val);
+
+ BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+ (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+ bnx2x_panic();
+ }
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+
+ if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+ if (attn & BNX2X_PMF_LINK_ASSERT) {
+ int func = BP_FUNC(bp);
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+ func_mf_config[BP_ABS_FUNC(bp)].config);
+ val = SHMEM_RD(bp,
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
+ if (val & DRV_STATUS_DCC_EVENT_MASK)
+ bnx2x_dcc_event(bp,
+ (val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
+ if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+ bnx2x_pmf_update(bp);
+
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+ bp->dcbx_enabled > 0)
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_vars.periodic_flags &=
+ ~PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(bp);
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+ bnx2x_link_report(bp);
+ }
+ /* Always call it here: bnx2x_link_report() will
+ * prevent duplicate link indications.
+ */
+ bnx2x__link_status_update(bp);
+ } else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+ BNX2X_ERR("MC assert!\n");
+ bnx2x_mc_assert(bp);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+ bnx2x_panic();
+
+ } else if (attn & BNX2X_MCP_ASSERT) {
+
+ BNX2X_ERR("MCP assert!\n");
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+ bnx2x_fw_dump(bp);
+
+ } else
+ BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+ }
+
+ if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+ BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+ if (attn & BNX2X_GRC_TIMEOUT) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+ BNX2X_ERR("GRC time-out 0x%08x\n", val);
+ }
+ if (attn & BNX2X_GRC_RSV) {
+ val = CHIP_IS_E1(bp) ? 0 :
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+ BNX2X_ERR("GRC reserved 0x%08x\n", val);
+ }
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+ }
+}
+
+/*
+ * Bits map:
+ * 0-7 - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16 - Engine0 RESET_IN_PROGRESS bit.
+ * 17 - Engine1 RESET_IN_PROGRESS bit.
+ * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
+ * on the engine
+ * 19 - Engine1 ONE_IS_LOADED.
+ * 20 - Chip reset flow bit. When set, a non-leader must wait for both engine
+ * leaders to complete (i.e. check both RESET_IN_PROGRESS bits, not just
+ * the one belonging to its engine).
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
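+
+/*
+ * Illustrative decoding example: a register value of 0x00050201 would
+ * mean an engine0 load counter of 1, an engine1 load counter of 2, and
+ * bits 16 and 18 set, i.e. engine0 RESET_IN_PROGRESS and engine0
+ * ONE_IS_LOADED.
+ */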
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Set the bit */
+ val |= bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = engine ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? false : true;
+}
+
+/*
+ * Increment the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* increment... */
+ val1++;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp: driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* decrement... */
+ val1--;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * Read the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+{
+ u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+ val = (val & mask) >> shift;
+
+ DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+ return val;
+}
+
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+}
+
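+/* Print one parity block name; entries after the first are prefixed with
+ * ", " so the block names form a single comma-separated list on the console.
+ */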
+static inline void _print_next_block(int idx, const char *blk)
+{
+ pr_cont("%s%s", idx ? ", " : "", blk);
+}
+
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XPB");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PBF");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "NIG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "VAUX PCI CORE");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UCM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CCM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DMAE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+ bool *global, bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ if (print)
+ _print_next_block(par_num++, "MCP ROM");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP RX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP TX");
+ *global = true;
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ if (print)
+ _print_next_block(par_num++,
+ "MCP SCPAD");
+ *global = true;
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PGLUE_B");
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "ATC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ u32 *sig)
+{
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
+ "[4]:0x%08x\n",
+ sig[0] & HW_PRTY_ASSERT_SET_0,
+ sig[1] & HW_PRTY_ASSERT_SET_1,
+ sig[2] & HW_PRTY_ASSERT_SET_2,
+ sig[3] & HW_PRTY_ASSERT_SET_3,
+ sig[4] & HW_PRTY_ASSERT_SET_4);
+ if (print)
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ par_num = bnx2x_check_blocks_with_parity0(
+ sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity1(
+ sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity2(
+ sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity3(
+ sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity4(
+ sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+
+ if (print)
+ pr_cont("\n");
+
+ return true;
+ } else
+ return false;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp: driver handle
+ * @global: true if there was a global attention
+ * @print: show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+ struct attn_route attn = { {0} };
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+
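+/* Handle deasserted group-4 attentions: decode and log the PGLUE_B and ATC
+ * hardware interrupt status bits, and report fatal set-4 parity attentions.
+ */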
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+ u32 val;
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "ADDRESS_ERROR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "INCORRECT_RCV_BEHAVIOR\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "WAS_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_LENGTH_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ if (val &
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_ERROR_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "TCPL_IN_TWO_RCBS_ATTN\n");
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+ "CSSNOOP_FIFO_OVERFLOW\n");
+ }
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG"
+ "_ATC_TCPL_TO_NOT_PEND\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_GPA_MULTIPLE_HITS\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_RCPL_TO_EMPTY_CNT\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+ "ATC_IREQ_LESS_THAN_STU\n");
+ }
+
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+ }
+
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
+ int port = BP_PORT(bp);
+ int index;
+ u32 reg_addr;
+ u32 val;
+ u32 aeu_mask;
+ bool global = false;
+
+ /* need to take HW lock because MCP or other port might also
+ try to handle this event */
+ bnx2x_acquire_alr(bp);
+
+ if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ /* In case of parity errors don't handle attentions so that
+ * the other function can also "see" the parity errors.
+ */
+#else
+ bnx2x_panic();
+#endif
+ bnx2x_release_alr(bp);
+ return;
+ }
+
+ attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+ attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+ attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+ attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] =
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+ else
+ attn.sig[4] = 0;
+
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ if (deasserted & (1 << index)) {
+ group_mask = &bp->attn_group[index];
+
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
+ "%08x %08x %08x\n",
+ index,
+ group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3],
+ group_mask->sig[4]);
+
+ bnx2x_attn_int_deasserted4(bp,
+ attn.sig[4] & group_mask->sig[4]);
+ bnx2x_attn_int_deasserted3(bp,
+ attn.sig[3] & group_mask->sig[3]);
+ bnx2x_attn_int_deasserted1(bp,
+ attn.sig[1] & group_mask->sig[1]);
+ bnx2x_attn_int_deasserted2(bp,
+ attn.sig[2] & group_mask->sig[2]);
+ bnx2x_attn_int_deasserted0(bp,
+ attn.sig[0] & group_mask->sig[0]);
+ }
+ }
+
+ bnx2x_release_alr(bp);
+
+ if (bp->common.int_block == INT_BLOCK_HC)
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
+ COMMAND_REG_ATTN_BITS_CLR);
+ else
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+ val = ~deasserted;
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+ REG_WR(bp, reg_addr, val);
+
+ if (~bp->attn_state & deasserted)
+ BNX2X_ERR("IGU ERROR\n");
+
+ reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+ aeu_mask = REG_RD(bp, reg_addr);
+
+ DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
+ aeu_mask, deasserted);
+ aeu_mask |= (deasserted & 0x3ff);
+ DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+ REG_WR(bp, reg_addr, aeu_mask);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+ DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+ bp->attn_state &= ~deasserted;
+ DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+ /* read local copy of bits */
+ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits);
+ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits_ack);
+ u32 attn_state = bp->attn_state;
+
+ /* look for changed bits */
+ u32 asserted = attn_bits & ~attn_ack & ~attn_state;
+ u32 deasserted = ~attn_bits & attn_ack & attn_state;
+
+ DP(NETIF_MSG_HW,
+ "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
+ attn_bits, attn_ack, asserted, deasserted);
+
+ if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+ BNX2X_ERR("BAD attention state\n");
+
+ /* handle bits that were raised */
+ if (asserted)
+ bnx2x_attn_int_asserted(bp, asserted);
+
+ if (deasserted)
+ bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+ /* No memory barriers */
+ storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+ mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
+static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+ union event_ring_elem *elem)
+{
+ u8 err = elem->message.error;
+
+ if (!bp->cnic_eth_dev.starting_cid ||
+ (cid < bp->cnic_eth_dev.starting_cid &&
+ cid != bp->cnic_eth_dev.iscsi_l2_cid))
+ return 1;
+
+ DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+ if (unlikely(err)) {
+
+ BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+ cid);
+ bnx2x_panic_dump(bp);
+ }
+ bnx2x_cnic_cfc_comp(bp, cid, err);
+ return 0;
+}
+#endif
+
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+ struct bnx2x_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ netif_addr_lock_bh(bp->dev);
+
+ /* Clear pending state for the last command */
+ bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* Always push next commands out, don't wait here */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+ if (cid == BNX2X_ISCSI_ETH_CID)
+ vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+ else
+#endif
+ vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+ break;
+ case BNX2X_FILTER_MCAST_PENDING:
+ /* This is only relevant for 57710 where multicast MACs are
+ * configured as unicast MACs using the same ramrod.
+ */
+ bnx2x_handle_mcast_eqe(bp);
+ return;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+ netif_addr_lock_bh(bp->dev);
+
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ /* Send rx_mode command again if was requested */
+ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+ bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+ struct bnx2x *bp, u32 cid)
+{
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+ if (cid == BNX2X_FCOE_ETH_CID)
+ return &bnx2x_fcoe(bp, q_obj);
+ else
+#endif
+ return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
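+/* Process the slow-path event queue (EQ): walk the elements between the SW
+ * and HW consumers, complete the matching queue/function/classification
+ * state objects, then advance the EQ consumer and producer.
+ */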
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+ u16 hw_cons, sw_cons, sw_prod;
+ union event_ring_elem *elem;
+ u32 cid;
+ u8 opcode;
+ int spqe_cnt = 0;
+ struct bnx2x_queue_sp_obj *q_obj;
+ struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+ struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+ hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+ /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+ * When we get the next-page element we need to adjust so the loop
+ * condition below will be met. The next element is the size of a
+ * regular element and hence we increment by 1.
+ */
+ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+ hw_cons++;
+
+ /* This function never runs in parallel with itself for a
+ * specific bp, thus there is no need for a "paired" read memory
+ * barrier here.
+ */
+ sw_cons = bp->eq_cons;
+ sw_prod = bp->eq_prod;
+
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+ for (; sw_cons != hw_cons;
+ sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+
+ elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+ cid = SW_CID(elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
+
+
+ /* handle eq element */
+ switch (opcode) {
+ case EVENT_RING_OPCODE_STAT_QUERY:
+ DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ bp->stats_comp++);
+ /* nothing to do with stats comp */
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_CFC_DEL:
+ /* handle according to cid range */
+ /*
+ * we may want to verify here that the bp state is
+ * HALTING
+ */
+ DP(BNX2X_MSG_SP,
+ "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+ if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+ goto next_spqe;
+#endif
+ q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+ if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+ break;
+
+
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_STOP))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_START))
+ break;
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+ break;
+
+ goto next_spqe;
+ }
+
+ switch (opcode | bp->state) {
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPENING_WAIT4_PORT):
+ cid = elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK;
+ DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+ cid);
+ rss_raw->clear_pending(rss_raw);
+ break;
+
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ bnx2x_handle_classification_eqe(bp, elem);
+ break;
+
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+ bnx2x_handle_mcast_eqe(bp);
+ break;
+
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+ bnx2x_handle_rx_mode_eqe(bp);
+ break;
+ default:
+ /* unknown event log error and continue */
+ BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+ elem->message.opcode, bp->state);
+ }
+next_spqe:
+ spqe_cnt++;
+ } /* for */
+
+ smp_mb__before_atomic_inc();
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+ bp->eq_cons = sw_cons;
+ bp->eq_prod = sw_prod;
+ /* Make sure that above mem writes were issued towards the memory */
+ smp_wmb();
+
+ /* update producer */
+ bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
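+/* Slow-path task: runs from the bnx2x workqueue, handles HW attentions and
+ * EQ completions reported via the default status block, then re-arms the
+ * default status block interrupt.
+ */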
+static void bnx2x_sp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+ u16 status;
+
+ status = bnx2x_update_dsb_idx(bp);
+/* if (status == 0) */
+/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+ if ((!NO_FCOE(bp)) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /*
+ * Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+#endif
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return IRQ_HANDLED;
+#endif
+
+#ifdef BCM_CNIC
+ {
+ struct cnic_ops *c_ops;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
+#endif
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+ bp->fw_drv_pulse_wr_seq);
+}
+
+
+static void bnx2x_timer(unsigned long data)
+{
+ u8 cos;
+ struct bnx2x *bp = (struct bnx2x *) data;
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (poll) {
+ struct bnx2x_fastpath *fp = &bp->fp[0];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+ bnx2x_rx_int(fp, 1000);
+ }
+
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+ u32 drv_pulse;
+ u32 mcp_pulse;
+
+ ++bp->fw_drv_pulse_wr_seq;
+ bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+ /* TBD - add SYSTEM_TIME */
+ drv_pulse = bp->fw_drv_pulse_wr_seq;
+ bnx2x_drv_pulse(bp);
+
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+ MCP_PULSE_SEQ_MASK);
+ /* The delta between driver pulse and mcp response
+ * should be 1 (before mcp response) or 0 (after mcp response)
+ */
+ if ((drv_pulse != mcp_pulse) &&
+ (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+ /* someone lost a heartbeat... */
+ BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ drv_pulse, mcp_pulse);
+ }
+ }
+
+ if (bp->state == BNX2X_STATE_OPEN)
+ bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
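+/* Fill 'len' bytes at 'addr' with the 'fill' value - dword writes when both
+ * the address and length are dword aligned, byte writes otherwise.
+ */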
+static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+ u32 i;
+ if (!(len%4) && !(addr%4))
+ for (i = 0; i < len; i += 4)
+ REG_WR(bp, addr + i, fill);
+ else
+ for (i = 0; i < len; i++)
+ REG_WR8(bp, addr + i, fill);
+
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+ int fw_sb_id,
+ u32 *sb_data_p,
+ u32 data_size)
+{
+ int index;
+ for (index = 0; index < data_size; index++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+ sizeof(u32)*index,
+ *(sb_data_p + index));
+}
+
+static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+ u32 *sb_data_p;
+ u32 data_size = 0;
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+
+ /* disable the function first */
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_DISABLED;
+ sb_data_e2.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_DISABLED;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ }
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+ CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper: writes SP SB data to FW */
+static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+ struct hc_sp_status_block_data *sp_sb_data)
+{
+ int func = BP_FUNC(bp);
+ int i;
+ for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+ REG_WR(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+ i*sizeof(u32),
+ *((u32 *)sp_sb_data + i));
+}
+
+static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+ int func = BP_FUNC(bp);
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ sp_sb_data.state = SB_DISABLED;
+ sp_sb_data.p_func.vf_valid = false;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_STATUS_BLOCK_SIZE);
+ bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+ CSTORM_SP_SYNC_BLOCK_SIZE);
+
+}
+
+
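+/* Initialize a non-default status block state machine entry: record its IGU
+ * SB id and segment id, and set the timer value and expiry time to their
+ * maximum values.
+ */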
+static inline
+void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+ int igu_sb_id, int igu_seg_id)
+{
+ hc_sm->igu_sb_id = igu_sb_id;
+ hc_sm->igu_seg_id = igu_seg_id;
+ hc_sm->timer_value = 0xFF;
+ hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+ int igu_seg_id;
+
+ struct hc_status_block_data_e2 sb_data_e2;
+ struct hc_status_block_data_e1x sb_data_e1x;
+ struct hc_status_block_sm *hc_sm_p;
+ int data_size;
+ u32 *sb_data_p;
+
+ if (CHIP_INT_MODE_IS_BC(bp))
+ igu_seg_id = HC_SEG_ACCESS_NORM;
+ else
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+ bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+ if (!CHIP_IS_E1x(bp)) {
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e2.common.p_func.vf_id = vfid;
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e2.common.same_igu_sb_1b = true;
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e2.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e2;
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
+ } else {
+ memset(&sb_data_e1x, 0,
+ sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+ sb_data_e1x.common.p_func.vf_id = 0xff;
+ sb_data_e1x.common.p_func.vf_valid = false;
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+ sb_data_e1x.common.same_igu_sb_1b = true;
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+ hc_sm_p = sb_data_e1x.common.state_machine;
+ sb_data_p = (u32 *)&sb_data_e1x;
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
+ }
+
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+ igu_sb_id, igu_seg_id);
+ bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+ igu_sb_id, igu_seg_id);
+
+ DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+
+ /* write indices to HW */
+ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+ u16 tx_usec, u16 rx_usec)
+{
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+ false, rx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+ tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ dma_addr_t mapping = bp->def_status_blk_mapping;
+ int igu_sp_sb_index;
+ int igu_seg_id;
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int reg_offset, reg_offset_en5;
+ u64 section;
+ int index;
+ struct hc_sp_status_block_data sp_sb_data;
+ memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ igu_sp_sb_index = DEF_SB_IGU_ID;
+ igu_seg_id = HC_SEG_ACCESS_DEF;
+ } else {
+ igu_sp_sb_index = bp->igu_dsb_id;
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
+ }
+
+ /* ATTN */
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ atten_status_block);
+ def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+ bp->attn_state = 0;
+
+ reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
+ for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+ int sindex;
+ /* take care of sig[0]..sig[4] */
+ for (sindex = 0; sindex < 4; sindex++)
+ bp->attn_group[index].sig[sindex] =
+ REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+ if (!CHIP_IS_E1x(bp))
+ /*
+ * enable5 is separate from the rest of the registers,
+ * and therefore the address skip is 4
+ * and not 16 between the different groups
+ */
+ bp->attn_group[index].sig[4] = REG_RD(bp,
+ reg_offset_en5 + 0x4*index);
+ else
+ bp->attn_group[index].sig[4] = 0;
+ }
+
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+ HC_REG_ATTN_MSG0_ADDR_L);
+
+ REG_WR(bp, reg_offset, U64_LO(section));
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
+ } else if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+ }
+
+ section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+ sp_sb);
+
+ bnx2x_zero_sp_sb(bp);
+
+ sp_sb_data.state = SB_ENABLED;
+ sp_sb_data.host_sb_addr.lo = U64_LO(section);
+ sp_sb_data.host_sb_addr.hi = U64_HI(section);
+ sp_sb_data.igu_sb_id = igu_sp_sb_index;
+ sp_sb_data.igu_seg_id = igu_seg_id;
+ sp_sb_data.p_func.pf_id = func;
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
+ sp_sb_data.p_func.vf_id = 0xff;
+
+ bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+ bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+ spin_lock_init(&bp->spq_lock);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+ bp->spq_prod_idx = 0;
+ bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+ bp->spq_prod_bd = bp->spq;
+ bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+ int i;
+ for (i = 1; i <= NUM_EQ_PAGES; i++) {
+ union event_ring_elem *elem =
+ &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+ elem->next_page.addr.hi =
+ cpu_to_le32(U64_HI(bp->eq_mapping +
+ BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+ elem->next_page.addr.lo =
+ cpu_to_le32(U64_LO(bp->eq_mapping +
+ BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+ }
+ bp->eq_cons = 0;
+ bp->eq_prod = NUM_EQ_DESC;
+ bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
+{
+ struct bnx2x_rx_mode_ramrod_params ramrod_param;
+ int rc;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+ ramrod_param.func_id = BP_FUNC(bp);
+
+ ramrod_param.pstate = &bp->sp_state;
+ ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+ ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
+
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+ if (rc < 0) {
+ BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+ return;
+ }
+}
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+#endif
+
+ switch (bp->rx_mode) {
+ case BNX2X_RX_MODE_NONE:
+ /*
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
+ */
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ /* According to the definition of SI mode, an interface in promisc
+ * mode should receive matched and unmatched (in resolution of port)
+ * unicast packets.
+ */
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ if (IS_MF_SI(bp))
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ else
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+
+ break;
+ default:
+ BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+ return;
+ }
+
+ if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ }
+
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ __set_bit(RAMROD_TX, &ramrod_flags);
+
+ bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+ tx_accept_flags, ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+ int i;
+
+ if (IS_MF_SI(bp))
+ /*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH
+ */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+ else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
+
+ /* Zero this manually as its initialization is
+ currently missing in the initTool */
+ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_AGG_DATA_OFFSET + i * 4, 0);
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+ CHIP_INT_MODE_IS_BC(bp) ?
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+ }
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ bnx2x_init_internal_common(bp);
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ /* nothing to do */
+ /* no break */
+
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ /* internal memory per function is
+ initialized inside bnx2x_pf_init */
+ break;
+
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ break;
+ }
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return BP_L_ID(fp->bp) + fp->index;
+ else /* We want Client ID to be the same as IGU SB ID for 57712 */
+ return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 cos;
+ unsigned long q_type = 0;
+ u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+
+ fp->cid = fp_idx;
+ fp->cl_id = bnx2x_fp_cl_id(fp);
+ fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+ fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+ /* qZone id equals to FW (per path) client id */
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
+ /* init shortcut */
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+ /* Set up SB indices */
+ fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+ /* init tx data */
+ for_each_cos_in_tx_queue(fp, cos) {
+ bnx2x_init_txdata(bp, &fp->txdata[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
+ FP_COS_TO_TXQ(fp, cos),
+ BNX2X_TX_SB_INDEX_BASE + cos);
+ cids[cos] = fp->txdata[cos].cid;
+ }
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ /**
+ * Configure classification DBs: Always enable Tx switching
+ */
+ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
+ "cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+
+ bnx2x_update_fpsb_idx(fp);
+}
+
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+ int i;
+
+ for_each_eth_queue(bp, i)
+ bnx2x_init_eth_fp(bp, i);
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
+
+ bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+ BNX2X_VF_ID_INVALID, false,
+ bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+#endif
+
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base, bp->common.shmem2_base,
+ BP_PORT(bp));
+ /* ensure status block indices were read */
+ rmb();
+
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+ bnx2x_init_sp_ring(bp);
+ bnx2x_init_eq_ring(bp);
+ bnx2x_init_internal(bp, load_code);
+ bnx2x_pf_init(bp);
+ bnx2x_stats_init(bp);
+
+ /* flush all before enabling interrupts */
+ mb();
+ mmiowb();
+
+ bnx2x_int_enable(bp);
+
+ /* Check for SPIO5 */
+ bnx2x_attn_int_deasserted0(bp,
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+ AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* end of nic init */
+
+/*
+ * gzip service functions
+ */
+
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
+ if (bp->gunzip_buf == NULL)
+ goto gunzip_nomem1;
+
+ bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+ if (bp->strm == NULL)
+ goto gunzip_nomem2;
+
+ bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+ if (bp->strm->workspace == NULL)
+ goto gunzip_nomem3;
+
+ return 0;
+
+gunzip_nomem3:
+ kfree(bp->strm);
+ bp->strm = NULL;
+
+gunzip_nomem2:
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " decompression\n");
+ return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+ if (bp->strm) {
+ vfree(bp->strm->workspace);
+ kfree(bp->strm);
+ bp->strm = NULL;
+ }
+
+ if (bp->gunzip_buf) {
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
+ bp->gunzip_buf = NULL;
+ }
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+ int n, rc;
+
+ /* check gzip header */
+ if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+ BNX2X_ERR("Bad gzip header\n");
+ return -EINVAL;
+ }
+
+ n = 10;
+
+#define FNAME 0x8
+
+ if (zbuf[3] & FNAME)
+ while ((zbuf[n++] != 0) && (n < len));
+
+ bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+ bp->strm->avail_in = len - n;
+ bp->strm->next_out = bp->gunzip_buf;
+ bp->strm->avail_out = FW_BUF_SIZE;
+
+ rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+ if (rc != Z_OK)
+ return rc;
+
+ rc = zlib_inflate(bp->strm, Z_FINISH);
+ if ((rc != Z_OK) && (rc != Z_STREAM_END))
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
+
+ bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+ if (bp->gunzip_outlen & 0x3)
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
+ bp->gunzip_outlen >>= 2;
+
+ zlib_inflateEnd(bp->strm);
+
+ if (rc == Z_STREAM_END)
+ return 0;
+
+ return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+ u32 wb_write[3];
+
+ /* Ethernet source and destination addresses */
+ wb_write[0] = 0x55555555;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x20; /* SOP */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+ /* NON-IP protocol */
+ wb_write[0] = 0x09000000;
+ wb_write[1] = 0x55555555;
+ wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
+ REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the driver.
+ * To test them we send debug packets.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+ int factor;
+ int count, i;
+ u32 val = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ factor = 120;
+ else if (CHIP_REV_IS_EMUL(bp))
+ factor = 200;
+ else
+ factor = 1;
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send Ethernet packet */
+ bnx2x_lb_pckt(bp);
+
+ /* TODO: do we need to reset the NIG statistics here? */
+ /* Wait until NIG register shows 1 packet of size 0x10 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0x10)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x10) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -1;
+ }
+
+ /* Wait until PRS register shows 1 packet */
+ count = 1000 * factor;
+ while (count) {
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val == 1)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0x1) {
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+ return -2;
+ }
+
+ /* Reset and init BRB, PRS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+ DP(NETIF_MSG_HW, "part2\n");
+
+ /* Disable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+ /* Write 0 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+ /* send 10 Ethernet packets */
+ for (i = 0; i < 10; i++)
+ bnx2x_lb_pckt(bp);
+
+ /* Wait until NIG register shows 10 + 1
+ packets of size 11*0x10 = 0xb0 */
+ count = 1000 * factor;
+ while (count) {
+
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+ if (val == 0xb0)
+ break;
+
+ msleep(10);
+ count--;
+ }
+ if (val != 0xb0) {
+ BNX2X_ERR("NIG timeout val = 0x%x\n", val);
+ return -3;
+ }
+
+ /* Wait until PRS register shows 2 packets */
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 2)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* Write 1 to parser credits for CFC search request */
+ REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+ /* Wait until PRS register shows 3 packets */
+ msleep(10 * factor);
+ /* Wait until NIG register shows 1 packet of size 0x10 */
+ val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+ if (val != 3)
+ BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+
+ /* clear NIG EOP FIFO */
+ for (i = 0; i < 11; i++)
+ REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+ val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+ if (val != 1) {
+ BNX2X_ERR("clear of NIG failed\n");
+ return -4;
+ }
+
+ /* Reset and init BRB, PRS, NIG */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+ msleep(50);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+ msleep(50);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+
+ /* Enable inputs of parser neighbor blocks */
+ REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+ REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+ REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+ REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+ DP(NETIF_MSG_HW, "done\n");
+
+ return 0; /* OK */
+}
+
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+ else
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+ /*
+ * mask read length error interrupts in brb for parser
+ * (parsing unit and 'checksum and crc' unit)
+ * these errors are legal (PU reads fixed length and CAC can cause
+ * read length error on truncated packets)
+ */
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+ REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+ REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+ REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+ REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+ REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+ REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+ REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+ REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+ REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+ if (CHIP_REV_IS_FPGA(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+ else if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
+ (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
+ else
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+ REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+ REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+ if (!CHIP_IS_E1x(bp))
+ /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+ REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+ REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+ u32 val = 0x1400;
+
+ /* reset_common */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffff7f);
+
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+ bp->dmae_ready = 0;
+ spin_lock_init(&bp->dmae_lock);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+ u16 devctl;
+ int r_order, w_order;
+
+ pci_read_config_word(bp->pdev,
+ pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ if (bp->mrrs == -1)
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ else {
+ DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+ r_order = bp->mrrs;
+ }
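+ /* Illustrative decode (actual values depend on the PCIe config space):
+ * the PAYLOAD field (bits 7:5) and READRQ field (bits 14:12) encode
+ * sizes of 128 << n bytes, so e.g. devctl = 0x5020 gives w_order = 1
+ * (256-byte max payload) and r_order = 5 (4096-byte max read request).
+ */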
+
+ bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+ int is_required;
+ u32 val;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+
+ is_required = 0;
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+ if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+ is_required = 1;
+
+ /*
+ * The fan failure mechanism is usually related to the PHY type since
+ * the power consumption of the board is affected by the PHY. Currently,
+ * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+ */
+ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+ for (port = PORT_0; port < PORT_MAX; port++) {
+ is_required |=
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
+ }
+
+ DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+ if (is_required == 0)
+ return;
+
+ /* Fan failure is indicated by SPIO 5 */
+ bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+ MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+ /* set to active low mode */
+ val = REG_RD(bp, MISC_REG_SPIO_INT);
+ val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+ /* enable interrupt to signal the IGU */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ val |= (1 << MISC_REGISTERS_SPIO_5);
+ REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
+{
+ u32 offset = 0;
+
+ if (CHIP_IS_E1(bp))
+ return;
+ if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
+ return;
+
+ switch (BP_ABS_FUNC(bp)) {
+ case 0:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ break;
+ case 1:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
+ break;
+ case 2:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
+ break;
+ case 3:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
+ break;
+ case 4:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
+ break;
+ case 5:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
+ break;
+ case 6:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
+ break;
+ case 7:
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
+ break;
+ default:
+ return;
+ }
+
+ REG_WR(bp, offset, pretend_func_num);
+ REG_RD(bp, offset);
+ DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
+}
+
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+ val &= ~IGU_PF_CONF_FUNC_EN;
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+ u32 shmem_base[2], shmem2_base[2];
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (!CHIP_IS_E1x(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+
+ /*
+ * take the UNDI lock to protect undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_reset_common(bp);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+ val = 0xfffc;
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ u8 abs_func_id;
+
+ /*
+ * In 4-port mode or 2-port mode we need to turn off master-enable
+ * for everyone; after that, turn it back on for self. So, regardless
+ * of multi-function mode, we always disable it for all functions on
+ * the given path; this means 0,2,4,6 for path 0 and 1,3,5,7 for
+ * path 1
+ */
+ for (abs_func_id = BP_PATH(bp);
+ abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+ if (abs_func_id == BP_ABS_FUNC(bp)) {
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+ 1);
+ continue;
+ }
+
+ bnx2x_pretend_func(bp, abs_func_id);
+ /* clear pf enable */
+ bnx2x_pf_disable(bp);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+ if (CHIP_IS_E1(bp)) {
+ /* enable HW interrupt from PXP on USDM overflow
+ bit 16 on INT_MASK_0 */
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+ bnx2x_init_pxp(bp);
+
+#ifdef __BIG_ENDIAN
+ REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+ REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+ /* make sure this value is 0 */
+ REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+ REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+ REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+ bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+ if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+ REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+ /* let the HW do its magic ... */
+ msleep(100);
+ /* finish PXP init */
+ val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 CFG failed\n");
+ return -EBUSY;
+ }
+ val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+ if (val != 1) {
+ BNX2X_ERR("PXP2 RD_INIT failed\n");
+ return -EBUSY;
+ }
+
+ /* Timers bug workaround E2 only. We need to set the entire ILT to
+ * have entries with value "0" and valid bit on.
+ * This needs to be done by the first PF that is loaded in a path
+ * (i.e. common phase)
+ */
+ if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ * note: can be done for only function 6,7 but cleaner this
+ * way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ * occurred while the driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ * to the last entry in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non recoverable.
+ * In the future there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
+ struct ilt_client_info ilt_cli;
+ struct bnx2x_ilt ilt;
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+ /* initialize dummy TM client */
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ /* Step 1: set zeroes to all ilt page entries with valid bit on
+ * Step 2: set the timers first/last ilt entry to point
+ * to the entire range to prevent ILT range error for 3rd/4th
+ * vnic (this code assumes existence of the vnic)
+ *
+ * both steps performed by call to bnx2x_ilt_client_init_op()
+ * with dummy TM client
+ *
+ * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+ * and its LAST_ILT counterpart are split registers
+ */
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+ }
+
+
+ REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+ REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+ /* let the HW do its magic ... */
+ do {
+ msleep(200);
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+ } while (factor-- && (val != 1));
+
+ if (val != 1) {
+ BNX2X_ERR("ATC_INIT failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+ /* clean the DMAE memory */
+ bp->dmae_ready = 1;
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+ bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+ bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+ bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+ bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+
+ /* QM queues pointers table */
+ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+ /* soft reset pulse */
+ REG_WR(bp, QM_REG_SOFT_RESET, 1);
+ REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+ REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+ if (!CHIP_REV_IS_SLOW(bp))
+ /* enable hw interrupt from doorbell Q */
+ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+ /* Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset VFC memories */
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+
+ msleep(20);
+ }
+
+ bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+ /* sync semi rtc */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0x80000000);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ 0x80000000);
+
+ bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
+
+ REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+ bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+#ifdef BCM_CNIC
+ REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+ REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+ REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+ REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+ REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+ REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+ REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+ REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+ REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+ REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+ if (sizeof(union cdu_context) != 1024)
+ /* we currently assume that a context is 1024 bytes */
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
+
+ bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+ val = (4 << 24) + (0 << 12) + 1024;
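+ /* i.e. val = 0x04000400; the low 12 bits presumably carry the
+ * 1024-byte context size assumed above
+ */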
+ REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+ bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+ REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+ /* enable context validation interrupt from CFC */
+ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+ /* set the thresholds to prevent CFC/CDU race */
+ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+ bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+ if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+ bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2814, 0xffffffff);
+ REG_WR(bp, 0x3820, 0xffffffff);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+ }
+
+ bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+ if (!CHIP_IS_E1(bp)) {
+ /* in E3 this is done in the per-port section */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (CHIP_IS_E1H(bp))
+ /* not applicable for E2 (and above ...) */
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(200);
+
+ /* finish CFC init */
+ val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC LL_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC AC_INIT failed\n");
+ return -EBUSY;
+ }
+ val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+ if (val != 1) {
+ BNX2X_ERR("CFC CAM_INIT failed\n");
+ return -EBUSY;
+ }
+ REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+ if (CHIP_IS_E1(bp)) {
+ /* read NIG statistic
+ to see if this is our first up since powerup */
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+ val = *bnx2x_sp(bp, wb_data[0]);
+
+ /* do internal memory self test */
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
+ BNX2X_ERR("internal mem self test failed\n");
+ return -EBUSY;
+ }
+ }
+
+ bnx2x_setup_fan_failure_detection(bp);
+
+ /* clear PXP2 attentions */
+ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+ bnx2x_enable_blocks_attention(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ if (!BP_NOMCP(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bnx2x__common_init_phy(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+ return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+ int rc = bnx2x_init_hw_common(bp);
+
+ if (rc)
+ return rc;
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BP_NOMCP(bp))
+ bnx2x__common_init_phy(bp);
+
+ return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+ u32 low, high;
+ u32 val;
+
+ bnx2x__link_reset(bp);
+
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ /* Timers bug workaround: the pf_master bit in pglue is disabled at the
+ * common phase, so we need to enable it here before any dmae accesses
+ * are attempted. Therefore we manually add the enable-master to the
+ * port phase (it also happens in the function phase)
+ */
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+ /* QM cid (connection) count */
+ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+ REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
+
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
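+ /* Worked example (not MF, two-port board, mtu 9000):
+ * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293,
+ * i.e. roughly (24k + 4*mtu)/256 and low + 14k/256.
+ */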
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+ }
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ REG_WR(bp, (BP_PORT(bp) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
+
+
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(bp))
+ /* Ovlan exists only if we are in multi-function +
+ * switch-dependent mode; in switch-independent mode there
+ * are no ovlan headers
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (bp->path_has_ovlan ? 7 : 6));
+
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+ if (CHIP_IS_E1x(bp)) {
+ /* configure PBF to work without PAUSE mtu 9000 */
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+ /* update threshold */
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+ /* update init credit */
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
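+ /* Worked arithmetic: the threshold is 9040/16 = 565 and the init
+ * credit is 565 + 553 - 22 = 1096 (presumably 16-byte units, given
+ * the divide by 16)
+ */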
+
+ /* probe changes */
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+ udelay(50);
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+#endif
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (CHIP_IS_E1(bp)) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+ /* init aeu_mask_attn_func_0/1:
+ * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+ * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+ * bits 4-7 are used for "per vn group attention" */
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
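+ /* e.g. on a non-E1 chip this yields 0x17 in SF mode (bits 0-2 plus
+ * the DCBX bit) and 0xF7 in MF mode (bit 4 is already set there)
+ */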
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(bp) ? 7 : 6);
+
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+ if (!CHIP_IS_E1(bp)) {
+ /* 0x2 disable mf_ov, 0x1 enable */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = 0;
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = 1;
+ break;
+ case MULTI_FUNCTION_SI:
+ val = 2;
+ break;
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+ NIG_REG_LLH0_CLS_TYPE), val);
+ }
+ {
+ REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+ REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+ }
+ }
+
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+ u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ val = REG_RD(bp, reg_addr);
+ val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+ REG_WR(bp, reg_addr, val);
+ }
+
+ return 0;
+}
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+ int reg;
+
+ if (CHIP_IS_E1(bp))
+ reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+ else
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
+ bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+}
+
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+ bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+ u32 i, base = FUNC_ILT_BASE(func);
+ for (i = base; i < base + ILT_PER_FUNC; i++)
+ bnx2x_ilt_wr(bp, i, 0);
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int init_phase = PHASE_PF0 + func;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 cdu_ilt_start;
+ u32 addr, val;
+ u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+ int i, main_mem_width;
+
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+
+ /* FLR cleanup - hmmm */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_flr_clnup(bp);
+
+ /* set MSI reconfigure capability */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+ val = REG_RD(bp, addr);
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+ REG_WR(bp, addr, val);
+ }
+
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+ ilt = BP_ILT(bp);
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+ for (i = 0; i < L2_ILT_LINES(bp); i++) {
+ ilt->lines[cdu_ilt_start + i].page =
+ bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page_mapping =
+ bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
+ /* cdu ilt pages are allocated manually so there's no need to
+ set the size */
+ }
+ bnx2x_ilt_init_op(bp, INITOP_SET);
+
+#ifdef BCM_CNIC
+ bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+
+ /* T1 hash bits value determines the T1 number of entries */
+ REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+#endif
+
+#ifndef BCM_CNIC
+ /* set NIC mode */
+ REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif /* BCM_CNIC */
+
+ if (!CHIP_IS_E1x(bp)) {
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ /* Turn on a single ISR mode in IGU if driver is going to use
+ * INT#x or MSI
+ */
+ if (!(bp->flags & USING_MSIX_FLAG))
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ /*
+ * Timers bug workaround: function init part.
+ * Need to wait 20msec after initializing ILT,
+ * needed to make sure there are no requests in
+ * one of the PXP internal queues with "old" ILT addresses
+ */
+ msleep(20);
+ /*
+ * Master enable - Due to WB DMAE writes performed before this
+ * register is re-initialized as part of the regular function
+ * init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+ /* Enable the function in IGU */
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+ }
+
+ bp->dmae_ready = 1;
+
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, QM_REG_PF_EN, 1);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+ if (IS_MF(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+ }
+
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+ /* HC init per function */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ if (CHIP_IS_E1H(bp)) {
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ }
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+ } else {
+ int num_segs, sb_idx, prod_offset;
+
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+ if (!CHIP_IS_E1x(bp)) {
+ int dsb_idx = 0;
+ /*
+ * Producer memory:
+ * E2 mode: address 0-135 match to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+ * E1.5 mode - In backward compatible mode;
+ * for non default SB; each even line in the memory
+ * holds the U producer and each odd line holds
+ * the C producer. The first 128 producers are for
+ * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+ * producers are for the DSB for each PF.
+ * Each PF has five segments: (the order inside each
+ * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+ * 144-147 attn prods;
+ */
+ /* non-default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+ prod_offset = (bp->igu_base_sb + sb_idx) *
+ num_segs;
+
+ for (i = 0; i < num_segs; i++) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i) * 4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with value 0 */
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_igu_clear_sb(bp,
+ bp->igu_base_sb + sb_idx);
+ }
+
+ /* default-status-blocks */
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ dsb_idx = BP_FUNC(bp);
+ else
+ dsb_idx = BP_VN(bp);
+
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+ /*
+ * igu prods come in chunks of E1HVN_MAX (4) -
+ * it does not matter what the current chip mode is
+ */
+ for (i = 0; i < (num_segs * E1HVN_MAX);
+ i += E1HVN_MAX) {
+ addr = IGU_REG_PROD_CONS_MEMORY +
+ (prod_offset + i)*4;
+ REG_WR(bp, addr, 0);
+ }
+ /* send consumer update with 0 */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ } else {
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ USTORM_ID, 0, IGU_INT_NOP, 1);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
+ }
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+ /* !!! these should become driver const once
+ rf-tool supports split-68 const */
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+ }
+ }
+
+ /* Reset PCIE errors for debug */
+ REG_WR(bp, 0x2114, 0xffffffff);
+ REG_WR(bp, 0x2120, 0xffffffff);
+
+ if (CHIP_IS_E1x(bp)) {
+ main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+ main_mem_base = HC_REG_MAIN_MEMORY +
+ BP_PORT(bp) * (main_mem_size * 4);
+ main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+ main_mem_width = 8;
+
+ val = REG_RD(bp, main_mem_prty_clr);
+ if (val)
+ DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
+ "block during "
+ "function init (0x%x)!\n", val);
+
+ /* Clear "false" parity errors in MSI-X table */
+ for (i = main_mem_base;
+ i < main_mem_base + main_mem_size * 4;
+ i += main_mem_width) {
+ bnx2x_read_dmae(bp, i, main_mem_width / 4);
+ bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+ i, main_mem_width / 4);
+ }
+ /* Clear HC parity attention */
+ REG_RD(bp, main_mem_prty_clr);
+ }
+
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+ bnx2x_phy_probe(&bp->link_params);
+
+ return 0;
+}
+
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+ /* fastpath */
+ bnx2x_free_fp_mem(bp);
+ /* end of fastpath */
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+ bp->context.size);
+
+ bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+ BNX2X_FREE(bp->ilt->lines);
+
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+ BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+ BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups;
+
+ /* number of eth_queues */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num_eth_queues */
+ bp->fw_stats_num = 2 + num_queue_stats;
+
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+ (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
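+ /* Worked example (assuming STATS_QUERY_CMD_COUNT is 16): with 8 ETH
+ * queues there are 2 + 8 = 10 requests, so num_groups = 0 + 1 = 1.
+ */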
+
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ *
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return -ENOMEM;
+}
+
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ if (!CHIP_IS_E1x(bp))
+ /* size = the status block + ramrod buffers */
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+
+ /* allocate searcher T2 table */
+ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+
+ BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
+ BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ goto alloc_mem_err;
+
+ bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+ BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
+ bp->context.size);
+
+ BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+
+ if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+ goto alloc_mem_err;
+
+ /* Slow path ring */
+ BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+
+ /* EQ */
+ BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+
+ /* fastpath */
+ /* need to be done at the end, since it's self adjusting to amount
+ * of memory available for RSS queues
+ */
+ if (bnx2x_alloc_fp_mem(bp))
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnx2x_free_mem(bp);
+ return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+ __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc < 0)
+ BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+ return rc;
+}
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+ /* Wait for completion if requested */
+ if (wait_for_comp)
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+ /* Set the mac type of addresses we want to clear */
+ __set_bit(mac_type, &vlan_mac_flags);
+
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+ return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+ unsigned long ramrod_flags = 0;
+
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Eth MAC is set on RSS leading client (fp[0]) */
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+ return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp: driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+{
+ switch (int_mode) {
+ case INT_MODE_MSI:
+ bnx2x_enable_msi(bp);
+ /* falling through... */
+ case INT_MODE_INTx:
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ break;
+ default:
+ /* Set number of queues according to bp->multi_mode value */
+ bnx2x_set_num_queues(bp);
+
+ DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+ bp->num_queues);
+
+ /* if we can't use MSI-X we only need one fp,
+ * so try to enable MSI-X with the requested number of fp's
+ * and fallback to MSI or legacy INTx with one fp
+ */
+ if (bnx2x_enable_msix(bp)) {
+ /* failed to enable MSI-X */
+ if (bp->multi_mode)
+ DP(NETIF_MSG_IFUP,
+ "Multi requested but failed to "
+ "enable MSI-X (%d), "
+ "set number of queues to %d\n",
+ bp->num_queues,
+ 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+
+ /* Try to enable MSI */
+ if (!(bp->flags & DISABLE_MSI_FLAG))
+ bnx2x_enable_msi(bp);
+ }
+ break;
+ }
+}
+
+ /* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+ return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+ struct ilt_client_info *ilt_client;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+ u16 line = 0;
+
+ ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+ DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
+ /* CDU */
+ ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+ ilt_client->client_num = ILT_CLIENT_CDU;
+ ilt_client->page_size = CDU_ILT_PAGE_SZ;
+ ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+ ilt_client->start = line;
+ line += bnx2x_cid_ilt_lines(bp);
+#ifdef BCM_CNIC
+ line += CNIC_ILT_LINES;
+#endif
+ ilt_client->end = line - 1;
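+ /* e.g. with a hypothetical L2_ILT_LINES(bp) of 8 and CNIC_ILT_LINES
+ * of 16, the CDU client covers lines 0..23 and the next client
+ * starts at line 24
+ */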
+
+ DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ /* QM */
+ if (QM_INIT(bp->qm_cid_count)) {
+ ilt_client = &ilt->clients[ILT_CLIENT_QM];
+ ilt_client->client_num = ILT_CLIENT_QM;
+ ilt_client->page_size = QM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+
+ /* 4 bytes for each cid */
+ line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+ QM_ILT_PAGE_SZ);
+
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+ }
+ /* SRC */
+ ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_SRC;
+ ilt_client->page_size = SRC_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += SRC_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+
+ /* TM */
+ ilt_client = &ilt->clients[ILT_CLIENT_TM];
+#ifdef BCM_CNIC
+ ilt_client->client_num = ILT_CLIENT_TM;
+ ilt_client->page_size = TM_ILT_PAGE_SZ;
+ ilt_client->flags = 0;
+ ilt_client->start = line;
+ line += TM_ILT_LINES;
+ ilt_client->end = line - 1;
+
+ DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
+ "flags 0x%x, hw psz %d\n",
+ ilt_client->start,
+ ilt_client->end,
+ ilt_client->page_size,
+ ilt_client->flags,
+ ilog2(ilt_client->page_size >> 12));
+
+#else
+ ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+ BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @init_params: pointer to parameters structure
+ *
+ * parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+
+ u8 cos;
+ /* FCoE Queue uses Default SB, thus has no HC capabilities */
+ if (!IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+ /* If HC is supported, enable host coalescing in the transition
+ * to INIT state.
+ */
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate = bp->rx_ticks ?
+ (1000000 / bp->rx_ticks) : 0;
+ init_params->tx.hc_rate = bp->tx_ticks ?
+ (1000000 / bp->tx_ticks) : 0;
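+ /* e.g. rx_ticks of 25 usec yields an rx hc_rate of 1000000/25 = 40000;
+ * a zero tick value yields an hc_rate of 0
+ */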
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+ fp->fw_sb_id;
+
+ /*
+ * CQ index among the SB indices: FCoE clients use the default
+ * SB, therefore it's different.
+ */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+ }
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = fp->max_cos;
+
+ DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+ fp->index, init_params->max_cos);
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ init_params->cxts[cos] =
+ &bp->context.vcxt[fp->txdata[cos].cid].eth;
+}
+
+int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_queue_state_params *q_params,
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+ int tx_index, bool leading)
+{
+ memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+ /* Set the command */
+ q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+ /* Set tx-only QUEUE flags: don't zero statistics */
+ tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+ /* choose the index of the cid to send the slow path on */
+ tx_only_params->cid_index = tx_index;
+
+ /* Set general TX_ONLY_SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+ /* Set Tx TX_ONLY_SETUP parameters */
+ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+ DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
+ "cos %d, primary cid %d, cid %d, "
+ "client id %d, sp-client id %d, flags %lx\n",
+ tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+ q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+ tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+ /* send the ramrod */
+ return bnx2x_queue_state_change(bp, q_params);
+}
+
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @leading: is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ * actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_setup_params *setup_params =
+ &q_params.params.setup;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &q_params.params.tx_only;
+ int rc;
+ u8 tx_index;
+
+ DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+
+ /* reset IGU state skip FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ IGU_INT_ENABLE, 0);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+ /* Prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+ return rc;
+ }
+
+ DP(BNX2X_MSG_SP, "init complete\n");
+
+
+ /* Now move the Queue to the SETUP state... */
+ memset(setup_params, 0, sizeof(*setup_params));
+
+ /* Set QUEUE flags */
+ setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+ /* Set general SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
+
+ bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+ &setup_params->rxq_params);
+
+ bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+ FIRST_TX_COS_INDEX);
+
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+ /* Change the state to SETUP */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+ return rc;
+ }
+
+ /* loop through the relevant tx-only indices */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++) {
+
+ /* prepare and send tx-only ramrod*/
+ rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+ tx_only_params, tx_index, leading);
+ if (rc) {
+ BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+ fp->index, tx_index);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_queue_state_params q_params = {0};
+ int rc, tx_index;
+
+ DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+
+ /* close tx-only connections */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++){
+
+ /* ascertain this is a normal queue*/
+ txdata = &fp->txdata[tx_index];
+
+ DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+ txdata->txq_index);
+
+ /* send halt terminate on tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = tx_index;
+
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* delete cfc entry on tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = tx_index;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ }
+ /* Stop the primary connection: */
+ /* ...halt the connection */
+ q_params.cmd = BNX2X_Q_CMD_HALT;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* ...terminate the connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ /* ...delete cfc entry */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return bnx2x_queue_state_change(bp, &q_params);
+}
+
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
+ int i;
+
+ /* Disable the function in the FW */
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+ /* FP SBs */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
+ }
+
+#ifdef BCM_CNIC
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+ SB_DISABLED);
+#endif
+ /* SP SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
+
+ for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+ 0);
+
+ /* Configure IGU */
+ if (bp->common.int_block == INT_BLOCK_HC) {
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+ } else {
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ }
+
+#ifdef BCM_CNIC
+ /* Disable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+ /*
+ * Wait for at least 10ms and up to 2 seconds for the timers scan to
+ * complete
+ */
+ for (i = 0; i < 200; i++) {
+ msleep(10);
+ if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+ break;
+ }
+#endif
+ /* Clear ILT */
+ bnx2x_clear_func_ilt(bp, func);
+
+ /* Timers bug workaround for E2: if this is vnic-3,
+ * we need to set the entire ILT range for the timers.
+ */
+ if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+ struct ilt_client_info ilt_cli;
+ /* use dummy TM client */
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+ ilt_cli.start = 0;
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+ ilt_cli.client_num = ILT_CLIENT_TM;
+
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+ }
+
+ /* this assumes that reset_port() was called before reset_func() */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 val;
+
+ /* Reset physical Link */
+ bnx2x__link_reset(bp);
+
+ REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+ /* Do not direct rcv packets that are not for MCP to the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+ /* Configure AEU */
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+ msleep(100);
+ /* Check for BRB port occupancy */
+ val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+ if (val)
+ DP(NETIF_MSG_IFDOWN,
+ "BRB1 is not empty %d blocks are occupied\n", val);
+
+ /* TODO: Close Doorbell port? */
+}
+
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static inline int bnx2x_func_stop(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ int rc;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_STOP;
+
+ /*
+ * Try to stop the function the 'good way'. If it fails (in case
+ * of a parity error during bnx2x_chip_cleanup()) and we are
+ * not in a debug mode, perform a state transition in order to
+ * enable a further HW_RESET transaction.
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return rc;
+#else
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+ "transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+
+ /* Select the UNLOAD request mode */
+ if (unload_mode == UNLOAD_NORMAL)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ else if (bp->flags & NO_WOL_FLAG)
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+ else if (bp->wol) {
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u8 *mac_addr = bp->dev->dev_addr;
+ u32 val;
+ u16 pmc;
+
+ /* The mac address is written to entries 1-4 to
+ * preserve entry 0 which is used by the PMF
+ */
+ u8 entry = (BP_VN(bp) + 1)*8;
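+ /* Each MAC_MATCH entry is 8 bytes wide, so e.g. VN 0 uses entry 1
+ * (byte offset 8) and VN 3 uses entry 4 (byte offset 32)
+ */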
+
+ val = (mac_addr[0] << 8) | mac_addr[1];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+ val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+ EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+ /* Enable the PME and clear the status */
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+ } else
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
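+ /* Without an MCP, mimic its unload arbitration with the driver's own
+ * load_count[] bookkeeping: slot 0 counts functions on the whole path
+ * and slots 1/2 count functions per port, so the last function to go
+ * down gets the COMMON (or PORT) reset code.
+ */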
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+
+ return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF is probably in the middle of a TXdisable/enable transaction
+ * 1. Sync ISR for default SB
+ * 2. Sync SP queue - this guarantees that attention handling started
+ * 3. Wait until the TXdisable/enable transaction completes
+ *
+ * 1+2 guarantee that if a DCBx attention was scheduled it already
+ * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
+ * if we already received a completion for the transaction, the state is
+ * TX_STOPPED. The state will return to STARTED after completion of the
+ * TX_STOPPED-->STARTED transaction.
+ */
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit
+ */
+ struct bnx2x_func_state_params func_params = {0};
+
+ DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+ "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+ /* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+ /* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+ "%d\n", rc);
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return if
+ * this function should perform FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Close multi and leading connections
+ * Completions for ramrods are collected in a synchronous way
+ */
+ for_each_queue(bp, i)
+ if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#else
+ goto unload_error;
+#endif
+ /* If SP settings didn't get completed so far - something
+ * very wrong has happened.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+ rc = bnx2x_func_stop(bp);
+ if (rc) {
+ BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Reset the chip */
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+ /* #2 */
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+ }
+
+ /* #3 */
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+ /* Prevent incoming interrupts in IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Save the current value of the `magic' bit and then set it */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp: driver handle
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+ int cnt = 0;
+ u32 val = 0;
+
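+ /* Poll for the shmem base and the per-port validity word until the
+ * MCP reports SHR_MEM_VALIDITY_MB, giving up after MCP_TIMEOUT.
+ */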
+ do {
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (bp->common.shmem_base) {
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if (val & SHR_MEM_VALIDITY_MB)
+ return 0;
+ }
+
+ bnx2x_mcp_wait_one(bp);
+
+ } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ int rc = bnx2x_init_shmem(bp);
+
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ u32 global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+ /* Don't reset the following blocks */
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC;
+
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the bnx2x_link code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1 |
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else if (CHIP_IS_E1H(bp))
+ reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
+
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+ * respond. The write queue in PGLUE would get stuck, DMAE commands
+ * would not return. Therefore it's important to reset the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ mmiowb();
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
+ *
+ * @bp: driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+ u32 cnt = 1000;
+ u32 pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0)
+ break;
+
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+ pend_bits);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
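+ /* Drained means the read SR/BLK counters, the per-port idle bits and
+ * PGL_EXP_ROM2 have all reached the idle values checked below.
+ */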
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+ return -EAGAIN;
+
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ usleep_range(1000, 1000);
+
+ /* Prepare to chip reset: */
+ /* MCP */
+ if (global)
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp, global);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (global && bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* TBD: Add resetting the NO_MCP mode DB here */
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
+ * reset state, re-enable attentions. */
+
+ return 0;
+}
+
+int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ bool global = bnx2x_reset_is_global(bp);
+
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp, global)) {
+ netdev_err(bp->dev, "Something bad had happen on engine %d! "
+ "Aii!\n", BP_PATH(bp));
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /*
+ * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
+ * state.
+ */
+ bnx2x_set_reset_done(bp);
+ if (global)
+ bnx2x_clear_reset_global(bp);
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+ return rc;
+}
+
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+ netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+
+ /*
+ * Block ifup for all functions on this engine until "process kill"
+ * or power cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Shut down the power */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ bool global = false;
+
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ bnx2x_chk_parity_attn(bp, &global, false);
+
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ bnx2x_set_reset_in_progress(bp);
+ /*
+ * If there was a global attention, set the
+ * global reset bit.
+ */
+
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ bp->is_leader = 1;
+ }
+
+ /* Stop the driver */
+ /* If the interface has been removed - return */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+ /*
+ * Reset the MCP command sequence number and the MCP mailbox
+ * sequence as we are going to reset the MCP.
+ */
+ if (global) {
+ bp->fw_seq = 0;
+ bp->fw_drv_pulse_wr_seq = 0;
+ }
+
+ /* Ensure "is_leader", MCP command sequence and
+ * "recovery_state" update values are seen on other
+ * CPUs.
+ */
+ smp_mb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter =
+ bnx2x_get_load_cnt(bp, other_engine);
+ u32 load_counter =
+ bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ global = bnx2x_reset_is_global(bp);
+
+ /*
+ * In case of a parity in a global block, let
+ * the first leader that performs a
+ * leader_reset() reset the global blocks in
+ * order to clear global attentions. Otherwise
+ * the gates will remain closed for that
+ * engine.
+ */
+ if (load_counter ||
+ (global && other_load_counter)) {
+ /* Wait until all other functions are
+ * down.
+ */
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp)) {
+ bnx2x_recovery_failed(bp);
+ return;
+ }
+
+ /* If we are here, it means that the
+ * leader has succeeded and doesn't
+ * want to be a leader any more. Try
+ * to continue as a non-leader.
+ */
+ break;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+ /* Try to get a LEADER_LOCK HW lock,
+ * since a former leader may have
+ * been unloaded by the user or
+ * released leadership for some other
+ * reason.
+ */
+ if (bnx2x_trylock_leader_lock(bp)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->sp_rtnl_task,
+ HZ/10);
+ return;
+
+ } else {
+ /*
+ * If there was a global attention, wait
+ * for it to be cleared.
+ */
+ if (bnx2x_reset_is_global(bp)) {
+ schedule_delayed_work(
+ &bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ }
+
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ bnx2x_recovery_failed(bp);
+ else {
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_mb();
+ }
+
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+ rtnl_lock();
+
+ if (!netif_running(bp->dev))
+ goto sp_rtnl_exit;
+
+ /* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
+ "so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_parity_recover(bp);
+
+ goto sp_rtnl_exit;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ goto sp_rtnl_exit;
+ }
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+ if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+
+sp_rtnl_exit:
+ rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+ if (!netif_running(bp->dev))
+ goto period_task_exit;
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ BNX2X_ERR("period task called on emulation, ignoring\n");
+ goto period_task_exit;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ /*
+ * The barrier is needed to ensure ordering between the write to
+ * bp->port.pmf in bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the read here.
+ */
+ smp_mb();
+ if (bp->port.pmf) {
+ bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+ /* Re-queue task in 1 sec */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+ }
+
+ bnx2x_release_phy_lock(bp);
+period_task_exit:
+ return;
+}
+
+/*
+ * Init service functions
+ */
+
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
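+ /* Per-function pretend registers are laid out contiguously, so the
+ * register for absolute function N is F0's address plus N times the
+ * F1-F0 stride.
+ */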
+ return base + (BP_ABS_FUNC(bp)) * stride;
+}
+
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
+{
+ u32 reg = bnx2x_get_pretend_reg(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Pretend to be function 0 */
+ REG_WR(bp, reg, 0);
+ REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
+
+ /* From now on we are in the "like-E1" mode */
+ bnx2x_int_disable(bp);
+
+ /* Flush all outstanding writes */
+ mmiowb();
+
+ /* Restore the original function */
+ REG_WR(bp, reg, BP_ABS_FUNC(bp));
+ REG_RD(bp, reg);
+}
+
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ bnx2x_int_disable(bp);
+ else
+ bnx2x_undi_int_disable_e1h(bp);
+}
+
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+{
+ u32 val;
+
+ /* Check if there is any driver already loaded */
+ val = REG_RD(bp, MISC_REG_UNPREPARED);
+ if (val == 0x1) {
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /*
+ * Check if it is the UNDI driver
+ * UNDI driver initializes CID offset for normal bell to 0x7
+ */
+ val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+ if (val == 0x7) {
+ u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ /* save our pf_num */
+ int orig_pf_num = bp->pf_num;
+ int port;
+ u32 swap_en, swap_val, value;
+
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+
+ BNX2X_DEV_INFO("UNDI is active! reset device\n");
+
+ /* try unload UNDI on port 0 */
+ bp->pf_num = 0;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+
+ /* if UNDI is loaded on the other port */
+ if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+
+ /* send "DONE" for previous unload */
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* unload UNDI on port 1 */
+ bp->pf_num = 1;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+ bnx2x_fw_command(bp, reset_code, 0);
+ }
+
+ bnx2x_undi_int_disable(bp);
+ port = BP_PORT(bp);
+
+ /* close input traffic and wait for it */
+ /* Do not rcv packets to BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ /* Do not direct rcv packets that are not for MCP to
+ * the BRB */
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ /* clear AEU */
+ REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ msleep(10);
+
+ /* save NIG port swap info */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ /* reset device */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffffff);
+
+ value = 0x1400;
+ if (CHIP_IS_E3(bp)) {
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ value);
+
+ /* take the NIG out of reset and restore swap values */
+ REG_WR(bp,
+ GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+ MISC_REGISTERS_RESET_REG_1_RST_NIG);
+ REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+ REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+ /* send unload done to the MCP */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+ /* restore our func and fw_seq */
+ bp->pf_num = orig_pf_num;
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ }
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+}
+
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2, val3, val4, id;
+ u16 pmc;
+
+ /* Get the chip revision id and number. */
+ /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+ val = REG_RD(bp, MISC_REG_CHIP_NUM);
+ id = ((val & 0xffff) << 16);
+ val = REG_RD(bp, MISC_REG_CHIP_REV);
+ id |= ((val & 0xf) << 12);
+ val = REG_RD(bp, MISC_REG_CHIP_METAL);
+ id |= ((val & 0xff) << 4);
+ val = REG_RD(bp, MISC_REG_BOND_ID);
+ id |= (val & 0xf);
+ bp->common.chip_id = id;
+
+ /* Set doorbell size */
+ bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+ if (!CHIP_IS_E1x(bp)) {
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if ((val & 1) == 0)
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+ else
+ val = (val >> 1) & 1;
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+ "2_PORT_MODE");
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+ CHIP_2_PORT_MODE;
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
+ else
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
+ } else {
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+ bp->pfid = bp->pf_num; /* 0..7 */
+ }
+
+ bp->link_params.chip_id = bp->common.chip_id;
+ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+ val = (REG_RD(bp, 0x2874) & 0x55);
+ if ((bp->common.chip_id & 0x1) ||
+ (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+ bp->flags |= ONE_PORT_FLAG;
+ BNX2X_DEV_INFO("single port device\n");
+ }
+
+ val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+ bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+ (val & MCPR_NVM_CFG4_FLASH_SIZE));
+ BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+ bp->common.flash_size, bp->common.flash_size);
+
+ bnx2x_init_shmem(bp);
+
+
+
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+ MISC_REG_GENERIC_CR_1 :
+ MISC_REG_GENERIC_CR_0));
+
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
+ BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
+ bp->common.shmem_base, bp->common.shmem2_base);
+
+ if (!bp->common.shmem_base) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+ BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+ bp->link_params.hw_led_mode = ((bp->common.hw_config &
+ SHARED_HW_CFG_LED_MODE_MASK) >>
+ SHARED_HW_CFG_LED_MODE_SHIFT);
+
+ bp->link_params.feature_config_flags = 0;
+ val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+ if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+ bp->link_params.feature_config_flags |=
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+ else
+ bp->link_params.feature_config_flags &=
+ ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+ val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+ bp->common.bc_ver = val;
+ BNX2X_DEV_INFO("bc_ver %X\n", val);
+ if (val < BNX2X_BC_VER) {
+ /* for now only warn
+ * later we might need to enforce this */
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
+ }
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+ BNX2X_DEV_INFO("%sWoL capable\n",
+ (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+ val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+ val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+ val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+ val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
+}
+
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+ int pfid = BP_FUNC(bp);
+ int igu_sb_id;
+ u32 val;
+ u8 fid, igu_sb_cnt = 0;
+
+ bp->igu_base_sb = 0xff;
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
+ igu_sb_cnt = bp->igu_sb_cnt;
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+ FP_SB_MAX_E1x;
+
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+ return;
+ }
+
+ /* IGU in normal mode - read CAM */
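+ /* Each valid CAM entry maps an IGU status block to a function id and
+ * a vector number; collect the entries owned by this PF, vector 0
+ * being the default status block.
+ */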
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+ igu_sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = IGU_FID(val);
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+ continue;
+ if (IGU_VEC(val) == 0)
+ /* default status block */
+ bp->igu_dsb_id = igu_sb_id;
+ else {
+ if (bp->igu_base_sb == 0xff)
+ bp->igu_base_sb = igu_sb_id;
+ igu_sb_cnt++;
+ }
+ }
+ }
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * It's expected that the number of CAM entries for this function is equal
+ * to the number evaluated based on the MSI-X table size. We want a
+ * harsh warning if these values are different!
+ */
+ WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+#endif
+
+ if (igu_sb_cnt == 0)
+ BNX2X_ERR("CAM configuration error\n");
+}
+
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+ u32 switch_cfg)
+{
+ int cfg_size = 0, idx, port = BP_PORT(bp);
+
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
+
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config."
+ "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
+ return;
+ }
+
+ if (CHIP_IS_E3(bp))
+ bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+ break;
+ case SWITCH_CFG_10G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+ break;
+ default:
+ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+ bp->port.link_config[0]);
+ return;
+ }
+ }
+ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+ if (!(bp->link_params.speed_cap_mask[idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+ }
+
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
+}
+
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
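+ /* cfg_size is the number of link configurations to process: one,
+ * unless there are two external PHYs (num_phys == 3).
+ */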
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_AUTO:
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
+ } else {
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ continue;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ if (bp->port.supported[idx] &
+ SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_1G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
+ ADVERTISED_TP);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ } else {
+ BNX2X_ERR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
+ return;
+ }
+ break;
+ case PORT_FEATURE_LINK_SPEED_20G:
+ bp->link_params.req_line_speed[idx] = SPEED_20000;
+
+ break;
+ default:
+ BNX2X_ERR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ link_config);
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] =
+ bp->port.supported[idx];
+ break;
+ }
+
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
+ PORT_FEATURE_FLOW_CONTROL_MASK);
+ if ((bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) &&
+ !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ }
+
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
+ " 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
+ }
+}
+
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
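+ /* The shmem stores the MAC as two words: mac_hi holds bytes 0-1 and
+ * mac_lo holds bytes 2-5, so convert both to big-endian and copy them
+ * back to back into the 6-byte buffer.
+ */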
+ mac_hi = cpu_to_be16(mac_hi);
+ mac_lo = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+ memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
+
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 config;
+ u32 ext_phy_type, ext_phy_config;
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = port;
+
+ bp->link_params.lane_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+ bp->link_params.speed_cap_mask[0] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask);
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2);
+ bp->port.link_config[0] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+ /* If the device is capable of WoL, set the default state according
+ * to the HW
+ */
+ config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+ bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+ (config & PORT_FEATURE_WOL_ENABLED));
+
+ BNX2X_DEV_INFO("lane_config 0x%08x "
+ "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ bp->link_params.lane_config,
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
+
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
+ bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+ bnx2x_link_settings_requested(bp);
+
+ /*
+ * If connected directly, work with the internal PHY, otherwise, work
+ * with the external PHY
+ */
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bp->mdio.prtad = bp->port.phy_addr;
+
+ else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+ (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ bp->mdio.prtad =
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+ /*
+ * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
+ * In MF mode, it is set to cover self test cases
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+}
+
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ /* Read the WWN: */
+ if (!IS_MF(bp)) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_lower);
+ } else if (!IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+
+ /*
+ * Read the WWN info only if the FCoE feature is enabled for
+ * this function.
+ */
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_lower);
+ }
+ }
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
+
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ random_ether_addr(bp->dev->dev_addr);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE MAC
+ * is missing then the corresponding feature should be disabled.
+ */
+ if (IS_MF_SI(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+ BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ fip_mac);
+
+ } else
+ bp->flags |= NO_FCOE_FLAG;
+ }
+#endif
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+#endif
+ }
+
+ memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+ /* Set the FCoE MAC in MF_SD mode */
+ if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* Disable iSCSI if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+#endif
+
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
+ dev_err(&bp->pdev->dev,
+ "bad Ethernet MAC address configuration: "
+ "%pM, change it manually before bringing up "
+ "the appropriate network interface\n",
+ bp->dev->dev_addr);
+}
+
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn;
+ u32 val = 0;
+ int rc = 0;
+
+ bnx2x_get_common_hwinfo(bp);
+
+ /*
+ * initialize IGU parameters
+ */
+ if (CHIP_IS_E1x(bp)) {
+ bp->common.int_block = INT_BLOCK_HC;
+
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
+ bp->igu_base_sb = 0;
+ } else {
+ bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info processing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
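+ /* If the IGU came up in backward-compatible mode, force it into
+ * normal mode by clearing the BC enable bit and resetting the IGU
+ * memories, then wait for the reset to complete.
+ */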
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ int tout = 5000;
+
+ BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+ while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ usleep_range(1000, 1000);
+ }
+
+ if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ dev_err(&bp->pdev->dev,
+ "FORCING Normal Mode failed!!!\n");
+ return -EPERM;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+ } else
+ BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+ bnx2x_get_igu_cam_info(bp);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ }
+
+ /*
+ * set base FW non-default (fast path) status block id, this value is
+ * used to initialize the fw_sb_id saved on the fp/queue structure to
+ * determine the id used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+ else /*
+ * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ bp->base_fw_ndsb = bp->igu_base_sb;
+
+ BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+ "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+ bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+ /*
+ * Initialize MF configuration
+ */
+
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ vn = BP_VN(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+ else
+ bp->common.mf_cfg_base = bp->common.shmem_base +
+ offsetof(struct shmem_region, func_mb) +
+ E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+ /*
+ * get mf configuration:
+ * 1. existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ val = MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper);
+ /* check for legal mac (upper bytes)*/
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address "
+ "for SI\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal OV for SD\n");
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+ }
+ }
+
+ BNX2X_DEV_INFO("%s function mode\n",
+ IS_MF(bp) ? "multi" : "single");
+
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ bp->path_has_ovlan = true;
+
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
+ "(0x%04x)\n", func, bp->mf_ov,
+ bp->mf_ov);
+ } else {
+ dev_err(&bp->pdev->dev,
+ "No valid MF OV for func %d, "
+ "aborting\n", func);
+ return -EPERM;
+ }
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF "
+ "switch-independent mode\n", func);
+ break;
+ default:
+ if (vn) {
+ dev_err(&bp->pdev->dev,
+ "VN %d is in a single function mode, "
+ "aborting\n", vn);
+ return -EPERM;
+ }
+ break;
+ }
+
+ /* Check if the other port on the path needs ovlan:
+ * since the MF configuration is shared between ports,
+ * the only possible mixed modes are
+ * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
+ */
+ if (CHIP_MODE_IS_4_PORT(bp) &&
+ !bp->path_has_ovlan &&
+ !IS_MF(bp) &&
+ bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ u8 other_port = !BP_PORT(bp);
+ u8 other_func = BP_PATH(bp) + 2*other_port;
+ val = MF_CFG_RD(bp,
+ func_mf_config[other_func].e1hov_tag);
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+ bp->path_has_ovlan = true;
+ }
+ }
+
+ /* adjust igu_sb_cnt to MF for E1x */
+ if (CHIP_IS_E1x(bp) && IS_MF(bp))
+ bp->igu_sb_cnt /= E1HVN_MAX;
+
+ /* port info */
+ bnx2x_get_port_hwinfo(bp);
+
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
+
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
+ /* Get current FW pulse sequence */
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+ }
+
+ return rc;
+}
+
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
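+ /* Read the PCI VPD image and walk its read-only section: locate the
+ * RO tag, match the manufacturer-id keyword against Dell's vendor id
+ * and, if it matches, copy the vendor-specific (V0) string into
+ * bp->fw_ver.
+ */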
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
+
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+ u32 flags = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ SET_FLAGS(flags, MODE_FPGA);
+ else if (CHIP_REV_IS_EMUL(bp))
+ SET_FLAGS(flags, MODE_EMUL);
+ else
+ SET_FLAGS(flags, MODE_ASIC);
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ SET_FLAGS(flags, MODE_PORT4);
+ else
+ SET_FLAGS(flags, MODE_PORT2);
+
+ if (CHIP_IS_E2(bp))
+ SET_FLAGS(flags, MODE_E2);
+ else if (CHIP_IS_E3(bp)) {
+ SET_FLAGS(flags, MODE_E3);
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ SET_FLAGS(flags, MODE_E3_A0);
+ else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+ }
+
+ if (IS_MF(bp)) {
+ SET_FLAGS(flags, MODE_MF);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ SET_FLAGS(flags, MODE_MF_SD);
+ break;
+ case MULTI_FUNCTION_SI:
+ SET_FLAGS(flags, MODE_MF_SI);
+ break;
+ }
+ } else
+ SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+ SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+ SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+ INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+ int func;
+ int timer_interval;
+ int rc;
+
+ mutex_init(&bp->port.phy_mutex);
+ mutex_init(&bp->fw_mb_mutex);
+ spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+ mutex_init(&bp->cnic_mutex);
+#endif
+
+ INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+ INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+ INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_set_modes_bitmap(bp);
+
+ rc = bnx2x_alloc_mem_bp(bp);
+ if (rc)
+ return rc;
+
+ bnx2x_read_fwinfo(bp);
+
+ func = BP_FUNC(bp);
+
+ /* need to reset chip if undi was active */
+ if (!BP_NOMCP(bp))
+ bnx2x_undi_unload(bp);
+
+ /* init fw_seq after undi_unload! */
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+
+ if (CHIP_REV_IS_FPGA(bp))
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+ if (BP_NOMCP(bp) && (func == 0))
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
+
+ bp->multi_mode = multi_mode;
+
+ /* Set TPA flags */
+ if (disable_tpa) {
+ bp->flags &= ~TPA_ENABLE_FLAG;
+ bp->dev->features &= ~NETIF_F_LRO;
+ } else {
+ bp->flags |= TPA_ENABLE_FLAG;
+ bp->dev->features |= NETIF_F_LRO;
+ }
+ bp->disable_tpa = disable_tpa;
+
+ if (CHIP_IS_E1(bp))
+ bp->dropless_fc = 0;
+ else
+ bp->dropless_fc = dropless_fc;
+
+ bp->mrrs = mrrs;
+
+ bp->tx_ring_size = MAX_TX_AVAIL;
+
+ /* make sure that the numbers are in the right granularity */
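+	/* e.g. if BNX2X_BTR were 4 (an illustrative value, not taken from
+	 * this patch), the two lines below would round 50 down to 48 ticks
+	 * and 25 down to 24 ticks */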
+ bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+ bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
+
+ timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+ bp->current_interval = (poll ? poll : timer_interval);
+
+ init_timer(&bp->timer);
+ bp->timer.expires = jiffies + bp->current_interval;
+ bp->timer.data = (unsigned long) bp;
+ bp->timer.function = bnx2x_timer;
+
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+
+#ifdef BCM_CNIC
+ if (CHIP_IS_E1x(bp))
+ bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+ else
+ bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+ /* multiple tx priority */
+ if (CHIP_IS_E1x(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+ if (CHIP_IS_E3B0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
+ return rc;
+}
+
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ bool global = false;
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter, load_counter;
+
+ netif_carrier_off(dev);
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+ load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+ /*
+	 * If parity occurred during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+ * want the first function loaded on the current engine to
+ * complete the recovery.
+ */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true))
+ do {
+ /*
+			 * If there are attentions and they are in global
+			 * blocks, set the GLOBAL_RESET bit regardless of
+			 * whether it will be this function that will complete
+ * it will be this function that will complete the
+ * recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
+
+ /*
+ * Only the first function on the current engine should
+ * try to recover in open. In case of attentions in
+ * global blocks only the first in the chip should try
+ * to recover.
+ */
+ if ((!load_counter &&
+ (!global || !other_load_counter)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev, "Recovered in open\n");
+ break;
+ }
+
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ netdev_err(bp->dev, "Recovery flow hasn't been properly"
+			" completed yet. Try again later. If you still see this"
+ " message after a few retries then power cycle is"
+ " required.\n");
+
+ return -EAGAIN;
+ } while (0);
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Unload the driver, release IRQs */
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ return 0;
+}
+
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ int mc_count = netdev_mc_count(bp->dev);
+ struct bnx2x_mcast_list_elem *mc_mac =
+ kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ struct netdev_hw_addr *ha;
+
+ if (!mc_mac)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p->mcast_list);
+
+ netdev_for_each_mc_addr(ha, bp->dev) {
+ mc_mac->mac = bnx2x_mc_addr(ha);
+ list_add_tail(&mc_mac->link, &p->mcast_list);
+ mc_mac++;
+ }
+
+ p->mcast_list_len = mc_count;
+
+ return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_mcast_list_elem *mc_mac =
+ list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+ link);
+
+ WARN_ON(!mc_mac);
+ kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new unicast MACs list.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ unsigned long ramrod_flags = 0;
+
+	/* First schedule a clean-up of the old configuration */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+ return rc;
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Execute the pending commands */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ int rc = 0;
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ /* first, clear all configured multicast MACs */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clear multicast "
+ "configuration: %d\n", rc);
+ return rc;
+ }
+
+	/* then, configure the new MAC list */
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ if (rc) {
+ BNX2X_ERR("Failed to create multicast MACs "
+ "list: %d\n", rc);
+ return rc;
+ }
+
+ /* Now add the new MACs */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_ADD);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast "
+ "configuration: %d\n", rc);
+
+ bnx2x_free_mcast_macs_list(&rparam);
+ }
+
+ return rc;
+}
+
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return;
+ }
+
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+ if (dev->flags & IFF_PROMISC)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ }
+
+ bp->rx_mode = rx_mode;
+
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ return;
+ }
+
+ bnx2x_set_storm_rx_mode(bp);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+ int devad, u16 addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 value;
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+ prtad, devad, addr);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+ bnx2x_release_phy_lock(bp);
+ DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+ if (!rc)
+ rc = value;
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+ u16 addr, u16 value)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
+ " value 0x%x\n", prtad, devad, addr, value);
+
+ /* The HW expects different devad if CL22 is used */
+ devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct mii_ioctl_data *mdio = if_mii(ifr);
+
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ disable_irq(bp->pdev->irq);
+ bnx2x_interrupt(bp->pdev->irq, dev);
+ enable_irq(bp->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+ .ndo_open = bnx2x_open,
+ .ndo_stop = bnx2x_close,
+ .ndo_start_xmit = bnx2x_start_xmit,
+ .ndo_select_queue = bnx2x_select_queue,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
+ .ndo_set_mac_address = bnx2x_change_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bnx2x_ioctl,
+ .ndo_change_mtu = bnx2x_change_mtu,
+ .ndo_fix_features = bnx2x_fix_features,
+ .ndo_set_features = bnx2x_set_features,
+ .ndo_tx_timeout = bnx2x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = poll_bnx2x,
+#endif
+ .ndo_setup_tc = bnx2x_setup_tc,
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+ .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
+#endif
+};
+
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+ struct device *dev = &bp->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ bp->flags |= USING_DAC_FLAG;
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(dev, "dma_set_coherent_mask failed, "
+ "aborting\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "System does not support DMA, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
+ struct net_device *dev,
+ unsigned long board_type)
+{
+ struct bnx2x *bp;
+ int rc;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ bp = netdev_priv(dev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+ bp->flags = 0;
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
+ goto err_out;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
+ rc = -ENODEV;
+ goto err_out_disable;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
+ goto err_out_disable;
+ }
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
+
+ rc = bnx2x_set_coherency_mask(bp);
+ if (rc)
+ goto err_out_release;
+
+ dev->mem_start = pci_resource_start(pdev, 0);
+ dev->base_addr = dev->mem_start;
+ dev->mem_end = pci_resource_end(pdev, 0);
+
+ dev->irq = pdev->irq;
+
+ bp->regview = pci_ioremap_bar(pdev, 0);
+ if (!bp->regview) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err_out_release;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* clean indirect addresses */
+ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ PCICFG_VENDOR_ID_OFFSET);
+ /*
+	 * Clean the following indirect addresses for all functions since they
+	 * are not used by the driver.
+ */
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
+
+ /*
+ * Enable internal target-read (in case we are probed after PF FLR).
+	 * Must be done prior to any BAR read access. Only for 57712 and up.
+ */
+ if (board_type != BCM57710 &&
+ board_type != BCM57711 &&
+ board_type != BCM57711E)
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->netdev_ops = &bnx2x_netdev_ops;
+ bnx2x_set_ethtool_ops(dev);
+
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ if (bp->flags & USING_DAC_FLAG)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ /* Add Loopback capability to the device */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+ dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+ /* get_port_hwinfo() will set prtad and mmds properly */
+ bp->mdio.prtad = MDIO_PRTAD_NONE;
+ bp->mdio.mmds = 0;
+ bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ bp->mdio.dev = dev;
+ bp->mdio.mdio_read = bnx2x_mdio_read;
+ bp->mdio.mdio_write = bnx2x_mdio_write;
+
+ return 0;
+
+err_out_release:
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+err_out_disable:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+err_out:
+ return rc;
+}
+
+static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
+ int *width, int *speed)
+{
+ u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+ *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+
+	/* returned speed value: 1 = 2.5GHz, 2 = 5GHz */
+ *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+}
+
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+ const struct firmware *firmware = bp->firmware;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ struct bnx2x_fw_file_section *sections;
+ u32 offset, len, num_ops;
+ u16 *ops_offsets;
+ int i;
+ const u8 *fw_ver;
+
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ return -EINVAL;
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+ sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+ /* Make sure none of the offsets and sizes make us read beyond
+ * the end of the firmware data */
+ for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+ offset = be32_to_cpu(sections[i].offset);
+ len = be32_to_cpu(sections[i].len);
+ if (offset + len > firmware->size) {
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Likewise for the init_ops offsets */
+ offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+ ops_offsets = (u16 *)(firmware->data + offset);
+ num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+ for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+ if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Check FW version */
+ offset = be32_to_cpu(fw_hdr->fw_version.offset);
+ fw_ver = firmware->data + offset;
+ if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+ (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+ (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+ (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2],
+ fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION,
+ BCM_5710_FW_ENGINEERING_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ u32 *target = (u32 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/4; i++)
+ target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+ Ops array is stored in the following format:
+ {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
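+  For example (purely illustrative values): the big-endian word pair
+  0x0b001234 0x00000001 decodes to op = 0x0b, offset = 0x001234 and
+  raw_data = 0x00000001, which is exactly the unpacking done below.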
+ */
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct raw_op *target = (struct raw_op *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/8; i++, j += 2) {
+ tmp = be32_to_cpu(source[j]);
+ target[i].op = (tmp >> 24) & 0xff;
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
+ }
+}
+
+/**
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
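+ * Each source entry therefore occupies three big-endian 32-bit words:
+ * {base}, {m1, m2} and {m3, size}, which is how the loop below steps
+ * through the blob (j advances by 3 per entry).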
+ */
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be32 *source = (const __be32 *)_source;
+ struct iro *target = (struct iro *)_target;
+ u32 i, j, tmp;
+
+ for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+ target[i].base = be32_to_cpu(source[j]);
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m1 = (tmp >> 16) & 0xffff;
+ target[i].m2 = tmp & 0xffff;
+ j++;
+ tmp = be32_to_cpu(source[j]);
+ target[i].m3 = (tmp >> 16) & 0xffff;
+ target[i].size = tmp & 0xffff;
+ j++;
+ }
+}
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+ const __be16 *source = (const __be16 *)_source;
+ u16 *target = (u16 *)_target;
+ u32 i;
+
+ for (i = 0; i < n/2; i++)
+ target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
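+/* Note: the macro above relies on 'bp' and 'fw_hdr' being in scope at the
+ * call site and jumps to 'lbl' on allocation failure; see its uses in
+ * bnx2x_init_firmware() below.
+ */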
+
+int bnx2x_init_firmware(struct bnx2x *bp)
+{
+ const char *fw_file_name;
+ struct bnx2x_fw_file_hdr *fw_hdr;
+ int rc;
+
+ if (CHIP_IS_E1(bp))
+ fw_file_name = FW_FILE_NAME_E1;
+ else if (CHIP_IS_E1H(bp))
+ fw_file_name = FW_FILE_NAME_E1H;
+ else if (!CHIP_IS_E1x(bp))
+ fw_file_name = FW_FILE_NAME_E2;
+ else {
+ BNX2X_ERR("Unsupported chip revision\n");
+ return -EINVAL;
+ }
+
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+ if (rc) {
+ BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+ /* Initialize the pointers to the init arrays */
+ /* Blob */
+ BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+ /* Opcodes */
+ BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+ /* Offsets */
+ BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+ be16_to_cpu_n);
+
+ /* STORMs firmware */
+ INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+ INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+ INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+ INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->usem_pram_data.offset);
+ INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+ INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+ INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+ INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
+ be32_to_cpu(fw_hdr->csem_pram_data.offset);
+ /* IRO */
+ BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+ return 0;
+
+iro_alloc_err:
+ kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+ kfree(bp->init_ops);
+init_ops_alloc_err:
+ kfree(bp->init_data);
+request_firmware_exit:
+ release_firmware(bp->firmware);
+
+ return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .gunzip_init = bnx2x_gunzip_init,
+ .gunzip_end = bnx2x_gunzip_end,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+ /* Prepare DMAE related driver resources */
+ bnx2x_setup_dmae(bp);
+
+ bnx2x_init_func_obj(bp, &bp->func_obj,
+ bnx2x_sp(bp, func_rdata),
+ bnx2x_sp_mapping(bp, func_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+ int cid_count = BNX2X_L2_CID_COUNT(bp);
+
+#ifdef BCM_CNIC
+ cid_count += CNIC_CID_MAX;
+#endif
+ return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ *
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+ /*
+ * If MSI-X is not supported - return number of SBs needed to support
+ * one fast path queue: one FP queue + SB for CNIC
+ */
+ if (!pos)
+ return 1 + CNIC_PRESENT;
+
+ /*
+ * The value in the PCI configuration space is the index of the last
+ * entry, namely one less than the actual size of the table, which is
+ * exactly what we want to return from this function: number of all SBs
+ * without the default SB.
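+	 * E.g. (illustrative) a Table Size field of 16 denotes a 17-entry
+	 * MSI-X table: 16 non-default SBs plus the default SB, so 16 is
+	 * returned.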
+ */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ return control & PCI_MSIX_FLAGS_QSIZE;
+}
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count;
+ /*
+	 * An estimate of the maximum supported CoS number according to the
+	 * chip version.
+	 * We will try to roughly estimate the maximum number of CoSes this chip
+	 * may support in order to minimize the memory allocated for Tx
+	 * netdev_queues. This number will be accurately calculated during the
+	 * initialization of bp->max_cos based on the chip version AND chip
+	 * revision in bnx2x_init_bp().
+ */
+ u8 max_cos_est = 0;
+
+ switch (ent->driver_data) {
+ case BCM57710:
+ case BCM57711:
+ case BCM57711E:
+ max_cos_est = BNX2X_MULTI_TX_COS_E1X;
+ break;
+
+ case BCM57712:
+ case BCM57712_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+ break;
+
+ case BCM57800:
+ case BCM57800_MF:
+ case BCM57810:
+ case BCM57810_MF:
+ case BCM57840:
+ case BCM57840_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
+ break;
+
+ default:
+ pr_err("Unknown board_type (%ld), aborting\n",
+ ent->driver_data);
+ return -ENODEV;
+ }
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+ /* !!! FIXME !!!
+ * Do not allow the maximum SB count to grow above 16
+	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+ * We will use the FP_SB_MAX_E1x macro for this matter.
+ */
+ max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+ WARN_ON(!max_non_def_sbs);
+
+ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
+ rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+ /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+ rx_count = rss_count + FCOE_PRESENT;
+
+ /*
+ * Maximum number of netdev Tx queues:
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ */
+ tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+
+ /* dev zeroed in init_etherdev */
+ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+ if (!dev) {
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
+ return -ENOMEM;
+ }
+
+ bp = netdev_priv(dev);
+
+ DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
+ bp->igu_sb_cnt = max_non_def_sbs;
+ bp->msg_enable = debug;
+ pci_set_drvdata(pdev, dev);
+
+ rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ if (rc < 0) {
+ free_netdev(dev);
+ return rc;
+ }
+
+ DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+
+ rc = bnx2x_init_bp(bp);
+ if (rc)
+ goto init_one_exit;
+
+ /*
+	 * Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp().
+ */
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ min_t(u64, BNX2X_DB_SIZE(bp),
+ pci_resource_len(pdev, 2)));
+ if (!bp->doorbells) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+
+ /* calc qm_cid_count */
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+
+#ifdef BCM_CNIC
+	/* disable FCoE L2 queue for E1x and E3 */
+ if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+#endif
+
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
+ * needed, set bp->num_queues appropriately.
+ */
+ bnx2x_set_int_mode(bp);
+
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
+
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_exit;
+ }
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+ bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width,
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+ "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
+
+ return 0;
+
+init_one_exit:
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ return rc;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp;
+
+ if (!dev) {
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+ return;
+ }
+ bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+ unregister_netdev(dev);
+
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ /* Disable MSI/MSI-X */
+ bnx2x_disable_msi(bp);
+
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+ if (bp->regview)
+ iounmap(bp->regview);
+
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+
+ bnx2x_free_mem_bp(bp);
+
+ free_netdev(dev);
+
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+ int i;
+
+ bp->state = BNX2X_STATE_ERROR;
+
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
+ bnx2x_netif_stop(bp, 0);
+
+ del_timer_sync(&bp->timer);
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+
+ /* Free SKBs, SGEs, TPA pool and driver internals */
+ bnx2x_free_skbs(bp);
+
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+
+ netif_carrier_off(bp->dev);
+
+ return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+ u32 val;
+
+ mutex_init(&bp->port.phy_mutex);
+
+ bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ bp->link_params.shmem_base = bp->common.shmem_base;
+ BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+ if (!bp->common.shmem_base ||
+ (bp->common.shmem_base < 0xA0000) ||
+ (bp->common.shmem_base >= 0xC0000)) {
+ BNX2X_DEV_INFO("MCP not active\n");
+ bp->flags |= NO_MCP_FLAG;
+ return;
+ }
+
+ val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ BNX2X_ERR("BAD MCP validity signature\n");
+
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ netif_device_detach(dev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(dev))
+ bnx2x_eeh_nic_unload(bp);
+
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset\n");
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev))
+ bnx2x_set_power_state(bp, PCI_D0);
+
+ rtnl_unlock();
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return;
+ }
+
+ rtnl_lock();
+
+ bnx2x_eeh_recover(bp);
+
+ if (netif_running(dev))
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ netif_device_attach(dev);
+
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2x_err_handler = {
+ .error_detected = bnx2x_io_error_detected,
+ .slot_reset = bnx2x_io_slot_reset,
+ .resume = bnx2x_io_resume,
+};
+
+static struct pci_driver bnx2x_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnx2x_pci_tbl,
+ .probe = bnx2x_init_one,
+ .remove = __devexit_p(bnx2x_remove_one),
+ .suspend = bnx2x_suspend,
+ .resume = bnx2x_resume,
+ .err_handler = &bnx2x_err_handler,
+};
+
+static int __init bnx2x_init(void)
+{
+ int ret;
+
+ pr_info("%s", version);
+
+ bnx2x_wq = create_singlethread_workqueue("bnx2x");
+ if (bnx2x_wq == NULL) {
+ pr_err("Cannot create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&bnx2x_pci_driver);
+ if (ret) {
+ pr_err("Cannot register driver\n");
+ destroy_workqueue(bnx2x_wq);
+ }
+ return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+ pci_unregister_driver(&bnx2x_pci_driver);
+
+ destroy_workqueue(bnx2x_wq);
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp: driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't return.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+ &bp->iscsi_l2_mac_obj, true,
+ BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+ struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+ BUG_ON(bp->cnic_spq_pending < count);
+ bp->cnic_spq_pending -= count;
+
+
+ for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+ u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+ & SPE_HDR_CONN_TYPE) >>
+ SPE_HDR_CONN_TYPE_SHIFT;
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+ >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+ /* Set validation for iSCSI L2 client before sending SETUP
+ * ramrod
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
+ bnx2x_set_ctx_validation(bp, &bp->context.
+ vcxt[BNX2X_ISCSI_ETH_CID].eth,
+ BNX2X_ISCSI_ETH_CID);
+ }
+
+ /*
+		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
+		 * in the air. We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+		 * accommodate.
+ */
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
+ if (bp->cnic_spq_pending >=
+ bp->cnic_eth_dev.max_kwqe_pending)
+ break;
+ else
+ bp->cnic_spq_pending++;
+ } else {
+ BNX2X_ERR("Unknown SPE type: %d\n", type);
+ bnx2x_panic();
+ break;
+ }
+
+ spe = bnx2x_sp_get_next(bp);
+ *spe = *bp->cnic_kwq_cons;
+
+ DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+ bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+ if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_cons++;
+ }
+ bnx2x_sp_prod_update(bp);
+ spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+ struct kwqe_16 *kwqes[], u32 count)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return -EIO;
+#endif
+
+ spin_lock_bh(&bp->spq_lock);
+
+ for (i = 0; i < count; i++) {
+ struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+ if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+ break;
+
+ *bp->cnic_kwq_prod = *spe;
+
+ bp->cnic_kwq_pending++;
+
+ DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+ spe->hdr.conn_and_cmd_data, spe->hdr.type,
+ spe->data.update_data_addr.hi,
+ spe->data.update_data_addr.lo,
+ bp->cnic_kwq_pending);
+
+ if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ else
+ bp->cnic_kwq_prod++;
+ }
+
+ spin_unlock_bh(&bp->spq_lock);
+
+ if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+ bnx2x_cnic_sp_post(bp, 0);
+
+ return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ mutex_lock(&bp->cnic_mutex);
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ mutex_unlock(&bp->cnic_mutex);
+
+ return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+ struct cnic_ops *c_ops;
+ int rc = 0;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ ctl.cmd = cmd;
+
+ return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+ struct cnic_ctl_info ctl = {0};
+
+ /* first we tell CNIC and only then we count this as a completion */
+ ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+ ctl.data.comp.cid = cid;
+ ctl.data.comp.error = err;
+
+ bnx2x_cnic_ctl_send_bh(bp, &ctl);
+ bnx2x_cnic_sp_post(bp, 0);
+}
+
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+ unsigned long accept_flags = 0, ramrod_flags = 0;
+ u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+ if (start) {
+		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
+		 * because it's the only way for the UIO queue to accept
+		 * multicasts (in non-promiscuous mode only one queue per
+		 * function will receive multicast packets, the leading queue
+		 * in our case).
+ */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ /* Clear STOP_PENDING bit if START is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+ sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+ } else
+ /* Clear START_PENDING bit if STOP is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(sched_state, &bp->sp_state);
+ else {
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+ ramrod_flags);
+ }
+}
+
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (ctl->cmd) {
+ case DRV_CTL_CTXTBL_WR_CMD: {
+ u32 index = ctl->data.io.offset;
+ dma_addr_t addr = ctl->data.io.dma_addr;
+
+ bnx2x_ilt_wr(bp, index, addr);
+ break;
+ }
+
+ case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ bnx2x_cnic_sp_post(bp, count);
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_START_L2_CMD: {
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ unsigned long sp_bits = 0;
+
+ /* Configure the iSCSI classification object */
+ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+ cp->iscsi_l2_client_id,
+ cp->iscsi_l2_cid, BP_FUNC(bp),
+ bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+ &bp->macs_pool);
+
+ /* Set iSCSI MAC address */
+ rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+ if (rc)
+ break;
+
+ mmiowb();
+ barrier();
+
+ /* Start accepting on iSCSI L2 ring */
+
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ break;
+ }
+
+ /* rtnl_lock is held. */
+ case DRV_CTL_STOP_L2_CMD: {
+ unsigned long sp_bits = 0;
+
+ /* Stop accepting on iSCSI L2 ring */
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
+
+ mmiowb();
+ barrier();
+
+ /* Unset iSCSI L2 MAC */
+ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+ BNX2X_ISCSI_ETH_MAC, true);
+ break;
+ }
+ case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+ int count = ctl->data.credit.credit_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_add(count, &bp->cq_spq_left);
+ smp_mb__after_atomic_inc();
+ break;
+ }
+
+ default:
+ BNX2X_ERR("unknown command %x\n", ctl->cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (bp->flags & USING_MSIX_FLAG) {
+ cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+ cp->irq_arr[0].vector = bp->msix_table[1].vector;
+ } else {
+ cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+ cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+ }
+ if (!CHIP_IS_E1x(bp))
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+ else
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+ cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
+ cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+ cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+ cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+ cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+ void *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bp->cnic_kwq)
+ return -ENOMEM;
+
+ bp->cnic_kwq_cons = bp->cnic_kwq;
+ bp->cnic_kwq_prod = bp->cnic_kwq;
+ bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
+
+ bp->cnic_spq_pending = 0;
+ bp->cnic_kwq_pending = 0;
+
+ bp->cnic_data = data;
+
+ cp->num_irq = 0;
+ cp->drv_state |= CNIC_DRV_STATE_REGD;
+ cp->iro_arr = bp->iro_arr;
+
+ bnx2x_setup_cnic_irq_info(bp);
+
+ rcu_assign_pointer(bp->cnic_ops, ops);
+
+ return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ mutex_lock(&bp->cnic_mutex);
+ cp->drv_state = 0;
+ rcu_assign_pointer(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_mutex);
+ synchronize_rcu();
+ kfree(bp->cnic_kwq);
+ bp->cnic_kwq = NULL;
+
+ return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ /* If both iSCSI and FCoE are disabled - return NULL in
+	 * order to indicate to CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
+ cp->drv_owner = THIS_MODULE;
+ cp->chip_id = CHIP_ID(bp);
+ cp->pdev = bp->pdev;
+ cp->io_base = bp->regview;
+ cp->io_base2 = bp->doorbells;
+ cp->max_kwqe_pending = 8;
+ cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->ctx_tbl_len = CNIC_ILT_LINES;
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+ cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_register_cnic = bnx2x_register_cnic;
+ cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->iscsi_l2_client_id =
+ bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+ DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
+ "starting cid %d\n",
+ cp->ctx_blk_size,
+ cp->ctx_tbl_offset,
+ cp->ctx_tbl_len,
+ cp->starting_cid);
+ return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
+
--- /dev/null
+/* bnx2x_reg.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Each register description starts with the register access type followed
+ * by its size in bits, for example [RW 32]. The access types are:
+ * R - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ *      read/written in consecutive 32-bit accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset the first rbc access to this reg must be a write; there can
+ * be no more rbc writes after the first one; there can be any number of rbc
+ * reads following the first write; rbc accesses not following these rules will
+ * result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
+/* [RW 11] The number of blocks guarantied for the LB port */
+#define BRB1_REG_LB_GUARANTIED 0x601ec
+/* [RW 11] The hysteresis on the guarantied buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+ interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+ interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC 0
+ * before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 1. The register
+ * is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
+/* [RW 11] The number of blocks guarantied for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+ was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+ asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
+/* [R 24] The number of full blocks occupied by port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN 0xd000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
+ Otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define CCM_REG_CDU_AG_WR_IFEN 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT 0xd00bc
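+/* Illustrative sketch (assumption, not part of the original register map):
+   the WRR weight registers in this block encode weight 8 as 0 and weight N
+   as N for any non-zero value; a hypothetical decode helper could be: */
+#define CCM_WRR_DECODE_WEIGHT(val)		((val) == 0 ? 8 : (val))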
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+ inputs. */
+#define CCM_REG_ERR_CCM_HDR 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+ ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+ outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is assumed that the Store channel priority is
+   the complement to 4 of the remaining priorities - Aggregation channel; Load
+ (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is assumed that the Store channel priority is
+   the complement to 4 of the remaining priorities - Aggregation channel; Load
+ (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q 0xd0108
+/* [RW 4] The number of double REG-pairs (128 bits); loaded from the STORM
+ context and sent to STORM; for a specific connection type. The double
+ REG-pairs are used in order to align to STORM context row size of 128
+ bits. The offset of these data in the STORM context is always 0. Index
+ _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4 0xd005c
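+/* Illustrative sketch (assumption, not part of the original register map):
+   the five CCM_REG_N_SM_CTX_LD_* registers above are contiguous 32-bit
+   registers, so a hypothetical helper can derive the address for an index: */
+#define CCM_REG_N_SM_CTX_LD(i)			(CCM_REG_N_SM_CTX_LD_0 + (i) * 4)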
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0 0xd0134
+#define CCM_REG_PHYS_QNUM1_1 0xd0138
+#define CCM_REG_PHYS_QNUM2_0 0xd013c
+#define CCM_REG_PHYS_QNUM2_1 0xd0140
+#define CCM_REG_PHYS_QNUM3_0 0xd0144
+#define CCM_REG_PHYS_QNUM3_1 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
+ mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+   weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+   etc. */
+#define CCM_REG_STORM_WEIGHT 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+ the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [12:6] - message
+   pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
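+/* Illustrative field accessors (assumption, derived from the field layout
+   described above): hypothetical helpers unpacking an XX descriptor word
+   into its message length, message pointer and next pointer fields. */
+#define CCM_XX_DESCR_MSG_LEN(x)			((x) & 0x3f)
+#define CCM_XX_DESCR_MSG_PTR(x)			(((x) >> 6) & 0x7f)
+#define CCM_XX_DESCR_NEXT_PTR(x)		(((x) >> 13) & 0x3f)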
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+ messages. Max credit available - 127. Write writes the initial credit
+ value; read returns the current value of the credit counter. Must be
+ initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+ protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+ At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+ counter. */
+#define CCM_REG_XX_MSG_NUM 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+ header pointer. */
+#define CCM_REG_XX_TABLE 0xd0280
+#define CDU_REG_CDU_CHK_MASK0 0x101000
+#define CDU_REG_CDU_CHK_MASK1 0x101004
+#define CDU_REG_CDU_CONTROL0 0x101008
+#define CDU_REG_CDU_DEBUG 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+   active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA 0x101014
+/* [WB 216] L1TT ram access. Each entry has the following format:
+   {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+   length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT 0x101800
+/* [WB 24] MATT ram access. Each entry has the following
+   format: {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 0x101050
+/* [R 1] indication that initialization of the activity counter by the
+   hardware was done. */
+#define CFC_REG_AC_INIT_DONE 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
+/* [R 1] indication that initialization of the cams by the hardware was
+   done. */
+#define CFC_REG_CAM_INIT_DONE 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 0x104800
+#define CFC_REG_CONTROL0 0x104028
+#define CFC_REG_DEBUG0 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+ vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+ set one of these bits. the bit description can be found in CFC
+ specifications */
+#define CFC_REG_ERROR_VECTOR 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM 0x105000
+#define CFC_REG_INFO_RAM_SIZE 1024
+#define CFC_REG_INIT_REG 0x10404c
+#define CFC_REG_INTERFACES 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+ field allows changing the priorities of the weighted-round-robin arbiter
+ which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST 0x104c00
+#define CFC_REG_LINK_LIST_SIZE 256
+/* [R 1] indication that initialization of the link list by the hardware was
+   done. */
+#define CFC_REG_LL_INIT_DONE 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
+#define CFC_REG_WEAK_ENABLE_PF 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9 0xc205c
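+/* Illustrative sketch (assumption, not part of the original register map):
+   the aggregated-interrupt event registers above are spaced 4 bytes apart
+   starting at CSDM_REG_AGG_INT_EVENT_0, so a hypothetical helper can derive
+   the register for index n (note that index 1 is not listed above): */
+#define CSDM_REG_AGG_INT_EVENT(n)		(CSDM_REG_AGG_INT_EVENT_0 + (n) * 4)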
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
+#define CSDM_REG_ENABLE_IN1 0xc2238
+#define CSDM_REG_ENABLE_IN2 0xc223c
+#define CSDM_REG_ENABLE_OUT1 0xc2240
+#define CSDM_REG_ENABLE_OUT2 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+   not be equal to register ~csem_registers_arb_element0.arb_element0 and
+ ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- fic0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~csem_registers_arb_element0.arb_element0
+ and ~csem_registers_arb_element1.arb_element1 and
+ ~csem_registers_arb_element2.arb_element2 and
+ ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
+#define CSEM_REG_ENABLE_IN 0x2000a4
+#define CSEM_REG_ENABLE_OUT 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY 0x220000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+ by the microcode */
+#define CSEM_REG_FIC0_DISABLE 0x200224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+ by the microcode */
+#define CSEM_REG_FIC1_DISABLE 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+ the middle of the work */
+#define CSEM_REG_INT_TABLE 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3 0x200014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+ during run_time by the microcode */
+#define CSEM_REG_PAS_DISABLE 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 0x202000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define CSEM_REG_PRAM 0x240000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
+ * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
+ * 4.Completion function=0; 5.Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+ as 14*X+Y. */
+#define DMAE_REG_CMD_MEM 0x102400
+#define DMAE_REG_CMD_MEM_SIZE 224
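+/* Illustrative sketch (assumption, not part of the original register map):
+ * per the comment above, word Y of command X occupies row 14*X+Y of the
+ * command memory; assuming byte-addressed GRC offsets and 32-bit rows, a
+ * hypothetical address helper could be: */
+#define DMAE_REG_CMD_MEM_ADDR(cmd, word)	\
+	(DMAE_REG_CMD_MEM + ((cmd) * 14 + (word)) * 4)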
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+ initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+ CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9 0x1020bc
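+/* Illustrative sketch (assumption, not part of the original register map):
+ * the GO registers above are not laid out linearly by command number, so a
+ * caller would typically map a command index to its GO register through a
+ * small lookup table such as the hypothetical one below (in practice such a
+ * table would live in a .c file; u32 comes from <linux/types.h>). */
+static const u32 dmae_reg_go_c_example[] = {
+	DMAE_REG_GO_C0,  DMAE_REG_GO_C1,  DMAE_REG_GO_C2,  DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4,  DMAE_REG_GO_C5,  DMAE_REG_GO_C6,  DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8,  DMAE_REG_GO_C9,  DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};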
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
+ input is disregarded; valid is deasserted; all other signals are treated
+ as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+ acknowledge input is disregarded; valid is deasserted; full is asserted;
+ all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN 0x102004
+/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
+ initial value to the credit counter; related to the address. Read returns
+ the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to following
+ pointer. The range is 0 - 256 FIFO rows; where each row stands for the
+ doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF 0x1700a4
+/* [R 1] DQ FIFO full status. Set when the FIFO filling level is greater
+   than or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD 0x170058
+#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MODE_ACT 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+ Interface. */
+#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
+#define DORQ_REG_REGN 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+ configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+ register. */
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+ writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+ read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD 0x170048
+/* [RW 4] Initial activity counter value on the load request; when the
+ shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define HC_REG_AGG_INT_0 0x108050
+#define HC_REG_AGG_INT_1 0x108054
+#define HC_REG_ATTN_BIT 0x108120
+#define HC_REG_ATTN_IDX 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
+#define HC_REG_ATTN_NUM_P0 0x108038
+#define HC_REG_ATTN_NUM_P1 0x10803c
+#define HC_REG_COMMAND_REG 0x108180
+#define HC_REG_CONFIG_0 0x108000
+#define HC_REG_CONFIG_1 0x108004
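+/* Illustrative sketch (assumption, not part of the original register map):
+ * HC_REG_CONFIG_0/HC_REG_CONFIG_1 are the per-port copies of the host
+ * coalescing config register and the HC_CONFIG_0_REG_* values above are bit
+ * masks within them; a flag would typically be applied with a
+ * read-modify-write of the register selected by this hypothetical helper. */
+#define HC_REG_CONFIG(port)	((port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0)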
+#define HC_REG_FUNC_NUM_P0 0x1080ac
+#define HC_REG_FUNC_NUM_P1 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR 0x108098
+#define HC_REG_INT_MASK 0x108108
+#define HC_REG_LEADING_EDGE_0 0x108040
+#define HC_REG_LEADING_EDGE_1 0x108048
+#define HC_REG_MAIN_MEMORY 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE 152
+#define HC_REG_P0_PROD_CONS 0x108200
+#define HC_REG_P1_PROD_CONS 0x108400
+#define HC_REG_PBA_COMMAND 0x108140
+#define HC_REG_PCI_CONFIG_0 0x108010
+#define HC_REG_PCI_CONFIG_1 0x108014
+#define HC_REG_STATISTIC_COUNTERS 0x109000
+#define HC_REG_TRAILING_EDGE_0 0x108044
+#define HC_REG_TRAILING_EDGE_1 0x10804c
+#define HC_REG_UC_RAM_ADDR_0 0x108028
+#define HC_REG_UC_RAM_ADDR_1 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
+#define HC_REG_VQID_0 0x108008
+#define HC_REG_VQID_1 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means the attention message was sent;
+ * but the write done was not received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
+#define IGU_REG_COMMAND_REG_CTRL 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this register are set and cleared via the producer
+ * command. Data valid only in addresses 0-4; all the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM 0x130064
+/* [R 1] data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE 136
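+/* Illustrative sketch (assumption, derived from the field layout described
+ * above): a hypothetical encoder packing the valid bit, vector number and
+ * FID fields of an E2 mapping-CAM entry. */
+#define IGU_REG_MAPPING_MEMORY_ENTRY(valid, vector, fid)	\
+	(((valid) & 0x1) | (((vector) & 0x3f) << 1) | (((fid) & 0x7f) << 7))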
+#define IGU_REG_PBA_STATUS_LSB 0x130138
+#define IGU_REG_PBA_STATUS_MSB 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
+/* [WB_R 32] Each bit represents the pending status for that SB. 0 = no
+ * pending; 1 = pending. Pending means the interrupt was asserted and the
+ * write done was not received. Data valid only in addresses 0-4; all the
+ * rest are zero. */
+#define IGU_REG_PENDING_BITS_STATUS 0x130300
+#define IGU_REG_PF_CONFIGURATION 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line holds the C producer. The
+ * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. Each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM 0x130068
+/* [RW 6] Writing one to a bit will reset the appropriate memory. When the
+ * memory reset is finished the appropriate bit will be cleared. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
+#define IGU_REG_SB_MASK_LSB 0x130164
+#define IGU_REG_SB_MASK_MSB 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access for WO BAR address; or write access for RO BAR
+ * address or any access for reserved address or PCI function error is set
+ * and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
+/* [RW 32] Number of cycles the timer mask masks the IGU interrupt when a
+ * timer mask command arrives. The value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
+#define IGU_REG_VF_CONFIGURATION 0x130170
+/* [WB_R 32] Each bit represents the write done pending status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING 0x130480
+#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
+#define MCP_REG_MCPR_NVM_ADDR 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND 0x86400
+#define MCP_REG_MCPR_NVM_READ 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
+#define MCP_REG_MCPR_NVM_WRITE 0x86408
+#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+ follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+ [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+ GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+ glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+ [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+ MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+ Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+ interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+ error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+ interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+ Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+ follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+ PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+ [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+ [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+ XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+ follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+ error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+ PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+ CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+ Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+ Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+ error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+ interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+ MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+ Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+ timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+ func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+ func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+ timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+ follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+ General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+ [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+ attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+ General attn13; [12] General attn14; [13] General attn15; [14] General
+ attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+ [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+ RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+ RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+ attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+ rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+ ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
+/* [W 14] a write to this register results in clearing of the latched
+ signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
+ d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
+ latch; one in d5 clears GRC Latched timeout attention; one in d6 clears
+ GRC Latched reserved access attention; one in d7 clears Latched
+ rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears
+ Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both
+ ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears
+   pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; a read
+   from this register returns zero */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+ 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] NIG attention for function0; [1] NIG attention for
+ function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+ 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+ GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+ indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+ [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+ SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+ TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+ TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+ Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+ interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+ error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+ interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+ NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+ [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+ interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+ Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+ Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+ Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+ interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+ as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+ Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+ [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+ interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+ error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+ Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+ pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+ MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+ SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+ timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+ func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+ attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped
+ as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+ General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+ [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+ attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+ [14] General attn16; [15] General attn17; [16] General attn18; [17]
+ General attn19; [18] General attn20; [19] General attn21; [20] Main power
+ interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+ Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+ Latched timeout attention; [27] GRC Latched reserved access attention;
+ [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+ Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+ 128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
+#define MISC_REG_AEU_GENERAL_MASK 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+ function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+ [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+ [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+ function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+ Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+ SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+ for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+ Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+ interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+ Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+ Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+ 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+ error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+ interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+ Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+ interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+ DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+ error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+ PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+ [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+ [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+ [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+ [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+ [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064
+/* [RW 1] If set a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610
+/* [RW 32] Represent the status of the input vector to the AEU when a system
+ kill occurred. The register is reset in por reset. Mapped as follows: [0]
+ NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+ mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+ [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+ PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+ function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+ Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+ mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+ BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+ Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+ interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+ Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+ interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+ Port. */
+#define MISC_REG_BOND_ID 0xa400
+/* [R 8] These bits indicate the metal revision of the chip. This value
+ starts at 0x00 for each all-layer tape-out and increments by one for each
+ tape-out. */
+#define MISC_REG_CHIP_METAL 0xa404
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+ starts at 0x0 for the A0 tape-out and increments by one for each
+ all-layer tape-out. */
+#define MISC_REG_CHIP_REV 0xa40c
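As an aside, a minimal sketch of how these three identification registers could be combined into one chip identifier, assuming the driver's REG_RD accessor. The 16/4/8-bit widths come from the comments above; the packed layout is chosen only for illustration and is not claimed to match the driver's own chip-ID format.

/* Illustrative only: pack part number, base revision and metal revision. */
static u32 example_read_chip_id(struct bnx2x *bp)
{
        u32 num   = REG_RD(bp, MISC_REG_CHIP_NUM)   & 0xffff; /* [R 16] part number */
        u32 rev   = REG_RD(bp, MISC_REG_CHIP_REV)   & 0xf;    /* [R 4] base revision */
        u32 metal = REG_RD(bp, MISC_REG_CHIP_METAL) & 0xff;   /* [R 8] metal revision */

        return (num << 16) | (rev << 12) | (metal << 4); /* layout is arbitrary */
}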
+/* [RW 32] The following driver registers(1...16) represent 16 drivers and
+ 32 clients. Each client can be controlled by one driver only. A one in a
+ bit means that this driver controls the corresponding client (Ex: bit 5
+ is set means this driver controls client number 5). addr1 = set; addr0 =
+ clear; reads from both addresses will give the same result = status. A
+ write to address 1 will set a request to control all the clients whose
+ appropriate bit (in the write command) is set. If the client is free (the
+ appropriate bit in all the other drivers is clear) a one will be written
+ to that driver register; if the client isn't free the bit will remain
+ zero. If the appropriate bit is already set (the driver requests to gain
+ control of a client it already controls) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. A write to
+ address 0 will set a request to free all the clients whose appropriate
+ bit (in the write command) is set. If the appropriate bit is clear (the
+ driver requests to free a client it doesn't control) the
+ ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. */
+#define MISC_REG_DRIVER_CONTROL_1 0xa510
+#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
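The set/clear protocol described above can be illustrated with a short sketch. It assumes the driver's REG_RD/REG_WR accessors and that the "set" address is the following 32-bit word (base + 4); both the offset and the helper names are assumptions for illustration, not the driver's own locking code.

/* Request control of one client; true if the read-back shows we own it. */
static bool example_try_take_client(struct bnx2x *bp, int client)
{
        u32 bit = 1 << client;

        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, bit); /* "set": request */
        return (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & bit) != 0;
}

/* Release a client we own by writing its bit to the "clear" (base) address. */
static void example_release_client(struct bnx2x *bp, int client)
{
        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, 1 << client);
}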
+/* [RW 1] e1hmf for WOL. If clr; the WOL signal to the PXP will be sent on
+ bit 0 only. */
+#define MISC_REG_E1HMF_MODE 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
+/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 -
+ the path_swap output is equal to 4 port mode path swap input pin; if it
+ is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
+/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 -
+ the port_swap output is equal to 4 port mode port swap input pin; if it
+ is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0 0xa460
+#define MISC_REG_GENERIC_CR_1 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+ use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+ can not be configured as an output. Each output has its output enable in
+ the MCP register space; but this bit needs to be set to make use of that.
+ Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+ set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+ the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+ spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+ these bits is written as a '1'; the corresponding GPIO bit will turn off
+ its drivers and become an input. This is the reset state of all GPIO
+ pins. The read value of these bits will be a '1' if that last command
+ (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+ [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+ as a '1'; the corresponding GPIO bit will drive low. The read value of
+ these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+ this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port
+ 0; When any of these bits is written as a '1'; the corresponding GPIO
+ bit will drive high (if it has that capability). The read value of these
+ bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+ bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0;
+ RO; These bits indicate the read value of each of the eight GPIO pins.
+ This is the result value of the pin; not the drive value. Writing these
+ bits will have no effect. */
+#define MISC_REG_GPIO 0xa490
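The SET/CLR/FLOAT command fields above can be exercised with a one-shot write per pin. The sketch below drives a GPIO pin high; the bit positions follow the comment (SET is [11-8] for port 0 and [15-12] for port 1), and the helper is illustrative rather than the driver's own GPIO routine.

/* Drive GPIO 'pin' (0-3) of 'port' (0-1) high via the SET command field. */
static void example_gpio_drive_high(struct bnx2x *bp, int port, int pin)
{
        int shift = 8 + (port ? 4 : 0) + pin;

        REG_WR(bp, MISC_REG_GPIO, 1 << shift);
}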
+/* [RW 8] These bits enable the GPIO_INTs to signal events to the
+ IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+ p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+ [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
+ '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
+ This will acknowledge an interrupt on the falling edge of corresponding
+ GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0;
+ Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE
+ register. This will acknowledge an interrupt on the rising edge of
+ corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8]
+ OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input
+ value. When the ~INT_STATE bit is set; this bit indicates the OLD value
+ of the pin such that if ~INT_STATE is set and this bit is '0'; then the
+ interrupt is due to a low to high edge. If ~INT_STATE is set and this bit
+ is '1'; then the interrupt is due to a high to low edge (reset value 0).
+ [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits indicate the
+ current GPIO interrupt state for each GPIO pin. This bit is cleared when
+ the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is
+ set when the GPIO input does not match the current value in #OLD_VALUE
+ (reset value 0). */
+#define MISC_REG_GPIO_INT 0xa494
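A rising-edge interrupt is acknowledged by setting the pin's bit in #OLD_VALUE through the OLD_SET field. A hedged sketch, using the field layout described above (OLD_SET is [19-16] for port 0 and [23-20] for port 1); the helper name is only illustrative.

/* Acknowledge a rising-edge GPIO interrupt for 'pin' (0-3) on 'port' (0-1). */
static void example_gpio_int_ack_rising(struct bnx2x *bp, int port, int pin)
{
        int shift = 16 + (port ? 4 : 0) + pin;

        REG_WR(bp, MISC_REG_GPIO_INT, 1 << shift);
}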
+/* [R 28] this field holds the last information that caused reserved
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN 0xa3c0
+/* [R 28] this field holds the last information that caused timeout
+ attention. bits [19:0] - address; [22:20] function; [23] reserved;
+ [27:24] the master that caused the attention - according to the following
+ encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+ access that does not finish within
+ ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+ cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+ assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+ the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+ 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+ (reset value 001) Charge pump current control; 111 for 720u; 011 for
+ 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+ Global bias control; When bit 7 is high bias current will be 10 0gh; When
+ bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+ Pll_observe (reset value 010) Bits to control observability. bit 10 is
+ for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+ (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+ and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+ sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+ internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+ connected to RESET input directly. [15] capRetry_en (reset value 0)
+ enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+ value 0) bit to continuously monitor vco freq (inverted). [17]
+ freqDetRestart_en (reset value 0) bit to enable restart when not freq
+ locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+ retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+ 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+ pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+ (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+ 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+ bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+ enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+ capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+ restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+ register bits. */
+#define MISC_REG_LCPLL_CTRL_1 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
+#define MISC_REG_NIG_WOL_P0 0xa270
+#define MISC_REG_NIG_WOL_P1 0xa274
+/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
+ assertion */
+#define MISC_REG_PCIE_HOT_RESET 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+ divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+ divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+ divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+ divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+ freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+ (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+ 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+ Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+ value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+ 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+ [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+ Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+ testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+ testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+ testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN 0xa750
+/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output . */
+#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
+ write/read zero = the specific block is in reset; addr 0-wr- the write
+ value will be written to the register; addr 1-set - one will be written
+ to all the bits that have the value of one in the data written (bits that
+ have the value of zero will not be changed); addr 2-clear - zero will be
+ written to all the bits that have the value of one in the data written
+ (bits that have the value of zero will not be changed); addr 3-ignore;
+ read ignore from all addr except addr 00; inside order of the bits is:
+ [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
+ [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
+ rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+ [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+ Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; [16]
+ rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_2 0xa590
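The wr/set/clear addressing described above means a block can be taken out of reset with a single one-hot write. The sketch below assumes the "set" address is base + 4 and the "clear" address base + 8 (one 32-bit word apart); that spacing and the helper names are assumptions for illustration.

/* Bring EMAC0 (bit [2] rst_emac0) out of reset via the assumed "set" address. */
static void example_emac0_out_of_reset(struct bnx2x *bp)
{
        REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
}

/* Put it back into reset via the assumed "clear" address. */
static void example_emac0_into_reset(struct bnx2x *bp)
{
        REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);
}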
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+ shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+ the corresponding SPIO bit will turn off its drivers and become an
+ input. This is the reset state of all SPIO pins. The read value of these
+ bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+ bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+ is written as a '1'; the corresponding SPIO bit will drive low. The read
+ value of these bits will be a '1' if that last command (#SET; #CLR; or
+ #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+ these bits is written as a '1'; the corresponding SPIO bit will drive
+ high (if it has that capability). The read value of these bits will be a
+ '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+ (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+ each of the eight SPIO pins. This is the result value of the pin; not the
+ drive value. Writing these bits will have no effect. Each 8-bit field
+ is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+ from VAUX. (This is an output pin only; the FLOAT field is not applicable
+ for this pin); [1] VAUX Disable; when pulsed low; disables supply from
+ VAUX. (This is an output pin only; FLOAT field is not applicable for this
+ pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+ select VAUX supply. (This is an output pin only; it is not controlled by
+ the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+ field is not applicable for this pin; only the VALUE field is relevant -
+ it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+ Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+ device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
+ according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+ [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+ corresponding bit in the #OLD_VALUE register. This will acknowledge an
+ interrupt on the falling edge of corresponding SPIO input (reset value
+ 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
+ in the #OLD_VALUE register. This will acknowledge an interrupt on the
+ rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
+ RO; These bits indicate the old value of the SPIO input value. When the
+ ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+ that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+ to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+ interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+ RO; These bits indicate the current SPIO interrupt state for each SPIO
+ pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+ command bit is written. This bit is set when the SPIO input does not
+ match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT 0xa500
+/* [RW 32] reload value for counter 4 if reload; the value will be reloaded if
+ the counter reached zero and the reload bit
+ (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
+/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
+ timer 8 */
+#define MISC_REG_SW_TIMER_VAL 0xa5c0
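Since the eight timer values sit at consecutive 32-bit addresses (address 0 holds timer 1), reading a given timer is just an offset calculation; a small illustrative helper, not the driver's own code:

/* Read the current value of software timer 'timer' (1..8). */
static u32 example_read_sw_timer(struct bnx2x *bp, int timer)
{
        return REG_RD(bp, MISC_REG_SW_TIMER_VAL + 4 * (timer - 1));
}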
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+ path_swap output is equal to 2 port mode path swap input pin; if it is 1
+ - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+ loaded; 0-prepare; 1-unprepare */
+#define MISC_REG_UNPREPARED 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+ side. This should be less than or equal to phy_port_mode; if some of the
+ ports are not used. This enables reduction of frequency on the core side.
+ This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 -
+ Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap
+ input for the XMAC_MP core; and should be changed only while reset is
+ held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+ Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode;
+ 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the
+ XMAC_MP core; and should be changed only while reset is held low. Reset
+ on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+ error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+ [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+ packets from PBF are not forwarded to the MAC and just deleted from FIFO.
+ First packet may be deleted from the middle. And last packet will be
+ always deleted till the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packet outputs
+ to emac for port0; other way to bmac for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+ EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+ be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC0 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+ are described in appendix A. In order to access the BMAC1 registers; the
+ base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+ added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM 0x11000
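Per the two comments above, a BMAC register is addressed by adding its offset to the per-port memory base; a trivial illustrative helper (the offset argument is a placeholder for one of the BMAC register offsets from appendix A):

/* Compute the GRC address of a BMAC register for the given port. */
static u32 example_bmac_reg_addr(int port, u32 bmac_offset)
{
        u32 base = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM;

        return base + bmac_offset;
}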
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+ packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch
+ logic for interrupts must be used. Enable per bit of interrupt of
+ ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0 0x16210
+/* [RW 27] Latch for each interrupt from Unicore.b[0]
+ status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+ b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+ b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+ b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+ b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+ b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+ b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+ b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+ b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+ b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+ b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+ b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+ defined below. If this bit is cleared; then the blink rate will be about
+ 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+ Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
+ is reset to 0x080; giving a default blink rate of approximately 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+ bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+ bit; the Traffic LED will blink with the blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+ Traffic LED will then be controlled via bit ~nig_registers_
+ led_control_traffic_p0.led_control_traffic_p0 and bit
+ ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+ turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+ set; the LED will blink with blink rate specified in
+ ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+ ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+ fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300
+/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8 MAC3;
+ 9-11 PHY7; 12 MAC4; 13-15 PHY10; */
+#define NIG_REG_LED_MODE_P0 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1-
+ tsdm enable; b2- usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074
+/* [RW 1] SAFC enable for port0. This register may get 1 only when
+ ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
+ port */
+#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
+/* [RW 16] classes are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
+/* [RW 16] classes are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0
+/* [RW 16] destination TCP address 1. The LLH will look for this address in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 0x10220
+/* [RW 16] destination UDP address 1. The LLH will look for this address in
+ all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID 0x10084
+#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
+/* [RW 1] Determine the IP version to look for in
+ ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT 0x10074
+/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
+#define NIG_REG_LLH0_VLAN_ID_0 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
+#define NIG_REG_LLH0_XCM_MASK 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification.
+ 1: classification upon VLAN id. 2: classification upon MAC address. 3:
+ classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
+#define NIG_REG_LLH1_XCM_MASK 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+ e1hov */
+#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+ EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0 0x103b0
+#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
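Building the eight 4-bit fields from a priority-to-COS table is a simple packing loop; an illustrative sketch (priority 0 lands in bits 3:0 and priority 7 in bits 31:28, as the comment states; the helper name and table are placeholders):

/* Program the port-0 VLAN-priority to COS mapping from an 8-entry table. */
static void example_set_prio_to_cos(struct bnx2x *bp, const u8 cos_of_prio[8])
{
        u32 val = 0;
        int prio;

        for (prio = 0; prio < 8; prio++)
                val |= (u32)(cos_of_prio[prio] & 0xf) << (4 * prio);

        REG_WR(bp, NIG_REG_P0_PKT_PRIORITY_TO_COS, val);
}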
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
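Each of the masks above carries one bit per PFC/SAFC priority, so a priority-to-COS assignment is expressed by setting the priority's bit in exactly one of them. An illustrative configuration only; the 0-1/2-7 split is an arbitrary example, not a recommended policy:

/* Map priorities 0-1 to COS 0 and priorities 2-7 to COS 1 on port 0. */
static void example_map_pfc_priorities(struct bnx2x *bp)
{
        REG_WR(bp, NIG_REG_P0_RX_COS0_PRIORITY_MASK, 0x03);
        REG_WR(bp, NIG_REG_P0_RX_COS1_PRIORITY_MASK, 0xfc);
}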
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
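Because the 36-bit map is split across the LSB and MSB registers, clients 0-7 are packed into the LSB word and client 8's nibble goes into the MSB word. A hedged sketch of that packing (helper name and input table are illustrative):

/* Program the port-0 client-to-credit-register map from a 9-entry table. */
static void example_set_credit_map(struct bnx2x *bp, const u8 credit_reg[9])
{
        u32 lsb = 0;
        int client;

        for (client = 0; client < 8; client++)
                lsb |= (u32)(credit_reg[client] & 0xf) << (4 * client);

        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, lsb);
        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, credit_reg[8] & 0xf);
}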
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
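As a worked decode of the reset value: 0x4688 is 100_011_010_001_000 in binary, so priority 0 maps to client 0 (management) through priority 4 to client 4 (COS1 traffic), matching the comment. Extracting one 3-bit field, as an illustration only:

/* Return the client ID assigned to 'prio' (0..4) in the 15-bit map. */
static int example_client_of_priority(u32 prio_client_map, int prio)
{
        return (prio_client_map >> (3 * prio)) & 0x7;
}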
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ two round-robin arbitration slots to avoid starvation. A value of 0 means
+ no strict priority cycles - the strict priority with anti-starvation
+ arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35:32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
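The port 1 ETS arbiter registers above work as a set: the IS_STRICT and IS_SUBJECT2WFQ bit-maps select how each client arbitrates, the CREDIT_WEIGHT/CREDIT_UPPER_BOUND registers parameterise the WFQ credits, and NUM_STRICT_ARB_SLOTS bounds strict-priority starvation. The sketch below only illustrates that grouping; every value in it is a placeholder and the helper name is made up.

/* Hypothetical port 1 ETS setup: management and both debug clients (IDs
 * 0-2) stay strict, COS0-COS2 (IDs 3-5) are WFQ credit blocked with a
 * 1:2:3 weight ratio. Values are examples only.
 */
static void example_p1_nig_ets(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x007);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x038);

	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0, 100);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1, 200);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2, 300);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0, 0x1000);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1, 0x1000);
	REG_WR(bp, NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2, 0x1000);

	/* a few strict slots between round-robin slots to avoid starvation */
	REG_WR(bp, NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 8);
}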
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+ forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 1] Pause enable for port0. This register may get 1 only when
+ ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
+ port */
+#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+ ~nig_registers_strap_override.strap_override =1 */
+#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
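As the comments above note, plain pause (PAUSE_ENABLE) and PPP/PFC (PPP_ENABLE) are mutually exclusive per port, so one must be cleared before the other is set. A hypothetical switch of port 0 from PFC to plain pause, assuming SAFC is already disabled:

static void example_p0_switch_to_pause(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_PPP_ENABLE_0, 0);	/* PFC off first */
	REG_WR(bp, NIG_REG_PAUSE_ENABLE_0, 1);	/* then pause on */
}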
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+ for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
+ are between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
+ are 1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+ for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
+ are between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that
+ are 1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
+/* [RW 1] port swap mux selection. If this register is equal to 0 then port
+ swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+ port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE 0x10398
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+ current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
+#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is a don't-care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is a don't-care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, and it is the one to which the RR output is
+ * connected (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest, and it is the one to which the RR output is
+ * connected (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset, strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
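The PBF command arbiter mirrors the NIG scheme: the client bit-maps pick strict versus WFQ arbitration, the COSn_WEIGHT/UPPER_BOUND registers parameterise the credits, and ETS_ENABLED turns the mode on. The sketch below is only meant to show how the port 0 registers above fit together; the numbers are placeholders and the helper name is invented.

/* Hypothetical port 0 PBF ETS setup: COS0 and COS1 under WFQ with a 1:2
 * weight ratio, no strict clients, eight strict slots per RR slot.
 */
static void example_p0_pbf_ets(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0x3);

	REG_WR(bp, PBF_REG_COS0_WEIGHT_P0, 100);
	REG_WR(bp, PBF_REG_COS1_WEIGHT_P0, 200);
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND_P0, 0x1000);

	REG_WR(bp, PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 8);
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
}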
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
+#define PBF_REG_IF_ENABLE_REG 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+ registers (except the port credits). Should be set and then reset after
+ the configuration of the block has ended. */
+#define PBF_REG_INIT 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P0 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P1 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+ copied to the credit register. Should be set and then reset after the
+ configuration of the port has ended. */
+#define PBF_REG_INIT_P4 0x14000c
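The init bits above follow a common pattern: program the initial credit, pulse the init bit so the value is copied into the live credit register, then clear it again. A minimal sketch for port 0 (PBF_REG_P0_INIT_CRD is defined further down in this file; the same pulse applies to PBF_REG_INIT, PBF_REG_INIT_P1 and PBF_REG_INIT_P4, and the credit value here is just an example):

static void example_pbf_init_port0_credit(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_P0_INIT_CRD, 0x100);	/* example initial credit */
	REG_WR(bp, PBF_REG_INIT_P0, 1);		/* latch it into the credit register */
	REG_WR(bp, PBF_REG_INIT_P0, 0);		/* and release the init bit */
}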
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
+/* [RW 10] Port 0 threshold used by the arbiter, in 16 byte lines, when pause
+ is not supported. */
+#define PBF_REG_P0_ARB_THRSH 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P0_INIT_CRD 0x1400d0
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD 0x1400d4
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+ lines. */
+#define PBF_REG_P4_INIT_CRD 0x1400e0
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
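The read-only credit and occupancy registers above pair naturally with the INIT_CRD registers: a queue can be treated as drained once its task queue occupancy is zero and its current credit has climbed back to the initial credit. The helper below is a hypothetical sketch of that check for queue 0, not driver code.

static bool example_pbf_q0_drained(struct bnx2x *bp)
{
	u32 occupancy = REG_RD(bp, PBF_REG_TQ_OCCUPANCY_Q0);
	u32 credit = REG_RD(bp, PBF_REG_CREDIT_Q0);
	u32 init_crd = REG_RD(bp, PBF_REG_INIT_CRD_Q0);

	/* drained: no queued task lines and all credit returned */
	return occupancy == 0 && credit == init_crd;
}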
+#define PB_REG_CONTROL 0
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
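Since RX_ERR_DETAILS packs several fields into one word, reading it usually means unpacking the bit ranges listed in the comment. The snippet below is a hypothetical decode helper, not driver code; the field masks simply follow the layout described above.

static void example_decode_rx_err_details(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PGLUE_B_REG_RX_ERR_DETAILS);

	if (!(val & (1 << 12)))		/* bit 12: a completion error was latched */
		return;

	pr_info("pglue rx err: pfid %u vf_valid %u vfid %u err_code %u\n",
		val & 0x7,		/* [2:0]  PFID */
		(val >> 3) & 0x1,	/* [3]    VF_VALID */
		(val >> 4) & 0x3f,	/* [9:4]  VFID */
		(val >> 10) & 0x3);	/* [11:10] error code */
}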
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
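All of the PGLUE_B attention registers above come in dirty-bit/clear pairs: PXP sets a bit, and it is cleared by writing 1 to the same position in the matching *_CLR register (which the comments describe as the MCP's job). Purely as an illustration of that pairing, a hypothetical acknowledge of the PF was_error bits could look like this:

static void example_ack_pf_was_error(struct bnx2x *bp)
{
	u32 dirty = REG_RD(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0);

	if (dirty)	/* write-1-to-clear the bits that are set */
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, dirty);
}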
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
+#define PRS_REG_A_PRSU_20 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+ Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is reset and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+ load response is set and packet type is 0. Used in packet start message
+ to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+ Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+ message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0 0x40078
+#define PRS_REG_CM_HDR_TYPE_1 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2 0x40080
+#define PRS_REG_CM_HDR_TYPE_3 0x40084
+#define PRS_REG_CM_HDR_TYPE_4 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+ start message to TCM. */
+#define PRS_REG_EVENT_ID_1 0x40054
+#define PRS_REG_EVENT_ID_2 0x40058
+#define PRS_REG_EVENT_ID_3 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+ load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
+#define PRS_REG_NIC_MODE 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+ connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation because it
+ could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+ packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+ request message */
+#define PRS_REG_PURE_REGIONS 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+ serial number was released by SDM but cannot be used because a previous
+ serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in this register */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
+#define PXP2_REG_PGL_CONTROL0 0x120490
+#define PXP2_REG_PGL_CONTROL1 0x120514
+#define PXP2_REG_PGL_DEBUG 0x120520
+/* [RW 32] third dword data of expansion rom request. this register is
+ special. reading from it provides a vector of outstanding read requests. if
+ a bit is zero it means that a read request on the corresponding tag did
+ not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2 0x120808
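Together with PGLUE_B_REG_TAGS_63_32 (defined earlier for tags 32-63), reading this register yields a bit-map of read tags where a zero bit means that tag's completions have not all arrived. A hedged sketch of waiting until every tag is free; the retry count and sleep interval are arbitrary and the helper name is invented.

static bool example_wait_read_tags_idle(struct bnx2x *bp)
{
	int retries = 1000;

	while (retries--) {
		/* all-ones in both vectors: every read tag is unused */
		if (REG_RD(bp, PXP2_REG_PGL_EXP_ROM2) == 0xffffffff &&
		    REG_RD(bp, PGLUE_B_REG_TAGS_63_32) == 0xffffffff)
			return true;
		usleep_range(1000, 2000);
	}
	return false;
}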
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+ bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0
+/* [RW 3] this field allows one function to pretend being another function
+ when accessing any BAR mapped resource within the device. the value of
+ the field is the number of the function that will be accessed
+ effectively. after software writes to this field it must read it back in
+ order to know that the new value has been updated */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690
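Per the comment above, the pretend value only becomes effective once it has been read back. A minimal sketch for function 0; the helper name is invented and no cleanup (restoring the function's own number) is shown:

static void example_pretend_func0_as(struct bnx2x *bp, u32 pretend_func_num)
{
	REG_WR(bp, PXP2_REG_PGL_PRETEND_FUNC_F0, pretend_func_num);
	REG_RD(bp, PXP2_REG_PGL_PRETEND_FUNC_F0);	/* read back to apply */
}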
+/* [R 1] this bit indicates that a read request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS 0x12052c
+/* [R 1] this bit indicates that a write request was blocked because
+ bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28 0x120318
+#define PXP2_REG_PSWRQ_BW_L3 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9 0x120258
+#define PXP2_REG_PSWRQ_BW_WR 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+ indication about backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+ Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+ allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+ be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read requests - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620
+/* [RW 3] page size in L2P table for CDU module; 4k; 8k; 16k; 32k; 64k;
+ 128k */
+#define PXP2_REG_RQ_CDU_P_SIZE 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+ configuration */
+#define PXP2_REG_RQ_CFG_DONE 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+ towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when reset
+ * the original alignment method (E1 E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
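+/*
+ * Illustrative sketch (not part of the original register list): converting
+ * the PXP2_REG_RQ_DRAM_ALIGN/_RD encoding above (0 - 8B; 1 - 64B; 2 - 128B;
+ * 3 - 256B; 4 - 512B) into a byte count.  The helper name is made up for
+ * this example; u32 comes from <linux/types.h>, which is visible wherever
+ * this header is used.
+ */
+static inline u32 pxp2_dram_align_to_bytes(u32 align_code)
+{
+	/* 0 maps to 8B; 1..4 map to 64B, 128B, 256B, 512B (32B << code) */
+	return align_code ? (32U << align_code) : 8U;
+}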
+/* [RW 1] If 1; an ILT failure will not result in ELT access; an interrupt
+ will be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
+/* [RW 1] when '0' the ILT logic will work as in A0; otherwise B0; for
+ backward compatibility needs; Note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 0x120638
+/* [RW 3] page size in L2P table for QM module; 4k; 8k; 16k; 32k; 64k;
+ 128k */
+#define PXP2_REG_RQ_QM_P_SIZE 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1 0x120168
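+/*
+ * Illustrative sketch (not part of the original register list): decoding the
+ * 3-bit max-burst-size field above into bytes.  The 011/100/101 steps are
+ * inferred from the doubling pattern in the comment; the helper name is made
+ * up for this example.
+ */
+static inline u32 pxp2_rq_mbs_to_bytes(u32 mbs_code)
+{
+	/* 000 - 128B; 001 - 256B; 010 - 512B; ... 101 - 4K */
+	return 128U << mbs_code;
+}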
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640
+/* [RW 3] page size in L2P table for SRC module; 4k; 8k; 16k; 32k; 64k;
+ 128k */
+#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 0x120648
+/* [RW 3] page size in L2P table for TM module; 4k; 8k; 16k; 32k; 64k;
+ 128k */
+#define PXP2_REG_RQ_TM_P_SIZE 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo has l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+ 001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS 0x1205f0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS 0x1205ec
+/* [RW 10] If the number of entries in the dmae fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be greater than the write MBS size */
+#define PXP2_REG_WR_DMAE_TH 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS 0x1205dc
+/* [RW 1] 0 - working in A0 mode; - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS 0x1205d4
+/* [RW 10] If the number of entries in the usdmdp fifo is higher than this
+ threshold then the has_payload indication will be asserted; the default
+ value should be greater than the write MBS size */
+#define PXP2_REG_WR_USDMDP_TH 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+ buffer reaches this number; has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS 0x1205d8
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+ this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to the doorbell queue
+ block. Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
+ should be updated according to the 'hst_discard_doorbells' register when the
+ state machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used for closing the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+ means this PSWHST is discarding inputs from this client. Each bit should
+ be updated according to the 'hst_discard_internal_writes' register when the
+ state machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0 0x103074
+#define PXP_REG_PXP_INT_MASK_1 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0 0x103068
+#define PXP_REG_PXP_INT_STS_1 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+ request */
+#define QM_REG_ACTCTRINITVAL_0 0x168040
+#define QM_REG_ACTCTRINITVAL_1 0x168044
+#define QM_REG_ACTCTRINITVAL_2 0x168048
+#define QM_REG_ACTCTRINITVAL_3 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero; so practically there are only 20 bits in this register;
+ queues 63-0 */
+#define QM_REG_BASEADDR 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+ index I represents the physical queue number. The 12 lsbs are ignored and
+ considered zero; so practically there are only 20 bits in this register;
+ queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A 0x16e100
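+/*
+ * Illustrative sketch (not part of the original register list): programming
+ * the per-queue base address described above.  Assumes the REG_WR() accessor
+ * and struct bnx2x from bnx2x.h are visible at this point; the helper name
+ * and the 4-byte register stride per queue index are assumptions made for
+ * this example.
+ */
+static inline void qm_set_queue_baseaddr(struct bnx2x *bp, u32 q, u32 base)
+{
+	u32 reg = (q < 64) ? QM_REG_BASEADDR : QM_REG_BASEADDR_EXT_A;
+
+	/* the 12 lsbs are ignored by the hardware, so force 4KB alignment */
+	REG_WR(bp, reg + 4 * (q & 63), base & ~0xfffU);
+}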
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+ queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+ full */
+#define QM_REG_BYTECREDITAFULLTHR 0x168094
+/* [RW 4] The initial credit for interface */
+#define QM_REG_CMINITCRD_0 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
+#define QM_REG_CMINITCRD_1 0x1680d0
+#define QM_REG_CMINITCRD_2 0x1680d4
+#define QM_REG_CMINITCRD_3 0x1680d8
+#define QM_REG_CMINITCRD_4 0x1680dc
+#define QM_REG_CMINITCRD_5 0x1680e0
+#define QM_REG_CMINITCRD_6 0x1680e4
+#define QM_REG_CMINITCRD_7 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+ is masked */
+#define QM_REG_CMINTEN 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+ interface 0 */
+#define QM_REG_CMINTVOQMASK_0 0x1681f4
+#define QM_REG_CMINTVOQMASK_1 0x1681f8
+#define QM_REG_CMINTVOQMASK_2 0x1681fc
+#define QM_REG_CMINTVOQMASK_3 0x168200
+#define QM_REG_CMINTVOQMASK_4 0x168204
+#define QM_REG_CMINTVOQMASK_5 0x168208
+#define QM_REG_CMINTVOQMASK_6 0x16820c
+#define QM_REG_CMINTVOQMASK_7 0x168210
+/* [RW 20] The number of connections divided by 16; which dictates the size
+ of each queue that belongs to an even function number. */
+#define QM_REG_CONNNUM_0 0x168020
+/* [R 6] Keep the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 0x168030
+#define QM_REG_CTXREG_1 0x168034
+#define QM_REG_CTXREG_2 0x168038
+#define QM_REG_CTXREG_3 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+ bypass enable */
+#define QM_REG_ENBYPVOQMASK 0x16823c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB 0x168220
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB 0x16821c
+/* [RW 32] A bit mask per each physical queue. If a bit is set then the
+ physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+ RR arbiter */
+#define QM_REG_ENSEC 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 31:0 */
+#define QM_REG_HWAEMPTYMASK_LSB 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 63:32 */
+#define QM_REG_HWAEMPTYMASK_MSB 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+ be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c
+/* [RW 4] The number of outstanding request to CFC */
+#define QM_REG_OUTLDREQ 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+ queues. */
+#define QM_REG_OVFERROR 0x16805c
+/* [RC 7] the Q where the overflow occurs */
+#define QM_REG_OVFQNUM 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7 0x16e698
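+/*
+ * Illustrative sketch (not part of the original register list): testing the
+ * pause state of one physical queue.  Each QM_REG_PAUSESTATE* register above
+ * covers 16 queues.  Bit ordering (lowest queue in bit 0) and the use of the
+ * REG_RD() accessor from bnx2x.h are assumptions made for this example, as
+ * is the helper name.
+ */
+static inline bool qm_queue_is_paused(struct bnx2x *bp, u32 q)
+{
+	static const u32 pausestate_reg[8] = {
+		QM_REG_PAUSESTATE0, QM_REG_PAUSESTATE1,
+		QM_REG_PAUSESTATE2, QM_REG_PAUSESTATE3,
+		QM_REG_PAUSESTATE4, QM_REG_PAUSESTATE5,
+		QM_REG_PAUSESTATE6, QM_REG_PAUSESTATE7,
+	};
+
+	return !!(REG_RD(bp, pausestate_reg[(q >> 4) & 0x7]) & (1 << (q & 0xf)));
+}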
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 0x168054
+#define QM_REG_PF_EN 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (registers with an odd index I are hard-wired
+ * to 0) */
+#define QM_REG_PF_USG_CNT_0 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follows:
+ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+ bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follows:
+ ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read
+ bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */
+#define QM_REG_PTRTBL_EXT_A 0x16e200
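+/*
+ * Illustrative sketch (not part of the original register list): unpacking one
+ * 54-bit pointer-table entry using the field layout documented above.  The
+ * struct and helper names are made up for this example; the raw entry value
+ * itself would come from a wide-bus read of QM_REG_PTRTBL/_EXT_A.
+ */
+struct qm_ptrtbl_entry {
+	u32 rd_ptr;	/* ptrtbl[53:30] */
+	u32 wr_ptr;	/* ptrtbl[29:6] */
+	u8 rd_bank0;	/* ptrtbl[5:4] */
+	u8 rd_bank1;	/* ptrtbl[3:2] */
+	u8 wr_bank;	/* ptrtbl[1:0] */
+};
+
+static inline void qm_decode_ptrtbl_entry(u64 val, struct qm_ptrtbl_entry *e)
+{
+	e->rd_ptr = (val >> 30) & 0xffffff;
+	e->wr_ptr = (val >> 6) & 0xffffff;
+	e->rd_bank0 = (val >> 4) & 0x3;
+	e->rd_bank1 = (val >> 2) & 0x3;
+	e->wr_bank = val & 0x3;
+}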
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0 0x1680f4
+#define QM_REG_QVOQIDX_10 0x16811c
+#define QM_REG_QVOQIDX_100 0x16e49c
+#define QM_REG_QVOQIDX_101 0x16e4a0
+#define QM_REG_QVOQIDX_102 0x16e4a4
+#define QM_REG_QVOQIDX_103 0x16e4a8
+#define QM_REG_QVOQIDX_104 0x16e4ac
+#define QM_REG_QVOQIDX_105 0x16e4b0
+#define QM_REG_QVOQIDX_106 0x16e4b4
+#define QM_REG_QVOQIDX_107 0x16e4b8
+#define QM_REG_QVOQIDX_108 0x16e4bc
+#define QM_REG_QVOQIDX_109 0x16e4c0
+#define QM_REG_QVOQIDX_11 0x168120
+#define QM_REG_QVOQIDX_110 0x16e4c4
+#define QM_REG_QVOQIDX_111 0x16e4c8
+#define QM_REG_QVOQIDX_112 0x16e4cc
+#define QM_REG_QVOQIDX_113 0x16e4d0
+#define QM_REG_QVOQIDX_114 0x16e4d4
+#define QM_REG_QVOQIDX_115 0x16e4d8
+#define QM_REG_QVOQIDX_116 0x16e4dc
+#define QM_REG_QVOQIDX_117 0x16e4e0
+#define QM_REG_QVOQIDX_118 0x16e4e4
+#define QM_REG_QVOQIDX_119 0x16e4e8
+#define QM_REG_QVOQIDX_12 0x168124
+#define QM_REG_QVOQIDX_120 0x16e4ec
+#define QM_REG_QVOQIDX_121 0x16e4f0
+#define QM_REG_QVOQIDX_122 0x16e4f4
+#define QM_REG_QVOQIDX_123 0x16e4f8
+#define QM_REG_QVOQIDX_124 0x16e4fc
+#define QM_REG_QVOQIDX_125 0x16e500
+#define QM_REG_QVOQIDX_126 0x16e504
+#define QM_REG_QVOQIDX_127 0x16e508
+#define QM_REG_QVOQIDX_13 0x168128
+#define QM_REG_QVOQIDX_14 0x16812c
+#define QM_REG_QVOQIDX_15 0x168130
+#define QM_REG_QVOQIDX_16 0x168134
+#define QM_REG_QVOQIDX_17 0x168138
+#define QM_REG_QVOQIDX_21 0x168148
+#define QM_REG_QVOQIDX_22 0x16814c
+#define QM_REG_QVOQIDX_23 0x168150
+#define QM_REG_QVOQIDX_24 0x168154
+#define QM_REG_QVOQIDX_25 0x168158
+#define QM_REG_QVOQIDX_26 0x16815c
+#define QM_REG_QVOQIDX_27 0x168160
+#define QM_REG_QVOQIDX_28 0x168164
+#define QM_REG_QVOQIDX_29 0x168168
+#define QM_REG_QVOQIDX_30 0x16816c
+#define QM_REG_QVOQIDX_31 0x168170
+#define QM_REG_QVOQIDX_32 0x168174
+#define QM_REG_QVOQIDX_33 0x168178
+#define QM_REG_QVOQIDX_34 0x16817c
+#define QM_REG_QVOQIDX_35 0x168180
+#define QM_REG_QVOQIDX_36 0x168184
+#define QM_REG_QVOQIDX_37 0x168188
+#define QM_REG_QVOQIDX_38 0x16818c
+#define QM_REG_QVOQIDX_39 0x168190
+#define QM_REG_QVOQIDX_40 0x168194
+#define QM_REG_QVOQIDX_41 0x168198
+#define QM_REG_QVOQIDX_42 0x16819c
+#define QM_REG_QVOQIDX_43 0x1681a0
+#define QM_REG_QVOQIDX_44 0x1681a4
+#define QM_REG_QVOQIDX_45 0x1681a8
+#define QM_REG_QVOQIDX_46 0x1681ac
+#define QM_REG_QVOQIDX_47 0x1681b0
+#define QM_REG_QVOQIDX_48 0x1681b4
+#define QM_REG_QVOQIDX_49 0x1681b8
+#define QM_REG_QVOQIDX_5 0x168108
+#define QM_REG_QVOQIDX_50 0x1681bc
+#define QM_REG_QVOQIDX_51 0x1681c0
+#define QM_REG_QVOQIDX_52 0x1681c4
+#define QM_REG_QVOQIDX_53 0x1681c8
+#define QM_REG_QVOQIDX_54 0x1681cc
+#define QM_REG_QVOQIDX_55 0x1681d0
+#define QM_REG_QVOQIDX_56 0x1681d4
+#define QM_REG_QVOQIDX_57 0x1681d8
+#define QM_REG_QVOQIDX_58 0x1681dc
+#define QM_REG_QVOQIDX_59 0x1681e0
+#define QM_REG_QVOQIDX_6 0x16810c
+#define QM_REG_QVOQIDX_60 0x1681e4
+#define QM_REG_QVOQIDX_61 0x1681e8
+#define QM_REG_QVOQIDX_62 0x1681ec
+#define QM_REG_QVOQIDX_63 0x1681f0
+#define QM_REG_QVOQIDX_64 0x16e40c
+#define QM_REG_QVOQIDX_65 0x16e410
+#define QM_REG_QVOQIDX_69 0x16e420
+#define QM_REG_QVOQIDX_7 0x168110
+#define QM_REG_QVOQIDX_70 0x16e424
+#define QM_REG_QVOQIDX_71 0x16e428
+#define QM_REG_QVOQIDX_72 0x16e42c
+#define QM_REG_QVOQIDX_73 0x16e430
+#define QM_REG_QVOQIDX_74 0x16e434
+#define QM_REG_QVOQIDX_75 0x16e438
+#define QM_REG_QVOQIDX_76 0x16e43c
+#define QM_REG_QVOQIDX_77 0x16e440
+#define QM_REG_QVOQIDX_78 0x16e444
+#define QM_REG_QVOQIDX_79 0x16e448
+#define QM_REG_QVOQIDX_8 0x168114
+#define QM_REG_QVOQIDX_80 0x16e44c
+#define QM_REG_QVOQIDX_81 0x16e450
+#define QM_REG_QVOQIDX_85 0x16e460
+#define QM_REG_QVOQIDX_86 0x16e464
+#define QM_REG_QVOQIDX_87 0x16e468
+#define QM_REG_QVOQIDX_88 0x16e46c
+#define QM_REG_QVOQIDX_89 0x16e470
+#define QM_REG_QVOQIDX_9 0x168118
+#define QM_REG_QVOQIDX_90 0x16e474
+#define QM_REG_QVOQIDX_91 0x16e478
+#define QM_REG_QVOQIDX_92 0x16e47c
+#define QM_REG_QVOQIDX_93 0x16e480
+#define QM_REG_QVOQIDX_94 0x16e484
+#define QM_REG_QVOQIDX_95 0x16e488
+#define QM_REG_QVOQIDX_96 0x16e48c
+#define QM_REG_QVOQIDX_97 0x16e490
+#define QM_REG_QVOQIDX_98 0x16e494
+#define QM_REG_QVOQIDX_99 0x16e498
+/* [RW 1] Initialization bit command */
+#define QM_REG_SOFT_RESET 0x168428
+/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */
+#define QM_REG_TASKCRDCOST_0 0x16809c
+#define QM_REG_TASKCRDCOST_1 0x1680a0
+#define QM_REG_TASKCRDCOST_2 0x1680a4
+#define QM_REG_TASKCRDCOST_4 0x1680ac
+#define QM_REG_TASKCRDCOST_5 0x1680b0
+/* [R 6] Keep the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL 0x168010
+/* [R 6] Keep the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0 0x1682d0
+#define QM_REG_VOQCREDIT_1 0x1682d4
+#define QM_REG_VOQCREDIT_4 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR 0x168090
+/* [RW 16] The init and maximum credit for each VoQ */
+#define QM_REG_VOQINITCREDIT_0 0x168060
+#define QM_REG_VOQINITCREDIT_1 0x168064
+#define QM_REG_VOQINITCREDIT_2 0x168068
+#define QM_REG_VOQINITCREDIT_4 0x168070
+#define QM_REG_VOQINITCREDIT_5 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0 0x1682a0
+#define QM_REG_VOQPORT_1 0x1682a4
+#define QM_REG_VOQPORT_2 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570
+/* [RW 32] WRR weights */
+#define QM_REG_WRRWEIGHTS_0 0x16880c
+#define QM_REG_WRRWEIGHTS_1 0x168810
+#define QM_REG_WRRWEIGHTS_10 0x168814
+#define QM_REG_WRRWEIGHTS_11 0x168818
+#define QM_REG_WRRWEIGHTS_12 0x16881c
+#define QM_REG_WRRWEIGHTS_13 0x168820
+#define QM_REG_WRRWEIGHTS_14 0x168824
+#define QM_REG_WRRWEIGHTS_15 0x168828
+#define QM_REG_WRRWEIGHTS_16 0x16e000
+#define QM_REG_WRRWEIGHTS_17 0x16e004
+#define QM_REG_WRRWEIGHTS_18 0x16e008
+#define QM_REG_WRRWEIGHTS_19 0x16e00c
+#define QM_REG_WRRWEIGHTS_2 0x16882c
+#define QM_REG_WRRWEIGHTS_20 0x16e010
+#define QM_REG_WRRWEIGHTS_21 0x16e014
+#define QM_REG_WRRWEIGHTS_22 0x16e018
+#define QM_REG_WRRWEIGHTS_23 0x16e01c
+#define QM_REG_WRRWEIGHTS_24 0x16e020
+#define QM_REG_WRRWEIGHTS_25 0x16e024
+#define QM_REG_WRRWEIGHTS_26 0x16e028
+#define QM_REG_WRRWEIGHTS_27 0x16e02c
+#define QM_REG_WRRWEIGHTS_28 0x16e030
+#define QM_REG_WRRWEIGHTS_29 0x16e034
+#define QM_REG_WRRWEIGHTS_3 0x168830
+#define QM_REG_WRRWEIGHTS_30 0x16e038
+#define QM_REG_WRRWEIGHTS_31 0x16e03c
+#define QM_REG_WRRWEIGHTS_4 0x168834
+#define QM_REG_WRRWEIGHTS_5 0x168838
+#define QM_REG_WRRWEIGHTS_6 0x16883c
+#define QM_REG_WRRWEIGHTS_7 0x168840
+#define QM_REG_WRRWEIGHTS_8 0x168844
+#define QM_REG_WRRWEIGHTS_9 0x168848
+/* [R 6] Keep the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
+#define SRC_REG_COUNTFREE0 0x40500
+/* [RW 1] If clear; the searcher is compatible with E1 A0 - supports only two
+ ports. If set; the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE 0x404cc
+#define SRC_REG_FIRSTFREE0 0x40510
+#define SRC_REG_KEYRSS0_0 0x40408
+#define SRC_REG_KEYRSS0_7 0x40424
+#define SRC_REG_KEYRSS1_9 0x40454
+#define SRC_REG_KEYSEARCH_0 0x40458
+#define SRC_REG_KEYSEARCH_1 0x4045c
+#define SRC_REG_KEYSEARCH_2 0x40460
+#define SRC_REG_KEYSEARCH_3 0x40464
+#define SRC_REG_KEYSEARCH_4 0x40468
+#define SRC_REG_KEYSEARCH_5 0x4046c
+#define SRC_REG_KEYSEARCH_6 0x40470
+#define SRC_REG_KEYSEARCH_7 0x40474
+#define SRC_REG_KEYSEARCH_8 0x40478
+#define SRC_REG_KEYSEARCH_9 0x4047c
+#define SRC_REG_LASTFREE0 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define TCM_REG_CDU_AG_WR_IFEN 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT 0x500c0
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+ interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT 0x500bc
+/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD 0x50210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+ ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel is the
+ complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used to align to STORM context row size of 128 bits. The offset of these
+ data in the STORM context is always 0. Index _i stands for the connection
+ type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0 0x50050
+#define TCM_REG_N_SM_CTX_LD_1 0x50054
+#define TCM_REG_N_SM_CTX_LD_2 0x50058
+#define TCM_REG_N_SM_CTX_LD_3 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4 0x50060
+#define TCM_REG_N_SM_CTX_LD_5 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+ interface. */
+#define TCM_REG_PBF_LENGTH_MIS 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT 0x500b4
+#define TCM_REG_PHYS_QNUM0_0 0x500e0
+#define TCM_REG_PHYS_QNUM0_1 0x500e4
+#define TCM_REG_PHYS_QNUM1_0 0x500e8
+#define TCM_REG_PHYS_QNUM1_1 0x500ec
+#define TCM_REG_PHYS_QNUM2_0 0x500f0
+#define TCM_REG_PHYS_QNUM2_1 0x500f4
+#define TCM_REG_PHYS_QNUM3_0 0x500f8
+#define TCM_REG_PHYS_QNUM3_1 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+ interface. */
+#define TCM_REG_PRS_LENGTH_MIS 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+ interface. */
+#define TCM_REG_STORM_LENGTH_MIS 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN 0x5000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
+#define TCM_REG_TCM_TQM_USE_Q 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+ interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+ interface. */
+#define TCM_REG_USEM_LENGTH_MIS 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - length of the message; [15:6] - message
+ pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE 29
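+/*
+ * Illustrative sketch (not part of the original register list): splitting one
+ * 21-bit XX protection descriptor into the fields documented above.  The
+ * helper name is made up for this example; the raw value would come from an
+ * indirect read of TCM_REG_XX_DESCR_TABLE.
+ */
+static inline void tcm_decode_xx_descr(u32 d, u32 *len, u32 *msg_ptr,
+					u32 *next_ptr)
+{
+	*len = d & 0x3f;		/* [5:0] - message length */
+	*msg_ptr = (d >> 6) & 0x3ff;	/* [15:6] - message pointer */
+	*next_ptr = (d >> 16) & 0x1f;	/* [20:16] - next pointer */
+}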
+/* [R 6] Used to read the value of the XX protection Free counter. */
+#define TCM_REG_XX_FREE 0x50178
+/* [RW 6] Initial value for the credit counter; responsible for filling the
+ Input Stage XX protection buffer with the XX protection pending messages.
+ Max credit available - 127. Write writes the initial credit value; read
+ returns the current value of the credit counter. Must be initialized to 19
+ at start-up. */
+#define TCM_REG_XX_INIT_CRD 0x50220
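+/*
+ * Illustrative sketch (not part of the original register list): the start-up
+ * values called out in the comments above, programmed directly with REG_WR().
+ * The real driver loads these through its init tool; writing them one by one
+ * here is only meant to show the write-initial/read-current semantics of the
+ * credit counters.  Assumes REG_WR() and struct bnx2x from bnx2x.h are
+ * visible; the helper name is made up for this example.
+ */
+static inline void tcm_init_credits(struct bnx2x *bp)
+{
+	REG_WR(bp, TCM_REG_CFC_INIT_CRD, 1);	/* "initialized to 1" */
+	REG_WR(bp, TCM_REG_FIC0_INIT_CRD, 64);	/* "initialized to 64" */
+	REG_WR(bp, TCM_REG_FIC1_INIT_CRD, 64);	/* "initialized to 64" */
+	REG_WR(bp, TCM_REG_TQM_INIT_CRD, 32);	/* "initialized to 32" */
+	REG_WR(bp, TCM_REG_XX_INIT_CRD, 19);	/* "initialized to 19" */
+}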
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+ protection. */
+#define TCM_REG_XX_MAX_LL_SZ 0x50044
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~tcm_registers_xx_free.xx_free is read on read. */
+#define TCM_REG_XX_MSG_NUM 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define TCM_REG_XX_TABLE 0x50240
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION 0x164038
+/* [RW 2] Client in High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+ outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248
+#define TM_REG_LIN0_SCAN_ON 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME 0x16403c
+#define TM_REG_LIN0_VNIC_UC 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL 0x164260
+/* [RW 20] The amount of hardware cycles for each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0 0x420b8
+#define TSDM_REG_AGG_INT_T_1 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c
+#define TSDM_REG_ENABLE_IN1 0x42238
+#define TSDM_REG_ENABLE_IN2 0x4223c
+#define TSDM_REG_ENABLE_OUT1 0x42240
+#define TSDM_REG_ENABLE_OUT2 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define TSDM_REG_TIMER_TICK 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~tsem_registers_arb_element0.arb_element0 and
+ ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~tsem_registers_arb_element0.arb_element0
+ and ~tsem_registers_arb_element1.arb_element1 and
+ ~tsem_registers_arb_element2.arb_element2 and
+ ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4 0x180030
+#define TSEM_REG_ENABLE_IN 0x1800a4
+#define TSEM_REG_ENABLE_OUT 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY 0x1a0000
+/* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC0_DISABLE 0x180224
+/* [RW 1] Disables input messages from FIC1 May be updated during run_time
+ by the microcode */
+#define TSEM_REG_FIC1_DISABLE 0x180234
+/* [RW 15] Interrupt table Read and write access to it is not possible in
+ the middle of the work */
+#define TSEM_REG_INT_TABLE 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3 0x180014
+/* [RW 1] Disables input messages from the passive buffer May be updated
+ during run_time by the microcode */
+#define TSEM_REG_PAS_DISABLE 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 0x181000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define TSEM_REG_PRAM 0x1c0000
+/* [R 8] Valid sleeping threads indication have bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
+/* [RW 8] List of free threads . There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM 0x180380
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * LCID100. The RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX 0xe2000
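+/* Editor's illustrative sketch, not part of the original register map: the
+ * indirect AG-context index described above is formed from the register
+ * offset and the LCID. With reg_offset = 10 and lcid = 100 this yields
+ * 0xa64, matching the documented example; the helper name and how the
+ * resulting index is applied to the RBC access are assumptions. */
+static inline u32 ucm_ag_ctx_index(u32 reg_offset, u32 lcid)
+{
+ return (reg_offset << 8) | (lcid & 0xff);
+}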
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define UCM_REG_CDU_AG_WR_IFEN 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT 0xe00c4
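+/* Editor's illustrative sketch (not from the original header): the 3-bit
+ * WRR weight fields in this block encode 0 as weight 8 and any non-zero
+ * value as the weight itself, per the comments above; a hypothetical
+ * decode helper under that reading: */
+static inline u8 cm_wrr_weight_decode(u8 enc)
+{
+ return enc ? enc : 8; /* 0 encodes the maximum weight of 8 */
+}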
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT 0xe00c0
+/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+ ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ complement to the others. */
+#define UCM_REG_GR_LD0_PR 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Store channel group is
+ complement to the others. */
+#define UCM_REG_GR_LD1_PR 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5 0xe0068
+#define UCM_REG_PHYS_QNUM0_0 0xe0110
+#define UCM_REG_PHYS_QNUM0_1 0xe0114
+#define UCM_REG_PHYS_QNUM1_0 0xe0118
+#define UCM_REG_PHYS_QNUM1_1 0xe011c
+#define UCM_REG_PHYS_QNUM2_0 0xe0120
+#define UCM_REG_PHYS_QNUM2_1 0xe0124
+#define UCM_REG_PHYS_QNUM3_0 0xe0128
+#define UCM_REG_PHYS_QNUM3_1 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ 0xe00dc
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN 0xe000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
+#define UCM_REG_UCM_UQM_USE_Q 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+ at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [14:6] - message
+ pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
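+/* Editor's illustrative sketch (not from the original header): unpacking a
+ * UCM XX-protection descriptor word according to the field layout given
+ * above; the helper name is hypothetical. */
+static inline void ucm_xx_descr_unpack(u32 descr, u32 *msg_len,
+ u32 *msg_ptr, u32 *next_ptr)
+{
+ *msg_len = descr & 0x3f; /* bits [5:0] - message length */
+ *msg_ptr = (descr >> 6) & 0x1ff; /* bits [14:6] - message pointer */
+ *next_ptr = (descr >> 15) & 0x1f; /* bits [19:15] - next pointer */
+}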
+/* [R 6] Used to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 0xe016c
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+ of the Input Stage XX protection buffer by the XX protection pending
+ messages. Write writes the initial credit value; read returns the current
+ value of the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD 0xe0224
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~ucm_registers_xx_free.xx_free read on read. */
+#define UCM_REG_XX_MSG_NUM 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+ header pointer. */
+#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG 0x8
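+/* Editor's illustrative sketch (not from the original header): the command
+ * config bits above can be combined to enable the UMAC datapath in both
+ * directions; umac_base is a placeholder for the UMAC block base address
+ * and the helper name is hypothetical. */
+static inline void umac_enable_rx_tx(struct bnx2x *bp, u32 umac_base)
+{
+ /* SW_RESET is left clear so the MAC runs after the write */
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG,
+ UMAC_COMMAND_CONFIG_REG_RX_ENA | UMAC_COMMAND_CONFIG_REG_TX_ENA);
+}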
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1 0x10
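+/* Editor's illustrative sketch (not from the original header): per the bit
+ * mapping documented above, MAC_ADDR0 carries bits 47:16 of the station
+ * address and MAC_ADDR1 carries bits 15:0. The helper below takes the
+ * address as a 48-bit value; how the driver packs the six address bytes
+ * into that value is an assumption left to the caller. */
+static inline void umac_set_mac_addr(struct bnx2x *bp, u32 umac_base, u64 mac)
+{
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, (u32)(mac >> 16));
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, (u32)(mac & 0xffff));
+}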
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR 0x14
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5 0xc40cc
+#define USDM_REG_AGG_INT_T_6 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c
+#define USDM_REG_ENABLE_IN1 0xc4238
+#define USDM_REG_ENABLE_IN2 0xc423c
+#define USDM_REG_ENABLE_OUT1 0xc4240
+#define USDM_REG_ENABLE_OUT2 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define USDM_REG_TIMER_TICK 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 0xc4294
+#define USDM_REG_USDM_INT_STS_1 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~usem_registers_arb_element0.arb_element0 and
+ ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0
+ and ~usem_registers_arb_element1.arb_element1 and
+ ~usem_registers_arb_element2.arb_element2 and
+ ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4 0x300030
+#define USEM_REG_ENABLE_IN 0x3000a4
+#define USEM_REG_ENABLE_OUT 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY 0x320000
+/* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC0_DISABLE 0x300224
+/* [RW 1] Disables input messages from FIC1 May be updated during run_time
+ by the microcode */
+#define USEM_REG_FIC1_DISABLE 0x300234
+/* [RW 15] Interrupt table Read and write access to it is not possible in
+ the middle of the work */
+#define USEM_REG_INT_TABLE 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define USEM_REG_MSG_NUM_FIC0 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define USEM_REG_MSG_NUM_FIC1 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define USEM_REG_MSG_NUM_FOC0 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define USEM_REG_MSG_NUM_FOC1 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define USEM_REG_MSG_NUM_FOC2 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define USEM_REG_MSG_NUM_FOC3 0x300014
+/* [RW 1] Disables input messages from the passive buffer May be updated
+ during run_time by the microcode */
+#define USEM_REG_PAS_DISABLE 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 0x302000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define USEM_REG_PRAM 0x340000
+/* [R 16] Valid sleeping threads indication have bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0
+/* [RW 16] List of free threads . There is a bit per thread. */
+#define USEM_REG_THREADS_LIST 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0 0x300110
+#define USEM_REG_USEM_INT_MASK_1 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 0x300104
+#define USEM_REG_USEM_INT_STS_1 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0 0x300124
+#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
+#define VFC_REG_MEMORIES_RST 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * LCID100. The RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX 0x28000
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q 0x20134
+/* [RW 2] Per each decision rule the queue index to register to. */
+#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+ are disregarded; all other signals are treated as usual; if 1 - normal
+ activity. */
+#define XCM_REG_CDU_AG_WR_IFEN 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+ disregarded; valid output is deasserted; all other signals are treated as
+ usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+ input is disregarded; all other signals are treated as usual; if 1 -
+ normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+ - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+ ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+ ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+ highest priority is 3. It is supposed that the Channel group is the
+ complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+ sent to STORM; for a specific connection type. The double REG-pairs are
+ used in order to align to STORM context row size of 128 bits. The offset
+ of these data in the STORM context is always 0. Index _i stands for the
+ connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0 0x20060
+#define XCM_REG_N_SM_CTX_LD_1 0x20064
+#define XCM_REG_N_SM_CTX_LD_2 0x20068
+#define XCM_REG_N_SM_CTX_LD_3 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4 0x20070
+#define XCM_REG_N_SM_CTX_LD_5 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT 0x200d0
+#define XCM_REG_PHYS_QNUM3_0 0x20100
+#define XCM_REG_PHYS_QNUM3_1 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+ writes the initial credit value; read returns the current value of the
+ credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN 0x2002c
+/* [RC 1] Message length mismatch (relative to last indication) at the usem
+ interface. */
+#define XCM_REG_USEM_LENGTH_MIS 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+ REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+ Is used to determine the number of the AG context REG-pairs written back;
+ when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+ disregarded; acknowledge output is deasserted; all other signals are
+ treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+ disregarded; valid is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN 0x2000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */
+#define XCM_REG_XCM_XQM_USE_Q 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+ the initial credit value; read returns the current value of the credit
+ counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+ stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+ acknowledge output is deasserted; all other signals are treated as usual;
+ if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+ the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+ weight 8 (the most prioritised); 1 stands for weight 1 (least
+ prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+ mechanism. The fields are: [5:0] - message length; [11:6] - message
+ pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE 32
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+ of the Input Stage XX protection buffer by the XX protection pending
+ messages. Max credit available - 3. Write writes the initial credit value;
+ read returns the current value of the credit counter. Must be initialized
+ to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD 0x20424
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+ protection. ~xcm_registers_xx_free.xx_free read on read. */
+#define XCM_REG_XX_MSG_NUM 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
+#define XMAC_CTRL_REG_RX_EN (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
+#define XMAC_REG_CTRL 0
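+/* Editor's illustrative sketch (not from the original header): enabling the
+ * XMAC datapath with the control bits defined above; xmac_base is a
+ * placeholder for the per-port XMAC block base address. */
+static inline void xmac_enable_rx_tx(struct bnx2x *bp, u32 xmac_base)
+{
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_RX_EN | XMAC_CTRL_REG_TX_EN);
+}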
+/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI 0x2c
+/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_PAUSE_CTRL 0x68
+#define XMAC_REG_PFC_CTRL 0x70
+#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_STATUS 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE 0x40
+#define XMAC_REG_TX_CTRL 0x20
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+ The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+ header pointer. */
+#define XCM_REG_XX_TABLE 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9 0x16605c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+ or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+ counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c
+#define XSDM_REG_ENABLE_IN1 0x166238
+#define XSDM_REG_ENABLE_IN2 0x16623c
+#define XSDM_REG_ENABLE_OUT1 0x166240
+#define XSDM_REG_ENABLE_OUT2 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+ interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN 0x1664c4
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558
+/* [RW 32] Tick for timer counter. Applicable only when
+ ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define XSDM_REG_TIMER_TICK 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+ not be equal to register ~xsem_registers_arb_element0.arb_element0 and
+ ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+ decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+ sleeping thread with priority 1; 4- sleeping thread with priority 2.
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0
+ and ~xsem_registers_arb_element1.arb_element1 and
+ ~xsem_registers_arb_element2.arb_element2 and
+ ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4 0x280030
+#define XSEM_REG_ENABLE_IN 0x2800a4
+#define XSEM_REG_ENABLE_OUT 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+ placed in SEM_FAST block. The SEM_FAST registers are described in
+ appendix B. In order to access the sem_fast registers the base address
+ ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY 0x2a0000
+/* [RW 1] Disables input messages from FIC0 May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC0_DISABLE 0x280224
+/* [RW 1] Disables input messages from FIC1 May be updated during run_time
+ by the microcode */
+#define XSEM_REG_FIC1_DISABLE 0x280234
+/* [RW 15] Interrupt table Read and write access to it is not possible in
+ the middle of the work */
+#define XSEM_REG_INT_TABLE 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+ FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+ FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3 0x280014
+/* [RW 1] Disables input messages from the passive buffer May be updated
+ during run_time by the microcode */
+#define XSEM_REG_PAS_DISABLE 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER 0x282000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define XSEM_REG_PRAM 0x2c0000
+/* [R 16] Valid sleeping threads indication have bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define XSEM_REG_THREADS_LIST 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit of
+ * one of the 64 VFs; values 64-67 reset it for one of the 4 PFs; values
+ * 68-127 are not valid. */
+#define XSEM_REG_VFPF_ERR_NUM 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
+#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT (1L<<4)
+#define MCPR_NVM_COMMAND_DONE (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST (1L<<7)
+#define MCPR_NVM_COMMAND_LAST (1L<<8)
+#define MCPR_NVM_COMMAND_WR (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1)
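+/* Editor's note, a usage sketch (not part of the original register dump):
+ * a single NVRAM dword access is typically built from the bits above by
+ * setting MCPR_NVM_COMMAND_DOIT together with _FIRST/_LAST (and _WR for a
+ * write) and then polling for MCPR_NVM_COMMAND_DONE.  The MCP-side
+ * command/address register offsets used below (MCP_REG_MCPR_NVM_*) are
+ * assumed to be defined elsewhere in this file.
+ *
+ *	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, offs & MCPR_NVM_ADDR_NVM_ADDR_VALUE);
+ *	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DOIT |
+ *	       MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+ *	while (!(REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND) & MCPR_NVM_COMMAND_DONE))
+ *		udelay(5);
+ */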
+#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
+#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE (1L<<12)
+#define EMAC_LED_OVERRIDE (1L<<0)
+#define EMAC_LED_TRAFFIC (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
+#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
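+/* Editor's note on the MDIO bits above (a sketch, not authoritative): an
+ * access through EMAC_REG_EMAC_MDIO_COMM is kicked off by writing one of
+ * the _COMMAND_* codes together with EMAC_MDIO_COMM_START_BUSY (the PHY
+ * address, devad and register number live in the upper bits, which are not
+ * broken out as named masks here); software then polls until START_BUSY
+ * clears and, for reads, takes the result from the EMAC_MDIO_COMM_DATA
+ * field.  Clause-45 accesses issue _COMMAND_ADDRESS first and then
+ * _COMMAND_READ_45/_WRITE_45.  EMAC_MDIO_MODE_CLAUSE_45 and the CLOCK_CNT
+ * field select the frame format and the MDC clock divider.
+ */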
+#define EMAC_MODE_25G_MODE (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX (1L<<1)
+#define EMAC_MODE_PORT_GMII (2L<<2)
+#define EMAC_MODE_PORT_MII (1L<<2)
+#define EMAC_MODE_PORT_MII_10M (3L<<2)
+#define EMAC_MODE_RESET (1L<<0)
+#define EMAC_REG_EMAC_LED 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
+#define EMAC_REG_EMAC_MODE 0x0
+#define EMAC_REG_EMAC_RX_MODE 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23
+#define EMAC_REG_EMAC_TX_MODE 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
+#define EMAC_RX_MODE_RESET (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN (1L<<4)
+#define EMAC_TX_MODE_RESET (1L<<0)
+#define MISC_REGISTERS_GPIO_0 0
+#define MISC_REGISTERS_GPIO_1 1
+#define MISC_REGISTERS_GPIO_2 2
+#define MISC_REGISTERS_GPIO_3 3
+#define MISC_REGISTERS_GPIO_CLR_POS 16
+#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS 24
+#define MISC_REGISTERS_GPIO_HIGH 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 16
+#define MISC_REGISTERS_GPIO_LOW 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
+#define MISC_REGISTERS_GPIO_SET_POS 8
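+/* Editor's note, a usage sketch: the GPIO control register is laid out as
+ * value/SET/CLR/FLOAT byte lanes at the *_POS offsets above, with port 1
+ * pins shifted up by MISC_REGISTERS_GPIO_PORT_SHIFT.  Assuming the GPIO
+ * register offset itself (MISC_REG_GPIO) is defined elsewhere in this
+ * file, one plausible sequence for driving a pin high is:
+ *
+ *	shift = gpio_num + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+ *	val = REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT;
+ *	val &= ~(1 << (shift + MISC_REGISTERS_GPIO_FLOAT_POS));
+ *	val |= (1 << (shift + MISC_REGISTERS_GPIO_SET_POS));
+ *	REG_WR(bp, MISC_REG_GPIO, val);
+ */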
+#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_SET 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
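+/* Editor's note, a usage sketch: the reset registers above come in
+ * _SET/_CLEAR pairs - bits written to the _SET offset are set in the
+ * underlying reset-control register and bits written to the _CLEAR offset
+ * are cleared, so a single block can be toggled without a read-modify-write.
+ * The offsets appear to be relative to GRCBASE_MISC, e.g. (sketch):
+ *
+ *	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ *	       MISC_REGISTERS_RESET_REG_2_RST_BMAC0);
+ *	udelay(5);
+ *	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ *	       MISC_REGISTERS_RESET_REG_2_RST_BMAC0);
+ */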
+#define MISC_REGISTERS_SPIO_4 4
+#define MISC_REGISTERS_SPIO_5 5
+#define MISC_REGISTERS_SPIO_7 7
+#define MISC_REGISTERS_SPIO_CLR_POS 16
+#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
+#define MISC_REGISTERS_SPIO_SET_POS 8
+#define HW_LOCK_DRV_FLAGS 10
+#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
+#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_RESET 5
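+/* Editor's note: the HW_LOCK_* values above are resource IDs for the
+ * driver/firmware hardware lock, not register offsets.  They would
+ * presumably be passed to the driver's HW-lock helpers, e.g. something
+ * like bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO) around accesses
+ * shared between functions (helper names assumed, not defined here).
+ */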
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0 0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6 6
+#define RESERVED_GENERAL_ATTENTION_BIT_7 7
+#define RESERVED_GENERAL_ATTENTION_BIT_8 8
+#define RESERVED_GENERAL_ATTENTION_BIT_9 9
+#define RESERVED_GENERAL_ATTENTION_BIT_10 10
+#define RESERVED_GENERAL_ATTENTION_BIT_11 11
+#define RESERVED_GENERAL_ATTENTION_BIT_12 12
+#define RESERVED_GENERAL_ATTENTION_BIT_13 13
+#define RESERVED_GENERAL_ATTENTION_BIT_14 14
+#define RESERVED_GENERAL_ATTENTION_BIT_15 15
+#define RESERVED_GENERAL_ATTENTION_BIT_16 16
+#define RESERVED_GENERAL_ATTENTION_BIT_17 17
+#define RESERVED_GENERAL_ATTENTION_BIT_18 18
+#define RESERVED_GENERAL_ATTENTION_BIT_19 19
+#define RESERVED_GENERAL_ATTENTION_BIT_20 20
+#define RESERVED_GENERAL_ATTENTION_BIT_21 21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11
+
+/* E1H NIG status sync attention mapped to groups 4-7 */
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR 23
+#define LATCHED_ATTN_RBCT 24
+#define LATCHED_ATTN_RBCN 25
+#define LATCHED_ATTN_RBCU 26
+#define LATCHED_ATTN_RBCP 27
+#define LATCHED_ATTN_TIMEOUT_GRC 28
+#define LATCHED_ATTN_RSVD_GRC 29
+#define LATCHED_ATTN_ROM_PARITY_MCP 30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP 31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP 32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP 33
+
+#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+ (1UL << ((94 + atten_name) % 32))
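+/* Editor's note, a worked example: these macros offset the attention bit
+ * index by 94, so for LINK_SYNC_ATTENTION_BIT_FUNC_0 (= 12)
+ * GENERAL_ATTEN_WORD() yields (94 + 12) / 32 = 3 and
+ * GENERAL_ATTEN_OFFSET() yields 1UL << ((94 + 12) % 32) = 1UL << 10.
+ */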
+/*
+ * This file defines the GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml in the regBase attribute.
+ * Use the base together with the generated offsets of the specific registers.
+ */
+
+#define GRCBASE_PXPCS 0x000000
+#define GRCBASE_PCICONFIG 0x002000
+#define GRCBASE_PCIREG 0x002400
+#define GRCBASE_EMAC0 0x008000
+#define GRCBASE_EMAC1 0x008400
+#define GRCBASE_DBU 0x008800
+#define GRCBASE_MISC 0x00A000
+#define GRCBASE_DBG 0x00C000
+#define GRCBASE_NIG 0x010000
+#define GRCBASE_XCM 0x020000
+#define GRCBASE_PRS 0x040000
+#define GRCBASE_SRCH 0x040400
+#define GRCBASE_TSDM 0x042000
+#define GRCBASE_TCM 0x050000
+#define GRCBASE_BRB1 0x060000
+#define GRCBASE_MCP 0x080000
+#define GRCBASE_UPB 0x0C1000
+#define GRCBASE_CSDM 0x0C2000
+#define GRCBASE_USDM 0x0C4000
+#define GRCBASE_CCM 0x0D0000
+#define GRCBASE_UCM 0x0E0000
+#define GRCBASE_CDU 0x101000
+#define GRCBASE_DMAE 0x102000
+#define GRCBASE_PXP 0x103000
+#define GRCBASE_CFC 0x104000
+#define GRCBASE_HC 0x108000
+#define GRCBASE_PXP2 0x120000
+#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
+#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
+#define GRCBASE_TIMERS 0x164000
+#define GRCBASE_XSDM 0x166000
+#define GRCBASE_QM 0x168000
+#define GRCBASE_DQ 0x170000
+#define GRCBASE_TSEM 0x180000
+#define GRCBASE_CSEM 0x200000
+#define GRCBASE_XSEM 0x280000
+#define GRCBASE_USEM 0x300000
+#define GRCBASE_MISC_AEU GRCBASE_MISC
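+/* Editor's note: the block register offsets earlier in this file already
+ * include these bases; e.g. XSEM_REG_ARB_ELEMENT1 (0x280024) is
+ * GRCBASE_XSEM (0x280000) + 0x24.
+ */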
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET 0x2000
+#define PCICFG_VENDOR_ID_OFFSET 0x00
+#define PCICFG_DEVICE_ID_OFFSET 0x02
+#define PCICFG_COMMAND_OFFSET 0x04
+#define PCICFG_COMMAND_IO_SPACE (1<<0)
+#define PCICFG_COMMAND_MEM_SPACE (1<<1)
+#define PCICFG_COMMAND_BUS_MASTER (1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
+#define PCICFG_COMMAND_PERR_ENA (1<<6)
+#define PCICFG_COMMAND_STEPPING (1<<7)
+#define PCICFG_COMMAND_SERR_ENA (1<<8)
+#define PCICFG_COMMAND_FAST_B2B (1<<9)
+#define PCICFG_COMMAND_INT_DISABLE (1<<10)
+#define PCICFG_COMMAND_RESERVED (0x1f<<11)
+#define PCICFG_STATUS_OFFSET 0x06
+#define PCICFG_REVESION_ID_OFFSET 0x08
+#define PCICFG_CACHE_LINE_SIZE 0x0c
+#define PCICFG_LATENCY_TIMER 0x0d
+#define PCICFG_BAR_1_LOW 0x10
+#define PCICFG_BAR_1_HIGH 0x14
+#define PCICFG_BAR_2_LOW 0x18
+#define PCICFG_BAR_2_HIGH 0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
+#define PCICFG_INT_LINE 0x3c
+#define PCICFG_INT_PIN 0x3d
+#define PCICFG_PM_CAPABILITY 0x48
+#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
+#define PCICFG_PM_CAPABILITY_DSI (1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
+#define PCICFG_PM_CSR_OFFSET 0x4c
+#define PCICFG_PM_CSR_STATE (0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
+#define PCICFG_PM_CSR_PME_STATUS (1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET 0x58
+#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
+#define PCICFG_GRC_ADDRESS 0x78
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL 0xb4
+#define PCICFG_DEVICE_STATUS 0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
+#define PCICFG_LINK_CONTROL 0xbc
+
+
+#define BAR_USTRORM_INTMEM 0x400000
+#define BAR_CSTRORM_INTMEM 0x410000
+#define BAR_XSTRORM_INTMEM 0x420000
+#define BAR_TSTRORM_INTMEM 0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM 0x440000
+
+#define BAR_DOORBELL_OFFSET 0x800000
+
+#define BAR_ME_REGISTER 0x450000
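+/* Editor's note, a usage sketch: the storm internal memories are reached
+ * through these BAR windows by adding a per-storm offset ('offset' below
+ * stands for some firmware-defined location, purely illustrative):
+ *
+ *	REG_WR(bp, BAR_TSTRORM_INTMEM + offset, val);
+ */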
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG 0x408
+#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
+#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
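+/* Editor's note: the BAR1 size encodings above follow 64KB << (N - 1) for
+ * N = 1..15 (0 disables the BAR), and the expansion-ROM size encodings
+ * follow 2KB << (N - 1).
+ */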
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG 0x40c
+#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME (1L<<24)
+#define PCI_CONFIG_3_PME_STATUS (1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
+#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
+#define PCI_CONFIG_3_PCI_POWER (1L<<31)
+
+#define GRC_BAR2_CONFIG 0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
+
+#define PCI_PM_DATA_A 0x410
+#define PCI_PM_DATA_B 0x414
+#define PCI_ID_VAL1 0x434
+#define PCI_ID_VAL2 0x438
+
+#define PXPCS_TL_CONTROL_5 0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT 0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+ (1 << 28) /* Unsupported Request Error Status in function4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+ (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+ (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+ (1 << 25) /* Receiver Overflow Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+ (1 << 24) /* Unexpected Completion Status Status in function 4, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+ (1 << 23) /* Receive UR Status in function 4. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+ (1 << 22) /* Completer Timeout Status Status in function 4, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+ (1 << 21) /* Flow Control Protocol Error Status Status in \
+ function 4, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+ (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+ (1 << 18) /* Unsupported Request Error Status in function3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+ (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+ (1 << 16) /* Malformed TLP Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+ (1 << 15) /* Receiver Overflow Status Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+ (1 << 14) /* Unexpected Completion Status Status in function 3, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+ (1 << 13) /* Receive UR Status in function 3. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+ (1 << 12) /* Completer Timeout Status Status in function 3, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+ (1 << 11) /* Flow Control Protocol Error Status Status in \
+ function 3, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+ (1 << 10) /* Poisoned Error Status Status in function 3, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+ (1 << 8) /* Unsupported Request Error Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+ (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+ (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+ (1 << 5) /* Receiver Overflow Status Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+ (1 << 4) /* Unexpected Completion Status Status for Function 2, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+ (1 << 3) /* Receive UR Status for Function 2. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+ (1 << 2) /* Completer Timeout Status Status for Function 2, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+ (1 << 1) /* Flow Control Protocol Error Status Status for \
+ Function 2, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+ (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT 0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+ (1 << 28) /* Unsupported Request Error Status in function7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+ (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+ (1 << 26) /* Malformed TLP Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+ (1 << 25) /* Receiver Overflow Status Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+ (1 << 24) /* Unexpected Completion Status Status in function 7, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+ (1 << 23) /* Receive UR Status in function 7. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+ (1 << 22) /* Completer Timeout Status Status in function 7, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+ (1 << 21) /* Flow Control Protocol Error Status Status in \
+ function 7, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+ (1 << 20) /* Poisoned Error Status Status in function 7, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+ (1 << 18) /* Unsupported Request Error Status in function6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+ (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+ (1 << 16) /* Malformed TLP Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+ (1 << 15) /* Receiver Overflow Status Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+ (1 << 14) /* Unexpected Completion Status Status in function 6, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+ (1 << 13) /* Receive UR Status in function 6. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+ (1 << 12) /* Completer Timeout Status Status in function 6, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+ (1 << 11) /* Flow Control Protocol Error Status Status in \
+ function 6, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+ (1 << 10) /* Poisoned Error Status Status in function 6, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+ (1 << 8) /* Unsupported Request Error Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+ (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+ (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+ (1 << 5) /* Receiver Overflow Status Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen.. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+ (1 << 4) /* Unexpected Completion Status Status for Function 5, \
+ if set, generate pcie_err_attn output when this error is seen. WC \
+ */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+ (1 << 3) /* Receive UR Status for Function 5. If set, generate \
+ pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+ (1 << 2) /* Completer Timeout Status Status for Function 5, if \
+ set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+ (1 << 1) /* Flow Control Protocol Error Status Status for \
+ Function 5, if set, generate pcie_err_attn output when this error \
+ is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+ (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
+ generate pcie_err_attn output when this error is seen.. WC */
+
+
+#define BAR_ME_REGISTER 0x450000
+#define ME_REG_PF_NUM_SHIFT 0
+#define ME_REG_PF_NUM\
+ (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID (1<<8)
+#define ME_REG_VF_NUM_SHIFT 9
+#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR (0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT 16
+#define ME_REG_ABS_PF_NUM\
+ (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
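+/* Editor's note, a usage sketch: given a value 'me' read from
+ * BAR_ME_REGISTER, the function identity can be decoded as:
+ *
+ *	abs_pf = (me & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT;
+ *	if (me & ME_REG_VF_VALID)
+ *		vf = (me & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+ */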
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1 0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+
+#define MDIO_REG_BANK_RX0 0x80b0
+#define MDIO_RX0_RX_STATUS 0x10
+#define MDIO_RX0_RX_STATUS_SIGDET 0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE 0x1000
+#define MDIO_RX0_RX_EQ_BOOST 0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX1 0x80c0
+#define MDIO_RX1_RX_EQ_BOOST 0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX2 0x80d0
+#define MDIO_RX2_RX_EQ_BOOST 0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX3 0x80e0
+#define MDIO_RX3_RX_EQ_BOOST 0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_RX_ALL 0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK 0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL 0x10
+
+#define MDIO_REG_BANK_TX0 0x8060
+#define MDIO_TX0_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX1 0x8070
+#define MDIO_TX1_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX2 0x8080
+#define MDIO_TX2_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_TX3 0x8090
+#define MDIO_TX3_TX_DRIVER 0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK 0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT 12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT 8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK 0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT 4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK 0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT 1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T 1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0 0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL 0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1 0x8010
+#define MDIO_BLOCK1_LANE_CTRL0 0x15
+#define MDIO_BLOCK1_LANE_CTRL1 0x16
+#define MDIO_BLOCK1_LANE_CTRL2 0x17
+#define MDIO_BLOCK1_LANE_PRBS 0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2 0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP 0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
+
+#define MDIO_REG_BANK_GP_STATUS 0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1 0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE 0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE 0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS 0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS 0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE 0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE 0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE 0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M 0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G 0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G 0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G 0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG 0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG 0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G 0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G 0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G 0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT (0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL 0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1 0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE 0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2 0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G 0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1 0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M 0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M 0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M 0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M 0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL 0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK 0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G 0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G 0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G 0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG 0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4 0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G 0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G 0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G 0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G 0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G 0x0009
+
+#define MDIO_REG_BANK_OVER_1G 0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK 0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT 5
+#define MDIO_OVER_1G_UP1 0x19
+#define MDIO_OVER_1G_UP1_2_5G 0x0001
+#define MDIO_OVER_1G_UP1_5G 0x0002
+#define MDIO_OVER_1G_UP1_6G 0x0004
+#define MDIO_OVER_1G_UP1_10G 0x0010
+#define MDIO_OVER_1G_UP1_10GH 0x0008
+#define MDIO_OVER_1G_UP1_12G 0x0020
+#define MDIO_OVER_1G_UP1_12_5G 0x0040
+#define MDIO_OVER_1G_UP1_13G 0x0080
+#define MDIO_OVER_1G_UP1_15G 0x0100
+#define MDIO_OVER_1G_UP1_16G 0x0200
+#define MDIO_OVER_1G_UP2 0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK 0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK 0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK 0x03C0
+#define MDIO_OVER_1G_UP3 0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 0x0001
+#define MDIO_OVER_1G_LP_UP1 0x1C
+#define MDIO_OVER_1G_LP_UP2 0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK 0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT 7
+#define MDIO_OVER_1G_LP_UP3 0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY 0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS 0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG 0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG 0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE 0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL 0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE 0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN 0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0 0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL 0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL 0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1 0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK 0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37 0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN 0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN 0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN 0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 0xFFD0
+#define MDIO_AER_BLOCK_AER_REG 0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0 0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL 0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK 0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10 0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100 0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000 0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN 0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN 0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK 0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET 0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS 0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS 0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV 0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX 0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC 0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC 0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE 0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK 0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE 0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH 0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP 0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP 0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ The other bits are reserved and should be zero */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
+
+
+#define MDIO_PMA_DEVAD 0x1
+/*ieee*/
+#define MDIO_PMA_REG_CTRL 0x0
+#define MDIO_PMA_REG_STATUS 0x1
+#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
+#define MDIO_PMA_REG_RX_SD 0xa
+/*bcm*/
+#define MDIO_PMA_REG_BCM_CTRL 0x0096
+#define MDIO_PMA_REG_FEC_CTRL 0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN 0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS 0xca09
+#define MDIO_PMA_REG_MISC_CTRL 0xca0a
+#define MDIO_PMA_REG_GEN_CTRL 0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
+#define MDIO_PMA_REG_ROM_VER1 0xca19
+#define MDIO_PMA_REG_ROM_VER2 0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH 0xca1d
+#define MDIO_PMA_REG_PLL_CTRL 0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 0xca23
+#define MDIO_PMA_REG_LRM_MODE 0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH 0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL 0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE 0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE 0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS 0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT 0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR 0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF 0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1 0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2 0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1 0xc026
+#define MDIO_PMA_REG_7101_VER2 0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD 0x2
+/*bcm*/
+#define MDIO_WIS_REG_LASI_CNTL 0x9002
+#define MDIO_WIS_REG_LASI_STATUS 0x9005
+
+#define MDIO_PCS_DEVAD 0x3
+#define MDIO_PCS_REG_STATUS 0x0020
+#define MDIO_PCS_REG_LASI_STATUS 0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS 0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX 0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD 0x4
+#define MDIO_XS_PLL_SEQUENCER 0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1 0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0 0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1 0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2 0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
+#define MDIO_AN_DEVAD 0x7
+/*ieee*/
+#define MDIO_AN_REG_CTRL 0x0000
+#define MDIO_AN_REG_STATUS 0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE 0x0020
+#define MDIO_AN_REG_ADV_PAUSE 0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE 0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC 0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH 0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
+#define MDIO_AN_REG_ADV 0x0011
+#define MDIO_AN_REG_ADV2 0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_MASTER_STATUS 0x0021
+/*bcm*/
+#define MDIO_AN_REG_LINK_STATUS 0x8304
+#define MDIO_AN_REG_CL37_CL73 0x8370
+#define MDIO_AN_REG_CL37_AN 0xffe0
+#define MDIO_AN_REG_CL37_FC_LD 0xffe4
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+
+#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+  * powered down; when it is active low, the 10GBASE-T core is powered up.
+  */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are mailbox register set used by 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
+#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED 0x0001
+#define PHY84833_CMD_IN_PROGRESS 0x0002
+#define PHY84833_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
+
+
+/* 84833 F/W Feature Commands */
+#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
+#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
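+/* Illustrative sketch (editor's addition, not part of the original patch):
+ * packing the three Warpcore TX FIR taps into a single register value using
+ * the MDIO_WC_REG_TX_FIR_TAP_* offset/mask pairs defined above.  The tap
+ * values passed in are arbitrary examples.
+ */
+static inline u16 mdio_wc_tx_fir_tap_val(u16 pre_tap, u16 main_tap,
+					  u16 post_tap)
+{
+	return MDIO_WC_REG_TX_FIR_TAP_ENABLE |
+	       ((pre_tap << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) &
+		MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK) |
+	       ((main_tap << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) &
+		MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK) |
+	       ((post_tap << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) &
+		MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK);
+}
+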
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_ADV 0x3c
+#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
+#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
+#define IGU_FUNC_BASE 0x0400
+
+#define IGU_ADDR_MSIX 0x0000
+#define IGU_ADDR_INT_ACK 0x0200
+#define IGU_ADDR_PROD_UPD 0x0201
+#define IGU_ADDR_ATTN_BITS_UPD 0x0202
+#define IGU_ADDR_ATTN_BITS_SET 0x0203
+#define IGU_ADDR_ATTN_BITS_CLR 0x0204
+#define IGU_ADDR_COALESCE_NOW 0x0205
+#define IGU_ADDR_SIMD_MASK 0x0206
+#define IGU_ADDR_SIMD_NOMASK 0x0207
+#define IGU_ADDR_MSI_CTL 0x0210
+#define IGU_ADDR_MSI_ADDR_LO 0x0211
+#define IGU_ADDR_MSI_ADDR_HI 0x0212
+#define IGU_ADDR_MSI_DATA 0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
+
+#define COMMAND_REG_INT_ACK 0x0
+#define COMMAND_REG_PROD_UPD 0x4
+#define COMMAND_REG_ATTN_BITS_UPD 0x8
+#define COMMAND_REG_ATTN_BITS_SET 0xc
+#define COMMAND_REG_ATTN_BITS_CLR 0x10
+#define COMMAND_REG_COALESCE_NOW 0x14
+#define COMMAND_REG_SIMD_MASK 0x18
+#define COMMAND_REG_SIMD_NOMASK 0x1c
+
+
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD 0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+ (IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE 0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+ (IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER 0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
+
+#define IGU_REG_RESERVED_UPPER 0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS 5
+#define IGU_BC_NDSB_NUM_SEGS 2
+#define IGU_NORM_DSB_NUM_SEGS 2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD 128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+ /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
+ [5:2] = 0; [1:0] = PF number) */
+#define IGU_FID_ENCODE_IS_PF (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT 6
+#define IGU_FID_VF_NUM_MASK (0x3f)
+#define IGU_FID_PF_NUM_MASK (0x7)
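+
+/* Illustrative sketch (editor's addition, not part of the original patch):
+ * decoding an IGU FID according to the layout described in the comment
+ * above.
+ */
+static inline int igu_fid_is_pf(u8 fid)
+{
+	return !!(fid & IGU_FID_ENCODE_IS_PF);
+}
+
+static inline u8 igu_fid_func_num(u8 fid)
+{
+	return (fid & IGU_FID_ENCODE_IS_PF) ?
+	       (fid & IGU_FID_PF_NUM_MASK) : (fid & IGU_FID_VF_NUM_MASK);
+}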
+
+#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/**
+ * String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+ (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+ (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+ (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+ (0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
+
+/******************************************************************************
+ * Description:
+ * Calculates crc 8 on a word value: polynomial 0-1-2-8
+ * Code was translated from Verilog.
+ * Return: the resulting 8-bit CRC value
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+ u8 D[32];
+ u8 NewCRC[8];
+ u8 C[8];
+ u8 crc_res;
+ u8 i;
+
+ /* split the data into 32 bits */
+ for (i = 0; i < 32; i++) {
+ D[i] = (u8)(data & 1);
+ data = data >> 1;
+ }
+
+ /* split the crc into 8 bits */
+ for (i = 0; i < 8; i++) {
+ C[i] = crc & 1;
+ crc = crc >> 1;
+ }
+
+ NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+ D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+ C[6] ^ C[7];
+ NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+ D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+ C[6];
+ NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+ D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+ C[0] ^ C[1] ^ C[4] ^ C[5];
+ NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+ D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+ C[1] ^ C[2] ^ C[5] ^ C[6];
+ NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+ D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+ C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+ NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+ D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+ C[3] ^ C[4] ^ C[7];
+ NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+ D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+ C[5];
+ NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+ D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+ C[6];
+
+ crc_res = 0;
+ for (i = 0; i < 8; i++)
+ crc_res |= (NewCRC[i] << i);
+
+ return crc_res;
+}
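+
+/* Illustrative sketch (editor's addition, not part of the original patch):
+ * computing the type-A CDU validation byte for a context, using the
+ * string-to-compress layout documented above.  The region and type values
+ * are arbitrary examples.
+ */
+static inline u8 cdu_valid_byte_example(u32 cid)
+{
+	return CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, 1);
+}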
+
+
+#endif /* BNX2X_REG_H */
--- /dev/null
- l2t_release(L2DATA(tdev), e);
+/*
+ * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include <linux/notifier.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+
+#include "common.h"
+#include "regs.h"
+#include "cxgb3_ioctl.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "firmware_exports.h"
+#include "cxgb3_offload.h"
+
+static LIST_HEAD(client_list);
+static LIST_HEAD(ofld_dev_list);
+static DEFINE_MUTEX(cxgb3_db_lock);
+
+static DEFINE_RWLOCK(adapter_list_lock);
+static LIST_HEAD(adapter_list);
+
+static const unsigned int MAX_ATIDS = 64 * 1024;
+static const unsigned int ATID_BASE = 0x10000;
+
+static void cxgb_neigh_update(struct neighbour *neigh);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+
+static inline int offload_activated(struct t3cdev *tdev)
+{
+ const struct adapter *adapter = tdev2adap(tdev);
+
+ return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
+}
+
+/**
+ * cxgb3_register_client - register an offload client
+ * @client: the client
+ *
+ * Add the client to the client list and call it back for each
+ * activated offload device.
+ */
+void cxgb3_register_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_add_tail(&client->client_list, &client_list);
+
+ if (client->add) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->add(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_register_client);
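+
+/* Illustrative sketch (editor's addition, not part of the original patch):
+ * a minimal offload client wired up through the .add/.remove callbacks that
+ * cxgb3_register_client() and cxgb3_unregister_client() invoke.
+ */
+static void sketch_add(struct t3cdev *tdev)
+{
+	printk(KERN_INFO "offload device %s activated\n", tdev->name);
+}
+
+static void sketch_remove(struct t3cdev *tdev)
+{
+	printk(KERN_INFO "offload device %s deactivated\n", tdev->name);
+}
+
+static struct cxgb3_client sketch_client = {
+	.add	= sketch_add,
+	.remove	= sketch_remove,
+};
+/* a ULP module would call cxgb3_register_client(&sketch_client) at init */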
+
+/**
+ * cxgb3_unregister_client - unregister an offload client
+ * @client: the client
+ *
+ * Remove the client from the client list and call it back for each
+ * activated offload device.
+ */
+void cxgb3_unregister_client(struct cxgb3_client *client)
+{
+ struct t3cdev *tdev;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&client->client_list);
+
+ if (client->remove) {
+ list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
+ if (offload_activated(tdev))
+ client->remove(tdev);
+ }
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_unregister_client);
+
+/**
+ * cxgb3_add_clients - activate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Call back all registered clients once an offload device is activated.
+ */
+void cxgb3_add_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->add)
+ client->add(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+/**
+ * cxgb3_remove_clients - deactivate registered clients for an offload device
+ * @tdev: the offload device
+ *
+ * Call back all registered clients once an offload device is deactivated.
+ */
+void cxgb3_remove_clients(struct t3cdev *tdev)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->remove)
+ client->remove(tdev);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
+{
+ struct cxgb3_client *client;
+
+ mutex_lock(&cxgb3_db_lock);
+ list_for_each_entry(client, &client_list, client_list) {
+ if (client->event_handler)
+ client->event_handler(tdev, event, port);
+ }
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static struct net_device *get_iff_from_mac(struct adapter *adapter,
+ const unsigned char *mac,
+ unsigned int vlan)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ struct net_device *dev = adapter->port[i];
+
+ if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ if (vlan && vlan != VLAN_VID_MASK) {
+ rcu_read_lock();
+ dev = __vlan_find_dev_deep(dev, vlan);
+ rcu_read_unlock();
+ } else if (netif_is_bond_slave(dev)) {
+ while (dev->master)
+ dev = dev->master;
+ }
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
+ void *data)
+{
+ int i;
+ int ret = 0;
+ unsigned int val = 0;
+ struct ulp_iscsi_info *uiip = data;
+
+ switch (req) {
+ case ULP_ISCSI_GET_PARAMS:
+ uiip->pdev = adapter->pdev;
+ uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
+ uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
+ uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
+
+ val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
+ for (i = 0; i < 4; i++, val >>= 8)
+ uiip->pgsz_factor[i] = val & 0xFF;
+
+ val = t3_read_reg(adapter, A_TP_PARA_REG7);
+ uiip->max_txsz =
+ uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
+ (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
+ /*
+ * On tx, the iscsi pdu has to be <= tx page size and has to
+ * fit into the Tx PM FIFO.
+ */
+ val = min(adapter->params.tp.tx_pg_size,
+ t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
+ uiip->max_txsz = min(val, uiip->max_txsz);
+
+ /* set MaxRxData to 16224 */
+ val = t3_read_reg(adapter, A_TP_PARA_REG2);
+ if ((val >> S_MAXRXDATA) != 0x3f60) {
+ val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
+ val |= V_MAXRXDATA(0x3f60);
+ printk(KERN_INFO
+ "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
+ adapter->name, val);
+ t3_write_reg(adapter, A_TP_PARA_REG2, val);
+ }
+
+ /*
+ * on rx, the iscsi pdu has to be < rx page size and the
+ * max rx data length programmed in TP
+ */
+ val = min(adapter->params.tp.rx_pg_size,
+ ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
+ S_MAXRXDATA) & M_MAXRXDATA);
+ uiip->max_rxsz = min(val, uiip->max_rxsz);
+ break;
+ case ULP_ISCSI_SET_PARAMS:
+ t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
+ /* program the ddp page sizes */
+ for (i = 0; i < 4; i++)
+ val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
+ if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
+ printk(KERN_INFO
+ "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
+ adapter->name, val, uiip->pgsz_factor[0],
+ uiip->pgsz_factor[1], uiip->pgsz_factor[2],
+ uiip->pgsz_factor[3]);
+ t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+/* Response queue used for RDMA events. */
+#define ASYNC_NOTIF_RSPQ 0
+
+static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
+{
+ int ret = 0;
+
+ switch (req) {
+ case RDMA_GET_PARAMS: {
+ struct rdma_info *rdma = data;
+ struct pci_dev *pdev = adapter->pdev;
+
+ rdma->udbell_physbase = pci_resource_start(pdev, 2);
+ rdma->udbell_len = pci_resource_len(pdev, 2);
+ rdma->tpt_base =
+ t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
+ rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+ rdma->pbl_base =
+ t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
+ rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ rdma->pdev = pdev;
+ break;
+ }
+ case RDMA_CQ_OP:{
+ unsigned long flags;
+ struct rdma_cq_op *rdma = data;
+
+ /* may be called in any context */
+ spin_lock_irqsave(&adapter->sge.reg_lock, flags);
+ ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+ rdma->credits);
+ spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
+ break;
+ }
+ case RDMA_GET_MEM:{
+ struct ch_mem_range *t = data;
+ struct mc7 *mem;
+
+ if ((t->addr & 7) || (t->len & 7))
+ return -EINVAL;
+ if (t->mem_id == MEM_CM)
+ mem = &adapter->cm;
+ else if (t->mem_id == MEM_PMRX)
+ mem = &adapter->pmrx;
+ else if (t->mem_id == MEM_PMTX)
+ mem = &adapter->pmtx;
+ else
+ return -EINVAL;
+
+ ret =
+ t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
+ (u64 *) t->buf);
+ if (ret)
+ return ret;
+ break;
+ }
+ case RDMA_CQ_SETUP:{
+ struct rdma_cq_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret =
+ t3_sge_init_cqcntxt(adapter, rdma->id,
+ rdma->base_addr, rdma->size,
+ ASYNC_NOTIF_RSPQ,
+ rdma->ovfl_mode, rdma->credits,
+ rdma->credit_thres);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_CQ_DISABLE:
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ case RDMA_CTRL_QP_SETUP:{
+ struct rdma_ctrlqp_setup *rdma = data;
+
+ spin_lock_irq(&adapter->sge.reg_lock);
+ ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
+ SGE_CNTXT_RDMA,
+ ASYNC_NOTIF_RSPQ,
+ rdma->base_addr, rdma->size,
+ FW_RI_TID_START, 1, 0);
+ spin_unlock_irq(&adapter->sge.reg_lock);
+ break;
+ }
+ case RDMA_GET_MIB: {
+ spin_lock(&adapter->stats_lock);
+ t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
+{
+ struct adapter *adapter = tdev2adap(tdev);
+ struct tid_range *tid;
+ struct mtutab *mtup;
+ struct iff_mac *iffmacp;
+ struct ddp_params *ddpp;
+ struct adap_ports *ports;
+ struct ofld_page_info *rx_page_info;
+ struct tp_params *tp = &adapter->params.tp;
+ int i;
+
+ switch (req) {
+ case GET_MAX_OUTSTANDING_WR:
+ *(unsigned int *)data = FW_WR_NUM;
+ break;
+ case GET_WR_LEN:
+ *(unsigned int *)data = WR_FLITS;
+ break;
+ case GET_TX_MAX_CHUNK:
+ *(unsigned int *)data = 1 << 20; /* 1MB */
+ break;
+ case GET_TID_RANGE:
+ tid = data;
+ tid->num = t3_mc5_size(&adapter->mc5) -
+ adapter->params.mc5.nroutes -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
+ tid->base = 0;
+ break;
+ case GET_STID_RANGE:
+ tid = data;
+ tid->num = adapter->params.mc5.nservers;
+ tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
+ adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
+ break;
+ case GET_L2T_CAPACITY:
+ *(unsigned int *)data = 2048;
+ break;
+ case GET_MTUS:
+ mtup = data;
+ mtup->size = NMTUS;
+ mtup->mtus = adapter->params.mtus;
+ break;
+ case GET_IFF_FROM_MAC:
+ iffmacp = data;
+ iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
+ iffmacp->vlan_tag &
+ VLAN_VID_MASK);
+ break;
+ case GET_DDP_PARAMS:
+ ddpp = data;
+ ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
+ ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
+ ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
+ break;
+ case GET_PORTS:
+ ports = data;
+ ports->nports = adapter->params.nports;
+ for_each_port(adapter, i)
+ ports->lldevs[i] = adapter->port[i];
+ break;
+ case ULP_ISCSI_GET_PARAMS:
+ case ULP_ISCSI_SET_PARAMS:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_ulp_iscsi_ctl(adapter, req, data);
+ case RDMA_GET_PARAMS:
+ case RDMA_CQ_OP:
+ case RDMA_CQ_SETUP:
+ case RDMA_CQ_DISABLE:
+ case RDMA_CTRL_QP_SETUP:
+ case RDMA_GET_MEM:
+ case RDMA_GET_MIB:
+ if (!offload_running(adapter))
+ return -EAGAIN;
+ return cxgb_rdma_ctl(adapter, req, data);
+ case GET_RX_PAGE_INFO:
+ rx_page_info = data;
+ rx_page_info->page_size = tp->rx_pg_size;
+ rx_page_info->num = tp->rx_num_pgs;
+ break;
+ case GET_ISCSI_IPV4ADDR: {
+ struct iscsi_ipv4addr *p = data;
+ struct port_info *pi = netdev_priv(p->dev);
+ p->ipv4addr = pi->iscsi_ipv4addr;
+ break;
+ }
+ case GET_EMBEDDED_INFO: {
+ struct ch_embedded_info *e = data;
+
+ spin_lock(&adapter->stats_lock);
+ t3_get_fw_version(adapter, &e->fw_vers);
+ t3_get_tp_version(adapter, &e->tp_vers);
+ spin_unlock(&adapter->stats_lock);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/*
+ * Dummy handler for Rx offload packets in case we get an offload packet before
+ * proper processing is set up. It simply drops the packets, as it isn't
+ * normal to get offload packets at this stage.
+ */
+static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
+ int n)
+{
+ while (n--)
+ dev_kfree_skb_any(skbs[n]);
+ return 0;
+}
+
+static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+}
+
+void cxgb3_set_dummy_ops(struct t3cdev *dev)
+{
+ dev->recv = rx_offload_blackhole;
+ dev->neigh_update = dummy_neigh_update;
+}
+
+/*
+ * Free an active-open TID.
+ */
+void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union active_open_entry *p = atid2entry(t, atid);
+ void *ctx = p->t3c_tid.ctx;
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+
+ return ctx;
+}
+
+EXPORT_SYMBOL(cxgb3_free_atid);
+
+/*
+ * Free a server TID and return it to the free pool.
+ */
+void cxgb3_free_stid(struct t3cdev *tdev, int stid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+ union listen_entry *p = stid2entry(t, stid);
+
+ spin_lock_bh(&t->stid_lock);
+ p->next = t->sfree;
+ t->sfree = p;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_free_stid);
+
+void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ t->tid_tab[tid].client = client;
+ t->tid_tab[tid].ctx = ctx;
+ atomic_inc(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_insert_tid);
+
+/*
+ * Populate a TID_RELEASE WR. The skb must already be properly sized.
+ */
+static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ skb->priority = CPL_PRIORITY_SETUP;
+ req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+static void t3_process_tid_release_list(struct work_struct *work)
+{
+ struct t3c_data *td = container_of(work, struct t3c_data,
+ tid_release_task);
+ struct sk_buff *skb;
+ struct t3cdev *tdev = td->dev;
+
+
+ spin_lock_bh(&td->tid_release_lock);
+ while (td->tid_release_list) {
+ struct t3c_tid_entry *p = td->tid_release_list;
+
+ td->tid_release_list = p->ctx;
+ spin_unlock_bh(&td->tid_release_lock);
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ if (!skb)
+ skb = td->nofail_skb;
+ if (!skb) {
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ td->tid_release_list = (struct t3c_tid_entry *)p;
+ break;
+ }
+ mk_tid_release(skb, p - td->tid_maps.tid_tab);
+ cxgb3_ofld_send(tdev, skb);
+ p->ctx = NULL;
+ if (skb == td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+ spin_lock_bh(&td->tid_release_lock);
+ }
+ td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
+ spin_unlock_bh(&td->tid_release_lock);
+
+ if (!td->nofail_skb)
+ td->nofail_skb =
+ alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL);
+}
+
+/* use ctx as a next pointer in the tid release list */
+void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
+{
+ struct t3c_data *td = T3C_DATA(tdev);
+ struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
+
+ spin_lock_bh(&td->tid_release_lock);
+ p->ctx = (void *)td->tid_release_list;
+ p->client = NULL;
+ td->tid_release_list = p;
+ if (!p->ctx || td->release_list_incomplete)
+ schedule_work(&td->tid_release_task);
+ spin_unlock_bh(&td->tid_release_lock);
+}
+
+EXPORT_SYMBOL(cxgb3_queue_tid_release);
+
+/*
+ * Remove a tid from the TID table. A client may defer processing its last
+ * CPL message if it is locked at the time it arrives, and while the message
+ * sits in the client's backlog the TID may be reused for another connection.
+ * To handle this we atomically switch the TID association if it still points
+ * to the original client context.
+ */
+void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
+{
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ BUG_ON(tid >= t->ntids);
+ if (tdev->type == T3A)
+ (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
+ else {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ mk_tid_release(skb, tid);
+ cxgb3_ofld_send(tdev, skb);
+ t->tid_tab[tid].ctx = NULL;
+ } else
+ cxgb3_queue_tid_release(tdev, tid);
+ }
+ atomic_dec(&t->tids_in_use);
+}
+
+EXPORT_SYMBOL(cxgb3_remove_tid);
+
+int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int atid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree &&
+ t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
+ t->ntids) {
+ union active_open_entry *p = t->afree;
+
+ atid = (p - t->atid_tab) + t->atid_base;
+ t->afree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_atid);
+
+int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
+ void *ctx)
+{
+ int stid = -1;
+ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
+
+ spin_lock_bh(&t->stid_lock);
+ if (t->sfree) {
+ union listen_entry *p = t->sfree;
+
+ stid = (p - t->stid_tab) + t->stid_base;
+ t->sfree = p->next;
+ p->t3c_tid.ctx = ctx;
+ p->t3c_tid.client = client;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+
+EXPORT_SYMBOL(cxgb3_alloc_stid);
+
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return (struct t3cdev *)pi->adapter;
+}
+
+EXPORT_SYMBOL(dev2t3cdev);
+
+static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_smt_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_rte_write_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE)
+ printk(KERN_ERR
+ "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ rpl->status, GET_TID(rpl));
+
+ return CPL_RET_BUF_DONE;
+}
+
+static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = G_TID(ntohl(rpl->atid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
+ t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
+ return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
+ t3c_tid->
+ ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_OPEN_RPL);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int stid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, p->opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: passive open TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_stid(t, stid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
+ return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_PASS_ACCEPT_REQ);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+/*
+ * Returns an sk_buff for a reply CPL message of size len. If the input
+ * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
+ * is allocated. The input skb must be of size at least len. Note that this
+ * operation does not destroy the original skb data even if it decides to reuse
+ * the buffer.
+ */
+static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
+ gfp_t gfp)
+{
+ if (likely(!skb_cloned(skb))) {
+ BUG_ON(skb->len < len);
+ __skb_trim(skb, len);
+ skb_get(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ if (skb)
+ __skb_put(skb, len);
+ }
+ return skb;
+}
+
+static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
+{
+ union opcode_tid *p = cplhdr(skb);
+ unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[p->opcode]) {
+ return t3c_tid->client->handlers[p->opcode]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *reply_skb;
+ unsigned int tid = GET_TID(req);
+ u8 cmd = req->status;
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+ goto out;
+
+ reply_skb = cxgb3_get_cpl_reply_skb(skb,
+ sizeof(struct
+ cpl_abort_rpl),
+ GFP_ATOMIC);
+
+ if (!reply_skb) {
+ printk("do_abort_req_rss: couldn't get skb!\n");
+ goto out;
+ }
+ reply_skb->priority = CPL_PRIORITY_DATA;
+ __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
+ rpl = cplhdr(reply_skb);
+ rpl->wr.wr_hi =
+ htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+ cxgb3_ofld_send(dev, reply_skb);
+out:
+ return CPL_RET_BUF_DONE;
+ }
+}
+
+static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
+ struct t3c_tid_entry *t3c_tid;
+ unsigned int tid = GET_TID(req);
+
+ if (unlikely(tid >= t->ntids)) {
+ printk("%s: active establish TID %u too large\n",
+ dev->name, tid);
+ t3_fatal_err(tdev2adap(dev));
+ return CPL_RET_BUF_DONE;
+ }
+
+ t3c_tid = lookup_atid(t, atid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
+ return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
+ (dev, skb, t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, CPL_ACT_ESTABLISH);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct cpl_trace_pkt *p = cplhdr(skb);
+
+ skb->protocol = htons(0xffff);
+ skb->dev = dev->lldev;
+ skb_pull(skb, sizeof(*p));
+ skb_reset_mac_header(skb);
+ netif_receive_skb(skb);
+ return 0;
+}
+
+/*
+ * That skb would better have come from process_responses() where we abuse
+ * ->priority and ->csum to carry our data. NB: if we get to per-arch
+ * ->csum, things might get really interesting here.
+ */
+
+static inline u32 get_hwtid(struct sk_buff *skb)
+{
+ return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
+}
+
+static inline u32 get_opcode(struct sk_buff *skb)
+{
+ return G_OPCODE(ntohl((__force __be32)skb->csum));
+}
+
+static int do_term(struct t3cdev *dev, struct sk_buff *skb)
+{
+ unsigned int hwtid = get_hwtid(skb);
+ unsigned int opcode = get_opcode(skb);
+ struct t3c_tid_entry *t3c_tid;
+
+ t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
+ if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
+ t3c_tid->client->handlers[opcode]) {
+ return t3c_tid->client->handlers[opcode] (dev, skb,
+ t3c_tid->ctx);
+ } else {
+ printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ dev->name, opcode);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+ }
+}
+
+static int nb_callback(struct notifier_block *self, unsigned long event,
+ void *ctx)
+{
+ switch (event) {
+ case (NETEVENT_NEIGH_UPDATE):{
+ cxgb_neigh_update((struct neighbour *)ctx);
+ break;
+ }
+ case (NETEVENT_REDIRECT):{
+ struct netevent_redirect *nr = ctx;
+ cxgb_redirect(nr->old, nr->new);
+ cxgb_neigh_update(dst_get_neighbour(nr->new));
+ break;
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = nb_callback
+};
+
+/*
+ * Process a received packet with an unknown/unexpected CPL opcode.
+ */
+static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
+{
+ printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
+ *skb->data);
+ return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
+}
+
+/*
+ * Handlers for each CPL opcode
+ */
+static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
+
+/*
+ * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
+ * to unregister an existing handler.
+ */
+void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
+{
+ if (opcode < NUM_CPL_CMDS)
+ cpl_handlers[opcode] = h ? h : do_bad_cpl;
+ else
+ printk(KERN_ERR "T3C: handler registration for "
+ "opcode %x failed\n", opcode);
+}
+
+EXPORT_SYMBOL(t3_register_cpl_handler);
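+
+/* Illustrative sketch (editor's addition, not part of the original patch):
+ * installing a handler for one opcode and then restoring the dispatcher's
+ * default (passing NULL reinstates do_bad_cpl).
+ */
+static inline void cpl_handler_registration_sketch(void)
+{
+	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+	t3_register_cpl_handler(CPL_RX_DATA, NULL);
+}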
+
+/*
+ * T3CDEV's receive method.
+ */
+static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
+{
+ while (n--) {
+ struct sk_buff *skb = *skbs++;
+ unsigned int opcode = get_opcode(skb);
+ int ret = cpl_handlers[opcode] (dev, skb);
+
+#if VALIDATE_TID
+ if (ret & CPL_RET_UNKNOWN_TID) {
+ union opcode_tid *p = cplhdr(skb);
+
+ printk(KERN_ERR "%s: CPL message (opcode %u) had "
+ "unknown TID %u\n", dev->name, opcode,
+ G_TID(ntohl(p->opcode_tid)));
+ }
+#endif
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+ }
+ return 0;
+}
+
+/*
+ * Sends an sk_buff to a T3C driver after dealing with any active network taps.
+ */
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
+{
+ int r;
+
+ local_bh_disable();
+ r = dev->send(dev, skb);
+ local_bh_enable();
+ return r;
+}
+
+EXPORT_SYMBOL(cxgb3_ofld_send);
+
+static int is_offloading(struct net_device *dev)
+{
+ struct adapter *adapter;
+ int i;
+
+ read_lock_bh(&adapter_list_lock);
+ list_for_each_entry(adapter, &adapter_list, adapter_list) {
+ for_each_port(adapter, i) {
+ if (dev == adapter->port[i]) {
+ read_unlock_bh(&adapter_list_lock);
+ return 1;
+ }
+ }
+ }
+ read_unlock_bh(&adapter_list_lock);
+ return 0;
+}
+
+static void cxgb_neigh_update(struct neighbour *neigh)
+{
+ struct net_device *dev = neigh->dev;
+
+ if (dev && (is_offloading(dev))) {
+ struct t3cdev *tdev = dev2t3cdev(dev);
+
+ BUG_ON(!tdev);
+ t3_l2t_update(tdev, neigh);
+ }
+}
+
+static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
+ return;
+ }
+ skb->priority = CPL_PRIORITY_CONTROL;
+ req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = 0;
+ req->cpu_idx = 0;
+ req->word = htons(W_TCB_L2T_IX);
+ req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
+ req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
+ tdev->send(tdev, skb);
+}
+
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+{
+ struct net_device *olddev, *newdev;
+ struct tid_info *ti;
+ struct t3cdev *tdev;
+ u32 tid;
+ int update_tcb;
+ struct l2t_entry *e;
+ struct t3c_tid_entry *te;
+
+ olddev = dst_get_neighbour(old)->dev;
+ newdev = dst_get_neighbour(new)->dev;
+ if (!is_offloading(olddev))
+ return;
+ if (!is_offloading(newdev)) {
+ printk(KERN_WARNING "%s: Redirect to non-offload "
+ "device ignored.\n", __func__);
+ return;
+ }
+ tdev = dev2t3cdev(olddev);
+ BUG_ON(!tdev);
+ if (tdev != dev2t3cdev(newdev)) {
+ printk(KERN_WARNING "%s: Redirect to different "
+ "offload device ignored.\n", __func__);
+ return;
+ }
+
+ /* Add new L2T entry */
+ e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ if (!e) {
+ printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
+ __func__);
+ return;
+ }
+
+ /* Walk tid table and notify clients of dst change. */
+ ti = &(T3C_DATA(tdev))->tid_maps;
+ for (tid = 0; tid < ti->ntids; tid++) {
+ te = lookup_tid(ti, tid);
+ BUG_ON(!te);
+ if (te && te->ctx && te->client && te->client->redirect) {
+ update_tcb = te->client->redirect(te->ctx, old, new, e);
+ if (update_tcb) {
++ rcu_read_lock();
+ l2t_hold(L2DATA(tdev), e);
++ rcu_read_unlock();
+ set_l2t_ix(tdev, tid, e);
+ }
+ }
+ }
- L2DATA(dev) = t3_init_l2t(l2t_capacity);
++ l2t_release(tdev, e);
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *cxgb_alloc_mem(unsigned long size)
+{
+ void *p = kzalloc(size, GFP_KERNEL);
+
+ if (!p)
+ p = vzalloc(size);
+ return p;
+}
+
+/*
+ * Free memory allocated through t3_alloc_mem().
+ */
+void cxgb_free_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
+ unsigned int natids, unsigned int nstids,
+ unsigned int atid_base, unsigned int stid_base)
+{
+ unsigned long size = ntids * sizeof(*t->tid_tab) +
+ natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
+
+ t->tid_tab = cxgb_alloc_mem(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
+ t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
+ t->ntids = ntids;
+ t->nstids = nstids;
+ t->stid_base = stid_base;
+ t->sfree = NULL;
+ t->natids = natids;
+ t->atid_base = atid_base;
+ t->afree = NULL;
+ t->stids_in_use = t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ /*
+ * Setup the free lists for stid_tab and atid_tab.
+ */
+ if (nstids) {
+ while (--nstids)
+ t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
+ t->sfree = t->stid_tab;
+ }
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ return 0;
+}
+
+static void free_tid_maps(struct tid_info *t)
+{
+ cxgb_free_mem(t->tid_tab);
+}
+
+static inline void add_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_add_tail(&adap->adapter_list, &adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+static inline void remove_adapter(struct adapter *adap)
+{
+ write_lock_bh(&adapter_list_lock);
+ list_del(&adap->adapter_list);
+ write_unlock_bh(&adapter_list_lock);
+}
+
+int cxgb3_offload_activate(struct adapter *adapter)
+{
+ struct t3cdev *dev = &adapter->tdev;
+ int natids, err;
+ struct t3c_data *t;
+ struct tid_range stid_range, tid_range;
+ struct mtutab mtutab;
+ unsigned int l2t_capacity;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = -EOPNOTSUPP;
+ if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
+ dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
+ dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
+ dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
+ dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
+ dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
+ goto out_free;
+
+ err = -ENOMEM;
- L2DATA(dev) = NULL;
++ RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
+ if (!L2DATA(dev))
+ goto out_free;
+
+ natids = min(tid_range.num / 2, MAX_ATIDS);
+ err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
+ stid_range.num, ATID_BASE, stid_range.base);
+ if (err)
+ goto out_free_l2t;
+
+ t->mtus = mtutab.mtus;
+ t->nmtus = mtutab.size;
+
+ INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
+ spin_lock_init(&t->tid_release_lock);
+ INIT_LIST_HEAD(&t->list_node);
+ t->dev = dev;
+
+ T3C_DATA(dev) = t;
+ dev->recv = process_rx;
+ dev->neigh_update = t3_l2t_update;
+
+ /* Register netevent handler once */
+ if (list_empty(&adapter_list))
+ register_netevent_notifier(&nb);
+
+ t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
+ t->release_list_incomplete = 0;
+
+ add_adapter(adapter);
+ return 0;
+
+out_free_l2t:
+ t3_free_l2t(L2DATA(dev));
- t3_free_l2t(L2DATA(tdev));
- L2DATA(tdev) = NULL;
++ rcu_assign_pointer(dev->l2opt, NULL);
+out_free:
+ kfree(t);
+ return err;
+}
+
++static void clean_l2_data(struct rcu_head *head)
++{
++ struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
++ t3_free_l2t(d);
++}
++
++
+void cxgb3_offload_deactivate(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+ struct t3c_data *t = T3C_DATA(tdev);
++ struct l2t_data *d;
+
+ remove_adapter(adapter);
+ if (list_empty(&adapter_list))
+ unregister_netevent_notifier(&nb);
+
+ free_tid_maps(&t->tid_maps);
+ T3C_DATA(tdev) = NULL;
++ rcu_read_lock();
++ d = L2DATA(tdev);
++ rcu_read_unlock();
++ rcu_assign_pointer(tdev->l2opt, NULL);
++ call_rcu(&d->rcu_head, clean_l2_data);
+ if (t->nofail_skb)
+ kfree_skb(t->nofail_skb);
+ kfree(t);
+}
+
+static inline void register_tdev(struct t3cdev *tdev)
+{
+ static int unit;
+
+ mutex_lock(&cxgb3_db_lock);
+ snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
+ list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline void unregister_tdev(struct t3cdev *tdev)
+{
+ mutex_lock(&cxgb3_db_lock);
+ list_del(&tdev->ofld_dev_list);
+ mutex_unlock(&cxgb3_db_lock);
+}
+
+static inline int adap2type(struct adapter *adapter)
+{
+ int type = 0;
+
+ switch (adapter->params.rev) {
+ case T3_REV_A:
+ type = T3A;
+ break;
+ case T3_REV_B:
+ case T3_REV_B2:
+ type = T3B;
+ break;
+ case T3_REV_C:
+ type = T3C;
+ break;
+ }
+ return type;
+}
+
+void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ INIT_LIST_HEAD(&tdev->ofld_dev_list);
+
+ cxgb3_set_dummy_ops(tdev);
+ tdev->send = t3_offload_tx;
+ tdev->ctl = cxgb_offload_ctl;
+ tdev->type = adap2type(adapter);
+
+ register_tdev(tdev);
+}
+
+void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
+{
+ struct t3cdev *tdev = &adapter->tdev;
+
+ tdev->recv = NULL;
+ tdev->neigh_update = NULL;
+
+ unregister_tdev(tdev);
+}
+
+void __init cxgb3_offload_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CPL_CMDS; ++i)
+ cpl_handlers[i] = do_bad_cpl;
+
+ t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
+ t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
+ t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
+ t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
+ t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
+ t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
+ t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
+ t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+ t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
+ t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+ t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
+ t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
+}
--- /dev/null
- struct l2t_entry *e;
- struct l2t_data *d = L2DATA(cdev);
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <net/neighbour.h>
+#include "common.h"
+#include "t3cdev.h"
+#include "cxgb3_defs.h"
+#include "l2t.h"
+#include "t3_cpl.h"
+#include "firmware_exports.h"
+
+#define VLAN_NONE 0xfff
+
+/*
+ * Module locking notes: There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
+ * under the protection of the table lock, individual entry changes happen
+ * while holding that entry's spinlock. The table lock nests outside the
+ * entry locks. Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries. Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel. An L2T entry can be dropped by decrementing its reference count
+ * and therefore can happen in parallel with entry allocation but no entry
+ * can change state or increment its ref count during allocation as both of
+ * these perform lookups.
+ */
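(Aside, not part of the patch: the ordering described above is what t3_l2t_get below follows when it creates an entry; a condensed sketch, with error handling omitted:)

	write_lock_bh(&d->lock);	/* table lock as writer excludes concurrent lookups */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* per-entry lock nests inside the table lock */
		/* fill in addr, ifindex, vlan and take the initial reference */
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);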
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline unsigned int arp_hash(u32 key, int ifindex,
+ const struct l2t_data *d)
+{
+ return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
+}
+
+static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+ neigh_hold(n);
+ if (e->neigh)
+ neigh_release(e->neigh);
+ e->neigh = n;
+}
+
+/*
+ * Set up an L2T entry and send any packets waiting in the arp queue. The
+ * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
+ * entry locked.
+ */
+static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ struct cpl_l2t_write_req *req;
+ struct sk_buff *tmp;
+
+ if (!skb) {
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ }
+
+ req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
+ req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
+ V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
+ V_L2T_W_PRIO(vlan_prio(e)));
+ memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(dev, skb);
+
+ skb_queue_walk_safe(&e->arpq, skb, tmp) {
+ __skb_unlink(skb, &e->arpq);
+ cxgb3_ofld_send(dev, skb);
+ }
+ e->state = L2T_STATE_VALID;
+
+ return 0;
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+ __skb_queue_tail(&e->arpq, skb);
+}
+
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_VALID;
+ spin_unlock_bh(&e->lock);
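+ /* fall through */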
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return cxgb3_ofld_send(dev, skb);
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ arpq_enqueue(e, skb);
+ spin_unlock_bh(&e->lock);
+
+ /*
+ * Only the first packet added to the arpq should kick off
+ * resolution. However, because the alloc_skb below can fail,
+ * we allow each packet added to the arpq to retry resolution
+ * as a way of recovering from transient memory exhaustion.
+ * A better way would be to use a work request to retry L2T
+ * entries when there's no memory.
+ */
+ if (!neigh_event_send(e->neigh, NULL)) {
+ skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
+ GFP_ATOMIC);
+ if (!skb)
+ break;
+
+ spin_lock_bh(&e->lock);
+ if (!skb_queue_empty(&e->arpq))
+ setup_l2e_send_pending(dev, skb, e);
+ else /* we lost the race */
+ __kfree_skb(skb);
+ spin_unlock_bh(&e->lock);
+ }
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(t3_l2t_send_slow);
+
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
+{
+again:
+ switch (e->state) {
+ case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
+ neigh_event_send(e->neigh, NULL);
+ spin_lock_bh(&e->lock);
+ if (e->state == L2T_STATE_STALE) {
+ e->state = L2T_STATE_VALID;
+ }
+ spin_unlock_bh(&e->lock);
+ return;
+ case L2T_STATE_VALID: /* fast-path, send the packet on */
+ return;
+ case L2T_STATE_RESOLVING:
+ spin_lock_bh(&e->lock);
+ if (e->state != L2T_STATE_RESOLVING) {
+ /* ARP already completed */
+ spin_unlock_bh(&e->lock);
+ goto again;
+ }
+ spin_unlock_bh(&e->lock);
+
+ /*
+ * Only the first packet added to the arpq should kick off
+ * resolution. However, because the alloc_skb below can fail,
+ * we allow each packet added to the arpq to retry resolution
+ * as a way of recovering from transient memory exhaustion.
+ * A better way would be to use a work request to retry L2T
+ * entries when there's no memory.
+ */
+ neigh_event_send(e->neigh, NULL);
+ }
+}
+
+EXPORT_SYMBOL(t3_l2t_send_event);
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ if (!atomic_read(&d->nfree))
+ return NULL;
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
+ if (atomic_read(&e->refcnt) == 0)
+ goto found;
+
+ for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
+found:
+ d->rover = e + 1;
+ atomic_dec(&d->nfree);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state != L2T_STATE_UNUSED) {
+ int hash = arp_hash(e->addr, e->ifindex, d);
+
+ for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
+ if (*p == e) {
+ *p = e->next;
+ break;
+ }
+ e->state = L2T_STATE_UNUSED;
+ }
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users. The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor. We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
+{
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->neigh) {
+ neigh_release(e->neigh);
+ e->neigh = NULL;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ atomic_inc(&d->nfree);
+}
+
+EXPORT_SYMBOL(t3_l2e_free);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+ unsigned int nud_state;
+
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+ nud_state = neigh->nud_state;
+ if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+ !(nud_state & NUD_VALID))
+ e->state = L2T_STATE_RESOLVING;
+ else if (nud_state & NUD_CONNECTED)
+ e->state = L2T_STATE_VALID;
+ else
+ e->state = L2T_STATE_STALE;
+ spin_unlock(&e->lock);
+}
+
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+ struct net_device *dev)
+{
- struct l2t_entry *e;
- struct l2t_data *d = L2DATA(cdev);
++ struct l2t_entry *e = NULL;
++ struct l2t_data *d;
++ int hash;
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
- int hash = arp_hash(addr, ifidx, d);
+ struct port_info *p = netdev_priv(dev);
+ int smt_idx = p->port_id;
+
++ rcu_read_lock();
++ d = L2DATA(cdev);
++ if (!d)
++ goto done_rcu;
++
++ hash = arp_hash(addr, ifidx, d);
++
+ write_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx &&
+ e->smt_idx == smt_idx) {
+ l2t_hold(d, e);
+ if (atomic_read(&e->refcnt) == 1)
+ reuse_entry(e, neigh);
+ goto done;
+ }
+
+ /* Need to allocate a new entry */
+ e = alloc_l2e(d);
+ if (e) {
+ spin_lock(&e->lock); /* avoid race with t3_l2t_free */
+ e->next = d->l2tab[hash].first;
+ d->l2tab[hash].first = e;
+ e->state = L2T_STATE_RESOLVING;
+ e->addr = addr;
+ e->ifindex = ifidx;
+ e->smt_idx = smt_idx;
+ atomic_set(&e->refcnt, 1);
+ neigh_replace(e, neigh);
+ if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+ e->vlan = vlan_dev_vlan_id(neigh->dev);
+ else
+ e->vlan = VLAN_NONE;
+ spin_unlock(&e->lock);
+ }
+done:
+ write_unlock_bh(&d->lock);
++done_rcu:
++ rcu_read_unlock();
+ return e;
+}
+
+EXPORT_SYMBOL(t3_l2t_get);
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head. If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the offload device.
+ *
+ * XXX: maybe we should abandon the latter behavior and just require a failure
+ * handler.
+ */
+static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
+{
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(arpq, skb, tmp) {
+ struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ __skb_unlink(skb, arpq);
+ if (cb->arp_failure_handler)
+ cb->arp_failure_handler(dev, skb);
+ else
+ cxgb3_ofld_send(dev, skb);
+ }
+}
+
+/*
+ * Called when the host's ARP layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
+{
+ struct sk_buff_head arpq;
+ struct l2t_entry *e;
+ struct l2t_data *d = L2DATA(dev);
+ u32 addr = *(u32 *) neigh->primary_key;
+ int ifidx = neigh->dev->ifindex;
+ int hash = arp_hash(addr, ifidx, d);
+
+ read_lock_bh(&d->lock);
+ for (e = d->l2tab[hash].first; e; e = e->next)
+ if (e->addr == addr && e->ifindex == ifidx) {
+ spin_lock(&e->lock);
+ goto found;
+ }
+ read_unlock_bh(&d->lock);
+ return;
+
+found:
+ __skb_queue_head_init(&arpq);
+
+ read_unlock(&d->lock);
+ if (atomic_read(&e->refcnt)) {
+ if (neigh != e->neigh)
+ neigh_replace(e, neigh);
+
+ if (e->state == L2T_STATE_RESOLVING) {
+ if (neigh->nud_state & NUD_FAILED) {
+ skb_queue_splice_init(&e->arpq, &arpq);
+ } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
+ setup_l2e_send_pending(dev, NULL, e);
+ } else {
+ e->state = neigh->nud_state & NUD_CONNECTED ?
+ L2T_STATE_VALID : L2T_STATE_STALE;
+ if (memcmp(e->dmac, neigh->ha, 6))
+ setup_l2e_send_pending(dev, NULL, e);
+ }
+ }
+ spin_unlock_bh(&e->lock);
+
+ if (!skb_queue_empty(&arpq))
+ handle_failed_resolution(dev, &arpq);
+}
+
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
+{
+ struct l2t_data *d;
+ int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+
+ d = cxgb_alloc_mem(size);
+ if (!d)
+ return NULL;
+
+ d->nentries = l2t_capacity;
+ d->rover = &d->l2tab[1]; /* entry 0 is not used */
+ atomic_set(&d->nfree, l2t_capacity - 1);
+ rwlock_init(&d->lock);
+
+ for (i = 0; i < l2t_capacity; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ __skb_queue_head_init(&d->l2tab[i].arpq);
+ spin_lock_init(&d->l2tab[i].lock);
+ atomic_set(&d->l2tab[i].refcnt, 0);
+ }
+ return d;
+}
+
+void t3_free_l2t(struct l2t_data *d)
+{
+ cxgb_free_mem(d);
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CHELSIO_L2T_H
+#define _CHELSIO_L2T_H
+
+#include <linux/spinlock.h>
+#include "t3cdev.h"
+#include <linux/atomic.h>
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+struct neighbour;
+struct sk_buff;
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index */
+ u32 addr; /* dest IP address */
+ int ifindex; /* neighbor's net_device's ifindex */
+ u16 smt_idx; /* SMT index */
+ u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15) */
+ struct neighbour *neigh; /* associated neighbour */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct sk_buff_head arpq; /* queue of packets awaiting resolution */
+ spinlock_t lock;
+ atomic_t refcnt; /* entry reference count */
+ u8 dmac[6]; /* neighbour's MAC address */
+};
+
+struct l2t_data {
+ unsigned int nentries; /* number of entries */
+ struct l2t_entry *rover; /* starting point for next allocation */
+ atomic_t nfree; /* number of free entries */
+ rwlock_t lock;
++ struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[0];
+};
+
+typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
+ struct sk_buff * skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+ arp_failure_handler_func arp_failure_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void set_arp_failure_handler(struct sk_buff *skb,
+ arp_failure_handler_func hnd)
+{
+ L2T_SKB_CB(skb)->arp_failure_handler = hnd;
+}
+
+/*
+ * Getting to the L2 data from an offload device.
+ */
- #define L2DATA(dev) ((dev)->l2opt)
++#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
+
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 7
+#define M_TCB_L2T_IX 0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
+void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+ struct net_device *dev);
+int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e);
+void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
+struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
+void t3_free_l2t(struct l2t_data *d);
+
+int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
+
+static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
+ struct l2t_entry *e)
+{
+ if (likely(e->state == L2T_STATE_VALID))
+ return cxgb3_ofld_send(dev, skb);
+ return t3_l2t_send_slow(dev, skb, e);
+}
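(Illustrative only, not part of the patch: a typical offload sender installs a failure callback before handing the skb to l2t_send; my_arp_fail, tdev, skb and e are hypothetical placeholders.)

	set_arp_failure_handler(skb, my_arp_fail);	/* run from handle_failed_resolution() if ARP fails */
	l2t_send(tdev, skb, e);				/* fast path when VALID, otherwise queued on the arpq */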
+
- static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
++static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
+{
- if (atomic_dec_and_test(&e->refcnt))
++ struct l2t_data *d;
++
++ rcu_read_lock();
++ d = L2DATA(t);
++
++ if (atomic_dec_and_test(&e->refcnt) && d)
+ t3_l2e_free(d, e);
++
++ rcu_read_unlock();
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
- if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
++ if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ atomic_dec(&d->nfree);
+}
+
+#endif
--- /dev/null
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sockios.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <asm/uaccess.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "l2t.h"
+
+#define DRV_VERSION "1.3.0-ko"
+#define DRV_DESC "Chelsio T4 Network Driver"
+
+/*
+ * Max interrupt hold-off timer value in us. Queues fall back to this value
+ * under extreme memory pressure so it's largish to give the system time to
+ * recover.
+ */
+#define MAX_SGE_TIMERVAL 200U
+
+#ifdef CONFIG_PCI_IOV
+/*
+ * Virtual Function provisioning constants. We need two extra Ingress Queues
+ * with Interrupt capability to serve as the VF's Firmware Event Queue and
+ * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
+ * Lists associated with them. For each Ethernet/Control Egress Queue and
+ * for each Free List, we need an Egress Context.
+ */
+enum {
+ VFRES_NPORTS = 1, /* # of "ports" per VF */
+ VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
+
+ VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
+ VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
+ VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
+ VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
+ VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
+ VFRES_TC = 0, /* PCI-E traffic class */
+ VFRES_NEXACTF = 16, /* # of exact MPS filters */
+
+ VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
+ VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+};
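(Arithmetic check, for orientation only: with VFRES_NQSETS = 2 the budget above works out to VFRES_NIQFLINT = 4 interrupt-capable ingress queues per VF, i.e. the two queue sets plus the Firmware Event Queue and the Forwarded Interrupt Queue, and VFRES_NEQ = 4 egress contexts, one per Ethernet/Control queue and one per Free List.)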
+
+/*
+ * Provide a Port Access Rights Mask for the specified PF/VF. This is very
+ * static and likely not to be useful in the long run. We really need to
+ * implement some form of persistent configuration which the firmware
+ * controls.
+ */
+static unsigned int pfvfres_pmask(struct adapter *adapter,
+ unsigned int pf, unsigned int vf)
+{
+ unsigned int portn, portvec;
+
+ /*
+ * Give PF's access to all of the ports.
+ */
+ if (vf == 0)
+ return FW_PFVF_CMD_PMASK_MASK;
+
+ /*
+ * For VFs, we'll assign them access to the ports based purely on the
+ * PF. We assign active ports in order, wrapping around if there are
+ * fewer active ports than PFs: e.g. active port[pf % nports].
+ * Unfortunately the adapter's port_info structs haven't been
+ * initialized yet so we have to compute this.
+ */
+ if (adapter->params.nports == 0)
+ return 0;
+
+ portn = pf % adapter->params.nports;
+ portvec = adapter->params.portvec;
+ for (;;) {
+ /*
+ * Isolate the lowest set bit in the port vector. If we're at
+ * the port number that we want, return that as the pmask.
+ * otherwise mask that bit out of the port vector and
+ * decrement our port number ...
+ */
+ unsigned int pmask = portvec ^ (portvec & (portvec-1));
+ if (portn == 0)
+ return pmask;
+ portn--;
+ portvec &= ~pmask;
+ }
+ /*NOTREACHED*/
+}
+#endif
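(Worked example, values chosen for illustration: on a 2-port adapter portvec = 0x3, so PFs 0 and 2 resolve to pmask 0x1 (port 0) and PFs 1 and 3 to pmask 0x2 (port 1); every VF of a given PF then inherits that PF's single-port mask.)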
+
+enum {
+ MEMWIN0_APERTURE = 65536,
+ MEMWIN0_BASE = 0x30000,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 2048,
+ MEMWIN2_BASE = 0x1b800,
+};
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 32,
+ MIN_CTRL_TXQ_ENTRIES = 32,
+ MIN_RSPQ_ENTRIES = 128,
+ MIN_FL_ENTRIES = 16
+};
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
+ CH_DEVICE(0xa000, 0), /* PE10K */
+ CH_DEVICE(0x4001, -1),
+ CH_DEVICE(0x4002, -1),
+ CH_DEVICE(0x4003, -1),
+ CH_DEVICE(0x4004, -1),
+ CH_DEVICE(0x4005, -1),
+ CH_DEVICE(0x4006, -1),
+ CH_DEVICE(0x4007, -1),
+ CH_DEVICE(0x4008, -1),
+ CH_DEVICE(0x4009, -1),
+ CH_DEVICE(0x400a, -1),
+ CH_DEVICE(0x4401, 4),
+ CH_DEVICE(0x4402, 4),
+ CH_DEVICE(0x4403, 4),
+ CH_DEVICE(0x4404, 4),
+ CH_DEVICE(0x4405, 4),
+ CH_DEVICE(0x4406, 4),
+ CH_DEVICE(0x4407, 4),
+ CH_DEVICE(0x4408, 4),
+ CH_DEVICE(0x4409, 4),
+ CH_DEVICE(0x440a, 4),
+ { 0, }
+};
+
+#define FW_FNAME "cxgb4/t4fw.bin"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+MODULE_FIRMWARE(FW_FNAME);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
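(Usage illustration, hypothetical invocation: loading with "modprobe cxgb4 msi=1" restricts the driver to MSI or INTx, while "msi=0" forces legacy INTx.)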
+
+/*
+ * Queue interrupt hold-off timer values. Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+ "0..4 in microseconds");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+ "thresholds 1..3 for queue interrupt packet counters");
+
+static int vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+
+static unsigned int num_vf[4];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+ if (!netif_carrier_ok(dev))
+ netdev_info(dev, "link down\n");
+ else {
+ static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+ const char *s = "10Mbps";
+ const struct port_info *p = netdev_priv(dev);
+
+ switch (p->link_cfg.speed) {
+ case SPEED_10000:
+ s = "10Gbps";
+ break;
+ case SPEED_1000:
+ s = "1000Mbps";
+ break;
+ case SPEED_100:
+ s = "100Mbps";
+ break;
+ }
+
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+ fc[p->link_cfg.fc]);
+ }
+}
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+ struct net_device *dev = adapter->port[port_id];
+
+ /* Skip changes from disabled ports. */
+ if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+ if (link_stat)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ link_report(dev);
+ }
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+
+ const struct net_device *dev = adap->port[port_id];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ netdev_info(dev, "port module unplugged\n");
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+ u64 mhash = 0;
+ u64 uhash = 0;
+ bool free = true;
+ u16 filt_idx[7];
+ const u8 *addr[7];
+ int ret, naddr = 0;
+ const struct netdev_hw_addr *ha;
+ int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /* first do the secondary unicast addresses */
+ netdev_for_each_uc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &uhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ /* next set up the multicast addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+ ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
+ naddr, addr, filt_idx, &mhash, sleep);
+ if (ret < 0)
+ return ret;
+
+ free = false;
+ naddr = 0;
+ }
+ }
+
+ return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
+ uhash | mhash, sleep);
+}
+
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ ret = set_addr_filters(dev, sleep_ok);
+ if (ret == 0)
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+ sleep_ok);
+ return ret;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int mb = pi->adapter->fn;
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
+ if (ret == 0) {
+ ret = t4_change_mac(pi->adapter, mb, pi->viid,
+ pi->xact_addr_filt, dev->dev_addr, true,
+ true);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0)
+ ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0)
+ ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+ return ret;
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ const struct cpl_sge_egr_update *p = (void *)rsp;
+ unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+ struct sge_txq *txq;
+
+ txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
+ txq->restarts++;
+ if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
+ struct sge_eth_txq *eq;
+
+ eq = container_of(txq, struct sge_eth_txq, q);
+ netif_tx_wake_queue(eq->txq);
+ } else {
+ struct sge_ofld_txq *oq;
+
+ oq = container_of(txq, struct sge_ofld_txq, q);
+ tasklet_schedule(&oq->qresume_tsk);
+ }
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *p = (void *)rsp;
+
+ if (p->type == 0)
+ t4_handle_fw_rpl(q->adap, p->data);
+ } else if (opcode == CPL_L2T_WRITE_RPL) {
+ const struct cpl_l2t_write_rpl *p = (void *)rsp;
+
+ do_l2t_write_rpl(q->adap, p);
+ } else
+ dev_err(q->adap->pdev_dev,
+ "unexpected CPL %#x on FW event queue\n", opcode);
+ return 0;
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD; we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+
+ if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+ rxq->stats.nomem++;
+ return -1;
+ }
+ if (gl == NULL)
+ rxq->stats.imm++;
+ else if (gl == CXGB4_MSG_AN)
+ rxq->stats.an++;
+ else
+ rxq->stats.pkts++;
+ return 0;
+}
+
+static void disable_msi(struct adapter *adapter)
+{
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
+ }
+}
+
+/*
+ * Interrupt handler for non-data events used with MSI-X.
+ */
+static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+{
+ struct adapter *adap = cookie;
+
+ u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+ if (v & PFSW) {
+ adap->swintr = 1;
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+ }
+ t4_slow_intr_handler(adap);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+ int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
+
+ /* non-data interrupts */
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
+
+ /* FW events */
+ snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+ adap->port[0]->name);
+
+ /* Ethernet queues */
+ for_each_port(adap, j) {
+ struct net_device *d = adap->port[j];
+ const struct port_info *pi = netdev_priv(d);
+
+ for (i = 0; i < pi->nqsets; i++, msi_idx++)
+ snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+ d->name, i);
+ }
+
+ /* offload queues */
+ for_each_ofldrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ adap->port[0]->name, i);
+
+ for_each_rdmarxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+ adap->port[0]->name, i);
+}
+
+static int request_msix_queue_irqs(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+
+ err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[1].desc, &s->fw_evtq);
+ if (err)
+ return err;
+
+ for_each_ethrxq(s, ethqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ethrxq[ethqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_ofldrxq(s, ofldqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->ofldrxq[ofldqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ for_each_rdmarxq(s, rdmaqidx) {
+ err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+ adap->msix_info[msi].desc,
+ &s->rdmarxq[rdmaqidx].rspq);
+ if (err)
+ goto unwind;
+ msi++;
+ }
+ return 0;
+
+unwind:
+ while (--rdmaqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->rdmarxq[rdmaqidx].rspq);
+ while (--ofldqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec,
+ &s->ofldrxq[ofldqidx].rspq);
+ while (--ethqidx >= 0)
+ free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ return err;
+}
+
+static void free_msix_queue_irqs(struct adapter *adap)
+{
+ int i, msi = 2;
+ struct sge *s = &adap->sge;
+
+ free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+ for_each_ethrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+ for_each_ofldrxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+ for_each_rdmarxq(s, i)
+ free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+}
+
+/**
+ * write_rss - write the RSS table for a given port
+ * @pi: the port
+ * @queues: array of queue indices for RSS
+ *
+ * Sets up the portion of the HW RSS table for the port's VI to distribute
+ * packets to the Rx queues in @queues.
+ */
+static int write_rss(const struct port_info *pi, const u16 *queues)
+{
+ u16 *rss;
+ int i, err;
+ const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+
+ rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
+ if (!rss)
+ return -ENOMEM;
+
+ /* map the queue indices to queue ids */
+ for (i = 0; i < pi->rss_size; i++, queues++)
+ rss[i] = q[*queues].rspq.abs_id;
+
+ err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+ pi->rss_size, rss, pi->rss_size);
+ kfree(rss);
+ return err;
+}
+
+/**
+ * setup_rss - configure RSS
+ * @adap: the adapter
+ *
+ * Sets up RSS for each port.
+ */
+static int setup_rss(struct adapter *adap)
+{
+ int i, err;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+
+ err = write_rss(pi, pi->rss);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+ qid -= p->ingr_start;
+ return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (q && q->handler)
+ napi_disable(&q->napi);
+ }
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+ struct sge_rspq *q = adap->sge.ingr_map[i];
+
+ if (!q)
+ continue;
+ if (q->handler)
+ napi_enable(&q->napi);
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+ SEINTARM(q->intr_params) |
+ INGRESSQID(q->cntxt_id));
+ }
+}
+
+/**
+ * setup_sge_queues - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+ int err, msi_idx, i, j;
+ struct sge *s = &adap->sge;
+
+ bitmap_zero(s->starving_fl, MAX_EGRQ);
+ bitmap_zero(s->txq_maperr, MAX_EGRQ);
+
+ if (adap->flags & USING_MSIX)
+ msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+ NULL, NULL);
+ if (err)
+ return err;
+ msi_idx = -((int)s->intrq.abs_id + 1);
+ }
+
+ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ msi_idx, NULL, fwevtq_handler);
+ if (err) {
+freeout: t4_free_sge_resources(adap);
+ return err;
+ }
+
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+ struct port_info *pi = netdev_priv(dev);
+ struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+ struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+
+ for (j = 0; j < pi->nqsets; j++, q++) {
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+ msi_idx, &q->fl,
+ t4_ethrx_handler);
+ if (err)
+ goto freeout;
+ q->rspq.idx = j;
+ memset(&q->stats, 0, sizeof(q->stats));
+ }
+ for (j = 0; j < pi->nqsets; j++, t++) {
+ err = t4_sge_alloc_eth_txq(adap, t, dev,
+ netdev_get_tx_queue(dev, j),
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+ }
+
+ j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+ for_each_ofldrxq(s, i) {
+ struct sge_ofld_rxq *q = &s->ofldrxq[i];
+ struct net_device *dev = adap->port[i / j];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
+ &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->ofld_rxq[i] = q->rspq.abs_id;
+ err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+ s->fw_evtq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ for_each_rdmarxq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmarxq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, &q->fl, uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_rxq[i] = q->rspq.abs_id;
+ }
+
+ for_each_port(adap, i) {
+ /*
+ * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ * have RDMA queues, and that's the right value.
+ */
+ err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+ s->fw_evtq.cntxt_id,
+ s->rdmarxq[i].rspq.cntxt_id);
+ if (err)
+ goto freeout;
+ }
+
+ t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+ RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
+ QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+ return 0;
+}
+
+/*
+ * Returns 0 if new FW was successfully loaded, a positive errno if a load was
+ * started but failed, and a negative errno if flash load couldn't start.
+ */
+static int upgrade_fw(struct adapter *adap)
+{
+ int ret;
+ u32 vers;
+ const struct fw_hdr *hdr;
+ const struct firmware *fw;
+ struct device *dev = adap->pdev_dev;
+
+ ret = request_firmware(&fw, FW_FNAME, dev);
+ if (ret < 0) {
+ dev_err(dev, "unable to load firmware image " FW_FNAME
+ ", error %d\n", ret);
+ return ret;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ vers = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ ret = -EINVAL; /* wrong major version, won't do */
+ goto out;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ vers > adap->params.fw_vers) {
+ ret = -t4_load_fw(adap, fw->data, fw->size);
+ if (!ret)
+ dev_info(dev, "firmware upgraded to version %pI4 from "
+ FW_FNAME "\n", &hdr->fw_ver);
+ }
+out: release_firmware(fw);
+ return ret;
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ void *p = kzalloc(size, GFP_KERNEL);
+
+ if (!p)
+ p = vzalloc(size);
+ return p;
+}
+
+/*
+ * Free memory allocated through t4_alloc_mem().
+ */
+static void t4_free_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+/*
+ * Implementation of ethtool operations.
+ */
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ netdev2adap(dev)->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+ "TxOctetsOK ",
+ "TxFramesOK ",
+ "TxBroadcastFrames ",
+ "TxMulticastFrames ",
+ "TxUnicastFrames ",
+ "TxErrorFrames ",
+
+ "TxFrames64 ",
+ "TxFrames65To127 ",
+ "TxFrames128To255 ",
+ "TxFrames256To511 ",
+ "TxFrames512To1023 ",
+ "TxFrames1024To1518 ",
+ "TxFrames1519ToMax ",
+
+ "TxFramesDropped ",
+ "TxPauseFrames ",
+ "TxPPP0Frames ",
+ "TxPPP1Frames ",
+ "TxPPP2Frames ",
+ "TxPPP3Frames ",
+ "TxPPP4Frames ",
+ "TxPPP5Frames ",
+ "TxPPP6Frames ",
+ "TxPPP7Frames ",
+
+ "RxOctetsOK ",
+ "RxFramesOK ",
+ "RxBroadcastFrames ",
+ "RxMulticastFrames ",
+ "RxUnicastFrames ",
+
+ "RxFramesTooLong ",
+ "RxJabberErrors ",
+ "RxFCSErrors ",
+ "RxLengthErrors ",
+ "RxSymbolErrors ",
+ "RxRuntFrames ",
+
+ "RxFrames64 ",
+ "RxFrames65To127 ",
+ "RxFrames128To255 ",
+ "RxFrames256To511 ",
+ "RxFrames512To1023 ",
+ "RxFrames1024To1518 ",
+ "RxFrames1519ToMax ",
+
+ "RxPauseFrames ",
+ "RxPPP0Frames ",
+ "RxPPP1Frames ",
+ "RxPPP2Frames ",
+ "RxPPP3Frames ",
+ "RxPPP4Frames ",
+ "RxPPP5Frames ",
+ "RxPPP6Frames ",
+ "RxPPP7Frames ",
+
+ "RxBG0FramesDropped ",
+ "RxBG1FramesDropped ",
+ "RxBG2FramesDropped ",
+ "RxBG3FramesDropped ",
+ "RxBG0FramesTrunc ",
+ "RxBG1FramesTrunc ",
+ "RxBG2FramesTrunc ",
+ "RxBG3FramesTrunc ",
+
+ "TSO ",
+ "TxCsumOffload ",
+ "RxCsumGood ",
+ "VLANextractions ",
+ "VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define T4_REGMAP_SIZE (160 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+ return T4_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+ return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(adapter->pdev));
+
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.%u, TP %u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/*
+ * port stats maintained per queue of the port. They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+ u64 tso;
+ u64 tx_csum;
+ u64 rx_csum;
+ u64 vlan_ex;
+ u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+ const struct port_info *p, struct queue_port_stats *s)
+{
+ int i;
+ const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+ const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+ s->tso += tx->tso;
+ s->tx_csum += tx->tx_cso;
+ s->rx_csum += rx->stats.rx_cso;
+ s->vlan_ex += rx->stats.vlan_ex;
+ s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
+ }
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+ data += sizeof(struct port_stats) / sizeof(u64);
+ collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return a version number to identify the type of adapter. The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ * - bits 16..23: register dump version
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *ap)
+{
+ return 4 | (ap->params.rev << 10) | (1 << 16);
+}
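(Worked example, not in the source: a revision-1 T4 gives 4 | (1 << 10) | (1 << 16) = 0x10404, i.e. chip version 4, chip revision 1, register dump version 1.)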
+
+static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
+ unsigned int end)
+{
+ u32 *p = buf + start;
+
+ for ( ; start <= end; start += sizeof(u32))
+ *p++ = t4_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ static const unsigned int reg_ranges[] = {
+ 0x1008, 0x1108,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1300, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x5924,
+ 0x5960, 0x59d4,
+ 0x5a00, 0x5af8,
+ 0x6000, 0x6098,
+ 0x6100, 0x6150,
+ 0x6200, 0x6208,
+ 0x6240, 0x6248,
+ 0x6280, 0x6338,
+ 0x6370, 0x638c,
+ 0x6400, 0x643c,
+ 0x6500, 0x6524,
+ 0x6a00, 0x6a38,
+ 0x6a60, 0x6a78,
+ 0x6b00, 0x6b84,
+ 0x6bf0, 0x6c84,
+ 0x6cf0, 0x6d84,
+ 0x6df0, 0x6e84,
+ 0x6ef0, 0x6f84,
+ 0x6ff0, 0x7084,
+ 0x70f0, 0x7184,
+ 0x71f0, 0x7284,
+ 0x72f0, 0x7384,
+ 0x73f0, 0x7450,
+ 0x7500, 0x7530,
+ 0x7600, 0x761c,
+ 0x7680, 0x76cc,
+ 0x7700, 0x7798,
+ 0x77c0, 0x77fc,
+ 0x7900, 0x79fc,
+ 0x7b00, 0x7c38,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8e1c,
+ 0x8e30, 0x8e78,
+ 0x8ea0, 0x8f6c,
+ 0x8fc0, 0x9074,
+ 0x90fc, 0x90fc,
+ 0x9400, 0x9458,
+ 0x9600, 0x96bc,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xea7c,
+ 0xf000, 0x11190,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x1924c,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194f8,
+ 0x19800, 0x19f30,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e28c,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e68c,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea8c,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee8c,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f28c,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f68c,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa8c,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe8c,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x20000, 0x2002c,
+ 0x20100, 0x2013c,
+ 0x20190, 0x201c8,
+ 0x20200, 0x20318,
+ 0x20400, 0x20528,
+ 0x20540, 0x20614,
+ 0x21000, 0x21040,
+ 0x2104c, 0x21060,
+ 0x210c0, 0x210ec,
+ 0x21200, 0x21268,
+ 0x21270, 0x21284,
+ 0x212fc, 0x21388,
+ 0x21400, 0x21404,
+ 0x21500, 0x21518,
+ 0x2152c, 0x2153c,
+ 0x21550, 0x21554,
+ 0x21600, 0x21600,
+ 0x21608, 0x21628,
+ 0x21630, 0x2163c,
+ 0x21700, 0x2171c,
+ 0x21780, 0x2178c,
+ 0x21800, 0x21c38,
+ 0x21c80, 0x21d7c,
+ 0x21e00, 0x21e04,
+ 0x22000, 0x2202c,
+ 0x22100, 0x2213c,
+ 0x22190, 0x221c8,
+ 0x22200, 0x22318,
+ 0x22400, 0x22528,
+ 0x22540, 0x22614,
+ 0x23000, 0x23040,
+ 0x2304c, 0x23060,
+ 0x230c0, 0x230ec,
+ 0x23200, 0x23268,
+ 0x23270, 0x23284,
+ 0x232fc, 0x23388,
+ 0x23400, 0x23404,
+ 0x23500, 0x23518,
+ 0x2352c, 0x2353c,
+ 0x23550, 0x23554,
+ 0x23600, 0x23600,
+ 0x23608, 0x23628,
+ 0x23630, 0x2363c,
+ 0x23700, 0x2371c,
+ 0x23780, 0x2378c,
+ 0x23800, 0x23c38,
+ 0x23c80, 0x23d7c,
+ 0x23e00, 0x23e04,
+ 0x24000, 0x2402c,
+ 0x24100, 0x2413c,
+ 0x24190, 0x241c8,
+ 0x24200, 0x24318,
+ 0x24400, 0x24528,
+ 0x24540, 0x24614,
+ 0x25000, 0x25040,
+ 0x2504c, 0x25060,
+ 0x250c0, 0x250ec,
+ 0x25200, 0x25268,
+ 0x25270, 0x25284,
+ 0x252fc, 0x25388,
+ 0x25400, 0x25404,
+ 0x25500, 0x25518,
+ 0x2552c, 0x2553c,
+ 0x25550, 0x25554,
+ 0x25600, 0x25600,
+ 0x25608, 0x25628,
+ 0x25630, 0x2563c,
+ 0x25700, 0x2571c,
+ 0x25780, 0x2578c,
+ 0x25800, 0x25c38,
+ 0x25c80, 0x25d7c,
+ 0x25e00, 0x25e04,
+ 0x26000, 0x2602c,
+ 0x26100, 0x2613c,
+ 0x26190, 0x261c8,
+ 0x26200, 0x26318,
+ 0x26400, 0x26528,
+ 0x26540, 0x26614,
+ 0x27000, 0x27040,
+ 0x2704c, 0x27060,
+ 0x270c0, 0x270ec,
+ 0x27200, 0x27268,
+ 0x27270, 0x27284,
+ 0x272fc, 0x27388,
+ 0x27400, 0x27404,
+ 0x27500, 0x27518,
+ 0x2752c, 0x2753c,
+ 0x27550, 0x27554,
+ 0x27600, 0x27600,
+ 0x27608, 0x27628,
+ 0x27630, 0x2763c,
+ 0x27700, 0x2771c,
+ 0x27780, 0x2778c,
+ 0x27800, 0x27c38,
+ 0x27c80, 0x27d7c,
+ 0x27e00, 0x27e04
+ };
+
+ int i;
+ struct adapter *ap = netdev2adap(dev);
+
+ regs->version = mk_adap_vers(ap);
+
+ memset(buf, 0, T4_REGMAP_SIZE);
+ for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+ if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+ t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ return 0;
+}
+
+static int identify_port(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ unsigned int val;
+ struct adapter *adap = netdev2adap(dev);
+
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
+ else
+ return -EINVAL;
+
+ return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+}
+
+static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+ v |= SUPPORTED_FIBRE;
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (caps & ADVERTISED_100baseT_Full)
+ v |= FW_PORT_CAP_SPEED_100M;
+ if (caps & ADVERTISED_1000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_1G;
+ if (caps & ADVERTISED_10000baseT_Full)
+ v |= FW_PORT_CAP_SPEED_10G;
+ return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_FIBRE;
+ } else
+ cmd->port = PORT_OTHER;
+
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+ cmd->advertising = from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = p->link_cfg.autoneg;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+ if (speed == SPEED_100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == SPEED_1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == SPEED_10000)
+ return FW_PORT_CAP_SPEED_10G;
+ return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ unsigned int cap;
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+ u32 speed = ethtool_cmd_speed(cmd);
+
+ if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ return -EINVAL;
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ /*
+ * PHY offers a single speed. See if that's what's
+ * being requested.
+ */
+ if (cmd->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_caps(speed)))
+ return 0;
+ return -EINVAL;
+ }
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ cap = speed_to_caps(speed);
+
+ if (!(lc->supported & cap) || (speed == SPEED_1000) ||
+ (speed == SPEED_10000))
+ return -EINVAL;
+ lc->requested_speed = cap;
+ lc->advertising = 0;
+ } else {
+ cap = to_fw_linkcaps(cmd->advertising);
+ if (!(lc->supported & cap))
+ return -EINVAL;
+ lc->requested_speed = 0;
+ lc->advertising = cap | FW_PORT_CAP_ANEG;
+ }
+ lc->autoneg = cmd->autoneg;
+
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+
+ epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+ epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+ epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct port_info *p = netdev_priv(dev);
+ struct link_config *lc = &p->link_cfg;
+
+ if (epause->autoneg == AUTONEG_DISABLE)
+ lc->requested_fc = 0;
+ else if (lc->supported & FW_PORT_CAP_ANEG)
+ lc->requested_fc = PAUSE_AUTONEG;
+ else
+ return -EINVAL;
+
+ if (epause->rx_pause)
+ lc->requested_fc |= PAUSE_RX;
+ if (epause->tx_pause)
+ lc->requested_fc |= PAUSE_TX;
+ if (netif_running(dev))
+ return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ lc);
+ return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct sge *s = &pi->adapter->sge;
+
+ e->rx_max_pending = MAX_RX_BUFFERS;
+ e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+ e->rx_jumbo_max_pending = 0;
+ e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+ e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+ e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+ e->rx_jumbo_pending = 0;
+ e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+ int i;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+
+ if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+ e->tx_pending > MAX_TXQ_ENTRIES ||
+ e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+ e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+ e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+ if (adapter->flags & FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (i = 0; i < pi->nqsets; ++i) {
+ s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+ s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+ s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+ }
+ return 0;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us. 0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adap,
+ const struct sge_rspq *q)
+{
+ unsigned int idx = q->intr_params >> 1;
+
+ return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/**
+ * set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ * @adap: the adapter
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
+{
+ if ((us | cnt) == 0)
+ cnt = 1;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
+ &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+ q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ return 0;
+}
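+
+/*
+ * Worked example of the holdoff encoding (with hypothetical table values;
+ * the real ones are loaded in adap_init0() from the module parameters):
+ * if sge->timer_val[] were {5, 10, 20, 50, 100, 200} us, then
+ *
+ *     set_rxq_intr_params(adap, q, 40, 0)
+ *
+ * picks timer index 3 (50 us is the closest entry), leaves the packet
+ * counter disabled and stores QINTR_TIMER_IDX(3) in q->intr_params, from
+ * which qtimer_val() later recovers 50 us. Calling it with us == 0 and
+ * cnt == 0 forces cnt = 1, so with the default counter table the queue
+ * still interrupts on every packet.
+ */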
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
+ c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+ const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+ c->rx_coalesce_usecs = qtimer_val(adap, rq);
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ adap->sge.counter_val[rq->pktcnt_idx] : 0;
+ return 0;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ return -EINVAL;
+}
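+
+/*
+ * Example of the translation above for PCI function 1, assuming a
+ * hypothetical 1 KB per-function area (A = fn * sz = 1024):
+ *
+ *     eeprom_ptov(0x000, 1, 1024) -> 0x7c00  (first 1K maps to 31K..32K)
+ *     eeprom_ptov(0x400, 1, 1024) -> 0x7800  (own function area, below 31K)
+ *     eeprom_ptov(0x800, 1, 1024) -> 0x0000  (remainder maps from 0 upward)
+ */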
+
+/*
+ * The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+ u8 *data)
+{
+ int i, err = 0;
+ struct adapter *adapter = netdev2adap(dev);
+
+ u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ memcpy(data, buf + e->offset, e->len);
+ kfree(buf);
+ return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+ struct adapter *adapter = netdev2adap(dev);
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+ /*
+ * RMW possibly needed for first or last words.
+ */
+ buf = kmalloc(aligned_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+ } else
+ buf = data;
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != data)
+ kfree(buf);
+ return err;
+}
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+ int ret;
+ const struct firmware *fw;
+ struct adapter *adap = netdev2adap(netdev);
+
+ ef->data[sizeof(ef->data) - 1] = '\0';
+ ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_load_fw(adap, fw->data, fw->size);
+ release_firmware(fw);
+ if (!ret)
+ dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
+ return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = netdev2adap(dev)->wol;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ int err = 0;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (wol->wolopts & ~WOL_SUPPORTED)
+ return -EINVAL;
+ t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+ (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+ if (wol->wolopts & WAKE_BCAST) {
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+ ~0ULL, 0, false);
+ if (!err)
+ err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+ ~6ULL, ~0ULL, BCAST_CRC, true);
+ } else
+ t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+ return err;
+}
+
+static int cxgb_set_features(struct net_device *dev, u32 features)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+ int err;
+
+ if (!(changed & NETIF_F_HW_VLAN_RX))
+ return 0;
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1,
+ !!(features & NETIF_F_HW_VLAN_RX), true);
+ if (unlikely(err))
+ dev->features = features ^ NETIF_F_HW_VLAN_RX;
+ return err;
+}
+
+static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+
+ p->size = pi->rss_size;
+ while (n--)
+ p->ring_index[n] = pi->rss[n];
+ return 0;
+}
+
+static int set_rss_table(struct net_device *dev,
+ const struct ethtool_rxfh_indir *p)
+{
+ unsigned int i;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (p->size != pi->rss_size)
+ return -EINVAL;
+ for (i = 0; i < p->size; i++)
+ if (p->ring_index[i] >= pi->nqsets)
+ return -EINVAL;
+ for (i = 0; i < p->size; i++)
+ pi->rss[i] = p->ring_index[i];
+ if (pi->adapter->flags & FULL_INIT_DONE)
+ return write_rss(pi, pi->rss);
+ return 0;
+}
+
+static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXFH: {
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+ }
+ case ETHTOOL_GRXRINGS:
+ info->data = pi->nqsets;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct ethtool_ops cxgb_ethtool_ops = {
+ .get_settings = get_settings,
+ .set_settings = set_settings,
+ .get_drvinfo = get_drvinfo,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_ringparam = get_sge_param,
+ .set_ringparam = set_sge_param,
+ .get_coalesce = get_coalesce,
+ .set_coalesce = set_coalesce,
+ .get_eeprom_len = get_eeprom_len,
+ .get_eeprom = get_eeprom,
+ .set_eeprom = set_eeprom,
+ .get_pauseparam = get_pauseparam,
+ .set_pauseparam = set_pauseparam,
+ .get_link = ethtool_op_get_link,
+ .get_strings = get_strings,
+ .set_phys_id = identify_port,
+ .nway_reset = restart_autoneg,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_stats,
+ .get_regs_len = get_regs_len,
+ .get_regs = get_regs,
+ .get_wol = get_wol,
+ .set_wol = set_wol,
+ .get_rxnfc = get_rxnfc,
+ .get_rxfh_indir = get_rss_table,
+ .set_rxfh_indir = set_rss_table,
+ .flash_device = set_flash,
+};
+
+/*
+ * debugfs support
+ */
+
+static int mem_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct adapter *adap = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = t4_mc_read(adap, pos, data, NULL);
+ else
+ ret = t4_edc_read(adap, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mem_open,
+ .read = mem_read,
+ .llseek = default_llseek,
+};
+
+static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+ (void *)adap + idx, &mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
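+
+/*
+ * Note on the "(void *)adap + idx" trick above: the adapter structure is
+ * kzalloc'd and therefore well past 4-byte aligned, so the two low bits of
+ * the pointer are free to carry the memory index (MEM_EDC0, MEM_EDC1 or
+ * MEM_MC). mem_read() recovers the index with "& 3" and subtracts it again
+ * to get the adapter pointer back.
+ */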
+
+static int __devinit setup_debugfs(struct adapter *adap)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(adap->debugfs_root))
+ return -1;
+
+ i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (adap->l2t)
+ debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
+ &t4_l2t_fops);
+ return 0;
+}
+
+/*
+ * upper-layer driver support
+ */
+
+/*
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgb4_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ spin_lock_bh(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ spin_unlock_bh(&t->atid_lock);
+ return atid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_atid);
+
+/*
+ * Release an active-open TID.
+ */
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ spin_lock_bh(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ spin_unlock_bh(&t->atid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_atid);
+
+/*
+ * Allocate a server TID and set it to the supplied value.
+ */
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+{
+ int stid;
+
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET) {
+ stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+ if (stid < t->nstids)
+ __set_bit(stid, t->stid_bmap);
+ else
+ stid = -1;
+ } else {
+ stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+ if (stid < 0)
+ stid = -1;
+ }
+ if (stid >= 0) {
+ t->stid_tab[stid].data = data;
+ stid += t->stid_base;
+ t->stids_in_use++;
+ }
+ spin_unlock_bh(&t->stid_lock);
+ return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_stid);
+
+/*
+ * Release a server TID.
+ */
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+{
+ stid -= t->stid_base;
+ spin_lock_bh(&t->stid_lock);
+ if (family == PF_INET)
+ __clear_bit(stid, t->stid_bmap);
+ else
+ bitmap_release_region(t->stid_bmap, stid, 2);
+ t->stid_tab[stid].data = NULL;
+ t->stids_in_use--;
+ spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_stid);
+
+/*
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+ unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+ req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+/*
+ * Queue a TID release request and if necessary schedule a work queue to
+ * process it.
+ */
+static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+ unsigned int tid)
+{
+ void **p = &t->tid_tab[tid];
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ *p = adap->tid_release_head;
+ /* Low 2 bits encode the Tx channel number */
+ adap->tid_release_head = (void **)((uintptr_t)p | chan);
+ if (!adap->tid_release_task_busy) {
+ adap->tid_release_task_busy = true;
+ schedule_work(&adap->tid_release_task);
+ }
+ spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Process the list of pending TID release requests.
+ */
+static void process_tid_release_list(struct work_struct *work)
+{
+ struct sk_buff *skb;
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, tid_release_task);
+
+ spin_lock_bh(&adap->tid_release_lock);
+ while (adap->tid_release_head) {
+ void **p = adap->tid_release_head;
+ unsigned int chan = (uintptr_t)p & 3;
+ p = (void *)p - chan;
+
+ adap->tid_release_head = *p;
+ *p = NULL;
+ spin_unlock_bh(&adap->tid_release_lock);
+
+ while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+ GFP_KERNEL)))
+ schedule_timeout_uninterruptible(1);
+
+ mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+ t4_ofld_send(adap, skb);
+ spin_lock_bh(&adap->tid_release_lock);
+ }
+ adap->tid_release_task_busy = false;
+ spin_unlock_bh(&adap->tid_release_lock);
+}
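+
+/*
+ * The release list above is threaded through the tid_tab[] slots
+ * themselves: each pending entry's slot stores the previous list head, and
+ * since those slots are pointer-aligned the two low bits of the head
+ * pointer are reused to remember the Tx channel. Queueing, say, tid 7 on
+ * channel 2 stores the old head in tid_tab[7] and sets
+ * tid_release_head = (void **)((uintptr_t)&tid_tab[7] | 2); the worker
+ * then recovers chan with "& 3" and the tid as p - tids.tid_tab.
+ */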
+
+/*
+ * Release a TID and inform HW. If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+{
+ void *old;
+ struct sk_buff *skb;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ old = t->tid_tab[tid];
+ skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+ if (likely(skb)) {
+ t->tid_tab[tid] = NULL;
+ mk_tid_release(skb, chan, tid);
+ t4_ofld_send(adap, skb);
+ } else
+ cxgb4_queue_tid_release(t, chan, tid);
+ if (old)
+ atomic_dec(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb4_remove_tid);
+
+/*
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int natids = t->natids;
+
+ size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+ t->nstids * sizeof(*t->stid_tab) +
+ BITS_TO_LONGS(t->nstids) * sizeof(long);
+ t->tid_tab = t4_alloc_mem(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+ t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+ spin_lock_init(&t->stid_lock);
+ spin_lock_init(&t->atid_lock);
+
+ t->stids_in_use = 0;
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ atomic_set(&t->tids_in_use, 0);
+
+ /* Set up the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+ bitmap_zero(t->stid_bmap, t->nstids);
+ return 0;
+}
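+
+/*
+ * Layout of the single allocation carved up by tid_init():
+ *
+ *     tid_tab   : ntids  x (void *)
+ *     atid_tab  : natids x union aopen_entry
+ *     stid_tab  : nstids x struct serv_entry
+ *     stid_bmap : BITS_TO_LONGS(nstids) x unsigned long
+ *
+ * With hypothetical sizes ntids = 16K, natids = 8K, nstids = 2K and 8-byte
+ * entries this is a single contiguous block of roughly 200 KB.
+ */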
+
+/**
+ * cxgb4_create_server - create an IP server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IP address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IP server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+ __be32 sip, __be16 sport, unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip = sip;
+ req->peer_ip = htonl(0);
+ chan = rxq_to_chan(&adap->sge, queue);
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server);
+
+/**
+ * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+ * @mtus: the HW MTU table
+ * @mtu: the target MTU
+ * @idx: index of selected entry in the MTU table
+ *
+ * Returns the index and the value in the HW MTU table that is closest to
+ * but does not exceed @mtu, unless @mtu is smaller than any value in the
+ * table, in which case that smallest available value is selected.
+ */
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx)
+{
+ unsigned int i = 0;
+
+ while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+ ++i;
+ if (idx)
+ *idx = i;
+ return mtus[i];
+}
+EXPORT_SYMBOL(cxgb4_best_mtu);
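+
+/*
+ * Example of the selection rule above, using a hypothetical MTU table
+ * {88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, ...}:
+ *
+ *     cxgb4_best_mtu(mtus, 1500, &idx) -> 1500, idx = 8
+ *     cxgb4_best_mtu(mtus, 1600, &idx) -> 1500, idx = 8  (largest not above)
+ *     cxgb4_best_mtu(mtus, 64,   &idx) -> 88,   idx = 0  (smaller than all)
+ */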
+
+/**
+ * cxgb4_port_chan - get the HW channel of a port
+ * @dev: the net device for the port
+ *
+ * Return the HW Tx channel of the given port.
+ */
+unsigned int cxgb4_port_chan(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->tx_chan;
+}
+EXPORT_SYMBOL(cxgb4_port_chan);
+
+/**
+ * cxgb4_port_viid - get the VI id of a port
+ * @dev: the net device for the port
+ *
+ * Return the VI id of the given port.
+ */
+unsigned int cxgb4_port_viid(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->viid;
+}
+EXPORT_SYMBOL(cxgb4_port_viid);
+
+/**
+ * cxgb4_port_idx - get the index of a port
+ * @dev: the net device for the port
+ *
+ * Return the index of the given port.
+ */
+unsigned int cxgb4_port_idx(const struct net_device *dev)
+{
+ return netdev2pinfo(dev)->port_id;
+}
+EXPORT_SYMBOL(cxgb4_port_idx);
+
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, v4, v6);
+ spin_unlock(&adap->stats_lock);
+}
+EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+ const unsigned int *pgsz_order)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
+ t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
+ HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
+ HPZ3(pgsz_order[3]));
+}
+EXPORT_SYMBOL(cxgb4_iscsi_init);
+
+static struct pci_driver cxgb4_driver;
+
+static void check_neigh_update(struct neighbour *neigh)
+{
+ const struct device *parent;
+ const struct net_device *netdev = neigh->dev;
+
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ netdev = vlan_dev_real_dev(netdev);
+ parent = netdev->dev.parent;
+ if (parent && parent->driver == &cxgb4_driver.driver)
+ t4_l2t_update(dev_get_drvdata(parent), neigh);
+}
+
+static int netevent_cb(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ check_neigh_update(data);
+ break;
+ case NETEVENT_REDIRECT:
+ default:
+ break;
+ }
+ return 0;
+}
+
+static bool netevent_registered;
+static struct notifier_block cxgb4_netevent_nb = {
+ .notifier_call = netevent_cb
+};
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+ void *handle;
+ struct cxgb4_lld_info lli;
+
+ lli.pdev = adap->pdev;
+ lli.l2t = adap->l2t;
+ lli.tids = &adap->tids;
+ lli.ports = adap->port;
+ lli.vr = &adap->vres;
+ lli.mtus = adap->params.mtus;
+ if (uld == CXGB4_ULD_RDMA) {
+ lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.nrxq = adap->sge.rdmaqs;
+ } else if (uld == CXGB4_ULD_ISCSI) {
+ lli.rxq_ids = adap->sge.ofld_rxq;
+ lli.nrxq = adap->sge.ofldqsets;
+ }
+ lli.ntxq = adap->sge.ofldqsets;
+ lli.nchan = adap->params.nports;
+ lli.nports = adap->params.nports;
+ lli.wr_cred = adap->params.ofldq_wr_cred;
+ lli.adapter_type = adap->params.rev;
+ lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+ lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
+ (adap->fn * 4));
+ lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
+ t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
+ (adap->fn * 4));
+ lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
+ lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+ lli.fw_vers = adap->params.fw_vers;
+
+ handle = ulds[uld].add(&lli);
+ if (IS_ERR(handle)) {
+ dev_warn(adap->pdev_dev,
+ "could not attach to the %s driver, error %ld\n",
+ uld_str[uld], PTR_ERR(handle));
+ return;
+ }
+
+ adap->uld_handle[uld] = handle;
+
+ if (!netevent_registered) {
+ register_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = true;
+ }
+
+ if (adap->flags & FULL_INIT_DONE)
+ ulds[uld].state_change(handle, CXGB4_STATE_UP);
+}
+
+static void attach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adap->list_node, &adapter_list);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (ulds[i].add)
+ uld_attach(adap, i);
+ mutex_unlock(&uld_mutex);
+}
+
+static void detach_ulds(struct adapter *adap)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ list_del(&adap->list_node);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i]) {
+ ulds[i].state_change(adap->uld_handle[i],
+ CXGB4_STATE_DETACH);
+ adap->uld_handle[i] = NULL;
+ }
+ if (netevent_registered && list_empty(&adapter_list)) {
+ unregister_netevent_notifier(&cxgb4_netevent_nb);
+ netevent_registered = false;
+ }
+ mutex_unlock(&uld_mutex);
+}
+
+static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+{
+ unsigned int i;
+
+ mutex_lock(&uld_mutex);
+ for (i = 0; i < CXGB4_ULD_MAX; i++)
+ if (adap->uld_handle[i])
+ ulds[i].state_change(adap->uld_handle[i], new_state);
+ mutex_unlock(&uld_mutex);
+}
+
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+{
+ int ret = 0;
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ if (ulds[type].add) {
+ ret = -EBUSY;
+ goto out;
+ }
+ ulds[type] = *p;
+ list_for_each_entry(adap, &adapter_list, list_node)
+ uld_attach(adap, type);
+out: mutex_unlock(&uld_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
+
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+ struct adapter *adap;
+
+ if (type >= CXGB4_ULD_MAX)
+ return -EINVAL;
+ mutex_lock(&uld_mutex);
+ list_for_each_entry(adap, &adapter_list, list_node)
+ adap->uld_handle[type] = NULL;
+ ulds[type].add = NULL;
+ mutex_unlock(&uld_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ *
+ * Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+ int err;
+
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err)
+ goto freeq;
+
+ if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
+ err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+ adap->msix_info[0].desc, adap);
+ if (err)
+ goto irq_err;
+
+ err = request_msix_queue_irqs(adap);
+ if (err) {
+ free_irq(adap->msix_info[0].vec, adap);
+ goto irq_err;
+ }
+ } else {
+ err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->port[0]->name, adap);
+ if (err)
+ goto irq_err;
+ }
+ enable_rx(adap);
+ t4_sge_start(adap);
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
+ notify_ulds(adap, CXGB4_STATE_UP);
+ out:
+ return err;
+ irq_err:
+ dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+ t4_free_sge_resources(adap);
+ goto out;
+}
+
+static void cxgb_down(struct adapter *adapter)
+{
+ t4_intr_disable(adapter);
+ cancel_work_sync(&adapter->tid_release_task);
+ adapter->tid_release_task_busy = false;
+ adapter->tid_release_head = NULL;
+
+ if (adapter->flags & USING_MSIX) {
+ free_msix_queue_irqs(adapter);
+ free_irq(adapter->msix_info[0].vec, adapter);
+ } else
+ free_irq(adapter->pdev->irq, adapter);
+ quiesce_rx(adapter);
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ adapter->flags &= ~FULL_INIT_DONE;
+}
+
+/*
+ * net_device operations
+ */
+static int cxgb_open(struct net_device *dev)
+{
+ int err;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_carrier_off(dev);
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
+
+ err = link_start(dev);
+ if (!err)
+ netif_tx_start_all_queues(dev);
+ return err;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+}
+
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *ns)
+{
+ struct port_stats stats;
+ struct port_info *p = netdev_priv(dev);
+ struct adapter *adapter = p->adapter;
+
+ spin_lock(&adapter->stats_lock);
+ t4_get_port_stats(adapter, p->tx_chan, &stats);
+ spin_unlock(&adapter->stats_lock);
+
+ ns->tx_bytes = stats.tx_octets;
+ ns->tx_packets = stats.tx_frames;
+ ns->rx_bytes = stats.rx_octets;
+ ns->rx_packets = stats.rx_frames;
+ ns->multicast = stats.rx_mcast_frames;
+
+ /* detailed rx_errors */
+ ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+ stats.rx_runt;
+ ns->rx_over_errors = 0;
+ ns->rx_crc_errors = stats.rx_fcs_err;
+ ns->rx_frame_errors = stats.rx_symbol_err;
+ ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
+ stats.rx_ovflow2 + stats.rx_ovflow3 +
+ stats.rx_trunc0 + stats.rx_trunc1 +
+ stats.rx_trunc2 + stats.rx_trunc3;
+ ns->rx_missed_errors = 0;
+
+ /* detailed tx_errors */
+ ns->tx_aborted_errors = 0;
+ ns->tx_carrier_errors = 0;
+ ns->tx_fifo_errors = 0;
+ ns->tx_heartbeat_errors = 0;
+ ns->tx_window_errors = 0;
+
+ ns->tx_errors = stats.tx_error_frames;
+ ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+ ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+ return ns;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ unsigned int mbox;
+ int ret = 0, prtad, devad;
+ struct port_info *pi = netdev_priv(dev);
+ struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ if (pi->mdio_addr < 0)
+ return -EOPNOTSUPP;
+ data->phy_id = pi->mdio_addr;
+ break;
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (mdio_phy_id_is_c45(data->phy_id)) {
+ prtad = mdio_phy_id_prtad(data->phy_id);
+ devad = mdio_phy_id_devad(data->phy_id);
+ } else if (data->phy_id < 32) {
+ prtad = data->phy_id;
+ devad = 0;
+ data->reg_num &= 0x1f;
+ } else
+ return -EINVAL;
+
+ mbox = pi->adapter->fn;
+ if (cmd == SIOCGMIIREG)
+ ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
+ data->reg_num, &data->val_out);
+ else
+ ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
+ data->reg_num, data->val_in);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+ /* unfortunately we can't return errors to the stack */
+ set_rxmode(dev, -1, false);
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
+ return -EINVAL;
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ -1, -1, -1, true);
+ if (!ret)
+ dev->mtu = new_mtu;
+ return ret;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+ struct sockaddr *addr = p;
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ pi->xact_addr_filt, addr->sa_data, true, true);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+ if (adap->flags & USING_MSIX) {
+ int i;
+ struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = pi->nqsets; i; i--, rx++)
+ t4_sge_intr_msix(0, &rx->rspq);
+ } else
+ t4_intr_handler(adap)(0, adap);
+}
+#endif
+
+static const struct net_device_ops cxgb4_netdev_ops = {
+ .ndo_open = cxgb_open,
+ .ndo_stop = cxgb_close,
+ .ndo_start_xmit = t4_eth_xmit,
+ .ndo_get_stats64 = cxgb_get_stats,
+ .ndo_set_rx_mode = cxgb_set_rxmode,
+ .ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_set_features = cxgb_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = cxgb_ioctl,
+ .ndo_change_mtu = cxgb_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cxgb_netpoll,
+#endif
+};
+
+void t4_fatal_err(struct adapter *adap)
+{
+ t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+ t4_intr_disable(adap);
+ dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 bar0;
+
+ bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
+ (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
+ (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+ t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
+ (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+ if (adap->vres.ocq.size) {
+ unsigned int start, sz_kb;
+
+ start = pci_resource_start(adap->pdev, 2) +
+ OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+ sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
+ start | BIR(1) | WINDOW(ilog2(sz_kb)));
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+ adap->vres.ocq.start);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+ }
+}
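+
+/*
+ * The WINDOW() field above encodes each aperture as log2 of its size in KB:
+ * a hypothetical 64 KB MEMWIN0_APERTURE, for instance, yields
+ * ilog2(65536) - 10 = 6. The on-chip queue window is sized the same way
+ * after rounding vres.ocq.size up to a power of two.
+ */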
+
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+ u32 v;
+ int ret;
+
+ /* get device capabilities */
+ memset(c, 0, sizeof(*c));
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c->retval_len16 = htonl(FW_LEN16(*c));
+ ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ if (ret < 0)
+ return ret;
+
+ /* select capabilities we'll be using */
+ if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+ return ret;
+ }
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_config_glbl_rss(adap, adap->fn,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
+ 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (ret < 0)
+ return ret;
+
+ t4_sge_init(adap);
+
+ /* tweak some settings */
+ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+ v = t4_read_reg(adap, TP_PIO_DATA);
+ t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+
+ /* get basic stuff going */
+ return t4_early_init(adap, adap->fn);
+}
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ */
+static int adap_init0(struct adapter *adap)
+{
+ int ret;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ struct fw_caps_config_cmd c;
+
+ ret = t4_check_fw_version(adap);
+ if (ret == -EINVAL || ret > 0) {
+ if (upgrade_fw(adap) >= 0) /* recache FW version */
+ ret = t4_check_fw_version(adap);
+ }
+ if (ret < 0)
+ return ret;
+
+ /* contact FW, request master */
+ ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+ ret);
+ return ret;
+ }
+
+ /* reset device */
+ ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
+ if (ret < 0)
+ goto bye;
+
+ for (v = 0; v < SGE_NTIMERS - 1; v++)
+ adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
+ adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+ adap->sge.counter_val[0] = 1;
+ for (v = 1; v < SGE_NCOUNTERS; v++)
+ adap->sge.counter_val[v] = min(intr_cnt[v - 1],
+ THRESHOLD_3_MASK);
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+ params[0] = FW_PARAM_DEV(CCLK);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->params.vpd.cclk = val[0];
+
+ ret = adap_init1(adap, &c);
+ if (ret < 0)
+ goto bye;
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y(adap->fn))
+
+ params[0] = FW_PARAM_DEV(PORTVEC);
+ params[1] = FW_PARAM_PFVF(L2T_START);
+ params[2] = FW_PARAM_PFVF(L2T_END);
+ params[3] = FW_PARAM_PFVF(FILTER_START);
+ params[4] = FW_PARAM_PFVF(FILTER_END);
+ params[5] = FW_PARAM_PFVF(IQFLINT_START);
+ params[6] = FW_PARAM_PFVF(EQ_START);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
+ if (ret < 0)
+ goto bye;
+ port_vec = val[0];
+ adap->tids.ftid_base = val[3];
+ adap->tids.nftids = val[4] - val[3] + 1;
+ adap->sge.ingr_start = val[5];
+ adap->sge.egr_start = val[6];
+
+ if (c.ofldcaps) {
+ /* query offload-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ params[1] = FW_PARAM_PFVF(SERVER_START);
+ params[2] = FW_PARAM_PFVF(SERVER_END);
+ params[3] = FW_PARAM_PFVF(TDDP_START);
+ params[4] = FW_PARAM_PFVF(TDDP_END);
+ params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+ adap->tids.stid_base = val[1];
+ adap->tids.nstids = val[2] - val[1] + 1;
+ adap->vres.ddp.start = val[3];
+ adap->vres.ddp.size = val[4] - val[3] + 1;
+ adap->params.ofldq_wr_cred = val[5];
+ adap->params.offload = 1;
+ }
+ if (c.rdmacaps) {
+ params[0] = FW_PARAM_PFVF(STAG_START);
+ params[1] = FW_PARAM_PFVF(STAG_END);
+ params[2] = FW_PARAM_PFVF(RQ_START);
+ params[3] = FW_PARAM_PFVF(RQ_END);
+ params[4] = FW_PARAM_PFVF(PBL_START);
+ params[5] = FW_PARAM_PFVF(PBL_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.stag.start = val[0];
+ adap->vres.stag.size = val[1] - val[0] + 1;
+ adap->vres.rq.start = val[2];
+ adap->vres.rq.size = val[3] - val[2] + 1;
+ adap->vres.pbl.start = val[4];
+ adap->vres.pbl.size = val[5] - val[4] + 1;
+
+ params[0] = FW_PARAM_PFVF(SQRQ_START);
+ params[1] = FW_PARAM_PFVF(SQRQ_END);
+ params[2] = FW_PARAM_PFVF(CQ_START);
+ params[3] = FW_PARAM_PFVF(CQ_END);
+ params[4] = FW_PARAM_PFVF(OCQ_START);
+ params[5] = FW_PARAM_PFVF(OCQ_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.qp.start = val[0];
+ adap->vres.qp.size = val[1] - val[0] + 1;
+ adap->vres.cq.start = val[2];
+ adap->vres.cq.size = val[3] - val[2] + 1;
+ adap->vres.ocq.start = val[4];
+ adap->vres.ocq.size = val[5] - val[4] + 1;
+ }
+ if (c.iscsicaps) {
+ params[0] = FW_PARAM_PFVF(ISCSI_START);
+ params[1] = FW_PARAM_PFVF(ISCSI_END);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
+ val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.iscsi.start = val[0];
+ adap->vres.iscsi.size = val[1] - val[0] + 1;
+ }
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+ adap->flags |= FW_OK;
+
+ /* These are finalized by FW initialization, load their values now */
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Provision resource limits for Virtual Functions. We currently
+ * grant them all the same static resource limits except for the Port
+ * Access Rights Mask which we're assigning based on the PF. All of
+ * the static provisioning stuff for both the PF and VF really needs
+ * to be managed in a persistent manner for each device which the
+ * firmware controls.
+ */
+ {
+ int pf, vf;
+
+ for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+ if (num_vf[pf] <= 0)
+ continue;
+
+ /* VF numbering starts at 1! */
+ for (vf = 1; vf <= num_vf[pf]; vf++) {
+ ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
+ VFRES_NEQ, VFRES_NETHCTRL,
+ VFRES_NIQFLINT, VFRES_NIQ,
+ VFRES_TC, VFRES_NVI,
+ FW_PFVF_CMD_CMASK_MASK,
+ pfvfres_pmask(adap, pf, vf),
+ VFRES_NEXACTF,
+ VFRES_R_CAPS, VFRES_WX_CAPS);
+ if (ret < 0)
+ dev_warn(adap->pdev_dev, "failed to "
+ "provision pf/vf=%d/%d; "
+ "err=%d\n", pf, vf, ret);
+ }
+ }
+ }
+#endif
+
+ setup_memwin(adap);
+ return 0;
+
+ /*
+ * If a command timed out or failed with EIO, the FW is not operating
+ * within its spec or something catastrophic happened to the HW/FW;
+ * stop issuing commands.
+ */
+bye: if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->fn);
+ return ret;
+}
+
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ goto out;
+
+ rtnl_lock();
+ adap->flags &= ~FW_OK;
+ notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ cxgb_down(adap);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+out: return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+ int i, ret;
+ struct fw_caps_config_cmd c;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap) {
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (t4_wait_dev_ready(adap) < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+ return PCI_ERS_RESULT_DISCONNECT;
+ adap->flags |= FW_OK;
+ if (adap_init1(adap, &c))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ NULL, NULL);
+ if (ret < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ p->viid = ret;
+ p->xact_addr_filt = -1;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ setup_memwin(adap);
+ if (cxgb_up(adap))
+ return PCI_ERS_RESULT_DISCONNECT;
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ return;
+
+ rtnl_lock();
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ if (netif_running(dev)) {
+ link_start(dev);
+ cxgb_set_rxmode(dev);
+ }
+ netif_device_attach(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers cxgb4_eeh = {
+ .error_detected = eeh_err_detected,
+ .slot_reset = eeh_slot_reset,
+ .resume = eeh_resume,
+};
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+}
+
+static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->intr_params = QINTR_TIMER_IDX(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
+ q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and type
+ * of ports we found and the number of available CPUs. Most settings can be
+ * modified by the admin prior to actual use.
+ */
+static void __devinit cfg_queues(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ int i, q10g = 0, n10g = 0, qidx = 0;
+
+ for_each_port(adap, i)
+ n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
+ /*
+ * We default to 1 queue set per non-10G port and to at most one queue
+ * set per online CPU for each 10G port.
+ */
+ if (n10g)
+ q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+ if (q10g > num_online_cpus())
+ q10g = num_online_cpus();
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+ pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ qidx += pi->nqsets;
+ }
+
+ s->ethqsets = qidx;
+ s->max_ethqsets = qidx; /* MSI-X may lower it later */
+
+ if (is_offload(adap)) {
+ /*
+ * For offload we use 1 queue/channel if all ports are up to 1G,
+ * otherwise we divide all available queues amongst the channels
+ * capped by the number of available cores.
+ */
+ if (n10g) {
+ i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+ num_online_cpus());
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else
+ s->ofldqsets = adap->params.nports;
+ /* For RDMA one Rx queue per channel suffices */
+ s->rdmaqs = adap->params.nports;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+ s->ctrlq[i].q.size = 512;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+ s->ofldtxq[i].q.size = 1024;
+
+ for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+ struct sge_ofld_rxq *r = &s->ofldrxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 1024, 64);
+ r->rspq.uld = CXGB4_ULD_ISCSI;
+ r->fl.size = 72;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmarxq[i];
+
+ init_rspq(&r->rspq, 0, 0, 511, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ r->fl.size = 72;
+ }
+
+ init_rspq(&s->fw_evtq, 6, 0, 512, 64);
+ init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+}
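+
+/*
+ * Example of the default distribution above: two 10G plus two 1G ports
+ * with, say, MAX_ETH_QSETS = 32 give q10g = (32 - 2) / 2 = 15, capped at
+ * num_online_cpus(). On an 8-core host each 10G port then gets 8 queue
+ * sets, each 1G port gets 1, and s->ethqsets ends up at 18; the offload
+ * and RDMA queue counts are derived per channel as described above.
+ */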
+
+/*
+ * Reduce the number of Ethernet queue sets across all ports to at most n.
+ * The caller guarantees that n allows at least one queue set per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adap, int n)
+{
+ int i;
+ struct port_info *pi;
+
+ while (n < adap->sge.ethqsets)
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->nqsets > 1) {
+ pi->nqsets--;
+ adap->sge.ethqsets--;
+ if (adap->sge.ethqsets <= n)
+ break;
+ }
+ }
+
+ n = 0;
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ pi->first_qset = n;
+ n += pi->nqsets;
+ }
+}
+
+/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+#define EXTRA_VECS 2
+
+static int __devinit enable_msix(struct adapter *adap)
+{
+ int ofld_need = 0;
+ int i, err, want, need;
+ struct sge *s = &adap->sge;
+ unsigned int nchan = adap->params.nports;
+ struct msix_entry entries[MAX_INGQ + 1];
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ want = s->max_ethqsets + EXTRA_VECS;
+ if (is_offload(adap)) {
+ want += s->rdmaqs + s->ofldqsets;
+ /* need nchan for each possible ULD */
+ ofld_need = 2 * nchan;
+ }
+ need = adap->params.nports + EXTRA_VECS + ofld_need;
+
+ while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
+ want = err;
+
+ if (!err) {
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ } else if (err > 0)
+ dev_info(adap->pdev_dev,
+ "only %d MSI-X vectors left, not using MSI-X\n", err);
+ return err;
+}
+
+#undef EXTRA_VECS
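+
+/*
+ * Vector accounting example for enable_msix(), with hypothetical sizes: a
+ * 4-port offload-capable adapter with max_ethqsets = 32, rdmaqs = 4 and
+ * ofldqsets = 8 asks for want = 32 + 2 + 4 + 8 = 46 vectors but will make
+ * do with need = 4 + 2 + 2*4 = 14. If the PCI core grants only 24, the
+ * NIC queues are trimmed first (max_ethqsets becomes 24 - 2 - 8 = 14) and
+ * the offload queue count is then rounded down to a multiple of the
+ * channel count.
+ */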
+
+static int __devinit init_rss(struct adapter *adap)
+{
+ unsigned int i, j;
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+ if (!pi->rss)
+ return -ENOMEM;
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->nqsets;
+ }
+ return 0;
+}
+
+static void __devinit print_port_info(const struct net_device *dev)
+{
+ static const char *base[] = {
+ "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
+ "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
+ };
+
+ char buf[80];
+ char *bufp = buf;
+ const char *spd = "";
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
+
+ if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ spd = " 2.5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+ spd = " 5 GT/s";
+
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s", base[pi->port_type]);
+
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ adap->params.vpd.id, adap->params.rev, buf,
+ is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "S/N: %s, E/C: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.ec);
+}
+
+static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+ u16 v;
+ int pos;
+
+ pos = pci_pcie_cap(dev);
+ if (pos > 0) {
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_RELAX_EN;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
+ }
+}
+
+/*
+ * Free the following resources:
+ * - memory used for tables
+ * - MSI/MSI-X
+ * - net devices
+ * - resources FW is holding for us
+ */
+static void free_some_resources(struct adapter *adapter)
+{
+ unsigned int i;
+
+ t4_free_mem(adapter->l2t);
+ t4_free_mem(adapter->tids.tid_tab);
+ disable_msi(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]) {
+ kfree(adap2pinfo(adapter, i)->rss);
+ free_netdev(adapter->port[i]);
+ }
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->fn);
+}
+
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+
+static int __devinit init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int func, i, err;
+ struct port_info *pi;
+ unsigned int highdma = 0;
+ struct adapter *adapter = NULL;
+
+ printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ /* Just info, some other driver may have claimed the device. */
+ dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+ return err;
+ }
+
+ /* We control everything through one PF */
+ func = PCI_FUNC(pdev->devfn);
+ if (func != ent->driver_data) {
+ pci_save_state(pdev); /* to restore SR-IOV later */
+ goto sriov;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ goto out_release_regions;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ highdma = NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+ "coherent allocations\n");
+ goto out_disable_device;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_disable_device;
+ }
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ enable_pcie_relaxed_ordering(pdev);
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ adapter->regs = pci_ioremap_bar(pdev, 0);
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->fn = func;
+ adapter->msg_enable = dflt_msg_enable;
+ memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tid_release_lock);
+
+ INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ goto out_unmap_bar;
+ err = adap_init0(adapter);
+ if (err)
+ goto out_unmap_bar;
+
+ for_each_port(adapter, i) {
+ struct net_device *netdev;
+
+ netdev = alloc_etherdev_mq(sizeof(struct port_info),
+ MAX_ETH_QSETS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_free_dev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter->port[i] = netdev;
+ pi = netdev_priv(netdev);
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ netdev->irq = pdev->irq;
+
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | highdma;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->netdev_ops = &cxgb4_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ }
+
+ pci_set_drvdata(pdev, adapter);
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, func, func, 0);
+ if (err)
+ goto out_free_dev;
+ }
+
+ /*
+ * Configure queues and allocate tables now; they can be needed as
+ * soon as the first register_netdev completes.
+ */
+ cfg_queues(adapter);
+
+ adapter->l2t = t4_init_l2t();
+ if (!adapter->l2t) {
+ /* We tolerate a lack of L2T, giving up some functionality */
+ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+ dev_warn(&pdev->dev, "could not allocate TID table, "
+ "continuing\n");
+ adapter->params.offload = 0;
+ }
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0)
+ adapter->flags |= USING_MSI;
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free_dev;
+
+ /*
+ * The card is now ready to go. If any errors occur during device
+ * registration we do not fail the whole card but rather proceed only
+ * with the ports we manage to register successfully. However we must
+ * register at least one net device.
+ */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+ netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
+ err = register_netdev(adapter->port[i]);
+ if (err)
+ break;
+ adapter->chan_map[pi->tx_chan] = i;
+ print_port_info(adapter->port[i]);
+ }
+ if (i == 0) {
+ dev_err(&pdev->dev, "could not register any net devices\n");
+ goto out_free_dev;
+ }
+ if (err) {
+ dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+ err = 0;
+ }
+
+ if (cxgb4_debugfs_root) {
+ adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+ cxgb4_debugfs_root);
+ setup_debugfs(adapter);
+ }
+
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ pdev->needs_freset = 1;
+
+ if (is_offload(adapter))
+ attach_ulds(adapter);
+
+sriov:
+#ifdef CONFIG_PCI_IOV
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+ dev_info(&pdev->dev,
+ "instantiated %u virtual functions\n",
+ num_vf[func]);
+#endif
+ return 0;
+
+ out_free_dev:
+ free_some_resources(adapter);
+ out_unmap_bar:
+ iounmap(adapter->regs);
+ out_free_adapter:
+ kfree(adapter);
+ out_disable_device:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ out_release_regions:
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_disable_sriov(pdev);
+
+ if (adapter) {
+ int i;
+
+ if (is_offload(adapter))
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ if (adapter->debugfs_root)
+ debugfs_remove_recursive(adapter->debugfs_root);
+
+ if (adapter->flags & FULL_INIT_DONE)
+ cxgb_down(adapter);
+
+ free_some_resources(adapter);
+ iounmap(adapter->regs);
+ kfree(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ } else
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver cxgb4_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = cxgb4_pci_tbl,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+ .err_handler = &cxgb4_eeh,
+};
+
+static int __init cxgb4_init_module(void)
+{
+ int ret;
+
+ /* Debugfs support is optional, just warn if this fails */
+ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cxgb4_debugfs_root)
+ pr_warning("could not create debugfs entry, continuing\n");
+
+ ret = pci_register_driver(&cxgb4_driver);
+ if (ret < 0)
+ debugfs_remove(cxgb4_debugfs_root);
+ return ret;
+}
+
+static void __exit cxgb4_cleanup_module(void)
+{
+ pci_unregister_driver(&cxgb4_driver);
+ debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
+}
+
+module_init(cxgb4_init_module);
+module_exit(cxgb4_cleanup_module);
--- /dev/null
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <asm/hvcall.h>
+#include <linux/atomic.h>
+#include <asm/vio.h>
+#include <asm/iommu.h>
+#include <asm/firmware.h>
+
+#include "ibmveth.h"
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
+
+static struct kobj_type ktype_veth_pool;
+
+
+static const char ibmveth_driver_name[] = "ibmveth";
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
+
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ibmveth_driver_version);
+
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
+struct ibmveth_stat {
+ char name[ETH_GSTRING_LEN];
+ int offset;
+};
+
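+/* Locate a statistic by its byte offset within the adapter structure */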
+#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
+#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
+
+struct ibmveth_stat ibmveth_stats[] = {
+ { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
+ { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
+ { "replenish_add_buff_failure",
+ IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+ { "replenish_add_buff_success",
+ IBMVETH_STAT_OFF(replenish_add_buff_success) },
+ { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
+ { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
+ { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
+ { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+ { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+ { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+};
+
+/* simple methods of getting data from the current rxq entry */
+static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+}
+
+static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
+{
+ return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+ IBMVETH_RXQ_TOGGLE_SHIFT;
+}
+
+static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
+}
+
+static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
+}
+
+static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
+}
+
+static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
+{
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+}
+
+static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
+}
+
+/* setup the initial settings for a buffer pool */
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+ u32 pool_index, u32 pool_size,
+ u32 buff_size, u32 pool_active)
+{
+ pool->size = pool_size;
+ pool->index = pool_index;
+ pool->buff_size = buff_size;
+ pool->threshold = pool_size * 7 / 8;
+ pool->active = pool_active;
+}
+
+/* allocate and setup a buffer pool - called during open */
+static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+
+ if (!pool->free_map)
+ return -1;
+
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ if (!pool->dma_addr) {
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
+
+ if (!pool->skbuff) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+ return -1;
+ }
+
+ memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
+
+ for (i = 0; i < pool->size; ++i)
+ pool->free_map[i] = i;
+
+ atomic_set(&pool->available, 0);
+ pool->producer_index = 0;
+ pool->consumer_index = 0;
+
+ return 0;
+}
+
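+/* Flush the buffer from the data cache, one cache line at a time (dcbfl) */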
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+ asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
+/* replenish the buffers for a pool. note that we don't need to
+ * skb_reserve these since they are used for incoming...
+ */
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ u32 i;
+ u32 count = pool->size - atomic_read(&pool->available);
+ u32 buffers_added = 0;
+ struct sk_buff *skb;
+ unsigned int free_index, index;
+ u64 correlator;
+ unsigned long lpar_rc;
+ dma_addr_t dma_addr;
+
+ mb();
+
+ for (i = 0; i < count; ++i) {
+ union ibmveth_buf_desc desc;
+
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+
+ if (!skb) {
+ netdev_dbg(adapter->netdev,
+ "replenish: unable to allocate skb\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
+
+ free_index = pool->consumer_index;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ index = pool->free_map[free_index];
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+ BUG_ON(pool->skbuff[index] != NULL);
+
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto failure;
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+
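+ /*
+ * Encode the pool index and buffer index into the correlator so
+ * the buffer can be located when the hypervisor returns it.
+ */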
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)skb->data = correlator;
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ if (rx_flush) {
+ unsigned int len = min(pool->buff_size,
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
+ ibmveth_flush_buffer(skb->data, len);
+ }
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+ desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ goto failure;
+ } else {
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
+ }
+ }
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+ return;
+
+failure:
+ pool->free_map[free_index] = index;
+ pool->skbuff[index] = NULL;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
+ if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+
+ mb();
+ atomic_add(buffers_added, &(pool->available));
+}
+
+/* replenish routine */
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+{
+ int i;
+
+ adapter->replenish_task_cycles++;
+
+ for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+ struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
+
+ if (pool->active &&
+ (atomic_read(&pool->available) < pool->threshold))
+ ibmveth_replenish_buffer_pool(adapter, pool);
+ }
+
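+ /*
+ * The count of frames dropped for lack of a receive buffer is kept
+ * in the last 8 bytes of the buffer list page.
+ */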
+ adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+ 4096 - 8);
+}
+
+/* empty and free a buffer pool - also used to do cleanup in error paths */
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
+{
+ int i;
+
+ kfree(pool->free_map);
+ pool->free_map = NULL;
+
+ if (pool->skbuff && pool->dma_addr) {
+ for (i = 0; i < pool->size; ++i) {
+ struct sk_buff *skb = pool->skbuff[i];
+ if (skb) {
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[i],
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ pool->skbuff[i] = NULL;
+ }
+ }
+ }
+
+ if (pool->dma_addr) {
+ kfree(pool->dma_addr);
+ pool->dma_addr = NULL;
+ }
+
+ if (pool->skbuff) {
+ kfree(pool->skbuff);
+ pool->skbuff = NULL;
+ }
+}
+
+/* remove a buffer from a pool */
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator)
+{
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ unsigned int free_index;
+ struct sk_buff *skb;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ skb = adapter->rx_buff_pool[pool].skbuff[index];
+
+ BUG_ON(skb == NULL);
+
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+
+ free_index = adapter->rx_buff_pool[pool].producer_index;
+ adapter->rx_buff_pool[pool].producer_index++;
+ if (adapter->rx_buff_pool[pool].producer_index >=
+ adapter->rx_buff_pool[pool].size)
+ adapter->rx_buff_pool[pool].producer_index = 0;
+ adapter->rx_buff_pool[pool].free_map[free_index] = index;
+
+ mb();
+
+ atomic_dec(&(adapter->rx_buff_pool[pool].available));
+}
+
+/* get the current buffer on the rx queue */
+static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
+{
+ u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ return adapter->rx_buff_pool[pool].skbuff[index];
+}
+
+/* recycle the current buffer on the rx queue */
+static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+{
+ u32 q_index = adapter->rx_queue.index;
+ u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
+ unsigned int pool = correlator >> 32;
+ unsigned int index = correlator & 0xffffffffUL;
+ union ibmveth_buf_desc desc;
+ unsigned long lpar_rc;
+ int ret = 1;
+
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+
+ if (!adapter->rx_buff_pool[pool].active) {
+ ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+ goto out;
+ }
+
+ desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_buff_pool[pool].buff_size;
+ desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
+
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+ "during recycle rc=%ld", lpar_rc);
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ ret = 0;
+ }
+
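+ /*
+ * Advance to the next rx queue entry; the toggle flips on every
+ * wrap so entries from the previous pass are not seen as pending.
+ */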
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+
+out:
+ return ret;
+}
+
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
+{
+ ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
+ }
+}
+
+static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+{
+ int i;
+ struct device *dev = &adapter->vdev->dev;
+
+ if (adapter->buffer_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->buffer_list_addr);
+ adapter->buffer_list_addr = NULL;
+ }
+
+ if (adapter->filter_list_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ }
+ free_page((unsigned long)adapter->filter_list_addr);
+ adapter->filter_list_addr = NULL;
+ }
+
+ if (adapter->rx_queue.queue_addr != NULL) {
+ if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+ dma_unmap_single(dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->rx_queue.queue_addr);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ if (adapter->bounce_buffer != NULL) {
+ if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+ }
+ kfree(adapter->bounce_buffer);
+ adapter->bounce_buffer = NULL;
+ }
+}
+
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+ int rc, try_again = 1;
+
+ /*
+ * After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. So if we get a failure we free the adapter and
+ * try again, but only once.
+ */
+retry:
+ rc = h_register_logical_lan(adapter->vdev->unit_address,
+ adapter->buffer_list_dma, rxq_desc.desc,
+ adapter->filter_list_dma, mac_address);
+
+ if (rc != H_SUCCESS && try_again) {
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+ try_again = 0;
+ goto retry;
+ }
+
+ return rc;
+}
+
+static int ibmveth_open(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ u64 mac_address = 0;
+ int rxq_entries = 1;
+ unsigned long lpar_rc;
+ int rc;
+ union ibmveth_buf_desc rxq_desc;
+ int i;
+ struct device *dev;
+
+ netdev_dbg(netdev, "open starting\n");
+
+ napi_enable(&adapter->napi);
+
+ for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ rxq_entries += adapter->rx_buff_pool[i].size;
+
+ adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+
+ if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+ GFP_KERNEL);
+
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ dev = &adapter->vdev->dev;
+
+ adapter->buffer_list_dma = dma_map_single(dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = dma_map_single(dev,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+ (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+ (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.num_slots = rxq_entries;
+ adapter->rx_queue.toggle = 1;
+
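+ /* Pack the 6-byte MAC address into the low 48 bits of a u64 */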
+ memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
+ mac_address = mac_address >> 16;
+
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
+ rxq_desc.fields.address = adapter->rx_queue.queue_dma;
+
+ netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+ netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+ lpar_rc);
+ netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+ "desc:0x%llx MAC:0x%llx\n",
+ adapter->buffer_list_dma,
+ adapter->filter_list_dma,
+ rxq_desc.desc,
+ mac_address);
+ rc = -ENONET;
+ goto err_out;
+ }
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+ netdev_err(netdev, "unable to alloc pool\n");
+ adapter->rx_buff_pool[i].active = 0;
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+ rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+ netdev);
+ if (rc != 0) {
+ netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+ netdev->irq, rc);
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
+
+ goto err_out;
+ }
+
+ adapter->bounce_buffer =
+ kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to allocate bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+ adapter->bounce_buffer_dma =
+ dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
+ netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+ netdev_err(netdev, "unable to map bounce buffer\n");
+ rc = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ netdev_dbg(netdev, "initial replenish cycle\n");
+ ibmveth_interrupt(netdev->irq, netdev);
+
+ netif_start_queue(netdev);
+
+ netdev_dbg(netdev, "open complete\n");
+
+ return 0;
+
+err_out_free_irq:
+ free_irq(netdev->irq, netdev);
+err_out:
+ ibmveth_cleanup(adapter);
+ napi_disable(&adapter->napi);
+ return rc;
+}
+
+static int ibmveth_close(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long lpar_rc;
+
+ netdev_dbg(netdev, "close starting\n");
+
+ napi_disable(&adapter->napi);
+
+ if (!adapter->pool_config)
+ netif_stop_queue(netdev);
+
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
+
+ do {
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
+
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+ "continuing with close\n", lpar_rc);
+ }
+
+ free_irq(netdev->irq, netdev);
+
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
+
+ ibmveth_cleanup(adapter);
+
+ netdev_dbg(netdev, "close complete\n");
+
+ return 0;
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 1;
+ return 0;
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
+ strncpy(info->version, ibmveth_driver_version,
+ sizeof(info->version) - 1);
+}
+
+static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
+ */
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
+static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ unsigned long set_attr6, clr_attr6;
+ long ret, ret4, ret6;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
+
+ if (data) {
+ set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ } else {
+ clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ }
+
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
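+ /*
+ * Checksum offload can only be changed when the adapter is not a
+ * trunk adapter and padded packet checksums are supported.
+ */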
+ if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
+ !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret4 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv4 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ } else {
+ adapter->fw_ipv4_csum_support = data;
+ }
+
+ ret6 = h_illan_attributes(adapter->vdev->unit_address,
+ clr_attr6, set_attr6, &ret_attr);
+
+ if (ret6 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv6 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
+
+ } else
+ adapter->fw_ipv6_csum_support = data;
+
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
+ adapter->rx_csum = data;
+ else
+ rc1 = -EIO;
+ } else {
+ rc1 = -EIO;
+ netdev_err(dev, "unable to change checksum offload settings."
+ " %d rc=%ld ret_attr=%lx\n", data, ret,
+ ret_attr);
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
+
+static int ibmveth_set_features(struct net_device *dev, u32 features)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ int rx_csum = !!(features & NETIF_F_RXCSUM);
+ int rc;
+
+ if (rx_csum == adapter->rx_csum)
+ return 0;
+
+ rc = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc && !adapter->rx_csum)
+ dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+
+ return rc;
+}
+
+static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static int ibmveth_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ibmveth_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ibmveth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
+ data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_settings = netdev_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+};
+
+static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
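+/* Offset of an address within a 4KB page */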
+#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
+
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc *descs)
+{
+ unsigned long correlator;
+ unsigned int retry_count;
+ unsigned long ret;
+
+ /*
+ * The retry count sets a maximum for the number of broadcast and
+ * multicast destinations within the system.
+ */
+ retry_count = 1024;
+ correlator = 0;
+ do {
+ ret = h_send_logical_lan(adapter->vdev->unit_address,
+ descs[0].desc, descs[1].desc,
+ descs[2].desc, descs[3].desc,
+ descs[4].desc, descs[5].desc,
+ correlator, &correlator);
+ } while ((ret == H_BUSY) && (retry_count--));
+
+ if (ret != H_SUCCESS && ret != H_DROPPED) {
+ netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+ "with rc=%ld\n", ret);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int desc_flags;
+ union ibmveth_buf_desc descs[6];
+ int last, i;
+ int force_bounce = 0;
+ dma_addr_t dma_addr;
+
+ /*
+ * veth handles a maximum of 6 segments including the header, so
+ * we have to linearize the skb if there are more than this.
+ */
+ if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ /* veth can't checksum offload UDP */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ((skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+ (skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+ skb_checksum_help(skb)) {
+
+ netdev_err(netdev, "tx: failed to checksum packet\n");
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ desc_flags = IBMVETH_BUF_VALID;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned char *buf = skb_transport_header(skb) +
+ skb->csum_offset;
+
+ desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+
+ /* Need to zero out the checksum */
+ buf[0] = 0;
+ buf[1] = 0;
+ }
+
+retry_bounce:
+ memset(descs, 0, sizeof(descs));
+
+ /*
+ * If a linear packet is below the rx threshold then
+ * copy it into the static bounce buffer. This avoids the
+ * cost of a TCE insert and remove.
+ */
+ if (force_bounce || (!skb_is_nonlinear(skb) &&
+ (skb->len < tx_copybreak))) {
+ skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+ skb->len);
+
+ descs[0].fields.flags_len = desc_flags | skb->len;
+ descs[0].fields.address = adapter->bounce_buffer_dma;
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ goto out;
+ }
+
+ /* Map the header */
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed;
+
+ descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
+
+ /* Map the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
+ frag->size, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed_frags;
+
+ descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.address = dma_addr;
+ }
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+out:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+map_failed_frags:
+ last = i+1;
+ for (i = 0; i < last; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
+ adapter->tx_map_failed++;
+ skb_linearize(skb);
+ force_bounce = 1;
+ goto retry_bounce;
+}
+
+static int ibmveth_poll(struct napi_struct *napi, int budget)
+{
+ struct ibmveth_adapter *adapter =
+ container_of(napi, struct ibmveth_adapter, napi);
+ struct net_device *netdev = adapter->netdev;
+ int frames_processed = 0;
+ unsigned long lpar_rc;
+
+restart_poll:
+ do {
+ if (!ibmveth_rxq_pending_buffer(adapter))
+ break;
+
+ smp_rmb();
+ if (!ibmveth_rxq_buffer_valid(adapter)) {
+ wmb(); /* suggested by larson1 */
+ adapter->rx_invalid_buffer++;
+ netdev_dbg(netdev, "recycling invalid buffer\n");
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ struct sk_buff *skb, *new_skb;
+ int length = ibmveth_rxq_frame_length(adapter);
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+ new_skb = NULL;
+ if (length < rx_copybreak)
+ new_skb = netdev_alloc_skb(netdev, length);
+
+ if (new_skb) {
+ skb_copy_to_linear_data(new_skb,
+ skb->data + offset,
+ length);
+ if (rx_flush)
+ ibmveth_flush_buffer(skb->data,
+ length + offset);
+ if (!ibmveth_rxq_recycle_buffer(adapter))
+ kfree_skb(skb);
+ skb = new_skb;
+ } else {
+ ibmveth_rxq_harvest_buffer(adapter);
+ skb_reserve(skb, offset);
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (csum_good)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb); /* send it up */
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+ frames_processed++;
+ }
+ } while (frames_processed < budget);
+
+ ibmveth_replenish_task(adapter);
+
+ if (frames_processed < budget) {
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_ENABLE);
+
+ BUG_ON(lpar_rc != H_SUCCESS);
+
+ napi_complete(napi);
+
+ if (ibmveth_rxq_pending_buffer(adapter) &&
+ napi_reschedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
+ }
+ }
+
+ return frames_processed;
+}
+
+static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *netdev = dev_instance;
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if (napi_schedule_prep(&adapter->napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
+ __napi_schedule(&adapter->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+static void ibmveth_set_multicast_list(struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned long lpar_rc;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "entering promisc mode\n", lpar_rc);
+ }
+ } else {
+ struct netdev_hw_addr *ha;
+ /* clear the filter table & disable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableRecv |
+ IbmVethMcastDisableFiltering |
+ IbmVethMcastClearFilterTable,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "attempting to clear filter table\n",
+ lpar_rc);
+ }
+ /* add the addresses to the filter table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ /* add the multicast address to the filter table */
+ unsigned long mcast_addr = 0;
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastAddFilter,
+ mcast_addr);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+ "when adding an entry to the filter "
+ "table\n", lpar_rc);
+ }
+ }
+
+ /* re-enable filtering */
+ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
+ IbmVethMcastEnableFiltering,
+ 0);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "enabling filtering\n", lpar_rc);
+ }
+ }
+}
+
+static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct vio_dev *viodev = adapter->vdev;
+ int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
+ int i, rc;
+ int need_restart = 0;
+
+ if (new_mtu < IBMVETH_MIN_MTU)
+ return -EINVAL;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+ break;
+
+ if (i == IBMVETH_NUM_BUFF_POOLS)
+ return -EINVAL;
+
+ /* Deactivate all the buffer pools so that the next loop can activate
+ only the buffer pools necessary to hold the new MTU */
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
+
+ /* Look for an active buffer pool that can hold the new MTU */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ adapter->rx_buff_pool[i].active = 1;
+
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ dev->mtu = new_mtu;
+ vio_cmo_set_dev_desired(viodev,
+ ibmveth_get_desired_dma
+ (viodev));
+ if (need_restart) {
+ return ibmveth_open(adapter->netdev);
+ }
+ return 0;
+ }
+ }
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+ ibmveth_replenish_task(netdev_priv(dev));
+ ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+ struct ibmveth_adapter *adapter;
+ unsigned long ret;
+ int i;
+ int rxqentries = 1;
+
+ /* netdev inits at probe time along with the structures we need below */
+ if (netdev == NULL)
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+ adapter = netdev_priv(netdev);
+
+ ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ /* add the size of the active receive buffers */
+ if (adapter->rx_buff_pool[i].active)
+ ret +=
+ adapter->rx_buff_pool[i].size *
+ IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+ buff_size);
+ rxqentries += adapter->rx_buff_pool[i].size;
+ }
+ /* add the size of the receive queue entries */
+ ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+ return ret;
+}
+
+static const struct net_device_ops ibmveth_netdev_ops = {
+ .ndo_open = ibmveth_open,
+ .ndo_stop = ibmveth_close,
+ .ndo_start_xmit = ibmveth_start_xmit,
+ .ndo_set_rx_mode = ibmveth_set_multicast_list,
+ .ndo_do_ioctl = ibmveth_ioctl,
+ .ndo_change_mtu = ibmveth_change_mtu,
+ .ndo_fix_features = ibmveth_fix_features,
+ .ndo_set_features = ibmveth_set_features,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ibmveth_poll_controller,
+#endif
+};
+
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ int rc, i;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+ unsigned int *mcastFilterSize_p;
+
+ dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+ NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+ return -EINVAL;
+ }
+
+ netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
+
+ if (!netdev)
+ return -ENOMEM;
+
+ adapter = netdev_priv(netdev);
+ dev_set_drvdata(&dev->dev, netdev);
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+ adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->pool_config = 0;
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+ /*
+ * Some older boxes running PHYP non-natively have an OF that returns
+ * an 8-byte local-mac-address field (and the first 2 bytes have to be
+ * ignored) while newer boxes' OF return a 6-byte field. Note that
+ * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+ * The RPA doc specifies that the first byte must be 10b, so we'll
+ * just look for it to solve this 8 vs. 6 byte field issue
+ */
+ if ((*mac_addr_p & 0x3) != 0x02)
+ mac_addr_p += 2;
+
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+ netdev->irq = dev->irq;
+ netdev->netdev_ops = &ibmveth_netdev_ops;
+ netdev->ethtool_ops = &netdev_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &dev->dev);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= netdev->hw_features;
+
+ memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+ int error;
+
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+ error = kobject_init_and_add(kobj, &ktype_veth_pool,
+ &dev->dev.kobj, "pool%d", i);
+ if (!error)
+ kobject_uevent(kobj, KOBJ_ADD);
+ }
+
+ netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
+
+ adapter->buffer_list_dma = DMA_ERROR_CODE;
+ adapter->filter_list_dma = DMA_ERROR_CODE;
+ adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+
+ netdev_dbg(netdev, "registering netdev...\n");
+
+ ibmveth_set_features(netdev, netdev->features);
+
+ rc = register_netdev(netdev);
+
+ if (rc) {
+ netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
+ free_netdev(netdev);
+ return rc;
+ }
+
+ netdev_dbg(netdev, "registered\n");
+
+ return 0;
+}
+
+static int __devexit ibmveth_remove(struct vio_dev *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&dev->dev);
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ kobject_put(&adapter->rx_buff_pool[i].kobj);
+
+ unregister_netdev(netdev);
+
+ free_netdev(netdev);
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+
+ if (attr == &veth_active_attr)
+ return sprintf(buf, "%d\n", pool->active);
+ else if (attr == &veth_num_attr)
+ return sprintf(buf, "%d\n", pool->size);
+ else if (attr == &veth_size_attr)
+ return sprintf(buf, "%d\n", pool->buff_size);
+ return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+ struct net_device *netdev = dev_get_drvdata(
+ container_of(kobj->parent, struct device, kobj));
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ long value = simple_strtol(buf, NULL, 10);
+ long rc;
+
+ if (attr == &veth_active_attr) {
+ if (value && !pool->active) {
+ if (netif_running(netdev)) {
+ if (ibmveth_alloc_buffer_pool(pool)) {
+ netdev_err(netdev,
+ "unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ pool->active = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->active = 1;
+ }
+ } else if (!value && pool->active) {
+ int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+ int i;
+ /* Make sure there is a buffer pool with buffers that
+ can hold a packet of the size of the MTU */
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (pool == &adapter->rx_buff_pool[i])
+ continue;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (mtu <= adapter->rx_buff_pool[i].buff_size)
+ break;
+ }
+
+ if (i == IBMVETH_NUM_BUFF_POOLS) {
+ netdev_err(netdev, "no active pool >= MTU\n");
+ return -EPERM;
+ }
+
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ pool->active = 0;
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ pool->active = 0;
+ }
+ } else if (attr == &veth_num_attr) {
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->size = value;
+ }
+ }
+ } else if (attr == &veth_size_attr) {
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
+ return -EINVAL;
+ } else {
+ if (netif_running(netdev)) {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->buff_size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else {
+ pool->buff_size = value;
+ }
+ }
+ }
+
+ /* kick the interrupt handler to allocate/deallocate pools */
+ ibmveth_interrupt(netdev->irq, netdev);
+ return count;
+}
+
+
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, \
+ };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute *veth_pool_attrs[] = {
+ &veth_active_attr,
+ &veth_num_attr,
+ &veth_size_attr,
+ NULL,
+};
+
+static const struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+ .release = NULL,
+ .sysfs_ops = &veth_pool_ops,
+ .default_attrs = veth_pool_attrs,
+};
+
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
+
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
+ { "network", "IBM,l-lan"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
+static struct vio_driver ibmveth_driver = {
+ .id_table = ibmveth_device_table,
+ .probe = ibmveth_probe,
+ .remove = ibmveth_remove,
+ .get_desired_dma = ibmveth_get_desired_dma,
+ .driver = {
+ .name = ibmveth_driver_name,
+ .owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
+ }
+};
+
+static int __init ibmveth_module_init(void)
+{
+ printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+ ibmveth_driver_string, ibmveth_driver_version);
+
+ return vio_register_driver(&ibmveth_driver);
+}
+
+static void __exit ibmveth_module_exit(void)
+{
+ vio_unregister_driver(&ibmveth_driver);
+}
+
+module_init(ibmveth_module_init);
+module_exit(ibmveth_module_exit);
--- /dev/null
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This code was derived from the Intel e1000e Linux driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "pch_gbe.h"
+#include "pch_gbe_api.h"
+
+#define DRV_VERSION "1.00"
+const char pch_driver_version[] = DRV_VERSION;
+
+#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* PCI device ID */
+#define PCH_GBE_MAR_ENTRIES 16
+#define PCH_GBE_SHORT_PKT 64
+#define DSC_INIT16 0xC000
+#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
+#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
+#define PCH_GBE_COPYBREAK_DEFAULT 256
+#define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
+
+/* Macros for ML7223 */
+#define PCI_VENDOR_ID_ROHM 0x10db
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
+#define PCH_GBE_TX_WEIGHT 64
+#define PCH_GBE_RX_WEIGHT 64
+#define PCH_GBE_RX_BUFFER_WRITE 16
+
+/* Initialize the wake-on-LAN settings */
+#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
+
+#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
+ PCH_GBE_CHIP_TYPE_INTERNAL | \
+ PCH_GBE_RGMII_MODE_RGMII \
+ )
+
+/* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
+#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
+#define PCH_GBE_FRAME_SIZE_2048 2048
+#define PCH_GBE_FRAME_SIZE_4096 4096
+#define PCH_GBE_FRAME_SIZE_8192 8192
+
+#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
+#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
+#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
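+/* Number of unused descriptors in a ring, accounting for wrap-around */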
+#define PCH_GBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* Pause packet value */
+#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
+#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
+#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
+#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
+
+#define PCH_GBE_ETH_ALEN 6
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ * o RX_DMA_CMPLT = Receive DMA completion
+ * o RX_DSC_EMP = Receive descriptor empty
+ * o RX_FIFO_ERR = Receive FIFO error
+ * o WOL_DET = Wake-on-LAN event detected
+ * o TX_CMPLT = Transmit completion
+ */
+#define PCH_GBE_INT_ENABLE_MASK ( \
+ PCH_GBE_INT_RX_DMA_CMPLT | \
+ PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_RX_FIFO_ERR | \
+ PCH_GBE_INT_WOL_DET | \
+ PCH_GBE_INT_TX_CMPLT \
+ )
+
+#define PCH_GBE_INT_DISABLE_ALL 0
+
+static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
+
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
+static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
+ int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
+/**
+ * pch_gbe_mac_read_mac_addr - Read MAC address
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ */
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+{
+ u32 adr1a, adr1b;
+
+ adr1a = ioread32(&hw->reg->mac_adr[0].high);
+ adr1b = ioread32(&hw->reg->mac_adr[0].low);
+
+ hw->mac.addr[0] = (u8)(adr1a & 0xFF);
+ hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
+ hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
+ hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
+ hw->mac.addr[4] = (u8)(adr1b & 0xFF);
+ hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
+
+ pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
+ return 0;
+}
+
+/**
+ * pch_gbe_wait_clr_bit - Wait to clear a bit
+ * @reg: Pointer to the register
+ * @bit: Busy bit to wait on
+ */
+static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
+{
+ u32 tmp;
+ /* wait busy */
+ tmp = 1000;
+ while ((ioread32(reg) & bit) && --tmp)
+ cpu_relax();
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+}
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
+ * @reg: Pointer to the register
+ * @bit: Busy bit to wait on
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+ u32 tmp;
+ int ret = -1;
+ /* wait busy */
+ tmp = 20;
+ while ((ioread32(reg) & bit) && --tmp)
+ udelay(5);
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+ else
+ ret = 0;
+ return ret;
+}
+
+/**
+ * pch_gbe_mac_mar_set - Set MAC address register
+ * @hw: Pointer to the HW structure
+ * @addr: Pointer to the MAC address
+ * @index: MAC address array register
+ */
+static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
+{
+ u32 mar_low, mar_high, adrmask;
+
+ pr_debug("index : 0x%x\n", index);
+
+ /*
+ * HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
+ /* Disable the MAC address at this index while it is updated */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Set the MAC address to the MAC address 1A/1B register */
+ iowrite32(mar_high, &hw->reg->mac_adr[index].high);
+ iowrite32(mar_low, &hw->reg->mac_adr[index].low);
+ /* Re-enable the MAC address at this index */
+ iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+/**
+ * pch_gbe_mac_reset_hw - Reset hardware
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
+#endif
+ pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ /* Setup the MAC address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
+/**
+ * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
+ * @hw: Pointer to the HW structure
+ * @mar_count: Receive address registers
+ */
+static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
+{
+ u32 i;
+
+ /* Setup the receive address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other receive addresses */
+ for (i = 1; i < mar_count; i++) {
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+}
+
+
+/**
+ * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
+ * @hw: Pointer to the HW structure
+ * @mc_addr_list: Array of multicast addresses to program
+ * @mc_addr_count: Number of multicast addresses to program
+ * @mar_used_count: The first MAC Address register free to program
+ * @mar_total_num: Total number of supported MAC Address Registers
+ */
+static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 mar_used_count, u32 mar_total_num)
+{
+ u32 i, adrmask;
+
+ /* Load the first set of multicast addresses into the exact
+ * filters (RAR). If there are not enough to fill the RAR
+ * array, clear the filters.
+ */
+ for (i = mar_used_count; i < mar_total_num; i++) {
+ if (mc_addr_count) {
+ pch_gbe_mac_mar_set(hw, mc_addr_list, i);
+ mc_addr_count--;
+ mc_addr_list += PCH_GBE_ETH_ALEN;
+ } else {
+ /* Clear MAC address mask */
+ adrmask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32((adrmask | (0x0001 << i)),
+ &hw->reg->ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+ /* Clear MAC address */
+ iowrite32(0, &hw->reg->mac_adr[i].high);
+ iowrite32(0, &hw->reg->mac_adr[i].low);
+ }
+ }
+}
+
+/**
+ * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * Negative value: Failed.
+ */
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_mac_info *mac = &hw->mac;
+ u32 rx_fctrl;
+
+ pr_debug("mac->fc = %u\n", mac->fc);
+
+ rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
+
+ switch (mac->fc) {
+ case PCH_GBE_FC_NONE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_RX_PAUSE:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = false;
+ break;
+ case PCH_GBE_FC_TX_PAUSE:
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ case PCH_GBE_FC_FULL:
+ rx_fctrl |= PCH_GBE_FL_CTRL_EN;
+ mac->tx_fc_enable = true;
+ break;
+ default:
+ pr_err("Flow control param set incorrectly\n");
+ return -EINVAL;
+ }
+ if (mac->link_duplex == DUPLEX_HALF)
+ rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
+ iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
+ pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
+ ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
+ return 0;
+}
+
+/**
+ * pch_gbe_mac_set_wol_event - Set wake-on-lan event
+ * @hw: Pointer to the HW structure
+ * @wu_evt: Wake up event
+ */
+static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
+{
+ u32 addr_mask;
+
+ pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
+ wu_evt, ioread32(&hw->reg->ADDR_MASK));
+
+ if (wu_evt) {
+ /* Set Wake-On-Lan address mask */
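+ /* Reuse the currently programmed ADDR_MASK as the wake-up address mask (copied into WOL_ADDR_MASK below) */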
+ addr_mask = ioread32(&hw->reg->ADDR_MASK);
+ iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
+ /* wait busy */
+ pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
+ iowrite32(0, &hw->reg->WOL_ST);
+ iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
+ iowrite32(0x02, &hw->reg->TCPIP_ACC);
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ } else {
+ iowrite32(0, &hw->reg->WOL_CTRL);
+ iowrite32(0, &hw->reg->WOL_ST);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_mac_ctrl_miim - Control MIIM interface
+ * @hw: Pointer to the HW structure
+ * @addr: Address of PHY
+ * @dir: Operation (write or read)
+ * @reg: Access register of PHY
+ * @data: Write data.
+ *
+ * Returns: Read data.
+ */
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+ u16 data)
+{
+ u32 data_out = 0;
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->miim_lock, flags);
+
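+ /* Wait for any in-flight MIIM operation to complete (up to 100 * 20us) */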
+ for (i = 100; i; --i) {
+ if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
+ break;
+ udelay(20);
+ }
+ if (i == 0) {
+ pr_err("pch-gbe.miim won't go Ready\n");
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
+ iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
+ (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
+ dir | data), &hw->reg->MIIM);
+ for (i = 0; i < 100; i++) {
+ udelay(20);
+ data_out = ioread32(&hw->reg->MIIM);
+ if ((data_out & PCH_GBE_MIIM_OPER_READY))
+ break;
+ }
+ spin_unlock_irqrestore(&hw->miim_lock, flags);
+
+ pr_debug("PHY %s: reg=%d, data=0x%04X\n",
+ dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
+ dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
+ return (u16) data_out;
+}
+
+/**
+ * pch_gbe_mac_set_pause_packet - Set pause packet
+ * @hw: Pointer to the HW structure
+ */
+static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
+{
+ unsigned long tmp2, tmp3;
+
+ /* Set Pause packet */
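+ /* PAUSE_PKT2/PAUSE_PKT3 carry the station MAC address bytes; the other PAUSE_PKT registers are fixed pause-frame constants */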
+ tmp2 = hw->mac.addr[1];
+ tmp2 = (tmp2 << 8) | hw->mac.addr[0];
+ tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
+
+ tmp3 = hw->mac.addr[5];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[4];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[3];
+ tmp3 = (tmp3 << 8) | hw->mac.addr[2];
+
+ iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
+ iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
+ iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
+ iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
+ iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
+
+ /* Transmit Pause Packet */
+ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
+
+ pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
+ ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
+ ioread32(&hw->reg->PAUSE_PKT5));
+
+ return;
+}
+
+
+/**
+ * pch_gbe_alloc_queues - Allocate memory for all rings
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
+{
+ int size;
+
+ size = (int)sizeof(struct pch_gbe_tx_ring);
+ adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->tx_ring)
+ return -ENOMEM;
+ size = (int)sizeof(struct pch_gbe_rx_ring);
+ adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!adapter->rx_ring) {
+ kfree(adapter->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * pch_gbe_init_stats - Initialize statistics
+ * @adapter: Board private structure to initialize
+ */
+static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+ return;
+}
+
+/**
+ * pch_gbe_init_phy - Initialize PHY
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 addr;
+ u16 bmcr, stat;
+
+ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
+ bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
+ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
+ break;
+ }
+ adapter->hw.phy.addr = adapter->mii.phy_id;
+ pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
+ if (addr == 32)
+ return -EAGAIN;
+ /* Select the PHY and isolate the rest */
+ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
+ if (addr != adapter->mii.phy_id) {
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ BMCR_ISOLATE);
+ } else {
+ bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
+ pch_gbe_mdio_write(netdev, addr, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ }
+ }
+
+ /* MII setup */
+ adapter->mii.phy_id_mask = 0x1F;
+ adapter->mii.reg_num_mask = 0x1F;
+ adapter->mii.dev = adapter->netdev;
+ adapter->mii.mdio_read = pch_gbe_mdio_read;
+ adapter->mii.mdio_write = pch_gbe_mdio_write;
+ adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
+ return 0;
+}
+
+/**
+ * pch_gbe_mdio_read - The read function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID
+ * @reg: Access location
+ * Returns
+ *	The data read from the PHY register
+ */
+static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
+ (u16) 0);
+}
+
+/**
+ * pch_gbe_mdio_write - The write function for mii
+ * @netdev: Network interface device structure
+ * @addr: Phy ID (not used)
+ * @reg: Access location
+ * @data: Write data
+ */
+static void pch_gbe_mdio_write(struct net_device *netdev,
+ int addr, int reg, int data)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
+}
+
+/**
+ * pch_gbe_reset_task - Reset processing at the time of transmission timeout
+ * @work: Pointer to the work struct embedded in the board private structure
+ */
+static void pch_gbe_reset_task(struct work_struct *work)
+{
+ struct pch_gbe_adapter *adapter;
+ adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+
+ rtnl_lock();
+ pch_gbe_reinit_locked(adapter);
+ rtnl_unlock();
+}
+
+/**
+ * pch_gbe_reinit_locked- Re-initialization
+ * @adapter: Board private structure
+ */
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
+}
+
+/**
+ * pch_gbe_reset - Reset GbE
+ * @adapter: Board private structure
+ */
+void pch_gbe_reset(struct pch_gbe_adapter *adapter)
+{
+ pch_gbe_mac_reset_hw(&adapter->hw);
+ /* Setup the receive address. */
+ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
+ if (pch_gbe_hal_init_hw(&adapter->hw))
+ pr_err("Hardware Error\n");
+}
+
+/**
+ * pch_gbe_free_irq - Free an interrupt
+ * @adapter: Board private structure
+ */
+static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+ if (adapter->have_msi) {
+ pci_disable_msi(adapter->pdev);
+ pr_debug("call pci_disable_msi\n");
+ }
+}
+
+/**
+ * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ atomic_inc(&adapter->irq_sem);
+ iowrite32(0, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ synchronize_irq(adapter->pdev->irq);
+
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+/**
+ * pch_gbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: Board private structure
+ */
+static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (likely(atomic_dec_and_test(&adapter->irq_sem)))
+ iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
+ ioread32(&hw->reg->INT_ST);
+ pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
+}
+
+
+
+/**
+ * pch_gbe_setup_tctl - configure the Transmit control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tx_mode, tcpip;
+
+ tx_mode = PCH_GBE_TM_LONG_PKT |
+ PCH_GBE_TM_ST_AND_FD |
+ PCH_GBE_TM_SHORT_PKT |
+ PCH_GBE_TM_TH_TX_STRT_8 |
+ PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
+
+ iowrite32(tx_mode, &hw->reg->TX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+ tcpip |= PCH_GBE_TX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 tdba, tdlen, dctrl;
+
+ pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->tx_ring->dma,
+ adapter->tx_ring->size);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ tdba = adapter->tx_ring->dma;
+ tdlen = adapter->tx_ring->size - 0x10;
+ iowrite32(tdba, &hw->reg->TX_DSC_BASE);
+ iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
+ iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
+
+ /* Enables Transmission DMA */
+ dctrl = ioread32(&hw->reg->DMA_CTRL);
+ dctrl |= PCH_GBE_TX_DMA_EN;
+ iowrite32(dctrl, &hw->reg->DMA_CTRL);
+}
+
+/**
+ * pch_gbe_setup_rctl - Configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rx_mode, tcpip;
+
+ rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
+ PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
+
+ iowrite32(rx_mode, &hw->reg->RX_MODE);
+
+ tcpip = ioread32(&hw->reg->TCPIP_ACC);
+
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
+ iowrite32(tcpip, &hw->reg->TCPIP_ACC);
+ return;
+}
+
+/**
+ * pch_gbe_configure_rx - Configure Receive Unit after Reset
+ * @adapter: Board private structure
+ */
+static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rdba, rdlen, rctl, rxdma;
+
+ pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
+ (unsigned long long)adapter->rx_ring->dma,
+ adapter->rx_ring->size);
+
+ pch_gbe_mac_force_mac_fc(hw);
+
+ /* Disables Receive MAC */
+ rctl = ioread32(&hw->reg->MAC_RX_EN);
+ iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+
+ /* Disables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+
+ pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
+ ioread32(&hw->reg->MAC_RX_EN),
+ ioread32(&hw->reg->DMA_CTRL));
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ rdba = adapter->rx_ring->dma;
+ rdlen = adapter->rx_ring->size - 0x10;
+ iowrite32(rdba, &hw->reg->RX_DSC_BASE);
+ iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
+ iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
+}
+
+/**
+ * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_tx_resource(
+ struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
+ * @adapter: Board private structure
+ * @buffer_info: Buffer information structure
+ */
+static void pch_gbe_unmap_and_free_rx_resource(
+ struct pch_gbe_adapter *adapter,
+ struct pch_gbe_buffer *buffer_info)
+{
+ if (buffer_info->mapped) {
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+}
+
+/**
+ * pch_gbe_clean_tx_ring - Free Tx Buffers
+ * @adapter: Board private structure
+ * @tx_ring: Ring to be cleaned
+ */
+static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
+
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ memset(tx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
+ iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
+}
+
+/**
+ * pch_gbe_clean_rx_ring - Free Rx Buffers
+ * @adapter: Board private structure
+ * @rx_ring: Ring to free buffers from
+ */
+static void
+pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
+ }
+ pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
+ size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ memset(rx_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
+ iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
+}
+
+static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long rgmii = 0;
+
+ /* Set the RGMII control. */
+#ifdef PCH_GBE_MAC_IFOP_RGMII
+ switch (speed) {
+ case SPEED_10:
+ rgmii = (PCH_GBE_RGMII_RATE_2_5M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_100:
+ rgmii = (PCH_GBE_RGMII_RATE_25M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ case SPEED_1000:
+ rgmii = (PCH_GBE_RGMII_RATE_125M |
+ PCH_GBE_MAC_RGMII_CTRL_SETTING);
+ break;
+ }
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#else /* GMII */
+ rgmii = 0;
+ iowrite32(rgmii, &hw->reg->RGMII_CTRL);
+#endif
+}
+static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
+ u16 duplex)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ unsigned long mode = 0;
+
+ /* Set the communication mode */
+ switch (speed) {
+ case SPEED_10:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 10;
+ break;
+ case SPEED_100:
+ mode = PCH_GBE_MODE_MII_ETHER;
+ netdev->tx_queue_len = 100;
+ break;
+ case SPEED_1000:
+ mode = PCH_GBE_MODE_GMII_ETHER;
+ break;
+ }
+ if (duplex == DUPLEX_FULL)
+ mode |= PCH_GBE_MODE_FULL_DUPLEX;
+ else
+ mode |= PCH_GBE_MODE_HALF_DUPLEX;
+ iowrite32(mode, &hw->reg->MODE);
+}
+
+/**
+ * pch_gbe_watchdog - Watchdog process
+ * @data: Board private structure
+ */
+static void pch_gbe_watchdog(unsigned long data)
+{
+ struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pr_debug("right now = %ld\n", jiffies);
+
+ pch_gbe_update_stats(adapter);
+ if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
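+ /* Restore the tx queue length saved in pch_gbe_up(); pch_gbe_set_mode() below adjusts it per link speed */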
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ /* mii library handles link maintenance tasks */
+ if (mii_ethtool_gset(&adapter->mii, &cmd)) {
+ pr_err("ethtool get setting Error\n");
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies +
+ PCH_GBE_WATCHDOG_PERIOD));
+ return;
+ }
+ hw->mac.link_speed = ethtool_cmd_speed(&cmd);
+ hw->mac.link_duplex = cmd.duplex;
+ /* Set the RGMII control. */
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ /* Set the communication mode */
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ netdev_dbg(netdev,
+ "Link is Up %d Mbps %s-Duplex\n",
+ hw->mac.link_speed,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else if ((!mii_link_ok(&adapter->mii)) &&
+ (netif_carrier_ok(netdev))) {
+ netdev_dbg(netdev, "NIC Link is Down\n");
+ hw->mac.link_speed = SPEED_10;
+ hw->mac.link_duplex = DUPLEX_HALF;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
+}
+
+/**
+ * pch_gbe_tx_queue - Carry out queuing of the transmission data
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring structure
+ * @skb: Socket buffer structure
+ */
+static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *tmp_skb;
+ unsigned int frame_ctrl;
+ unsigned int ring_num;
+ unsigned long flags;
+
+ /*-- Set frame control --*/
+ frame_ctrl = 0;
+ if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+
+ /* Perform checksum processing in software:
+ * the hardware accelerator does not support checksum offload for
+ * packets shorter than 64 bytes, so compute it here for short frames.
+ */
+ if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
+ frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
+ PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ unsigned int offset;
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
+ offset = skb_transport_offset(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ skb->csum = 0;
+ tcp_hdr(skb)->check = 0;
+ skb->csum = skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ tcp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ skb->csum = 0;
+ udp_hdr(skb)->check = 0;
+ skb->csum =
+ skb_checksum(skb, offset,
+ skb->len - offset, 0);
+ udp_hdr(skb)->check =
+ csum_tcpudp_magic(iph->saddr,
+ iph->daddr,
+ skb->len - offset,
+ IPPROTO_UDP,
+ skb->csum);
+ }
+ }
+ }
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+ ring_num = tx_ring->next_to_use;
+ if (unlikely((ring_num + 1) == tx_ring->count))
+ tx_ring->next_to_use = 0;
+ else
+ tx_ring->next_to_use = ring_num + 1;
+
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ buffer_info = &tx_ring->buffer_info[ring_num];
+ tmp_skb = buffer_info->skb;
+
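+ /* Copy the frame into the pre-allocated bounce buffer for this descriptor, inserting 2 bytes of padding after the Ethernet header; the original skb is freed once the descriptor is queued */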
+ /* [Header:14][payload] ---> [Header:14][padding:2][payload] */
+ memcpy(tmp_skb->data, skb->data, ETH_HLEN);
+ tmp_skb->data[ETH_HLEN] = 0x00;
+ tmp_skb->data[ETH_HLEN + 1] = 0x00;
+ tmp_skb->len = skb->len;
+ memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
+ (skb->len - ETH_HLEN));
+ /*-- Set Buffer information --*/
+ buffer_info->length = tmp_skb->len;
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ pr_err("TX DMA map failed\n");
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ tx_ring->next_to_use = ring_num;
+ return;
+ }
+ buffer_info->mapped = true;
+ buffer_info->time_stamp = jiffies;
+
+ /*-- Set Tx descriptor --*/
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
+ tx_desc->buffer_addr = (buffer_info->dma);
+ tx_desc->length = (tmp_skb->len);
+ tx_desc->tx_words_eob = ((tmp_skb->len + 3));
+ tx_desc->tx_frame_ctrl = (frame_ctrl);
+ tx_desc->gbec_status = (DSC_INIT16);
+
+ if (unlikely(++ring_num == tx_ring->count))
+ ring_num = 0;
+
+ /* Update software pointer of TX descriptor */
+ iowrite32(tx_ring->dma +
+ (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
+ &hw->reg->TX_DSC_SW_P);
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * pch_gbe_update_stats - Update the board statistics counters
+ * @adapter: Board private structure
+ */
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw_stats *stats = &adapter->stats;
+ unsigned long flags;
+
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
+ return;
+
+ spin_lock_irqsave(&adapter->stats_lock, flags);
+
+ /* Update device status "adapter->stats" */
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_length_errors +
+ stats->tx_aborted_errors +
+ stats->tx_carrier_errors + stats->tx_timeout_count;
+
+ /* Update network device status "adapter->net_stats" */
+ netdev->stats.rx_packets = stats->rx_packets;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.rx_dropped = stats->rx_dropped;
+ netdev->stats.tx_packets = stats->tx_packets;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_dropped = stats->tx_dropped;
+ /* Fill out the OS statistics structure */
+ netdev->stats.multicast = stats->multicast;
+ netdev->stats.collisions = stats->collisions;
+ /* Rx Errors */
+ netdev->stats.rx_errors = stats->rx_errors;
+ netdev->stats.rx_crc_errors = stats->rx_crc_errors;
+ netdev->stats.rx_frame_errors = stats->rx_frame_errors;
+ /* Tx Errors */
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
+ netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
+
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
+}
+
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rxdma;
+ u16 value;
+ int ret;
+
+ /* Disable Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Wait Rx DMA BUS is IDLE */
+ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
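+ /* If the Rx DMA engine did not go idle in time, clear PCI bus mastering around the receiver reset so no further DMA is issued meanwhile */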
+ if (ret) {
+ /* Disable Bus master */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ value &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ /* Enable Bus master */
+ value |= PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ } else {
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+ u32 rxdma;
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ return;
+}
+
+/**
+ * pch_gbe_intr - Interrupt Handler
+ * @irq: Interrupt number
+ * @data: Pointer to a network interface device structure
+ * Returns
+ * - IRQ_HANDLED: Our interrupt
+ * - IRQ_NONE: Not our interrupt
+ */
+static irqreturn_t pch_gbe_intr(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 int_st;
+ u32 int_en;
+
+ /* Check request status */
+ int_st = ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ /* No interrupt source is pending, so this is not our interrupt */
+ if (unlikely(!int_st))
+ return IRQ_NONE; /* Not our interrupt. End processing. */
+ pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
+ if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
+ adapter->stats.intr_rx_frame_err_count++;
+ if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
+ if (!adapter->rx_stop_flag) {
+ adapter->stats.intr_rx_fifo_err_count++;
+ pr_debug("Rx fifo over run\n");
+ adapter->rx_stop_flag = true;
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ &hw->reg->INT_EN);
+ pch_gbe_stop_receive(adapter);
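+ /* Re-read the interrupt status to pick up events latched while the receiver was being stopped */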
+ int_st |= ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ }
+ if (int_st & PCH_GBE_INT_RX_DMA_ERR)
+ adapter->stats.intr_rx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
+ adapter->stats.intr_tx_fifo_err_count++;
+ if (int_st & PCH_GBE_INT_TX_DMA_ERR)
+ adapter->stats.intr_tx_dma_err_count++;
+ if (int_st & PCH_GBE_INT_TCPIP_ERR)
+ adapter->stats.intr_tcpip_err_count++;
+ /* When Rx descriptor is empty */
+ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
+ adapter->stats.intr_rx_dsc_empty_count++;
+ pr_debug("Rx descriptor is empty\n");
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
+ if (hw->mac.tx_fc_enable) {
+ /* Set Pause packet */
+ pch_gbe_mac_set_pause_packet(hw);
+ }
+ }
+
+ /* When request status is Receive interruption */
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+     (adapter->rx_stop_flag == true)) {
+ if (likely(napi_schedule_prep(&adapter->napi))) {
+ /* Enable only Rx Descriptor empty */
+ atomic_inc(&adapter->irq_sem);
+ int_en = ioread32(&hw->reg->INT_EN);
+ int_en &=
+ ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
+ iowrite32(int_en, &hw->reg->INT_EN);
+ /* Start polling for NAPI */
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
+ IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
+ return IRQ_HANDLED;
+}
+
+/**
+ * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @cleaned_count: Cleaned count
+ */
+static void
+pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct pch_gbe_rx_desc *rx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+ i = rx_ring->next_to_use;
+
+ while ((cleaned_count--)) {
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
+ }
+ /* align */
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = skb;
+
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ buffer_info->rx_buffer,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->stats.rx_alloc_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ buffer_info->mapped = true;
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = (buffer_info->dma);
+ rx_desc->gbec_status = DSC_INIT16;
+
+ pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
+ i, (unsigned long long)buffer_info->dma,
+ buffer_info->length);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ }
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+ iowrite32(rx_ring->dma +
+ (int)sizeof(struct pch_gbe_rx_desc) * i,
+ &hw->reg->RX_DSC_SW_P);
+ }
+ return;
+}
+
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned int i;
+ unsigned int bufsz;
+ unsigned int size;
+
+ bufsz = adapter->rx_buffer_len;
+
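+ /* All Rx data buffers are carved out of a single coherent DMA pool; each descriptor's rx_buffer points into its own slice */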
+ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL);
+ if (!rx_ring->rx_buff_pool) {
+ pr_err("Unable to allocate memory for the receive pool buffer\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->rx_buff_pool, 0, size);
+ rx_ring->rx_buff_pool_size = size;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ buffer_info->length = bufsz;
+ }
+ return 0;
+}
+
+/**
+ * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ */
+static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz;
+ struct pch_gbe_tx_desc *tx_desc;
+
+ bufsz =
+ adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
+
+ for (i = 0; i < tx_ring->count; i++) {
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb_reserve(skb, PCH_GBE_DMA_ALIGN);
+ buffer_info->skb = skb;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ tx_desc->gbec_status = (DSC_INIT16);
+ }
+ return;
+}
+
+/**
+ * pch_gbe_clean_tx - Reclaim resources after transmit completes
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pch_gbe_tx_desc *tx_desc;
+ struct pch_gbe_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = true;
+
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+
+ i = tx_ring->next_to_clean;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+ pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
+ tx_desc->gbec_status, tx_desc->dma_status);
+
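+ /* A descriptor whose gbec_status no longer carries DSC_INIT16 has been completed by the hardware */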
+ while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
+ pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
+ buffer_info = &tx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+
+ if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Abort Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
+ ) {
+ adapter->stats.tx_carrier_errors++;
+ pr_err("Transfer Carrier Sense Error\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
+ ) {
+ adapter->stats.tx_aborted_errors++;
+ pr_err("Transfer Collision Abort Error\n");
+ } else if ((tx_desc->gbec_status &
+ (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
+ PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
+ adapter->stats.collisions++;
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ pr_debug("Transfer Collision\n");
+ } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
+ ) {
+ adapter->stats.tx_packets++;
+ adapter->stats.tx_bytes += skb->len;
+ }
+ if (buffer_info->mapped) {
+ pr_debug("unmap buffer_info->dma : %d\n", i);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
+ buffer_info->mapped = false;
+ }
+ if (buffer_info->skb) {
+ pr_debug("trim buffer_info->skb : %d\n", i);
+ skb_trim(buffer_info->skb, 0);
+ }
+ tx_desc->gbec_status = DSC_INIT16;
+ if (unlikely(++i == tx_ring->count))
+ i = 0;
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
+
+ /* weight of a sort for tx, to avoid endless transmit cleanup */
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ cleaned = false;
+ break;
+ }
+ }
+ pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
+ cleaned_count);
+ /* Recover from running out of Tx resources in xmit_frame */
+ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
+ netif_wake_queue(adapter->netdev);
+ adapter->stats.tx_restart_count++;
+ pr_debug("Tx wake queue\n");
+ }
+ spin_lock(&adapter->tx_queue_lock);
+ tx_ring->next_to_clean = i;
+ spin_unlock(&adapter->tx_queue_lock);
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+ return cleaned;
+}
+
+/**
+ * pch_gbe_clean_rx - Send received data up the network stack; legacy
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: Completed count
+ * @work_to_do: Request count
+ * Returns
+ * true: Cleaned the descriptor
+ * false: Not cleaned the descriptor
+ */
+static bool
+pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ struct pch_gbe_rx_desc *rx_desc;
+ u32 length;
+ unsigned int i;
+ unsigned int cleaned_count = 0;
+ bool cleaned = false;
+ struct sk_buff *skb;
+ u8 dma_status;
+ u16 gbec_status;
+ u32 tcp_ip_status;
+
+ i = rx_ring->next_to_clean;
+
+ while (*work_done < work_to_do) {
+ /* Check Rx descriptor status */
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
+ if (rx_desc->gbec_status == DSC_INIT16)
+ break;
+ cleaned = true;
+ cleaned_count++;
+
+ dma_status = rx_desc->dma_status;
+ gbec_status = rx_desc->gbec_status;
+ tcp_ip_status = rx_desc->tcp_ip_status;
+ rx_desc->gbec_status = DSC_INIT16;
+ buffer_info = &rx_ring->buffer_info[i];
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ /* unmap dma */
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
+ buffer_info->mapped = false;
+
+ pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
+ "TCP:0x%08x] BufInf = 0x%p\n",
+ i, dma_status, gbec_status, tcp_ip_status,
+ buffer_info);
+ /* Error check */
+ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Not Octal Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
+ adapter->stats.rx_frame_errors++;
+ pr_err("Receive Nibble Error\n");
+ } else if (unlikely(gbec_status &
+ PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
+ adapter->stats.rx_crc_errors++;
+ pr_err("Receive CRC Error\n");
+ } else {
+ /* Get the receive length: rx_words_eob includes a 3-byte
+ * adjustment and the FCS, so subtract both.
+ */
+ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ if (rx_desc->rx_words_eob & 0x02)
+ length = length - 4;
+ /*
+ * buffer_info->rx_buffer: [Header:14][payload]
+ * skb->data: [Reserve:2][Header:14][payload]
+ */
+ memcpy(skb->data, buffer_info->rx_buffer, length);
+
+ /* update status of driver */
+ adapter->stats.rx_bytes += length;
+ adapter->stats.rx_packets++;
+ if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
+ adapter->stats.multicast++;
+ /* Write metadata of skb */
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+ pr_debug("Receive skb->ip_summed: %d length: %d\n",
+ skb->ip_summed, length);
+ }
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+ if (++i == rx_ring->count)
+ i = 0;
+ }
+ rx_ring->next_to_clean = i;
+ if (cleaned_count)
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ return cleaned;
+}
+
+/**
+ * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_tx_desc *tx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
+ tx_ring->buffer_info = vzalloc(size);
+ if (!tx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the buffer information\n");
+ return -ENOMEM;
+ }
+
+ tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
+
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ vfree(tx_ring->buffer_info);
+ pr_err("Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+ }
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ spin_lock_init(&tx_ring->tx_lock);
+
+ for (desNo = 0; desNo < tx_ring->count; desNo++) {
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
+ tx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ tx_ring->desc, (unsigned long long)tx_ring->dma,
+ tx_ring->next_to_clean, tx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
+ * @adapter: Board private structure
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_rx_desc *rx_desc;
+ int size;
+ int desNo;
+
+ size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
+ rx_ring->buffer_info = vzalloc(size);
+ if (!rx_ring->buffer_info) {
+ pr_err("Unable to allocate memory for the buffer information\n");
+ return -ENOMEM;
+ }
+ rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ pr_err("Unable to allocate memory for the receive descriptor ring\n");
+ vfree(rx_ring->buffer_info);
+ return -ENOMEM;
+ }
+ memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ for (desNo = 0; desNo < rx_ring->count; desNo++) {
+ rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
+ rx_desc->gbec_status = DSC_INIT16;
+ }
+ pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
+ "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
+ rx_ring->desc, (unsigned long long)rx_ring->dma,
+ rx_ring->next_to_clean, rx_ring->next_to_use);
+ return 0;
+}
+
+/**
+ * pch_gbe_free_tx_resources - Free Tx Resources
+ * @adapter: Board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ */
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_tx_ring(adapter, tx_ring);
+ vfree(tx_ring->buffer_info);
+ tx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_free_rx_resources - Free Rx Resources
+ * @adapter: Board private structure
+ * @rx_ring: Ring to clean the resources from
+ */
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pch_gbe_clean_rx_ring(adapter, rx_ring);
+ vfree(rx_ring->buffer_info);
+ rx_ring->buffer_info = NULL;
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+}
+
+/**
+ * pch_gbe_request_irq - Allocate an interrupt line
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int flags;
+
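+ /* Prefer MSI; fall back to a shared legacy interrupt if MSI cannot be enabled */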
+ flags = IRQF_SHARED;
+ adapter->have_msi = false;
+ err = pci_enable_msi(adapter->pdev);
+ pr_debug("call pci_enable_msi\n");
+ if (err) {
+ pr_debug("call pci_enable_msi - Error: %d\n", err);
+ } else {
+ flags = 0;
+ adapter->have_msi = true;
+ }
+ err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
+ flags, netdev->name, netdev);
+ if (err)
+ pr_err("Unable to allocate interrupt Error: %d\n", err);
+ pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
+ adapter->have_msi, flags, err);
+ return err;
+}
+
+
+static void pch_gbe_set_multi(struct net_device *netdev);
+/**
+ * pch_gbe_up - Up GbE network device
+ * @adapter: Board private structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+int pch_gbe_up(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ int err;
+
+ /* hardware has been reset, we need to reload some things */
+ pch_gbe_set_multi(netdev);
+
+ pch_gbe_setup_tctl(adapter);
+ pch_gbe_configure_tx(adapter);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+
+ err = pch_gbe_request_irq(adapter);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
+ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
+ pch_gbe_alloc_tx_buffers(adapter, tx_ring);
+ pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
+ adapter->tx_queue_len = netdev->tx_queue_len;
+ pch_gbe_start_receive(&adapter->hw);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+
+ napi_enable(&adapter->napi);
+ pch_gbe_irq_enable(adapter);
+ netif_start_queue(adapter->netdev);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_down - Down GbE network device
+ * @adapter: Board private structure
+ */
+void pch_gbe_down(struct pch_gbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+
+ /* signal that we're down so the interrupt handler does not
+ * reschedule our watchdog timer */
+ napi_disable(&adapter->napi);
+ atomic_set(&adapter->irq_sem, 0);
+
+ pch_gbe_irq_disable(adapter);
+ pch_gbe_free_irq(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ netdev->tx_queue_len = adapter->tx_queue_len;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ pch_gbe_reset(adapter);
+ pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
+ pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ rx_ring->rx_buff_pool_logic = 0;
+ rx_ring->rx_buff_pool_size = 0;
+ rx_ring->rx_buff_pool = NULL;
+}
+
+/**
+ * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
+ * @adapter: Board private structure to initialize
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ /* Initialize the hardware-specific values */
+ if (pch_gbe_hal_setup_init_funcs(hw)) {
+ pr_err("Hardware Initialization Failure\n");
+ return -EIO;
+ }
+ if (pch_gbe_alloc_queues(adapter)) {
+ pr_err("Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&adapter->hw.miim_lock);
+ spin_lock_init(&adapter->tx_queue_lock);
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->ethtool_lock);
+ atomic_set(&adapter->irq_sem, 0);
+ pch_gbe_irq_disable(adapter);
+
+ pch_gbe_init_stats(adapter);
+
+ pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
+ (u32) adapter->rx_buffer_len,
+ hw->mac.min_frame_size, hw->mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_open - Called when a network interface is made active
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_open(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* allocate transmit descriptors */
+ err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
+ if (err)
+ goto err_setup_tx;
+ /* allocate receive descriptors */
+ err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
+ if (err)
+ goto err_setup_rx;
+ pch_gbe_hal_power_up_phy(hw);
+ err = pch_gbe_up(adapter);
+ if (err)
+ goto err_up;
+ pr_debug("Success End\n");
+ return 0;
+
+err_up:
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+err_setup_rx:
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+err_setup_tx:
+ pch_gbe_reset(adapter);
+ pr_err("Error End\n");
+ return err;
+}
+
+/**
+ * pch_gbe_stop - Disables a network interface
+ * @netdev: Network interface device structure
+ * Returns
+ * 0: Successfully
+ */
+static int pch_gbe_stop(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ pch_gbe_down(adapter);
+ if (!adapter->wake_up_evt)
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
+ pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
+ return 0;
+}
+
+/**
+ * pch_gbe_xmit_frame - Packet transmitting start
+ * @skb: Socket buffer structure
+ * @netdev: Network interface device structure
+ * Returns
+ * - NETDEV_TX_OK: Normal end
+ * - NETDEV_TX_BUSY: Error end
+ */
+static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
+ if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
+ pr_err("Transfer length Error: skb len: %d > max: %d\n",
+ skb->len, adapter->hw.mac.max_frame_size);
+ dev_kfree_skb_any(skb);
+ adapter->stats.tx_length_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
+ tx_ring->next_to_use, tx_ring->next_to_clean);
+ return NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
+ /* CRC and ITAG are not supported */
+ pch_gbe_tx_queue(adapter, tx_ring, skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * pch_gbe_get_stats - Get System Network Statistics
+ * @netdev: Network interface device structure
+ * Returns: The current stats
+ */
+static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
+{
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * pch_gbe_set_multi - Multicast and Promiscuous mode set
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_set_multi(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ u32 rctl;
+ int i;
+ int mc_count;
+
+ pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(&hw->reg->RX_MODE);
+ mc_count = netdev_mc_count(netdev);
+ if ((netdev->flags & IFF_PROMISC)) {
+ rctl &= ~PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else if ((netdev->flags & IFF_ALLMULTI)) {
+ /* all the multicasting receive permissions */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ if (mc_count >= PCH_GBE_MAR_ENTRIES) {
+ /* all the multicasting receive permissions */
+ rctl |= PCH_GBE_ADD_FIL_EN;
+ rctl &= ~PCH_GBE_MLT_FIL_EN;
+ } else {
+ rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+ }
+ }
+ iowrite32(rctl, &hw->reg->RX_MODE);
+
+ if (mc_count >= PCH_GBE_MAR_ENTRIES)
+ return;
+ mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+ }
+ pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
+ PCH_GBE_MAR_ENTRIES);
+ kfree(mta_list);
+
+ pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
+ ioread32(&hw->reg->RX_MODE), mc_count);
+}
+
+/**
+ * pch_gbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: Network interface device structure
+ * @addr: Pointer to an address structure
+ * Returns
+ * 0: Successfully
+ * -EADDRNOTAVAIL: Failed
+ */
+static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *skaddr = addr;
+ int ret_val;
+
+ if (!is_valid_ether_addr(skaddr->sa_data)) {
+ ret_val = -EADDRNOTAVAIL;
+ } else {
+ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
+ pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ ret_val = 0;
+ }
+ pr_debug("ret_val : 0x%08x\n", ret_val);
+ pr_debug("dev_addr : %pM\n", netdev->dev_addr);
+ pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
+ pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
+ ioread32(&adapter->hw.reg->mac_adr[0].high),
+ ioread32(&adapter->hw.reg->mac_adr[0].low));
+ return ret_val;
+}
+
+/**
+ * pch_gbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: Network interface device structure
+ * @new_mtu: New value for maximum frame size
+ * Returns
+ * 0: Successfully
+ * -EINVAL: Failed
+ */
+static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ int max_frame;
+ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ int err;
+
+ max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
+ (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
+ pr_err("Invalid MTU setting\n");
+ return -EINVAL;
+ }
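+ /* Round the Rx buffer length up to the smallest supported size that fits the new frame */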
+ if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
+ else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
+ adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
+ else
+ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
+
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ err = pch_gbe_up(adapter);
+ if (err) {
+ adapter->rx_buffer_len = old_rx_buffer_len;
+ pch_gbe_up(adapter);
+ return -ENOMEM;
+ } else {
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+ } else {
+ pch_gbe_reset(adapter);
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+
+ pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
+ max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
+ adapter->hw.mac.max_frame_size);
+ return 0;
+}
+
+/**
+ * pch_gbe_set_features - Reset device after features changed
+ * @netdev: Network interface device structure
+ * @features: New features
+ * Returns
+ * 0: HW state updated successfully
+ */
+static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (netif_running(netdev))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ return 0;
+}
+
+/**
+ * pch_gbe_ioctl - Controls register through a MII interface
+ * @netdev: Network interface device structure
+ * @ifr: Pointer to ifr structure
+ * @cmd: Control command
+ * Returns
+ * 0: Successfully
+ * Negative value: Failed
+ */
+static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ pr_debug("cmd : 0x%04x\n", cmd);
+
+ return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+}
+
+/**
+ * pch_gbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_tx_timeout(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ adapter->stats.tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+}
+
+/**
+ * pch_gbe_napi_poll - NAPI receive and transfer polling callback
+ * @napi: Pointer of polling device struct
+ * @budget: The maximum number of packets to process
+ * Returns
+ *	The number of packets processed (work_done)
+ */
+static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct pch_gbe_adapter *adapter =
+ container_of(napi, struct pch_gbe_adapter, napi);
+ int work_done = 0;
+ bool poll_end_flag = false;
+ bool cleaned = false;
+ u32 int_en;
+
+ pr_debug("budget : %d\n", budget);
+
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
+ if (!cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if (work_done < budget)
+ poll_end_flag = true;
+
+ if (poll_end_flag) {
+ napi_complete(napi);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ }
+ pch_gbe_irq_enable(adapter);
+ } else
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ int_en = ioread32(&adapter->hw.reg->INT_EN);
+ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ &adapter->hw.reg->INT_EN);
+ }
+
+ pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
+ poll_end_flag, work_done, budget);
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * pch_gbe_netpoll - Used by things like netconsole to send skbs
+ * @netdev: Network interface device structure
+ */
+static void pch_gbe_netpoll(struct net_device *netdev)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ disable_irq(adapter->pdev->irq);
+ pch_gbe_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops pch_gbe_netdev_ops = {
+ .ndo_open = pch_gbe_open,
+ .ndo_stop = pch_gbe_stop,
+ .ndo_start_xmit = pch_gbe_xmit_frame,
+ .ndo_get_stats = pch_gbe_get_stats,
+ .ndo_set_mac_address = pch_gbe_set_mac,
+ .ndo_tx_timeout = pch_gbe_tx_timeout,
+ .ndo_change_mtu = pch_gbe_change_mtu,
+ .ndo_set_features = pch_gbe_set_features,
+ .ndo_do_ioctl = pch_gbe_ioctl,
+ .ndo_set_rx_mode = pch_gbe_set_multi,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pch_gbe_netpoll,
+#endif
+};
+
+static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ pci_disable_device(pdev);
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
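+/**
+ * pch_gbe_io_slot_reset - Called after the PCI slot has been reset
+ * @pdev: Pointer to PCI device structure
+ *
+ * Re-enable the PCI device, power up the PHY and reset the adapter so
+ * that normal operation can be resumed.
+ */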
+static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+
+ if (pci_enable_device(pdev)) {
+ pr_err("Cannot re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake up status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
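+/**
+ * pch_gbe_io_resume - Called when traffic can start flowing again
+ * @pdev: Pointer to PCI device structure
+ *
+ * Bring the interface back up if it was running and re-attach it to
+ * the network stack.
+ */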
+static void pch_gbe_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (pch_gbe_up(adapter)) {
+ pr_debug("can't bring device back up after reset\n");
+ return;
+ }
+ }
+ netif_device_attach(netdev);
+}
+
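+/**
+ * __pch_gbe_suspend - Common suspend path used by PM and shutdown
+ * @pdev: Pointer to PCI device structure
+ *
+ * Stop the interface, then either arm Wake-on-LAN or power down the
+ * PHY before disabling the PCI device.
+ */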
+static int __pch_gbe_suspend(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 wufc = adapter->wake_up_evt;
+ int retval = 0;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ pch_gbe_down(adapter);
+ if (wufc) {
+ pch_gbe_set_multi(netdev);
+ pch_gbe_setup_rctl(adapter);
+ pch_gbe_configure_rx(adapter);
+ pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_set_mode(adapter, hw->mac.link_speed,
+ hw->mac.link_duplex);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ } else {
+ pch_gbe_hal_power_down_phy(hw);
+ pch_gbe_mac_set_wol_event(hw, wufc);
+ pci_disable_device(pdev);
+ }
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int pch_gbe_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ return __pch_gbe_suspend(pdev);
+}
+
+static int pch_gbe_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ struct pch_gbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ pch_gbe_hal_power_up_phy(hw);
+ pch_gbe_reset(adapter);
+ /* Clear wake on lan control and status */
+ pch_gbe_mac_set_wol_event(hw, 0);
+
+ if (netif_running(netdev))
+ pch_gbe_up(adapter);
+ netif_device_attach(netdev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
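+/**
+ * pch_gbe_shutdown - Shutdown handler
+ * @pdev: Pointer to PCI device structure
+ *
+ * Suspend the device and, if the system is powering off, arm it to
+ * wake from D3hot.
+ */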
+static void pch_gbe_shutdown(struct pci_dev *pdev)
+{
+ __pch_gbe_suspend(pdev);
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
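+/**
+ * pch_gbe_remove - Device removal routine
+ * @pdev: Pointer to PCI device structure
+ *
+ * Unregister the netdev, reset the PHY and release all resources
+ * acquired during probe.
+ */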
+static void pch_gbe_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+
+ cancel_work_sync(&adapter->reset_task);
+ unregister_netdev(netdev);
+
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ iounmap(adapter->hw.reg);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
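+/**
+ * pch_gbe_probe - Device initialization routine
+ * @pdev: Pointer to PCI device structure
+ * @pci_id: Matched entry in the pch_gbe_pcidev_id table
+ *
+ * Set up the PCI resources, allocate the netdev, initialize the
+ * adapter, PHY and MAC, and register the network device.
+ */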
+static int pch_gbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct net_device *netdev;
+ struct pch_gbe_adapter *adapter;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ret = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "ERR: No usable DMA "
+ "configuration, aborting\n");
+ goto err_disable_device;
+ }
+ }
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't reserve PCI I/O and memory resources\n");
+ goto err_disable_device;
+ }
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
+ if (!netdev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "ERR: Can't allocate and set up an Ethernet device\n");
+ goto err_release_pci;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+ adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
+ if (!adapter->hw.reg) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "Can't ioremap\n");
+ goto err_free_netdev;
+ }
+
+ netdev->netdev_ops = &pch_gbe_netdev_ops;
+ netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
+ netif_napi_add(netdev, &adapter->napi,
+ pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
+ netdev->hw_features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features = netdev->hw_features;
+ pch_gbe_set_ethtool_ops(netdev);
+
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
+ pch_gbe_mac_reset_hw(&adapter->hw);
+
+ /* setup the private structure */
+ ret = pch_gbe_sw_init(adapter);
+ if (ret)
+ goto err_iounmap;
+
+ /* Initialize PHY */
+ ret = pch_gbe_init_phy(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "PHY initialize error\n");
+ goto err_free_adapter;
+ }
+ pch_gbe_hal_get_bus_info(&adapter->hw);
+
+ /* Read the MAC address and store it in the private data */
+ ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+ if (ret) {
+ dev_err(&pdev->dev, "MAC address Read Error\n");
+ goto err_free_adapter;
+ }
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ ret = -EIO;
+ goto err_free_adapter;
+ }
+ setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
+ (unsigned long)adapter);
+
+ INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
+
+ pch_gbe_check_options(adapter);
+
+ /* initialize the wol settings based on the eeprom settings */
+ adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
+ dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
+
+ /* reset the hardware with the new settings */
+ pch_gbe_reset(adapter);
+
+ ret = register_netdev(netdev);
+ if (ret)
+ goto err_free_adapter;
+ /* tell the stack to leave us alone until pch_gbe_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
+
+ device_set_wakeup_enable(&pdev->dev, 1);
+ return 0;
+
+err_free_adapter:
+ pch_gbe_hal_phy_hw_reset(&adapter->hw);
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+err_iounmap:
+ iounmap(adapter->hw.reg);
+err_free_netdev:
+ free_netdev(netdev);
+err_release_pci:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
+ /* required last entry */
+ {0}
+};
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops pch_gbe_pm_ops = {
+ .suspend = pch_gbe_suspend,
+ .resume = pch_gbe_resume,
+ .freeze = pch_gbe_suspend,
+ .thaw = pch_gbe_resume,
+ .poweroff = pch_gbe_suspend,
+ .restore = pch_gbe_resume,
+};
+#endif
+
+static struct pci_error_handlers pch_gbe_err_handler = {
+ .error_detected = pch_gbe_io_error_detected,
+ .slot_reset = pch_gbe_io_slot_reset,
+ .resume = pch_gbe_io_resume
+};
+
+static struct pci_driver pch_gbe_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pch_gbe_pcidev_id,
+ .probe = pch_gbe_probe,
+ .remove = pch_gbe_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &pch_gbe_pm_ops,
+#endif
+ .shutdown = pch_gbe_shutdown,
+ .err_handler = &pch_gbe_err_handler
+};
+
+
+static int __init pch_gbe_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&pch_gbe_driver);
+ if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
+ if (copybreak == 0) {
+ pr_info("copybreak disabled\n");
+ } else {
+ pr_info("copybreak enabled for packets <= %u bytes\n",
+ copybreak);
+ }
+ }
+ return ret;
+}
+
+static void __exit pch_gbe_exit_module(void)
+{
+ pci_unregister_driver(&pch_gbe_driver);
+}
+
+module_init(pch_gbe_init_module);
+module_exit(pch_gbe_exit_module);
+
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
+
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+/* pch_gbe_main.c */
goto dropped;
/* Register the client MAC in the transtable */
- tt_local_add(soft_iface, ethhdr->h_source);
+ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
- if (is_multicast_ether_addr(ethhdr->h_dest) ||
- (orig_node && orig_node->gw_flags)) {
+ do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
+ if (do_bcast || (orig_node && orig_node->gw_flags)) {
ret = gw_is_target(bat_priv, skb, orig_node);
if (ret < 0)