bnxt_en: Gather periodic PCIe statistics for reporting via ethtool -S.
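
As an illustration of the user-visible effect, the new counters are
appended to the ethtool -S output. A sketch of the expected listing
(the interface name and the zero values are hypothetical):

  # ethtool -S eth0 | grep pcie
       pcie_pl_signal_integrity: 0
       pcie_dl_signal_integrity: 0
       pcie_tl_signal_integrity: 0
       pcie_link_integrity: 0
       pcie_tx_traffic_rate: 0
       pcie_rx_traffic_rate: 0
       ...
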
Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
if (bp->hw_rx_port_stats_ext) {
	dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
			  bp->hw_rx_port_stats_ext,
			  bp->hw_rx_port_stats_ext_map);
	bp->hw_rx_port_stats_ext = NULL;
}
+
+ if (bp->hw_pcie_stats) {
+ dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ bp->hw_pcie_stats, bp->hw_pcie_stats_map);
+ bp->hw_pcie_stats = NULL;
+ }
}
static void bnxt_free_ring_stats(struct bnxt *bp)
alloc_tx_ext_stats:
if (bp->hw_tx_port_stats_ext)
- return 0;
+ goto alloc_pcie_stats;
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
	dma_alloc_coherent(&pdev->dev,
			   sizeof(struct tx_port_stats_ext),
			   &bp->hw_tx_port_stats_ext_map,
			   GFP_KERNEL);
}
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+
+alloc_pcie_stats:
+ if (bp->hw_pcie_stats ||
+ !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return 0;
+
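+ /* PCIe stats are optional; on allocation failure, carry on without them. */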
+ bp->hw_pcie_stats =
+ dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ &bp->hw_pcie_stats_map, GFP_KERNEL);
+ if (!bp->hw_pcie_stats)
+ return 0;
+
+ bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
return rc;
}
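+/* Ask firmware to DMA the latest PCIe counters into bp->hw_pcie_stats. */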
+static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_input req = {0};
+
+ if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt) {
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp);
+ bnxt_hwrm_pcie_qstats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext;
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map;
+ dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
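+/* Convert a byte offset in struct pcie_ctx_hw_stats to a 64-bit word index. */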
+#define BNXT_PCIE_STATS_OFFSET(counter) \
+ (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+#define BNXT_PCIE_STATS_ENTRY(counter) \
+ { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_pcie_stats_arr[] = {
+ BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[1]),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[3]),
+ BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
+};
+
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
+#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
num_stats += BNXT_NUM_STATS_PRI;
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS)
+ num_stats += BNXT_NUM_PCIE_STATS;
+
return num_stats;
}
}
}
}
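+ /* Copy the firmware-filled PCIe counters, converting each __le64 to host order. */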
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
+
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
+ buf[j] = le64_to_cpu(*(pcie_stats +
+ bnxt_pcie_stats_arr[i].offset));
+ }
+ }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
}
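+ /* Names must be emitted in the same order as the values in get_ethtool_stats. */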
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
+ strcpy(buf, bnxt_pcie_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
if (bp->num_tests)