};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-static unsigned int num_vf[4];
+/* T5 supports SR-IOV on more PFs than T4, so size the num_vf array
+ * with the larger NUM_OF_PF_WITH_SRIOV_T5 count.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];
module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+MODULE_PARM_DESC(num_vf,
+ "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
#endif
/*
static int upgrade_fw(struct adapter *adap)
{
int ret;
- u32 vers;
+ u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
+ char *fw_file_name;
- ret = request_firmware(&fw, FW_FNAME, dev);
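+ /* Pick the firmware image and the major version we expect it to
+ * report for this chip generation.
+ */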
+ switch (CHELSIO_CHIP_VERSION(adap->chip)) {
+ case CHELSIO_T4:
+ fw_file_name = FW_FNAME;
+ exp_major = FW_VERSION_MAJOR;
+ break;
+ case CHELSIO_T5:
+ fw_file_name = FW5_FNAME;
+ exp_major = FW_VERSION_MAJOR_T5;
+ break;
+ default:
+ dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
- dev_err(dev, "unable to load firmware image " FW_FNAME
- ", error %d\n", ret);
+ dev_err(dev, "unable to load firmware image %s, error %d\n",
+ fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
- dev_info(dev, "firmware successfully upgraded to "
- FW_FNAME " (%d.%d.%d.%d)\n",
- FW_HDR_FW_VER_MAJOR_GET(vers),
- FW_HDR_FW_VER_MINOR_GET(vers),
- FW_HDR_FW_VER_MICRO_GET(vers),
- FW_HDR_FW_VER_BUILD_GET(vers));
+ dev_info(dev,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return 4 | (ap->params.rev << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->chip) |
+ (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
+ char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
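+ /* Like the firmware image, the default configuration file is
+ * chip-specific.
+ */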
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ fw_config_file = FW_CFNAME;
+ break;
+ case CHELSIO_T5:
+ fw_config_file = FW5_CFNAME;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ adapter->pdev->device);
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
if (ret < 0)
goto bye;
+ sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
- : "/lib/firmware/" FW_CFNAME),
+ : fw_config_file_path),
finiver, cfcsum);
return 0;
*/
{
int pf, vf;
+ int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
+ NUM_OF_PF_WITH_SRIOV_T5;
- for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+ for (pf = 0; pf < max_no_pf; pf++) {
if (num_vf[pf] <= 0)
continue;
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev, buf,
+ adap->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
+#ifdef CONFIG_PCI_IOV
+ int max_no_pf;
+#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
+ NUM_OF_PF_WITH_SRIOV_T5;
+
+ if (func < max_no_pf && num_vf[func] > 0)
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
+ u32 val;
if (q->pend_cred >= 8) {
+ val = PIDX(q->pend_cred / 8);
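+ /* T5 doorbell writes carry an additional DBTYPE field, so set it
+ * for anything newer than T4.
+ */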
+ if (!is_t4(adap->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct cpl_trace_pkt *p;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
return 0;
}
- p = (struct cpl_trace_pkt *)skb->data;
- __skb_pull(skb, sizeof(*p));
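+ /* The trace header in front of the packet is sized differently on
+ * T5, so strip the length that matches this chip.
+ */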
+ if (is_t4(adap->chip))
+ __skb_pull(skb, sizeof(struct cpl_trace_pkt));
+ else
+ __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
+
skb_reset_mac_header(skb);
skb->protocol = htons(0xffff);
skb->dev = adap->port[0];
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
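+ /* T5 reports trace packets under a different CPL opcode */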
+ int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
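+ /* T4 keeps both doorbell FIFO thresholds in SGE_DBFIFO_STATUS;
+ * T5 moves the high-priority threshold into SGE_DBFIFO_STATUS2,
+ * so the two registers are programmed separately.
+ */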
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_HP_INT_THRESH(M_HP_INT_THRESH) |
+ V_LP_INT_THRESH(M_LP_INT_THRESH),
+ V_HP_INT_THRESH(dbfifo_int_thresh) |
+ V_LP_INT_THRESH(dbfifo_int_thresh));
+ } else {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
+ V_LP_INT_THRESH_T5(dbfifo_int_thresh));
+ t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
+ V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
+ V_HP_INT_THRESH_T5(dbfifo_int_thresh));
+ }
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_LEN 512
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
{
u32 api_vers[2];
int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
ret = get_fw_version(adapter, &adapter->params.fw_vers);
if (!ret)
major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+
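+ /* The driver carries separate expected firmware versions for T4
+ * and T5; compare against the set matching this adapter.
+ */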
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ exp_major = FW_VERSION_MAJOR;
+ exp_minor = FW_VERSION_MINOR;
+ exp_micro = FW_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = FW_VERSION_MAJOR_T5;
+ exp_minor = FW_VERSION_MINOR_T5;
+ exp_micro = FW_VERSION_MICRO_T5;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
+ adapter->chip);
+ return -EINVAL;
+ }
+
memcpy(adapter->params.api_vers, api_vers,
sizeof(adapter->params.api_vers));
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != exp_major) { /* major mismatch - fail */
dev_err(adapter->pdev_dev,
"card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, exp_major);
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == exp_minor && micro == exp_micro)
return 0; /* perfect match */
/* Minor/micro version mismatch. Report it but often it's OK. */
{ 0 }
};
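+ /* T5 reports a different set of PCIe parity/error sources */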
+ static struct intr_info t5_pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0 }
+ };
+
int fat;
fat = t4_handle_intr_status(adapter,
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ is_t4(adapter->chip) ?
+ pcie_intr_info : t5_pcie_intr_info);
+
if (fat)
t4_fatal_err(adapter);
}
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
- u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ u32 v, int_cause_reg;
+
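+ /* The per-port MAC registers moved on T5 (MAC_PORT_* via
+ * T5_PORT_REG) relative to T4 (XGMAC_PORT_* via PORT_REG), so
+ * compute the interrupt-cause address per chip.
+ */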
+ if (is_t4(adap->chip))
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ else
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+
+ v = t4_read_reg(adap, int_cause_reg);
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
u32 bgmap = get_mps_bg_map(adap, idx);
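+ /* On T5 the per-port statistics also live in the relocated MAC
+ * register block, so GET_STAT picks the base per chip.
+ */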
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->tx_octets = GET_STAT(TX_PORT_BYTES);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
+ u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
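+ /* Same T4/T5 register-block split for the magic-packet MAC ID and
+ * port config registers.
+ */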
+ if (is_t4(adap->chip)) {
+ mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ }
+
if (addr) {
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ t4_write_reg(adap, mag_id_reg_l,
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
addr ? MAGICEN : 0);
}
u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
int i;
+ u32 port_cfg_reg;
+
+ if (is_t4(adap->chip))
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ else
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
if (!enable) {
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
- PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
-#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+ (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
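+ /* The size of the MPS exact-match MAC table differs between T4 and
+ * T5, so the valid index range depends on the chip.
+ */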
+ unsigned int max_naddr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (naddr > 7)
return -EINVAL;
u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
if (idx)
- idx[i] = index >= NEXACT_MAC ? 0xffff : index;
- if (index < NEXACT_MAC)
+ idx[i] = index >= max_naddr ? 0xffff : index;
+ if (index < max_naddr)
ret++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[i]));
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
+ unsigned int max_mac_addr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
- if (ret >= NEXACT_MAC)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
*/
int t4_prep_adapter(struct adapter *adapter)
{
- int ret;
+ int ret, ver;
+ uint16_t device_id;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
return ret;
}
+ /* Retrieve the adapter's device ID */
+ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
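+ /* The top nibble of the PCI device ID identifies the chip
+ * generation (4 for T4, 5 for T5).
+ */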
+ ver = device_id >> 12;
+ switch (ver) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
+ adapter->params.rev);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
+ adapter->params.rev);
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ device_id);
+ return -EINVAL;
+ }
+
+ /* params.rev now carries the combined chip code (version and release)
+ * rather than the bare PCI revision. */
+ adapter->params.rev = adapter->chip;
+
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*