Collect SGE freelist queue and congestion manager contexts.
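Collects, for every data freelist queue ID, both the freelist manager
(FLM) context and the matching congestion manager (CONM) context. The
number of queue IDs is derived from the HDRSTARTFLQ field of
SGE_FLM_CFG. When firmware is attached, the contexts are fetched
through a FW_LDST mailbox command to avoid conflicting with firmware
accesses; otherwise, they are read directly through the SGE_CTXT_CMD
backdoor registers.

The dump entity is a flat array of struct cudbg_ch_cntxt records. A
minimal sketch of walking it (buf, len, and parse_flm_context are
illustrative names, not part of this patch):

    struct cudbg_ch_cntxt *ctxt = (struct cudbg_ch_cntxt *)buf;
    u32 n = len / sizeof(*ctxt);

    for (; n; n--, ctxt++) {
        /* Entries alternate CTXT_FLM/CTXT_CNM for each queue ID;
         * data[] holds the raw 24-byte hardware context.
         */
        if (ctxt->cntxt_type == CTXT_FLM)
            parse_flm_context(ctxt->cntxt_id, ctxt->data);
    }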
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
u32 reserved[16];
};
+#define CUDBG_MAX_FL_QIDS 1024
+
+struct cudbg_ch_cntxt {
+ u32 cntxt_type;
+ u32 cntxt_id;
+ u32 data[SGE_CTXT_SIZE / 4];
+};
+
#define CUDBG_MAX_RPLC_SIZE 128
struct cudbg_mps_tcam {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
CUDBG_LE_TCAM = 59,
return rc;
}
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ u32 value, size;
+ u8 flq;
+
+ value = t4_read_reg(padap, SGE_FLM_CFG_A);
+
+ /* Get number of data freelist queues */
+ flq = HDRSTARTFLQ_G(value);
+ size = CUDBG_MAX_FL_QIDS >> flq;
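+ /* e.g., HDRSTARTFLQ = 0 keeps all 1024 queue IDs; each increment
+ * halves the number of data freelist queues.
+ */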
+
+ /* Add extra space for congestion manager contexts.
+ * The number of CONM contexts is the same as the number of
+ * freelist queues.
+ */
+ size += size;
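+ /* e.g., flq = 0: (1024 FLM + 1024 CONM) contexts * 32 bytes = 64KB */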
+ return size * sizeof(struct cudbg_ch_cntxt);
+}
+
+static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc = -1;
+
+ /* Under heavy traffic, the SGE Queue context registers will be
+ * frequently accessed by firmware.
+ *
+ * To avoid conflicts with firmware, always ask firmware to fetch
+ * the SGE Queue contexts via mailbox. On failure, fall back to
+ * accessing the hardware registers directly.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
+ if (rc)
+ t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
+}
+
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_ch_cntxt *buff;
+ u32 size, i = 0;
+ int rc;
+
+ rc = cudbg_dump_context_size(padap);
+ if (rc <= 0)
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ size = rc;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ buff = (struct cudbg_ch_cntxt *)temp_buff.data;
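+ /* Fill the buffer with alternating freelist queue (FLM) and
+ * congestion manager (CONM) contexts for each queue ID.
+ */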
+ while (size > 0) {
+ buff->cntxt_type = CTXT_FLM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ i++;
+ }
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
*mask = x | y;
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
struct cudbg_entity_hdr *entity_hdr);
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
+int cudbg_dump_context_size(struct adapter *padap);
struct cudbg_tcam;
void cudbg_fill_le_tcam_info(struct adapter *padap,
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize);
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
{ CUDBG_LE_TCAM, cudbg_collect_le_tcam },
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_DUMP_CONTEXT:
+ len = cudbg_dump_context_size(adap);
+ break;
case CUDBG_MPS_TCAM:
len = sizeof(struct cudbg_mps_tcam) *
adap->params.arch.mps_tcam_size;
}
}
+/**
+ * t4_sge_ctxt_rd - read an SGE context through FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Issues a FW command through the given mailbox to read an SGE context.
+ */
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct fw_ldst_cmd c;
+ u32 addrspace;
+ int ret;
+
+ if (ctype == CTXT_FLM)
+ addrspace = FW_LDST_ADDRSPC_SGE_FLMC;
+ else
+ addrspace = FW_LDST_ADDRSPC_SGE_CONMC;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(addrspace));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.physid = cpu_to_be32(cid);
+
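+ /* Issue the read via mailbox; on success, the six 32-bit context
+ * data words are returned in the command reply.
+ */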
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
+ data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
+ data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
+ data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
+ data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
+ data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ }
+ return ret;
+}
+
+/**
+ * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ * @adap: the adapter
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Reads an SGE context directly, bypassing FW. This is only for
+ * debugging when FW is unavailable.
+ */
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ int i, ret;
+
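+ /* Post the context ID and type to SGE_CTXT_CMD, wait for BUSY to
+ * clear, then read the context out of the SGE_CTXT_DATA registers.
+ */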
+ t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
+ ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
+ if (!ret)
+ for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
+ *data++ = t4_read_reg(adap, i);
+ return ret;
+}
+
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize)
ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
};
+/* SGE context types */
+enum ctxt_type {
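+ /* these values match the CTXTTYPE field encoding of SGE_CTXT_CMD */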
+ CTXT_FLM = 2,
+ CTXT_CNM,
+};
+
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_CTXT_SIZE = 24, /* size of SGE context in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_MAX_IQ_SIZE = 65520,
#define T6_DBVFIFO_SIZE_M 0x1fffU
#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+#define SGE_CTXT_CMD_A 0x11fc
+
+#define BUSY_S 31
+#define BUSY_V(x) ((x) << BUSY_S)
+#define BUSY_F BUSY_V(1U)
+
+#define CTXTTYPE_S 24
+#define CTXTTYPE_M 0x3U
+#define CTXTTYPE_V(x) ((x) << CTXTTYPE_S)
+
+#define CTXTQID_S 0
+#define CTXTQID_M 0x1ffffU
+#define CTXTQID_V(x) ((x) << CTXTQID_S)
+
+#define SGE_CTXT_DATA0_A 0x1200
+#define SGE_CTXT_DATA5_A 0x1214
+
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
#define SGE_IMSG_CTXT_BADDR_A 0x1088
#define SGE_FLM_CACHE_BADDR_A 0x108c
+#define SGE_FLM_CFG_A 0x1090
+
+#define NOHDR_S 18
+#define NOHDR_V(x) ((x) << NOHDR_S)
+#define NOHDR_F NOHDR_V(1U)
+
+#define HDRSTARTFLQ_S 11
+#define HDRSTARTFLQ_M 0x7U
+#define HDRSTARTFLQ_G(x) (((x) >> HDRSTARTFLQ_S) & HDRSTARTFLQ_M)
+
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24