cxgb4: collect egress and ingress SGE queue contexts
author    Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
          Fri, 8 Dec 2017 04:18:40 +0000 (09:48 +0530)
committer David S. Miller <davem@davemloft.net>
          Fri, 8 Dec 2017 19:31:50 +0000 (14:31 -0500)
Use meminfo to identify the egress and ingress context regions and
fetch all valid contexts from those regions. Also flush the SGE
context cache before attempting collection so that stale context
data is not read.
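
For illustration, the validity test added by this patch reduces to a
word/bit lookup inside the raw context image. A minimal stand-alone
sketch, using the bit positions hard-coded below; the surrounding
harness is hypothetical and only for demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Valid-bit positions inside a raw SGE context image, as used by
     * cudbg_sge_ctxt_check_valid(): 176 (egress), 141 (ingress), 89 (FLM).
     */
    static int ctxt_valid(const uint32_t *buf, int bit_pos)
    {
            return !!(buf[bit_pos / 32] & (1U << (bit_pos % 32)));
    }

    int main(void)
    {
            uint32_t ctxt[6] = { 0 };    /* six 32-bit words cover bit 176 */

            /* Fake a valid egress context by setting its valid bit. */
            ctxt[176 / 32] |= 1U << (176 % 32);

            printf("egress valid:  %d\n", ctxt_valid(ctxt, 176));   /* 1 */
            printf("ingress valid: %d\n", ctxt_valid(ctxt, 141));   /* 0 */
            return 0;
    }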

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h

drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 1e1719526e763fdda21f2584b0d29c77171cb725..0759160449806d0792df33c3818c9f2f0a887152 100644
@@ -185,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
        u32 reserved[16];
 };
 
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
 #define CUDBG_MAX_FL_QIDS 1024
 
 struct cudbg_ch_cntxt {
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 2e3cf36347a295a83674609243836f1c0e3f3fa4..38866f3f45ac7fc4e0a2bd2e074b72b3eaeb06b1 100644
@@ -1594,22 +1594,108 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
        return rc;
 }
 
-int cudbg_dump_context_size(struct adapter *padap)
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+       int index, bit, bit_pos = 0;
+
+       switch (type) {
+       case CTXT_EGRESS:
+               bit_pos = 176;
+               break;
+       case CTXT_INGRESS:
+               bit_pos = 141;
+               break;
+       case CTXT_FLM:
+               bit_pos = 89;
+               break;
+       }
+       index = bit_pos / 32;
+       bit = bit_pos % 32;
+       return buf[index] & (1U << bit);
+}
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+                                     struct cudbg_region_info *ctx_info,
+                                     u8 *mem_type)
 {
-       u32 value, size;
+       struct cudbg_mem_desc mem_desc;
+       struct cudbg_meminfo meminfo;
+       u32 i, j, value, found;
        u8 flq;
+       int rc;
+
+       rc = cudbg_fill_meminfo(padap, &meminfo);
+       if (rc)
+               return rc;
+
+       /* Get EGRESS and INGRESS context region size */
+       for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+               found = 0;
+               memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+               for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+                       rc = cudbg_get_mem_region(padap, &meminfo, j,
+                                                 cudbg_region[i],
+                                                 &mem_desc);
+                       if (!rc) {
+                               found = 1;
+                               rc = cudbg_get_mem_relative(padap, &meminfo, j,
+                                                           &mem_desc.base,
+                                                           &mem_desc.limit);
+                               if (rc) {
+                                       ctx_info[i].exist = false;
+                                       break;
+                               }
+                               ctx_info[i].exist = true;
+                               ctx_info[i].start = mem_desc.base;
+                               ctx_info[i].end = mem_desc.limit;
+                               mem_type[i] = j;
+                               break;
+                       }
+               }
+               if (!found)
+                       ctx_info[i].exist = false;
+       }
 
+       /* Get FLM and CNM max qid. */
        value = t4_read_reg(padap, SGE_FLM_CFG_A);
 
        /* Get number of data freelist queues */
        flq = HDRSTARTFLQ_G(value);
-       size = CUDBG_MAX_FL_QIDS >> flq;
+       ctx_info[CTXT_FLM].exist = true;
+       ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
 
-       /* Add extra space for congestion manager contexts.
-        * The number of CONM contexts are same as number of freelist
+       /* The number of CONM contexts is the same as the number of freelist
         * queues.
         */
-       size += size;
+       ctx_info[CTXT_CNM].exist = true;
+       ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+       return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+       struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+       u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+       u32 i, size = 0;
+       int rc;
+
+       /* Get max valid qid for each type of queue */
+       rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < CTXT_CNM; i++) {
+               if (!region_info[i].exist) {
+                       if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+                               size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+                                       SGE_CTXT_SIZE;
+                       continue;
+               }
+
+               size += (region_info[i].end - region_info[i].start + 1) /
+                       SGE_CTXT_SIZE;
+       }
        return size * sizeof(struct cudbg_ch_cntxt);
 }
 
@@ -1632,16 +1718,54 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
                t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
 }
 
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+                                 u8 ctxt_type,
+                                 struct cudbg_ch_cntxt **out_buff)
+{
+       struct cudbg_ch_cntxt *buff = *out_buff;
+       int rc;
+       u32 j;
+
+       for (j = 0; j < max_qid; j++) {
+               cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+               rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+               if (!rc)
+                       continue;
+
+               buff->cntxt_type = ctxt_type;
+               buff->cntxt_id = j;
+               buff++;
+               if (ctxt_type == CTXT_FLM) {
+                       cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+                       buff->cntxt_type = CTXT_CNM;
+                       buff->cntxt_id = j;
+                       buff++;
+               }
+       }
+
+       *out_buff = buff;
+}
+
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
 {
+       struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
        struct adapter *padap = pdbg_init->adap;
+       u32 j, size, max_ctx_size, max_ctx_qid;
+       u8 mem_type[CTXT_INGRESS + 1] = { 0 };
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ch_cntxt *buff;
-       u32 size, i = 0;
+       u64 *dst_off, *src_off;
+       u8 *ctx_buf;
+       u8 i, k;
        int rc;
 
+       /* Get max valid qid for each type of queue */
+       rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+       if (rc)
+               return rc;
+
        rc = cudbg_dump_context_size(padap);
        if (rc <= 0)
                return CUDBG_STATUS_ENTITY_NOT_FOUND;
@@ -1651,23 +1775,79 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
        if (rc)
                return rc;
 
+       /* Get buffer with enough space to read the biggest context
+        * region in memory.
+        */
+       max_ctx_size = max(region_info[CTXT_EGRESS].end -
+                          region_info[CTXT_EGRESS].start + 1,
+                          region_info[CTXT_INGRESS].end -
+                          region_info[CTXT_INGRESS].start + 1);
+
+       ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+       if (!ctx_buf) {
+               cudbg_put_buff(&temp_buff, dbg_buff);
+               return -ENOMEM;
+       }
+
        buff = (struct cudbg_ch_cntxt *)temp_buff.data;
-       while (size > 0) {
-               buff->cntxt_type = CTXT_FLM;
-               buff->cntxt_id = i;
-               cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
-               buff++;
-               size -= sizeof(struct cudbg_ch_cntxt);
 
-               buff->cntxt_type = CTXT_CNM;
-               buff->cntxt_id = i;
-               cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
-               buff++;
-               size -= sizeof(struct cudbg_ch_cntxt);
+       /* Collect EGRESS and INGRESS context data.
+        * In case of failures, fall back to collecting via FW or
+        * backdoor access.
+        */
+       for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+               if (!region_info[i].exist) {
+                       max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+                       cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+                                             &buff);
+                       continue;
+               }
 
-               i++;
+               max_ctx_size = region_info[i].end - region_info[i].start + 1;
+               max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+               t4_sge_ctxt_flush(padap, padap->mbox, i);
+               rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+                                 region_info[i].start, max_ctx_size,
+                                 (__be32 *)ctx_buf, 1);
+               if (rc) {
+                       max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+                       cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+                                             &buff);
+                       continue;
+               }
+
+               for (j = 0; j < max_ctx_qid; j++) {
+                       src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+                       dst_off = (u64 *)buff->data;
+
+                       /* The data is stored in 64-bit cpu order.  Convert it
+                        * to big endian before parsing.
+                        */
+                       for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+                               dst_off[k] = cpu_to_be64(src_off[k]);
+
+                       rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+                       if (!rc)
+                               continue;
+
+                       buff->cntxt_type = i;
+                       buff->cntxt_id = j;
+                       buff++;
+               }
        }
 
+       kvfree(ctx_buf);
+
+       /* Collect FREELIST and CONGESTION MANAGER contexts */
+       max_ctx_size = region_info[CTXT_FLM].end -
+                      region_info[CTXT_FLM].start + 1;
+       max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+       /* Since FLM and CONM are 1-to-1 mapped, the below function
+        * will fetch both FLM and CONM contexts.
+        */
+       cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
 }
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e680beed903049c77e9f3f48c22e6fc768649ebc..97dc3efeb234068166932d55816d86a147606e12 100644
@@ -1654,7 +1654,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_update_port_info(struct port_info *pi);
 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f900ffe25cccaec30b3dfba70226ad08398900a..87ac1e4dafc1307cc062051a65ab8ac69193136e 100644
@@ -1673,7 +1673,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
 {
        struct adapter *adap = netdev2adap(dev);
 
-       return t4_sge_ctxt_flush(adap, adap->mbox);
+       return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index ccb2798c34d1cc82565d32929aebb8cd454707da..112963defd0b1126ae37fa0d35b5b8182bc3ddc5 100644
@@ -6530,18 +6530,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
+ *      @ctxt_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
  */
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 {
        int ret;
        u32 ldst_addrspace;
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+                                                FW_LDST_ADDRSPC_SGE_EGRC :
+                                                FW_LDST_ADDRSPC_SGE_INGC);
        c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
                                        FW_CMD_REQUEST_F | FW_CMD_READ_F |
                                        ldst_addrspace);
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed18435615b0bba94974601ce8f736560f1f..83afb32c8491e674db343f69d6cb9018ff680ffd 100644
@@ -70,7 +70,9 @@ enum {
 
 /* SGE context types */
 enum ctxt_type {
-       CTXT_FLM = 2,
+       CTXT_EGRESS,
+       CTXT_INGRESS,
+       CTXT_FLM,
        CTXT_CNM,
 };