bnxt_en: Add TPA ID mapping logic for 57500 chips.
author     Michael Chan <michael.chan@broadcom.com>
           Mon, 29 Jul 2019 10:10:26 +0000 (06:10 -0400)
committer  David S. Miller <davem@davemloft.net>
           Mon, 29 Jul 2019 21:19:09 +0000 (14:19 -0700)
The new TPA feature on the 57500 chips supports a larger number of
concurrent TPAs (up to 1024) divided among the functions.  Add logic to
map the hardware TPA ID to a software index that tracks each TPA in
progress.  A 1:1 direct mapping without translation would be too
wasteful, as it would require allocating 1024 TPA structures for each RX
ring on each PCI function.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
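
For orientation before the diff, here is a minimal standalone sketch of the mapping
scheme described above, in plain C.  All names (agg_idx_map, alloc_agg_idx, HW_AGG_IDS,
SW_TPA_SLOTS) are hypothetical, and a byte array stands in for the kernel bitmap helpers
(test_bit/find_first_zero_bit/__set_bit) the driver actually uses: the low bits of the
hardware agg ID select a preferred per-ring slot, a colliding ID falls back to the first
free slot, and a small table remembers the translation so TPA agg/end completions can
find the same slot again.

#include <stdint.h>

#define HW_AGG_IDS   1024   /* hardware agg ID space on the 57500 (hypothetical name) */
#define SW_TPA_SLOTS  256   /* per-ring software TPA slots, i.e. MAX_TPA_P5 (hypothetical name) */

struct agg_idx_map {                     /* hypothetical stand-in for struct bnxt_tpa_idx_map */
    uint16_t id_tbl[HW_AGG_IDS];         /* hardware agg ID -> software slot */
    uint8_t  used[SW_TPA_SLOTS];         /* byte array standing in for the driver's bitmap */
};

/* TPA start: pick a software slot for a new aggregation and record the mapping. */
uint16_t alloc_agg_idx(struct agg_idx_map *map, uint16_t hw_id)
{
    uint16_t idx = hw_id & (SW_TPA_SLOTS - 1);   /* preferred slot: low bits of the hw ID */

    if (map->used[idx]) {                        /* collision: look for the first free slot */
        for (uint16_t i = 0; i < SW_TPA_SLOTS; i++) {
            if (!map->used[i]) {
                idx = i;
                break;
            }
        }
        /* if every slot is busy, this sketch simply keeps the preferred slot */
    }
    map->used[idx] = 1;
    map->id_tbl[hw_id] = idx;                    /* remember the translation for later completions */
    return idx;
}

/* TPA agg/end completions: translate the hardware agg ID back to the software slot. */
uint16_t lookup_agg_idx(const struct agg_idx_map *map, uint16_t hw_id)
{
    return map->id_tbl[hw_id];
}

/* TPA end: release the slot once the aggregation completes. */
void free_agg_idx(struct agg_idx_map *map, uint16_t idx)
{
    map->used[idx] = 0;
}

The driver's bnxt_alloc_agg_idx(), bnxt_lookup_agg_idx() and bnxt_free_agg_idx() in the
diff below follow the same pattern, using agg_id_tbl[] and agg_idx_bmap[] in the new
per-ring struct bnxt_tpa_idx_map.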

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 59358e5ddc37e65a8f258abec95ee8757b7da0b0..05c69a5626beddf92a427192be03d5049c764901 100644
@@ -1152,6 +1152,33 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
        rxr->rx_next_cons = 0xffff;
 }
 
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+       struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+       u16 idx = agg_id & MAX_TPA_P5_MASK;
+
+       if (test_bit(idx, map->agg_idx_bmap))
+               idx = find_first_zero_bit(map->agg_idx_bmap,
+                                         BNXT_AGG_IDX_BMAP_SIZE);
+       __set_bit(idx, map->agg_idx_bmap);
+       map->agg_id_tbl[agg_id] = idx;
+       return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+       struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+       __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+       struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+       return map->agg_id_tbl[agg_id];
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -1162,10 +1189,12 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        struct rx_bd *prod_bd;
        dma_addr_t mapping;
 
-       if (bp->flags & BNXT_FLAG_CHIP_P5)
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_START_AGG_ID_P5(tpa_start);
-       else
+               agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+       } else {
                agg_id = TPA_START_AGG_ID(tpa_start);
+       }
        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
@@ -1445,6 +1474,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                agg_id = TPA_END_AGG_ID_P5(tpa_end);
+               agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
                agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
                tpa_info = &rxr->rx_tpa[agg_id];
                if (unlikely(agg_bufs != tpa_info->agg_count)) {
@@ -1454,6 +1484,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                }
                tpa_info->agg_count = 0;
                *event |= BNXT_AGG_EVENT;
+               bnxt_free_agg_idx(rxr, agg_id);
                idx = agg_id;
                gro = !!(bp->flags & BNXT_FLAG_GRO);
        } else {
@@ -1560,6 +1591,7 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
        struct bnxt_tpa_info *tpa_info;
 
+       agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
        tpa_info = &rxr->rx_tpa[agg_id];
        BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
        tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
@@ -2383,6 +2415,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
        max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
        for (i = 0; i < bp->rx_nr_rings; i++) {
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+               struct bnxt_tpa_idx_map *map;
                int j;
 
                if (rxr->rx_tpa) {
@@ -2453,6 +2486,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
                        __free_page(rxr->rx_page);
                        rxr->rx_page = NULL;
                }
+               map = rxr->rx_tpa_idx_map;
+               if (map)
+                       memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
        }
 }
 
@@ -2548,6 +2584,8 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
        for (i = 0; i < bp->rx_nr_rings; i++) {
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 
+               kfree(rxr->rx_tpa_idx_map);
+               rxr->rx_tpa_idx_map = NULL;
                if (rxr->rx_tpa) {
                        kfree(rxr->rx_tpa[0].agg_arr);
                        rxr->rx_tpa[0].agg_arr = NULL;
@@ -2586,6 +2624,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
                        return -ENOMEM;
                for (j = 1; j < bp->max_tpa; j++)
                        rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+               rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+                                             GFP_KERNEL);
+               if (!rxr->rx_tpa_idx_map)
+                       return -ENOMEM;
        }
        return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 290f42669ba5b19c821cdd23d000bc5d7cbc4a28..309cf99bcda93429322094f0706f6bbd274a48e5 100644
@@ -555,6 +555,7 @@ struct nqe_cn {
 
 #define MAX_TPA                64
 #define MAX_TPA_P5     256
+#define MAX_TPA_P5_MASK        (MAX_TPA_P5 - 1)
 #define MAX_TPA_SEGS_P5        0x3f
 
 #if (BNXT_PAGE_SHIFT == 16)
@@ -841,6 +842,13 @@ struct bnxt_tpa_info {
        struct rx_agg_cmp       *agg_arr;
 };
 
+#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
+
+struct bnxt_tpa_idx_map {
+       u16             agg_id_tbl[1024];
+       unsigned long   agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
+};
+
 struct bnxt_rx_ring_info {
        struct bnxt_napi        *bnapi;
        u16                     rx_prod;
@@ -868,6 +876,7 @@ struct bnxt_rx_ring_info {
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
        struct bnxt_tpa_info    *rx_tpa;
+       struct bnxt_tpa_idx_map *rx_tpa_idx_map;
 
        struct bnxt_ring_struct rx_ring_struct;
        struct bnxt_ring_struct rx_agg_ring_struct;
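
As a quick sizing check on the new per-ring structure, assuming a 64-bit kernel where
BITS_PER_LONG is 64: agg_id_tbl holds 1024 u16 entries (2 KB per RX ring), and
BNXT_AGG_IDX_BMAP_SIZE works out to 256 / 64 = 4 unsigned longs (32 bytes), i.e. one bit
for each of the MAX_TPA_P5 software slots.  That small translation table is what lets
each ring keep only bp->max_tpa bnxt_tpa_info entries rather than one for every possible
hardware agg ID (1024).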