ixgbevf: add counters for Rx page allocations
authorEmil Tantilov <emil.s.tantilov@intel.com>
Mon, 11 Dec 2017 18:37:15 +0000 (10:37 -0800)
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>
Fri, 26 Jan 2018 15:46:51 +0000 (07:46 -0800)
We already had placeholders for failed page and buffer allocations. Add an
alloc_rx_page counter and make sure all of these stats are properly updated
and exposed in ethtool.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
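
For context, the ethtool statistics exposed by this driver are driven by a
table that maps each display string to the offset of the backing counter in
the adapter structure, which is why the change touches both the stats table
in ethtool.c and the counter fields in ixgbevf.h. The sketch below only
illustrates that general pattern with hypothetical names; it is not the
driver's actual IXGBEVF_STAT macro or struct ixgbe_stats definition.

#include <stddef.h>

/* Hypothetical, simplified stand-ins for the driver's stats plumbing. */
struct example_adapter {
	unsigned long long alloc_rx_page;
	unsigned long long alloc_rx_page_failed;
	unsigned long long alloc_rx_buff_failed;
};

struct example_stat {
	const char *name;	/* string reported by "ethtool -S" */
	size_t offset;		/* where the counter lives inside the adapter */
};

#define EXAMPLE_STAT(n, field) \
	{ .name = (n), .offset = offsetof(struct example_adapter, field) }

static const struct example_stat example_gstrings_stats[] = {
	EXAMPLE_STAT("alloc_rx_page", alloc_rx_page),
	EXAMPLE_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	EXAMPLE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};

/* A get_ethtool_stats handler would walk the table and copy
 * *(u64 *)((char *)adapter + entry->offset) into the output buffer.
 */

Once the table entries are in place, the new counters show up in
"ethtool -S <iface>" output (interface name is a placeholder) alongside the
existing counters.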

index ff9d05f308eef583cc3268e00bf7d089a07340d7..4400e49090b47d8e2715901ede4525dab06238c2 100644
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
        IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
        IXGBEVF_NETDEV_STAT(multicast),
        IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+       IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+       IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+       IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
 };
 
 #define IXGBEVF_QUEUE_STATS_LEN ( \
index c70a789035aea7ed16da1ba091a32ff16d59ed1d..f6952425c87d40662022f17aa04c331ac4e1929b 100644
@@ -84,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
 struct ixgbevf_rx_queue_stats {
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
+       u64 alloc_rx_page;
        u64 csum_err;
 };
 
@@ -295,8 +296,9 @@ struct ixgbevf_adapter {
        u64 hw_csum_rx_error;
        u64 hw_rx_no_dma_resources;
        int num_msix_vectors;
-       u32 alloc_rx_page_failed;
-       u32 alloc_rx_buff_failed;
+       u64 alloc_rx_page_failed;
+       u64 alloc_rx_buff_failed;
+       u64 alloc_rx_page;
 
        struct msix_entry *msix_entries;
 
index ae2402ddd9fb0853c689b2888ee5c02bed24790f..350afec3dde84b693054ca172e3123a640464c52 100644
@@ -604,7 +604,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);
 
-               rx_ring->rx_stats.alloc_rx_buff_failed++;
+               rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }
 
@@ -612,6 +612,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
        bi->page = page;
        bi->page_offset = 0;
        bi->pagecnt_bias = 1;
+       rx_ring->rx_stats.alloc_rx_page++;
 
        return true;
 }
@@ -963,8 +964,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
                /* exit if we failed to retrieve a buffer */
-               if (!skb)
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
                        break;
+               }
 
                cleaned_count++;
 
@@ -2749,6 +2752,8 @@ out:
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+       u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
        int i;
 
        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2769,10 +2774,18 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
                                adapter->stats.vfmprc);
 
        for (i = 0;  i  < adapter->num_rx_queues;  i++) {
-               adapter->hw_csum_rx_error +=
-                       adapter->rx_ring[i]->hw_csum_rx_error;
-               adapter->rx_ring[i]->hw_csum_rx_error = 0;
+               struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+               hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+               alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+               alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+               alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
        }
+
+       adapter->hw_csum_rx_error = hw_csum_rx_error;
+       adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+       adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+       adapter->alloc_rx_page = alloc_rx_page;
 }
 
 /**
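
Two behavioral notes on the ixgbevf_main.c hunks above: a DMA mapping failure
in ixgbevf_alloc_mapped_page() is now counted as alloc_rx_page_failed (it is a
page-level failure), while alloc_rx_buff_failed is bumped when
ixgbevf_clean_rx_irq() fails to retrieve an skb. In addition,
ixgbevf_update_stats() now rebuilds the adapter-level totals from the per-ring
rx_stats counters on every call, overwriting rather than accumulating, so the
per-ring counters are left untouched (the old code accumulated
hw_csum_rx_error into the adapter and zeroed the ring copy). A minimal sketch
of that aggregation pattern, using hypothetical names rather than the driver's
real types:

#include <stdint.h>

struct example_ring_stats {
	uint64_t alloc_rx_page;
	uint64_t alloc_rx_page_failed;
	uint64_t alloc_rx_buff_failed;
};

/* Rebuild adapter-wide totals from monotonic per-ring counters. */
static void example_update_stats(const struct example_ring_stats *rings,
				 int num_rings, uint64_t totals[3])
{
	uint64_t page = 0, page_failed = 0, buff_failed = 0;
	int i;

	for (i = 0; i < num_rings; i++) {
		page        += rings[i].alloc_rx_page;
		page_failed += rings[i].alloc_rx_page_failed;
		buff_failed += rings[i].alloc_rx_buff_failed;
	}

	/* Overwrite rather than accumulate: the per-ring counters are the
	 * source of truth and are never reset.
	 */
	totals[0] = page;
	totals[1] = page_failed;
	totals[2] = buff_failed;
}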