ice: allow empty Rx descriptors
author Mitch Williams <mitch.a.williams@intel.com>
Thu, 25 Jul 2019 08:55:34 +0000 (01:55 -0700)
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tue, 20 Aug 2019 21:30:37 +0000 (14:30 -0700)
In some circumstances, the hardware will hand us a receive descriptor
which has no data attached, but is otherwise valid. The receive code was
improperly ignoring these descriptors, resulting in an infinite loop.

To fix this, change the receive code to process all descriptors,
regardless of the size of the associated data. Add checks to the
memory-handling functions to allow for zero size.
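
For illustration, here is a minimal, self-contained userspace sketch of the
failure mode and the fix. It is not the driver code: struct rx_desc,
clean_rx_ring and next_to_clean below are simplified stand-ins for the real
ice ring structures, modeling only the pattern of the bug.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	struct rx_desc {
		uint16_t pkt_len;	/* 0: no data attached, but valid */
		bool done;		/* descriptor write-back complete */
	};

	static void clean_rx_ring(struct rx_desc *ring, unsigned int count,
				  unsigned int *next_to_clean)
	{
		while (ring[*next_to_clean].done) {
			struct rx_desc *desc = &ring[*next_to_clean];

			/* Before the fix, a zero-size descriptor was skipped
			 * here without being consumed, so next_to_clean never
			 * advanced and this loop spun forever. After the fix,
			 * every completed descriptor is processed; the
			 * memory-handling helpers simply tolerate size == 0.
			 */
			if (desc->pkt_len)
				printf("received %u bytes\n", desc->pkt_len);
			else
				printf("empty descriptor, consumed anyway\n");

			desc->done = false;
			*next_to_clean = (*next_to_clean + 1) % count;
		}
	}

	int main(void)
	{
		struct rx_desc ring[4] = {
			{ .pkt_len = 64,  .done = true },
			{ .pkt_len = 0,   .done = true }, /* problem case */
			{ .pkt_len = 128, .done = true },
		};
		unsigned int ntc = 0;

		clean_rx_ring(ring, 4, &ntc);
		return 0;
	}

The driver-side equivalent is the set of guards in the hunks below: each
helper returns early when size is zero, so the descriptor is still consumed
and the ring still advances.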

Signed-off-by: Mitch Williams <mitch.a.williams@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ice/ice_txrx.c

index c88e0701e1d71be2634b99f1fb5df97baee85ea9..e5c4c9139e546ac24358fd15bf471507d4fc4130 100644
@@ -607,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
        unsigned int truesize = ICE_RXBUF_2048;
 #endif
 
+       if (!size)
+               return;
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
                        rx_buf->page_offset, size, truesize);
 
@@ -662,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
        prefetchw(rx_buf->page);
        *skb = rx_buf->skb;
 
+       if (!size)
+               return rx_buf;
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
                                      rx_buf->page_offset, size,
@@ -745,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
  */
 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 {
-               /* hand second half of page back to the ring */
+       if (!rx_buf)
+               return;
+
        if (ice_can_reuse_rx_page(rx_buf)) {
+               /* hand second half of page back to the ring */
                ice_reuse_rx_page(rx_ring, rx_buf);
                rx_ring->rx_stats.page_reuse_count++;
        } else {
@@ -1031,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                        ICE_RX_FLX_DESC_PKT_LEN_M;
 
+               /* retrieve a buffer from the ring */
                rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
-               /* allocate (if needed) and populate skb */
+
                if (skb)
                        ice_add_rx_frag(rx_buf, skb, size);
                else
@@ -1041,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->rx_stats.alloc_buf_failed++;
-                       rx_buf->pagecnt_bias++;
+                       if (rx_buf)
+                               rx_buf->pagecnt_bias++;
                        break;
                }