net: ena: fix theoretical Rx hang on low memory systems
author     Netanel Belgazal <netanel@amazon.com>    Sun, 11 Jun 2017 12:42:48 +0000 (15:42 +0300)
committer  David S. Miller <davem@davemloft.net>    Sun, 11 Jun 2017 20:36:46 +0000 (16:36 -0400)
In the rare case where the device runs out of free Rx buffer
descriptors (e.g. under kernel memory pressure) and the napi handler
repeatedly fails to refill them, the device Rx queue eventually has no
free buffers left to post incoming packets to, leading to a deadlock:
* The device won't send interrupts, since all new Rx packets
  are dropped.
* The napi handler won't try to allocate new Rx descriptors,
  since allocation is part of NAPI, which is no longer invoked.

The fix detects this scenario from the keepalive/watchdog task and
reschedules napi so that the Rx buffers get refilled (the pattern is
sketched after the changed-file list below).

Fixes: 1738cd3ed342 ("Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
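
A hedged sketch of the mechanism follows, using illustrative demo_* names
rather than real ENA symbols: a periodic watchdog notices an Rx ring whose
descriptors have all been free for two passes in a row and kicks NAPI so the
refill path runs again. It is written against the pre-4.15 timer API
(callback takes an unsigned long cookie), matching the ena_timer_service()
signature in this patch, and demo_free_rx_descs() is a stub standing in for
the driver's real free-space query (ena_com_sq_empty_space() in the actual
change).

#include <linux/netdevice.h>
#include <linux/timer.h>

/* Kick NAPI after two consecutive "ring is empty" observations. */
#define DEMO_EMPTY_PASSES_BEFORE_KICK 2

struct demo_ring {
        struct napi_struct *napi;   /* NAPI context serving this Rx ring */
        int ring_size;              /* number of Rx descriptors in the ring */
        int empty_passes;           /* consecutive empty-ring detections */
};

static struct timer_list demo_timer;

/* Placeholder: a real driver reads its submission-queue bookkeeping here
 * (the ENA patch uses ena_com_sq_empty_space() for this).
 */
static int demo_free_rx_descs(struct demo_ring *ring)
{
        return 0;
}

static void demo_watchdog(unsigned long data)
{
        struct demo_ring *ring = (struct demo_ring *)data;

        /* If every descriptor is free, no Rx buffer is posted for the NIC,
         * so no Rx completion interrupt will ever fire on its own.
         */
        if (demo_free_rx_descs(ring) == ring->ring_size - 1) {
                if (++ring->empty_passes >= DEMO_EMPTY_PASSES_BEFORE_KICK) {
                        napi_schedule(ring->napi);  /* re-run the refill path */
                        ring->empty_passes = 0;
                }
        } else {
                ring->empty_passes = 0;
        }

        mod_timer(&demo_timer, round_jiffies(jiffies + HZ));  /* ~1s period */
}

/* Arming the watchdog, e.g. from the driver's up/open path:
 *      setup_timer(&demo_timer, demo_watchdog, (unsigned long)ring);
 *      mod_timer(&demo_timer, round_jiffies(jiffies + HZ));
 */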

index 67b2338f8fb34100df983fc11727d4e661548b24..533b2fbdeef165811117ba01e36cc412e4ce50be 100644 (file)
@@ -94,6 +94,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(dma_mapping_err),
        ENA_STAT_RX_ENTRY(bad_desc_num),
        ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+       ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
index 4e9fbddd3b47eec1049cb2d653017b3f44b1b7e3..3c366bfbbab13db6a902f0f775cba94f114501c7 100644 (file)
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
                rxr->sgl_size = adapter->max_rx_sgl_size;
                rxr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+               rxr->empty_rx_queue = 0;
        }
 }
 
@@ -2619,6 +2620,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
        adapter->last_monitored_tx_qid = i % adapter->num_queues;
 }
 
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * The device won't send interrupts since all the new Rx packets will be dropped
+ * The napi handler won't allocate new Rx descriptors so the device won't be
+ * able to receive new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for
+ * constrained environments.
+ *
+ * When such a situation is detected - reschedule napi.
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+       struct ena_ring *rx_ring;
+       int i, refill_required;
+
+       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+               return;
+
+       if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+               return;
+
+       for (i = 0; i < adapter->num_queues; i++) {
+               rx_ring = &adapter->rx_ring[i];
+
+               refill_required =
+                       ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+               if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+                       rx_ring->empty_rx_queue++;
+
+                       if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+                               u64_stats_update_begin(&rx_ring->syncp);
+                               rx_ring->rx_stats.empty_rx_ring++;
+                               u64_stats_update_end(&rx_ring->syncp);
+
+                               netif_err(adapter, drv, adapter->netdev,
+                                         "trigger refill for ring %d\n", i);
+
+                               napi_schedule(rx_ring->napi);
+                               rx_ring->empty_rx_queue = 0;
+                       }
+               } else {
+                       rx_ring->empty_rx_queue = 0;
+               }
+       }
+}
+
 /* Check for keep alive expiration */
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
@@ -2673,6 +2726,8 @@ static void ena_timer_service(unsigned long data)
 
        check_for_missing_tx_completions(adapter);
 
+       check_for_empty_rx_ring(adapter);
+
        if (debug_area)
                ena_dump_stats_to_buf(adapter, debug_area);
 
index 0e22bce6239d0e06c73a366e0d98a2348a9b7fa9..8828f1d6dd22bac9cdc8efe8a6a4d3ec11e5a071 100644 (file)
@@ -184,6 +184,7 @@ struct ena_stats_rx {
        u64 dma_mapping_err;
        u64 bad_desc_num;
        u64 rx_copybreak_pkt;
+       u64 empty_rx_ring;
 };
 
 struct ena_ring {
@@ -231,6 +232,7 @@ struct ena_ring {
                struct ena_stats_tx tx_stats;
                struct ena_stats_rx rx_stats;
        };
+       int empty_rx_queue;
 } ____cacheline_aligned;
 
 struct ena_stats_dev {
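
The ena_ethtool.c hunk exposes the new empty_rx_ring counter through the
standard ethtool statistics interface, so it shows up in "ethtool -S <iface>".
As a hedged illustration (not part of the patch, with only minimal error
handling), the standalone program below pulls the same statistics via the
SIOCETHTOOL string/stat ioctls and prints only the per-queue empty_rx_ring
entries:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int ethtool_ioctl(int fd, const char *ifname, void *cmd)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = cmd;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(int argc, char **argv)
{
        const char *ifname = argc > 1 ? argv[1] : "eth0";
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct {
                struct ethtool_sset_info hdr;
                __u32 buf[1];
        } sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
                            .sset_mask = 1ULL << ETH_SS_STATS } };
        struct ethtool_gstrings *strings;
        struct ethtool_stats *stats;
        unsigned int i, n;

        if (fd < 0 || ethtool_ioctl(fd, ifname, &sset) < 0) {
                perror("ETHTOOL_GSSET_INFO");
                return 1;
        }
        n = sset.buf[0];        /* number of stats the driver exports */

        strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
        stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
        if (!strings || !stats)
                return 1;
        strings->cmd = ETHTOOL_GSTRINGS;
        strings->string_set = ETH_SS_STATS;
        strings->len = n;
        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = n;

        if (ethtool_ioctl(fd, ifname, strings) < 0 ||
            ethtool_ioctl(fd, ifname, stats) < 0) {
                perror("ETHTOOL_GSTRINGS/GSTATS");
                return 1;
        }

        /* Print only the per-queue empty_rx_ring counters added by this patch. */
        for (i = 0; i < n; i++) {
                const char *name =
                        (const char *)&strings->data[i * ETH_GSTRING_LEN];

                if (strstr(name, "empty_rx_ring"))
                        printf("%.*s: %llu\n", ETH_GSTRING_LEN, name,
                               (unsigned long long)stats->data[i]);
        }

        close(fd);
        return 0;
}

A counter that keeps climbing means the watchdog repeatedly finds the ring
empty, which on this driver usually points at vm.min_free_kbytes being too
low for the workload, as the comment added in ena_netdev.c notes.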