qed: Fix possibility of list corruption during rmmod flows
authorMichal Kalderon <Michal.Kalderon@cavium.com>
Wed, 16 May 2018 11:44:39 +0000 (14:44 +0300)
committerDavid S. Miller <davem@davemloft.net>
Wed, 16 May 2018 18:49:01 +0000 (14:49 -0400)
The ll2 flows that flush the txq/rxq need to be synchronized with the
regular fast-path (fp) processing. The missing synchronization caused
list corruption during load/unload stress tests.

Fixes: 0a7fb11c23c0f ("qed: Add Light L2 support")
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/qlogic/qed/qed_ll2.c

index be63f0a2705d8dd64b82695fca9b6a22697e6eae..4a78294d6481fb0f3da79ebeedefb6f0be9ade99 100644 (file)
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
        struct qed_ll2_tx_packet *p_pkt = NULL;
        struct qed_ll2_info *p_ll2_conn;
        struct qed_ll2_tx_queue *p_tx;
+       unsigned long flags = 0;
        dma_addr_t tx_frag;
 
        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        p_tx = &p_ll2_conn->tx_queue;
 
+       spin_lock_irqsave(&p_tx->lock, flags);
        while (!list_empty(&p_tx->active_descq)) {
                p_pkt = list_first_entry(&p_tx->active_descq,
                                         struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+               spin_unlock_irqrestore(&p_tx->lock, flags);
                if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                                                      b_last_frag,
                                                      b_last_packet);
                }
+               spin_lock_irqsave(&p_tx->lock, flags);
        }
+       spin_unlock_irqrestore(&p_tx->lock, flags);
 }
 
 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
        struct qed_ll2_info *p_ll2_conn = NULL;
        struct qed_ll2_rx_packet *p_pkt = NULL;
        struct qed_ll2_rx_queue *p_rx;
+       unsigned long flags = 0;
 
        p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
        if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
        p_rx = &p_ll2_conn->rx_queue;
 
+       spin_lock_irqsave(&p_rx->lock, flags);
        while (!list_empty(&p_rx->active_descq)) {
                p_pkt = list_first_entry(&p_rx->active_descq,
                                         struct qed_ll2_rx_packet, list_entry);
                if (!p_pkt)
                        break;
-
                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+               spin_unlock_irqrestore(&p_rx->lock, flags);
 
                if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
                        struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                                                      cookie,
                                                      rx_buf_addr, b_last);
                }
+               spin_lock_irqsave(&p_rx->lock, flags);
        }
+       spin_unlock_irqrestore(&p_rx->lock, flags);
 }
 
 static bool