* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
+static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
+ int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int i, j;
- int rqpv, tqpv;
- int err = 0;
- /* No mapping required if MSI-X is disabled. */
+ /* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- goto out;
-
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ q_vectors = 1;
/*
- * The ideal configuration...
- * We have enough vectors to map one per queue.
+ * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ *
+ * Re-adjusting *qpv takes care of the remainder.
*/
- if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ for (; v_start < q_vectors && rxr_remaining; v_start++) {
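+ /* spread the remaining Rx rings evenly over the vectors left */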
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
+ for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
map_vector_to_rxq(adapter, v_start, rxr_idx);
-
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- map_vector_to_txq(adapter, v_start, txr_idx);
-
- goto out;
}
/*
- * If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
+ * If there are not enough q_vectors for each ring to have its own
+ * vector, then we must pair up Rx/Tx on each vector.
*/
- /* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
+ if ((v_start + txr_remaining) > q_vectors)
+ v_start = 0;
+
+ for (; v_start < q_vectors && txr_remaining; v_start++) {
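+ /* spread the remaining Tx rings evenly over the vectors left */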
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
+ for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
+ map_vector_to_txq(adapter, v_start, txr_idx);
}
-out:
- return err;
}
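
For reference, a minimal stand-alone sketch of the distribution math above
(illustrative only; the counts of 4 q_vectors and 10 rings of each kind are
made up). DIV_ROUND_UP() is recomputed against the vectors still unassigned,
so the earlier vectors absorb any remainder and 10 rings over 4 vectors
split 3/3/2/2; with no vectors left after the Rx pass, the Tx pass restarts
at vector 0, pairing Rx and Tx on every vector:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 4, v_start = 0;
	int rxr_remaining = 10, rxr_idx = 0;
	int txr_remaining = 10, txr_idx = 0;

	for (; v_start < q_vectors && rxr_remaining; v_start++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);

		printf("vector %d: Rx rings %d-%d\n",
		       v_start, rxr_idx, rxr_idx + rqpv - 1);
		rxr_idx += rqpv;
		rxr_remaining -= rqpv;
	}

	/* no room left for a 1-to-1 Tx mapping: restart at vector 0 */
	if ((v_start + txr_remaining) > q_vectors)
		v_start = 0;

	for (; v_start < q_vectors && txr_remaining; v_start++) {
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);

		printf("vector %d: Tx rings %d-%d\n",
		       v_start, txr_idx, txr_idx + tqpv - 1);
		txr_idx += tqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}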
/**
int vector, err;
int ri = 0, ti = 0;
- err = ixgbe_map_rings_to_vectors(adapter);
- if (err)
- return err;
-
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
struct net_device *netdev = adapter->netdev;
int err;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ /* map all of the rings to the q_vectors */
+ ixgbe_map_rings_to_vectors(adapter);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
err = ixgbe_request_msix_irqs(adapter);
- } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+ else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
netdev->name, adapter);
- } else {
+ else
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
netdev->name, adapter);
- }
- if (err)
+ if (err) {
e_err(probe, "request_irq failed, Error %d\n", err);
+ /* place q_vectors and rings back into a known good state */
+ ixgbe_reset_q_vectors(adapter);
+ }
+
return err;
}
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
free_irq(adapter->msix_entries[i].vector, adapter);
-
i--;
+
for (; i >= 0; i--) {
/* free only the irqs that were actually requested */
if (!adapter->q_vector[i]->rx.ring &&
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
- map_vector_to_rxq(adapter, 0, 0);
- map_vector_to_txq(adapter, 0, 0);
-
e_info(hw, "Legacy interrupt IVAR setup done\n");
}