i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
- struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+ struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;

i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
- struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+ struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
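
The common thread in all of these hunks: tx_rings and rx_rings stop being
arrays of ring pointers and become single contiguous arrays of struct
i40e_ring, so every lookup switches from loading a pointer
(adapter->rx_rings[i]) to taking the address of an array element
(&adapter->rx_rings[i]), and every field access switches from -> to the
. operator. A minimal sketch of the i40evf_adapter change this implies
(the struct definition is not part of this fragment, so treat the exact
declarations as an assumption):

	/* Sketch only; the real fields live in the driver header. */
	struct i40evf_adapter {
		/* ... */
		struct i40e_ring *tx_rings;	/* was an array of ring pointers */
		struct i40e_ring *rx_rings;	/* was an array of ring pointers */
		/* ... */
	};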
int i;
for (i = 0; i < adapter->num_active_queues; i++)
- adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+ adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}
/**
}
for (i = 0; i < adapter->num_active_queues; i++) {
- adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i].rx_buf_len = rx_buf_len;
}
}

adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
for (i = 0; i < adapter->num_active_queues; i++) {
- struct i40e_ring *ring = adapter->rx_rings[i];
+ struct i40e_ring *ring = &adapter->rx_rings[i];
i40evf_alloc_rx_buffers_1buf(ring, ring->count);
ring->next_to_use = ring->count - 1;
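
Parking next_to_use at count - 1 after filling the ring follows the usual
Intel ring convention of leaving one descriptor slot unused, so that
head == tail can only mean an empty ring; the tail-register write that
hands the buffers to hardware is in context lines not shown in this
fragment.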
**/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
- int i;
-
if (!adapter->vsi_res)
return;
- for (i = 0; i < adapter->num_active_queues; i++) {
- if (adapter->tx_rings[i])
- kfree_rcu(adapter->tx_rings[i], rcu);
- adapter->tx_rings[i] = NULL;
- adapter->rx_rings[i] = NULL;
- }
+ kfree(adapter->tx_rings);
+ kfree(adapter->rx_rings);
}
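
With the rings in two kcalloc'ed arrays, the per-ring kfree_rcu() teardown
collapses into two kfree() calls. Two properties make this tidy: kfree(NULL)
is a no-op, so this function is safe to reach even if i40evf_alloc_queues()
failed after its first allocation, and dropping the RCU grace period is only
sound if nothing can still be dereferencing the rings here, presumably
because the rings are only freed once the interface is quiesced.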
/**
{
int i;
+ adapter->tx_rings = kcalloc(adapter->num_active_queues,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!adapter->tx_rings)
+ goto err_out;
+ adapter->rx_rings = kcalloc(adapter->num_active_queues,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!adapter->rx_rings)
+ goto err_out;
+
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
- tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);
- if (!tx_ring)
- goto err_out;
+ tx_ring = &adapter->tx_rings[i];
tx_ring->queue_index = i;
tx_ring->netdev = adapter->netdev;
tx_ring->count = adapter->tx_desc_count;
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
- adapter->tx_rings[i] = tx_ring;
- rx_ring = &tx_ring[1];
+ rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- adapter->rx_rings[i] = rx_ring;
}
return 0;
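
For contrast: the old allocator grabbed one kzalloc'ed block of two ring
structs per queue pair and aliased the Rx ring to the second element, which
meant a mid-loop failure left earlier pairs to unwind. The new version makes
exactly two allocations before the loop, and the loop itself can no longer
fail. A condensed before/after sketch (the framing is mine; only the
allocation idioms come from the diff):

	/* Old: one allocation per queue pair, Rx ring right after Tx. */
	tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL);	/* tx + rx */
	if (!tx_ring)
		goto err_out;	/* pairs 0..i-1 still need freeing */
	rx_ring = &tx_ring[1];

	/* New: two zeroed arrays up front; queue i is element i. */
	adapter->tx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	adapter->rx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);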
struct i40e_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- adapter->q_vectors = kzalloc(sizeof(*q_vector) * num_q_vectors,
+ adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
goto err_out;
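
The kzalloc-to-kcalloc switch here is more than style: kcalloc(n, size,
flags) checks the n * size multiplication for overflow and returns NULL on
wrap, whereas the open-coded kzalloc(size * n, flags) would silently
truncate; both return zeroed memory.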
int i;
for (i = 0; i < adapter->num_active_queues; i++)
- if (adapter->tx_rings[i]->desc)
- i40evf_free_tx_resources(adapter->tx_rings[i]);
+ if (adapter->tx_rings[i].desc)
+ i40evf_free_tx_resources(&adapter->tx_rings[i]);
}
/**
int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) {
- adapter->tx_rings[i]->count = adapter->tx_desc_count;
- err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ adapter->tx_rings[i].count = adapter->tx_desc_count;
+ err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
if (!err)
continue;
dev_err(&adapter->pdev->dev,

int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) {
- adapter->rx_rings[i]->count = adapter->rx_desc_count;
- err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ adapter->rx_rings[i].count = adapter->rx_desc_count;
+ err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
if (!err)
continue;
dev_err(&adapter->pdev->dev,

int i;
for (i = 0; i < adapter->num_active_queues; i++)
- if (adapter->rx_rings[i]->desc)
- i40evf_free_rx_resources(adapter->rx_rings[i]);
+ if (adapter->rx_rings[i].desc)
+ i40evf_free_rx_resources(&adapter->rx_rings[i]);
}
/**
for (i = 0; i < pairs; i++) {
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
- vqpi->txq.ring_len = adapter->tx_rings[i]->count;
- vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+ vqpi->txq.ring_len = adapter->tx_rings[i].count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
(vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
- vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
- vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.ring_len = adapter->rx_rings[i].count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
- vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
vqpi++;
}
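
This final hunk comes from the queue-pair configuration message the VF sends
to the PF (the virtchnl path, in a separate file from the hunks above). Note
where the head-writeback address lands: immediately past the descriptor ring
inside the same DMA buffer. A worked example with assumed numbers (512
descriptors as a typical Tx default, and a 16-byte i40e_tx_desc; both are
assumptions here, not part of the diff):

	/* Illustration only - hypothetical values. */
	vqpi->txq.ring_len = 512;		/* descriptors */
	/* 512 * sizeof(struct i40e_tx_desc) = 512 * 16 = 8192 bytes, so the
	 * head writeback word sits at dma_ring_addr + 0x2000.
	 */
	vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + 8192;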