From efecfd5f803d5957ccf003310bff432c6ebd653b Mon Sep 17 00:00:00 2001
From: Tony Nguyen
Date: Fri, 16 Mar 2018 15:34:04 -0700
Subject: [PATCH] ixgbevf: Delay tail write for XDP packets

Current XDP implementation hits the tail on every XDP_TX; change the
driver to only hit the tail after packet processing is complete.

Based on commit 7379f97a4fce ("ixgbe: delay tail write to every 'n'
packets")

Signed-off-by: Tony Nguyen
Acked-by: John Fastabend
Tested-by: Andrew Bowers
Signed-off-by: Jeff Kirsher
---
 .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 30 +++++++++++--------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 309f549808e4..5167e81e0cf1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1016,14 +1016,8 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 		cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
 			    IXGBE_ADVTXD_CC);
 
-	/* Force memory writes to complete before letting h/w know there
-	 * are new descriptors to fetch. (Only applicable for weak-ordered
-	 * memory model archs, such as IA-64).
-	 *
-	 * We also need this memory barrier to make certain all of the
-	 * status bits have been updated before next_to_watch is written.
-	 */
-	wmb();
+	/* Avoid any potential race with cleanup */
+	smp_wmb();
 
 	/* set next_to_watch value indicating a packet is present */
 	i++;
@@ -1033,8 +1027,6 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 	tx_buffer->next_to_watch = tx_desc;
 	ring->next_to_use = i;
 
-	/* notify HW of packet */
-	ixgbevf_write_tail(ring, i);
 	return IXGBEVF_XDP_TX;
 }
 
@@ -1101,6 +1093,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 	struct sk_buff *skb = rx_ring->skb;
+	bool xdp_xmit = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -1142,11 +1135,13 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX)
+			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+				xdp_xmit = true;
 				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
 						       size);
-			else
+			} else {
 				rx_buffer->pagecnt_bias++;
+			}
 			total_rx_packets++;
 			total_rx_bytes += size;
 		} else if (skb) {
@@ -1208,6 +1203,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
+	if (xdp_xmit) {
+		struct ixgbevf_ring *xdp_ring =
+			adapter->xdp_ring[rx_ring->queue_index];
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.
+		 */
+		wmb();
+		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
+	}
+
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;
-- 
2.30.2
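
The sketch below models the batched-doorbell pattern this patch applies:
on XDP_TX, enqueue only advances the software producer index, and a
single tail (doorbell) write happens once the poll loop is done. It is a
minimal userspace illustration, not driver code; struct ring, enqueue(),
flush(), and write_tail() are hypothetical names standing in for the
driver's next_to_use bookkeeping and ixgbevf_write_tail(). The barriers
from the patch are only noted in comments: smp_wmb() orders the
descriptor writes against the CPU-side cleanup path, while the one wmb()
before the tail write orders them against the device.

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 16

    struct ring {
            int next_to_use;        /* producer index, software only */
            int tail;               /* last index the "hardware" saw */
    };

    /* Stand-in for the MMIO tail write (ixgbevf_write_tail() in the
     * driver): tells the NIC there are new descriptors to fetch.
     * The driver issues wmb() immediately before this. */
    static void write_tail(struct ring *r, int idx)
    {
            r->tail = idx;
            printf("doorbell: tail <- %d\n", idx);
    }

    /* XDP_TX path: before the patch this also rang the doorbell; now
     * it only advances the software producer index. The driver uses
     * smp_wmb() here to order descriptor writes against cleanup. */
    static void enqueue(struct ring *r)
    {
            r->next_to_use = (r->next_to_use + 1) % RING_SIZE;
    }

    /* End of the poll loop: one doorbell covering every packet queued
     * above, issued only if something was actually queued. */
    static void flush(struct ring *r, bool xdp_xmit)
    {
            if (xdp_xmit && r->tail != r->next_to_use)
                    write_tail(r, r->next_to_use);
    }

    int main(void)
    {
            struct ring r = { 0, 0 };
            bool xdp_xmit = false;
            int pkt;

            for (pkt = 0; pkt < 8; pkt++) { /* RX poll loop */
                    enqueue(&r);            /* XDP_TX verdict */
                    xdp_xmit = true;
            }
            flush(&r, xdp_xmit);            /* one write, not eight */
            return 0;
    }

With eight packets the pre-patch behaviour would have issued eight tail
writes; here the doorbell fires once per poll cycle. That is the point
of the change: MMIO writes are comparatively expensive, so batching them
per cleanup pass rather than per packet reduces bus traffic on the
XDP_TX path, the same rationale as the ixgbe commit this is based on.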