dpaa2-eth: Release buffers back to pool on XDP_DROP
authorIoana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
Mon, 26 Nov 2018 16:27:31 +0000 (16:27 +0000)
committerDavid S. Miller <davem@davemloft.net>
Wed, 28 Nov 2018 18:57:45 +0000 (10:57 -0800)
Instead of freeing the Rx buffers, release them back into the buffer
pool. To amortize the cost, we accumulate buffers until we reach the
maximum number supported by a single release command
(DPAA2_ETH_BUFS_PER_CMD) before issuing it.

Also, no longer unmap the Rx buffers at the start of the Rx routine,
since that would require remapping them before release. Instead, do
only a DMA sync first, and unmap later only if the frame is meant for
the stack.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h

index 174c960dd62df194a284f83220350d895e8110a2..ac4cb817ee9a506909c12591eb5386d35bc91933 100644 (file)
@@ -217,10 +217,34 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
        }
 }
 
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+                           struct dpaa2_eth_channel *ch,
+                           dma_addr_t addr)
+{
+       int err;
+
+       ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+       if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+               return;
+
+       while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+                                              ch->xdp.drop_bufs,
+                                              ch->xdp.drop_cnt)) == -EBUSY)
+               cpu_relax();
+
+       if (err) {
+               free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+               ch->buf_count -= ch->xdp.drop_cnt;
+       }
+
+       ch->xdp.drop_cnt = 0;
+}
+
 static u32 run_xdp(struct dpaa2_eth_priv *priv,
                   struct dpaa2_eth_channel *ch,
                   struct dpaa2_fd *fd, void *vaddr)
 {
+       dma_addr_t addr = dpaa2_fd_get_addr(fd);
        struct bpf_prog *xdp_prog;
        struct xdp_buff xdp;
        u32 xdp_act = XDP_PASS;
@@ -250,8 +274,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
        case XDP_ABORTED:
                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
        case XDP_DROP:
-               ch->buf_count--;
-               free_rx_fd(priv, fd, vaddr);
+               xdp_release_buf(priv, ch, addr);
                break;
        }
 
@@ -282,7 +305,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
        trace_dpaa2_rx_fd(priv->net_dev, fd);
 
        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-       dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+       dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                               DMA_FROM_DEVICE);
 
        fas = dpaa2_get_fas(vaddr, false);
        prefetch(fas);
@@ -300,10 +324,14 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
                        return;
                }
 
+               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_FROM_DEVICE);
                skb = build_linear_skb(ch, fd, vaddr);
        } else if (fd_format == dpaa2_fd_sg) {
                WARN_ON(priv->xdp_prog);
 
+               dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+                                DMA_FROM_DEVICE);
                skb = build_frag_skb(priv, ch, buf_data);
                skb_free_frag(vaddr);
                percpu_extras->rx_sg_frames++;
index 2873a15b6b283ed10b671b55787d436c449ba189..23cf9d94f233cab456c196f3101b5902315e36f6 100644 (file)
@@ -285,6 +285,8 @@ struct dpaa2_eth_fq {
 
 struct dpaa2_eth_ch_xdp {
        struct bpf_prog *prog;
+       u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+       int drop_cnt;
 };
 
 struct dpaa2_eth_channel {