/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256
+/* Flags distinguishing the two types of XDP xmit (XDP_TX vs. XDP_REDIRECT) */
+#define VIRTIO_XDP_TX BIT(0)
+#define VIRTIO_XDP_REDIR BIT(1)
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
struct receive_queue *rq,
void *buf, void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
- *xdp_xmit = true;
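+/* Record that this poll queued at least one XDP_TX frame; the
+ * virtqueue kick is deferred to virtnet_poll() so it happens once
+ * per poll rather than once per packet.
+ */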
+ *xdp_xmit |= VIRTIO_XDP_TX;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err)
goto err_xdp;
- *xdp_xmit = true;
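+/* Record the redirect; xdp_do_flush_map() is likewise deferred to
+ * virtnet_poll() and runs once per poll.
+ */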
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
rcu_read_unlock();
goto xdp_xmit;
default:
void *buf,
void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
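+/* Same bookkeeping as the small-buffer path: only record the xmit
+ * type here; the kick/flush happens in virtnet_poll().
+ */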
+ *xdp_xmit |= VIRTIO_XDP_TX;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
}
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+ unsigned int *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
unsigned int received, qp;
- bool xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit) {
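+/* Flush the redirect maps (devmap/cpumap bulk queues) only when at
+ * least one frame was actually redirected during this poll.
+ */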
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush_map();
+
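+/* Kick the per-CPU XDP TX virtqueue once, and only when XDP_TX
+ * actually queued frames; pure redirects skip the kick entirely.
+ */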
+ if (xdp_xmit & VIRTIO_XDP_TX) {
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
smp_processor_id();
sq = &vi->sq[qp];
virtqueue_kick(sq->vq);
- xdp_do_flush_map();
}
return received;
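
For reference, the poll path with all of the hunks above applied reads roughly as the condensed sketch below. The function signature and the container_of() lookup of rq follow the upstream driver and are not part of this diff; error handling is elided:

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received, qp;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	/* Flush redirect maps only if something was redirected. */
	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush_map();

	/* Kick the XDP TX queue only if XDP_TX queued frames. */
	if (xdp_xmit & VIRTIO_XDP_TX) {
		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
		     smp_processor_id();
		sq = &vi->sq[qp];
		virtqueue_kick(sq->vq);
	}

	return received;
}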