u64 xdp_redirect;
u64 xdp_drops;
u64 xdp_tx;
+ u64 xdp_tx_err;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
};
struct veth_rq_stats {
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
- { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
{ "rx_drops", VETH_RQ_STAT(rx_drops) },
+ { "rx_xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
+ { "rx_xdp_drops", VETH_RQ_STAT(xdp_drops) },
+ { "rx_xdp_tx", VETH_RQ_STAT(xdp_tx) },
+ { "rx_xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
+ { "tx_xdp_xmit", VETH_RQ_STAT(xdp_xmit) },
+ { "tx_xdp_xmit_errors", VETH_RQ_STAT(xdp_xmit_err) },
};
#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
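
For context, VETH_RQ_STAT() maps each of the names above onto a counter in the per-queue stats block, which veth_rq_stats embeds as its vs member (see the rq->stats.vs updates further down). A rough sketch of the resulting layout, reconstructed from the hunks in this patch; the inner struct name and the exact field order are assumptions, not part of the diff:

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	xdp_xmit;
	u64	xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;	/* the counters listed above */
	struct u64_stats_sync	syncp;	/* paired with u64_stats_update_begin/end() below */
};
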
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.11s",
+ "rx_queue_%u_%.18s",
i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN;
}
}
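
The precision bump from %.11s to %.18s is needed because the longest new descriptor, "tx_xdp_xmit_errors", is 18 characters, while the old limit only covered names like "xdp_packets" (11 characters) and would silently truncate the new ones in ethtool -S output. A minimal userspace sketch (illustration only, not kernel code) that prints the resulting per-queue names, assuming ETH_GSTRING_LEN is 32 as in <linux/ethtool.h>:

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN	32	/* same limit the kernel applies per string */

int main(void)
{
	static const char * const desc[] = {
		"xdp_packets", "xdp_bytes", "rx_drops",
		"rx_xdp_redirect", "rx_xdp_drops", "rx_xdp_tx",
		"rx_xdp_tx_errors", "tx_xdp_xmit", "tx_xdp_xmit_errors",
	};
	char buf[ETH_GSTRING_LEN];
	unsigned int i;

	for (i = 0; i < sizeof(desc) / sizeof(desc[0]); i++) {
		/* mirrors the veth_get_strings() format for rx queue 0 */
		snprintf(buf, ETH_GSTRING_LEN, "rx_queue_%u_%.18s", 0u, desc[i]);
		printf("%s (%zu chars)\n", buf, strlen(buf));
	}
	return 0;
}
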
static int veth_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
+ struct xdp_frame **frames,
+ u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ unsigned int qidx, max_len;
struct net_device *rcv;
int i, ret, drops = n;
- unsigned int max_len;
struct veth_rq *rq;
rcu_read_lock();
}
rcv_priv = netdev_priv(rcv);
- rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ qidx = veth_select_rxq(rcv);
+ rq = &rcv_priv->rq[qidx];
/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
+ rq = &priv->rq[qidx];
+ u64_stats_update_begin(&rq->stats.syncp);
+ if (ndo_xmit) {
+ rq->stats.vs.xdp_xmit += n - drops;
+ rq->stats.vs.xdp_xmit_err += drops;
+ } else {
+ rq->stats.vs.xdp_tx += n - drops;
+ rq->stats.vs.xdp_tx_err += drops;
+ }
+ u64_stats_update_end(&rq->stats.syncp);
+
if (likely(!drops)) {
rcu_read_unlock();
return n;
return ret;
}
+static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ return veth_xdp_xmit(dev, n, frames, flags, true);
+}
+
static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
{
int sent, i, err = 0;
- sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
+ sent = veth_xdp_xmit(dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
}
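
The new bool makes the two callers of veth_xdp_xmit() explicit about which counter pair they charge: veth_ndo_xdp_xmit() passes true for frames redirected in from another device (xdp_xmit/xdp_xmit_err), while the XDP_TX bulk flush in veth_xdp_flush_bq() passes false (xdp_tx/xdp_tx_err). A self-contained userspace sketch of that split, mirroring the if (ndo_xmit) branch added above (the struct and helper here are made up for illustration):

#include <stdio.h>

/* stand-in for the per-queue counters touched in veth_xdp_xmit() */
struct q_counters {
	unsigned long long xdp_tx, xdp_tx_err;
	unsigned long long xdp_xmit, xdp_xmit_err;
};

static void account_xmit(struct q_counters *c, int n, int drops, int ndo_xmit)
{
	if (ndo_xmit) {		/* .ndo_xdp_xmit: redirected from another device */
		c->xdp_xmit += n - drops;
		c->xdp_xmit_err += drops;
	} else {		/* XDP_TX flushed from veth's own rx path */
		c->xdp_tx += n - drops;
		c->xdp_tx_err += drops;
	}
}

int main(void)
{
	struct q_counters c = { 0 };

	account_xmit(&c, 8, 2, 1);	/* as from veth_ndo_xdp_xmit() */
	account_xmit(&c, 4, 0, 0);	/* as from veth_xdp_flush_bq() */
	printf("xdp_tx=%llu xdp_tx_err=%llu xdp_xmit=%llu xdp_xmit_err=%llu\n",
	       c.xdp_tx, c.xdp_tx_err, c.xdp_xmit, c.xdp_xmit_err);
	return 0;
}
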
u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.xdp_redirect += stats->xdp_redirect;
rq->stats.vs.xdp_bytes += stats->xdp_bytes;
rq->stats.vs.xdp_drops += stats->xdp_drops;
rq->stats.vs.rx_drops += stats->rx_drops;
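
The accumulation above, including the new xdp_redirect line, stays inside the existing u64_stats_update_begin()/u64_stats_update_end() section, so the ethtool read side can take a consistent snapshot of the 64-bit counters even on 32-bit hosts. A reader-side fragment in the usual kernel pattern, shown only for illustration (not part of this patch; some kernel versions use the _irq variants of these helpers):

	unsigned int start;
	u64 xdp_tx, xdp_tx_err;

	do {
		start = u64_stats_fetch_begin(&rq->stats.syncp);
		xdp_tx = rq->stats.vs.xdp_tx;
		xdp_tx_err = rq->stats.vs.xdp_tx_err;
	} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
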
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
- .ndo_xdp_xmit = veth_xdp_xmit,
+ .ndo_xdp_xmit = veth_ndo_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
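
With .ndo_xdp_xmit now pointing at veth_ndo_xdp_xmit(), the tx_xdp_xmit/tx_xdp_xmit_errors counters are exercised whenever an XDP program on another device redirects frames into the veth, while rx_xdp_tx reflects XDP_TX returned by the program attached to the veth itself. A minimal XDP program that drives the redirect path, for illustration only (the ifindex is a placeholder; build with clang -target bpf against libbpf headers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int redirect_into_veth(struct xdp_md *ctx)
{
	/* 5 is a placeholder ifindex for the veth under test */
	return bpf_redirect(5, 0);
}

char _license[] SEC("license") = "GPL";

As the comment quoted in veth_xdp_xmit() notes, the veth peer must have an XDP program loaded and be up, since that is what guarantees its xdp_ring exists to receive these frames.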