From 03895e63ff97447e5eab1b71ed46e3662a45bfe8 Mon Sep 17 00:00:00 2001
From: Kevin Laatz
Date: Tue, 27 Aug 2019 02:25:29 +0000
Subject: [PATCH] samples/bpf: add buffer recycling for unaligned chunks to
 xdpsock

This patch adds buffer recycling support for unaligned buffers. Since we
don't mask the addr to 2k at umem_reg in unaligned mode, we need to make
sure we give back the correct (original) addr to the fill queue. We achieve
this using the new descriptor format and associated masks. The new format
uses the upper 16 bits for the offset and the lower 48 bits for the addr.
Since we have a field for the offset, we no longer need to modify the
actual address. As such, all we have to do to get back the original address
is mask for the lower 48 bits (i.e. strip the offset and we get the address
on its own).

Signed-off-by: Kevin Laatz
Signed-off-by: Bruce Richardson
Acked-by: Jonathan Lemon
Signed-off-by: Daniel Borkmann
---
 samples/bpf/xdpsock_user.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 7312eab4d201..dc3d50f8ed86 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -484,6 +484,7 @@ static void kick_tx(struct xsk_socket_info *xsk)
 static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
 				     struct pollfd *fds)
 {
+	struct xsk_umem_info *umem = xsk->umem;
 	u32 idx_cq = 0, idx_fq = 0;
 	unsigned int rcvd;
 	size_t ndescs;
@@ -498,24 +499,23 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
 		xsk->outstanding_tx;
 
 	/* re-add completed Tx buffers */
-	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq);
+	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
 	if (rcvd > 0) {
 		unsigned int i;
 		int ret;
 
-		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
+		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 		while (ret != rcvd) {
 			if (ret < 0)
 				exit_with_error(-ret);
-			if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
+			if (xsk_ring_prod__needs_wakeup(&umem->fq))
 				ret = poll(fds, num_socks, opt_timeout);
-			ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd,
-						     &idx_fq);
+			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 		}
+
 		for (i = 0; i < rcvd; i++)
-			*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
-				*xsk_ring_cons__comp_addr(&xsk->umem->cq,
-							  idx_cq++);
+			*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
+				*xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);
 
 		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
@@ -568,10 +568,13 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
 	for (i = 0; i < rcvd; i++) {
 		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
 		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
+		u64 orig = xsk_umem__extract_addr(addr);
+
+		addr = xsk_umem__add_offset_to_addr(addr);
 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
 
 		hex_dump(pkt, len, addr);
-		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr;
+		*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
 	}
 
 	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
@@ -680,12 +683,15 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
 	for (i = 0; i < rcvd; i++) {
 		u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
 		u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
+		u64 orig = xsk_umem__extract_addr(addr);
+
+		addr = xsk_umem__add_offset_to_addr(addr);
 		char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
 
 		swap_mac_addresses(pkt);
 
 		hex_dump(pkt, len, addr);
-		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr;
+		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
 		xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;
 	}
 
-- 
2.30.2
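
Note: for readers unfamiliar with the unaligned-chunk descriptor layout the
commit message describes, the address arithmetic can be shown standalone.
The sketch below mirrors the semantics of libbpf's xsk_umem__extract_addr()
and xsk_umem__add_offset_to_addr() helpers used in the patch; the local
macro/function names and the base/offset values in main() are made up for
illustration and are not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Unaligned-chunk descriptor layout: the lower 48 bits carry the
 * original chunk address, the upper 16 bits carry the offset of the
 * packet data within that chunk.
 */
#define UNALIGNED_BUF_OFFSET_SHIFT 48
#define UNALIGNED_BUF_ADDR_MASK \
	((1ULL << UNALIGNED_BUF_OFFSET_SHIFT) - 1)

/* Strip the offset field: this is the address recycled to the fill queue. */
static uint64_t extract_addr(uint64_t addr)
{
	return addr & UNALIGNED_BUF_ADDR_MASK;
}

/* Recover the offset stored in the upper 16 bits. */
static uint64_t extract_offset(uint64_t addr)
{
	return addr >> UNALIGNED_BUF_OFFSET_SHIFT;
}

/* Fold the offset back in: this is where the packet data starts. */
static uint64_t add_offset_to_addr(uint64_t addr)
{
	return extract_addr(addr) + extract_offset(addr);
}

int main(void)
{
	uint64_t base = 0x12345678ULL;	/* hypothetical chunk address */
	uint64_t off = 256;		/* hypothetical data offset */
	uint64_t desc = (off << UNALIGNED_BUF_OFFSET_SHIFT) | base;

	assert(extract_addr(desc) == base);		/* goes back to FQ */
	assert(add_offset_to_addr(desc) == base + off);	/* packet data */
	printf("desc=0x%016llx orig=0x%llx data=0x%llx\n",
	       (unsigned long long)desc,
	       (unsigned long long)extract_addr(desc),
	       (unsigned long long)add_offset_to_addr(desc));
	return 0;
}

This also shows why the patch recycles 'orig' rather than 'addr': the fill
queue must see the original chunk address with the offset field stripped,
since in unaligned mode the address is no longer masked to a 2k boundary
at umem_reg time.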