offset += metalen;
addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
- err = xskq_produce_batch_desc(xs->rx, addr, len);
+ err = xskq_prod_reserve_desc(xs->rx, addr, len);
if (!err) {
xskq_discard_addr(xs->umem->fq);
xdp_return_buff(xdp);

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
+ int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);
if (err)
xs->rx_dropped++;

static void xsk_flush(struct xdp_sock *xs)
{
- xskq_produce_flush_desc(xs->rx);
+ xskq_prod_submit(xs->rx);
xs->sk.sk_data_ready(&xs->sk);
}
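
Taken together, the Rx hunks make the two-phase contract explicit: xskq_prod_reserve_desc() only stages a descriptor, and nothing becomes visible to user space until xskq_prod_submit() publishes the batch and sk_data_ready() wakes the socket. Below is a minimal userspace sketch of that producer protocol; it is illustrative only, with made-up names, and C11 release/acquire atomics standing in for the kernel's smp_wmb()/READ_ONCE()/WRITE_ONCE() discipline.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES  8			/* power of two, so idx & mask wraps */
#define RING_MASK (NENTRIES - 1)

struct desc { uint64_t addr; uint32_t len; };

struct ring {
	_Atomic uint32_t producer;	/* shared with the consumer */
	_Atomic uint32_t consumer;	/* shared with the consumer */
	uint32_t cached_prod;		/* producer-private staging index */
	struct desc desc[NENTRIES];
};

/* Like xskq_prod_reserve_desc(): stage one descriptor. The consumer
 * cannot see it yet, because only cached_prod moves. */
static int prod_reserve_desc(struct ring *q, uint64_t addr, uint32_t len)
{
	uint32_t cons = atomic_load_explicit(&q->consumer,
					     memory_order_acquire);

	if (q->cached_prod - cons == NENTRIES)
		return -1;				/* -ENOSPC */

	q->desc[q->cached_prod++ & RING_MASK] =
		(struct desc){ .addr = addr, .len = len };
	return 0;
}

/* Like xskq_prod_submit(): publish everything staged so far. The
 * release store orders the desc[] writes before the index update,
 * which is the job smp_wmb() does in __xskq_prod_submit(). */
static void prod_submit(struct ring *q)
{
	atomic_store_explicit(&q->producer, q->cached_prod,
			      memory_order_release);
}

int main(void)
{
	struct ring rx = {0};

	for (uint64_t i = 0; i < 4; i++)	/* one Rx batch */
		if (prod_reserve_desc(&rx, i * 2048, 1500))
			break;
	prod_submit(&rx);	/* one pointer update for the whole batch */
	printf("published %u descriptors\n", atomic_load(&rx.producer));
	return 0;
}

Splitting reserve from submit means the shared producer cache line is dirtied once per batch rather than once per packet, and xsk_flush() wakes the socket once per batch to match.
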
memcpy(buffer, xdp->data_meta, len + metalen);
addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
- err = xskq_produce_batch_desc(xs->rx, addr, len);
+ err = xskq_prod_reserve_desc(xs->rx, addr, len);
if (err)
goto out_drop;
xskq_discard_addr(xs->umem->fq);
- xskq_produce_flush_desc(xs->rx);
+ xskq_prod_submit(xs->rx);
spin_unlock_bh(&xs->rx_lock);

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
- xskq_produce_flush_addr_n(umem->cq, nb_entries);
+ xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

if (!xskq_peek_desc(xs->tx, desc, umem))
continue;
- if (xskq_produce_addr_lazy(umem->cq, desc->addr))
+ if (xskq_prod_reserve_addr(umem->cq, desc->addr))
goto out;
xskq_discard_desc(xs->tx);
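
The zero-copy Tx pair stretches the two phases across interrupts: xskq_prod_reserve_addr() writes the completion address into a staged slot when the descriptor is handed to the driver, and xsk_umem_complete_tx() publishes with xskq_prod_submit_n() only after the hardware reports the frames sent. Because __xskq_prod_submit() is fed q->ring->producer + nb_entries rather than cached_prod, entries that are staged but not yet completed stay private. A compressed userspace sketch of that pairing, again with illustrative names and C11 atomics in place of the kernel barriers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES  8
#define RING_MASK (NENTRIES - 1)

struct cq {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint32_t cached_prod;		/* staged, not yet visible */
	uint64_t addr[NENTRIES];
};

/* Send time: stage the completion address (like xskq_prod_reserve_addr). */
static int cq_reserve_addr(struct cq *q, uint64_t addr)
{
	uint32_t cons = atomic_load_explicit(&q->consumer,
					     memory_order_acquire);

	if (q->cached_prod - cons == NENTRIES)
		return -1;				/* -ENOSPC */
	q->addr[q->cached_prod++ & RING_MASK] = addr;
	return 0;
}

/* Completion time: publish only what the NIC has actually finished
 * (like xskq_prod_submit_n), advancing from the published index. */
static void cq_submit_n(struct cq *q, uint32_t nb_entries)
{
	uint32_t prod = atomic_load_explicit(&q->producer,
					     memory_order_relaxed);

	atomic_store_explicit(&q->producer, prod + nb_entries,
			      memory_order_release);
}

int main(void)
{
	struct cq q = {0};

	cq_reserve_addr(&q, 0x1000);	/* three frames queued to the NIC */
	cq_reserve_addr(&q, 0x2000);
	cq_reserve_addr(&q, 0x3000);
	cq_submit_n(&q, 2);		/* only two completed so far */
	printf("visible completions: %u\n", atomic_load(&q.producer));
	return 0;
}
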
unsigned long flags;
spin_lock_irqsave(&xs->tx_completion_lock, flags);
- WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
+ xskq_prod_submit_addr(xs->umem->cq, addr);
spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
sock_wfree(skb);

addr = desc.addr;
buffer = xdp_umem_get_data(xs->umem, addr);
err = skb_store_bits(skb, 0, buffer, len);
- if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
+ if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
kfree_skb(skb);
goto out;
}
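
This copy-mode Tx pairing is why the WARN_ON_ONCE() disappears from the destructor above: xsk_generic_xmit() reserves a completion slot with xskq_prod_reserve() before the skb is queued, so by the time xsk_destruct_skb() runs, xskq_prod_submit_addr() merely fills the already-claimed slot at the published producer index and bumps it by one; no failure case is left to warn about. A small userspace model of that reserve-early/fill-late shape (illustrative names, C11 atomics for the kernel barriers):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES  8
#define RING_MASK (NENTRIES - 1)

struct cq {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint32_t cached_prod;
	uint64_t addr[NENTRIES];
};

/* sendmsg() time: claim a slot, write no data (like xskq_prod_reserve). */
static int cq_reserve(struct cq *q)
{
	uint32_t cons = atomic_load_explicit(&q->consumer,
					     memory_order_acquire);

	if (q->cached_prod - cons == NENTRIES)
		return -1;	/* -ENOSPC: caller frees the skb */
	q->cached_prod++;
	return 0;
}

/* Destructor time: fill the claimed slot at the *published* index and
 * publish exactly one entry (like xskq_prod_submit_addr). Cannot fail. */
static void cq_submit_addr(struct cq *q, uint64_t addr)
{
	uint32_t idx = atomic_load_explicit(&q->producer,
					    memory_order_relaxed);

	q->addr[idx & RING_MASK] = addr;
	atomic_store_explicit(&q->producer, idx + 1, memory_order_release);
}

int main(void)
{
	struct cq q = {0};

	if (cq_reserve(&q) == 0)		/* while building the skb */
		cq_submit_addr(&q, 0x4000);	/* when the skb is freed */
	printf("producer=%u\n", atomic_load(&q.producer));
	return 0;
}

The kernel still takes tx_completion_lock around the submit, since several skb destructors can complete concurrently against one completion ring.
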
__xsk_sendmsg(sk);
}

- if (xs->rx && !xskq_empty_desc(xs->rx))
+ if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && !xskq_full_desc(xs->tx))
mask |= EPOLLOUT | EPOLLWRNORM;

q->cons_tail++;
}

-static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
+static inline int xskq_prod_reserve(struct xsk_queue *q)
{
- struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
- unsigned int idx = q->ring->producer;
+ if (xskq_nb_free(q, 1) == 0)
+ return -ENOSPC;
/* A, matches D */
- ring->desc[idx++ & q->ring_mask] = addr;
-
- /* Order producer and data */
- smp_wmb(); /* B, matches C */
-
- WRITE_ONCE(q->ring->producer, idx);
+ q->cached_prod++;
return 0;
}

-static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
+static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
return 0;
}

-static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
- u32 nb_entries)
+static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
/* Order producer and data */
smp_wmb(); /* B, matches C */
- WRITE_ONCE(q->ring->producer, q->ring->producer + nb_entries);
+ WRITE_ONCE(q->ring->producer, idx);
}

-static inline int xskq_reserve_addr(struct xsk_queue *q)
+static inline void xskq_prod_submit(struct xsk_queue *q)
{
- if (xskq_nb_free(q, 1) == 0)
- return -ENOSPC;
+ __xskq_prod_submit(q, q->cached_prod);
+}
- /* A, matches D */
- q->cached_prod++;
- return 0;
+static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+ u32 idx = q->ring->producer;
+
+ ring->desc[idx++ & q->ring_mask] = addr;
+
+ __xskq_prod_submit(q, idx);
+}
+
+static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
+{
+ __xskq_prod_submit(q, q->ring->producer + nb_entries);
}
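
For reference, the letters in these comments pair up producer and consumer barriers: the consumer's smp_rmb() after loading the producer index (C) matches the smp_wmb() before publishing (B), and the consumer's barrier before advancing its own index (D) matches the producer's free-space check (A). Here is the consumer half modeled in userspace C11 (illustrative; this is neither the kernel's nor libbpf's actual consumer code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES  8
#define RING_MASK (NENTRIES - 1)

struct ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint64_t desc[NENTRIES];
};

/* C, matches B: acquire the producer index before loading any data. */
static int cons_peek(struct ring *q, uint64_t *addr)
{
	uint32_t cons = atomic_load_explicit(&q->consumer,
					     memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(&q->producer,
					     memory_order_acquire);

	if (cons == prod)
		return 0;			/* ring empty */
	*addr = q->desc[cons & RING_MASK];	/* ordered after the acquire */
	return 1;
}

/* D, matches A: release the slot only after the data load is done, so
 * the producer's read of ->consumer licenses it to reuse the slot. */
static void cons_release(struct ring *q)
{
	uint32_t cons = atomic_load_explicit(&q->consumer,
					     memory_order_relaxed);

	atomic_store_explicit(&q->consumer, cons + 1, memory_order_release);
}

int main(void)
{
	struct ring q = { .desc = { 0x1000 } };
	uint64_t addr;

	atomic_store(&q.producer, 1);	/* pretend one entry was submitted */
	if (cons_peek(&q, &addr)) {
		printf("got addr 0x%llx\n", (unsigned long long)addr);
		cons_release(&q);
	}
	return 0;
}
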
/* Rx/Tx queue */

q->cons_tail++;
}

-static inline int xskq_produce_batch_desc(struct xsk_queue *q,
- u64 addr, u32 len)
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+ u64 addr, u32 len)
{
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
- unsigned int idx;
+ u32 idx;
if (xskq_nb_free(q, 1) == 0)
return -ENOSPC;
return 0;
}

-static inline void xskq_produce_flush_desc(struct xsk_queue *q)
-{
- /* Order producer and data */
- smp_wmb(); /* B, matches C */
-
- WRITE_ONCE(q->ring->producer, q->cached_prod);
-}
-

static inline bool xskq_full_desc(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
       q->nentries;
}

-static inline bool xskq_empty_desc(struct xsk_queue *q)
+static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
/* No barriers needed since data is not accessed */
return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
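
These readiness checks compare the two indices without dereferencing any ring data, which is why no barrier pairing is needed; a stale value only makes poll() report readiness one round late. The same idea in userspace (illustrative; relaxed atomic loads model READ_ONCE()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8

struct ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
};

/* Like xskq_prod_is_empty(): nothing for the consumer to read? */
static bool prod_is_empty(struct ring *q)
{
	return atomic_load_explicit(&q->consumer, memory_order_relaxed) ==
	       atomic_load_explicit(&q->producer, memory_order_relaxed);
}

/* Like xskq_full_desc(): every slot holds an unconsumed descriptor? */
static bool full_desc(struct ring *q)
{
	return atomic_load_explicit(&q->producer, memory_order_relaxed) -
	       atomic_load_explicit(&q->consumer, memory_order_relaxed) ==
	       NENTRIES;
}

int main(void)
{
	struct ring q = {0};

	printf("empty=%d full=%d\n", prod_is_empty(&q), full_desc(&q));
	return 0;
}
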