static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
kfree(sq->db.xdp.di);
- kfree(sq->db.xdp.wqe_info);
}
static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
GFP_KERNEL, numa);
- sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
- if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+ if (!sq->db.xdp.di) {
mlx5e_free_sq_xdp_db(sq);
return -ENOMEM;
}
case MLX5E_SQ_ICO:
return MLX5E_ICOSQ_MAX_WQEBBS;
case MLX5E_SQ_XDP:
- return MLX5E_XDP_TX_WQEBBS;
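+ /* XDP TX WQEs are built to fit in a single WQEBB */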
+ return 1;
}
return MLX5_SEND_WQE_MAX_WQEBBS;
}
MLX5E_MAX_NUM_CHANNELS);
}
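+/* Open an XDP TX SQ and pre-initialize the WQE fields that are constant for every packet */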
+static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ unsigned int inline_hdr_sz = 0;
+ int err;
+ int i;
+
+ err = mlx5e_open_sq(c, 0, param, sq);
+ if (err)
+ return err;
+
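+ /* a min-inline header enlarges the eth segment and consumes one extra DS slot */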
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ ds_cnt++;
+ }
+
+ /* Pre-initialize fixed WQE fields */
+ for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
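+ /* ds count, inline header size and the data segment lkey never change per packet, so set them once here */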
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+ dseg->lkey = sq->mkey_be;
+ }
+ return 0;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
}
}
- err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
+ err = c->xdp ? mlx5e_open_xdpsq(c, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
if (err)
goto err_close_sqs;
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *wqe;
- u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+ u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
- u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
dma_addr_t dma_addr = di->addr + data_offset;
unsigned int dma_len = xdp->data_end - xdp->data;
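+ /* prefetch the WQE for write before filling its segments */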
+ prefetchw(wqe);
+
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
rq->stats.xdp_drop++;
return false;
}
- if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+ if (unlikely(!mlx5e_sq_has_room_for(sq, 1))) {
if (sq->db.xdp.doorbell) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
return false;
}
- dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
- memset(wqe, 0, sizeof(*wqe));
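+ /* the ctrl/eth segments were pre-initialized at SQ open time; only the completion flags need clearing per packet */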
+ cseg->fm_ce_se = 0;
dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
-
- ds_cnt += MLX5E_XDP_IHS_DS_COUNT;
dseg++;
}
/* write the dma part */
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
- dseg->lkey = sq->mkey_be;
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq->db.xdp.di[pi] = *di;
- wi->opcode = MLX5_OPCODE_SEND;
- wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
- sq->pc += MLX5E_XDP_TX_WQEBBS;
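+ /* one WQEBB per XDP TX WQE, so the producer counter advances by one and no per-WQE info is stored */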
+ sq->pc++;
sq->db.xdp.doorbell = true;
rq->stats.xdp_tx++;
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct mlx5e_sq_wqe_info *wi;
struct mlx5e_dma_info *di;
u16 ci;
ci = sqcc & sq->wq.sz_m1;
di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
-
- if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
- sqcc++;
- continue;
- }
- sqcc += wi->num_wqebbs;
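+ /* one WQEBB per XDP WQE and no NOP padding, so the consumer counter simply advances by one */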
+ sqcc++;
/* Recycle RX page */
mlx5e_page_release(rq, di, true);
} while (!last_wqe);
void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq)
{
struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
- struct mlx5e_sq_wqe_info *wi;
struct mlx5e_dma_info *di;
u16 ci;
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
di = &sq->db.xdp.di[ci];
- wi = &sq->db.xdp.wqe_info[ci];
-
- if (wi->opcode == MLX5_OPCODE_NOP) {
- sq->cc++;
- continue;
- }
-
- sq->cc += wi->num_wqebbs;
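+ /* likewise, one WQEBB per WQE */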
+ sq->cc++;
mlx5e_page_release(rq, di, false);
}