From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Wed, 27 Jul 2022 23:20:51 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support
Add the capability to map non-linear xdp frames in XDP_TX and
ndo_xdp_xmit callback.
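
For reference, a non-linear xdp_frame keeps its tail fragments in the
skb_shared_info area of the frame, so the driver maps the head buffer
first and then each fragment, marking only the last buffer as the end
of the packet. A minimal sketch of that walk (illustrative only, not
part of the diff; the helpers are the in-tree ones from
include/net/xdp.h and include/linux/skbuff.h):

	/* Illustrative sketch: walk every buffer of a (possibly
	 * multi-frag) struct xdp_frame *xdpf. Tail fragments live in
	 * the skb_shared_info area of the frame.
	 */
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u32 i, nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	void *data = xdpf->data;	/* head buffer */
	unsigned int len = xdpf->len;

	for (i = 0; i <= nr_frags; i++) {
		/* map 'data'/'len' into one tx descriptor here; only
		 * the descriptor for the last buffer carries the
		 * "last segment" bit
		 */
		if (i == nr_frags)
			break;
		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
	}
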
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
 1 file changed, 82 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -988,23 +988,22 @@ static void mtk_tx_unmap(struct mtk_eth
 	}
 
 	tx_buf->flags = 0;
-	if (tx_buf->type == MTK_TYPE_SKB) {
-		if (tx_buf->data &&
-		    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+	if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+		if (tx_buf->type == MTK_TYPE_SKB) {
 			struct sk_buff *skb = tx_buf->data;
 
 			if (napi)
 				napi_consume_skb(skb, napi);
 			else
 				dev_kfree_skb_any(skb);
-		}
-	} else if (tx_buf->data) {
-		struct xdp_frame *xdpf = tx_buf->data;
+		} else {
+			struct xdp_frame *xdpf = tx_buf->data;
 
-		if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
-			xdp_return_frame_rx_napi(xdpf);
-		else
-			xdp_return_frame(xdpf);
+			if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
+		}
 	}
 	tx_buf->data = NULL;
 }
@@ -1507,6 +1506,8 @@ static int mtk_xdp_frame_map(struct mtk_
 	mtk_tx_set_dma_desc(dev, txd, txd_info);
 
 	tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+	tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
 
 	txd_pdma = qdma_to_pdma(ring, txd);
 	setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
@@ -1518,43 +1519,69 @@ static int mtk_xdp_frame_map(struct mtk_
 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
 				struct net_device *dev, bool dma_map)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
 	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct mtk_tx_dma_desc_info txd_info = {
 		.size	= xdpf->len,
 		.first	= true,
-		.last	= true,
+		.last	= !xdp_frame_has_frags(xdpf),
 	};
-	int err = 0, index = 0, n_desc = 1;
-	struct mtk_tx_dma *txd, *txd_pdma;
-	struct mtk_tx_buf *tx_buf;
+	int err, index = 0, n_desc = 1, nr_frags;
+	struct mtk_tx_dma *htxd, *txd, *txd_pdma;
+	struct mtk_tx_buf *htx_buf, *tx_buf;
+	void *data = xdpf->data;
 
 	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 		return -EBUSY;
 
-	if (unlikely(atomic_read(&ring->free_count) <= 1))
+	nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
 		return -EBUSY;
 
 	spin_lock(&eth->page_lock);
 
 	txd = ring->next_free;
 	if (txd == ring->last_free) {
-		err = -ENOMEM;
-		goto out;
+		spin_unlock(&eth->page_lock);
+		return -ENOMEM;
 	}
+	htxd = txd;
 
 	tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
 	memset(tx_buf, 0, sizeof(*tx_buf));
+	htx_buf = tx_buf;
 
-	err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
-				xdpf->data, xdpf->headroom, index,
-				dma_map);
-	if (err < 0)
-		goto out;
+	for (;;) {
+		err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+					data, xdpf->headroom, index, dma_map);
+		if (err < 0)
+			goto unmap;
+
+		if (txd_info.last)
+			break;
+
+		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+			txd_pdma = qdma_to_pdma(ring, txd);
+			if (txd == ring->last_free)
+				goto unmap;
+
+			tx_buf = mtk_desc_to_tx_buf(ring, txd,
+						    soc->txrx.txd_size);
+			memset(tx_buf, 0, sizeof(*tx_buf));
+			n_desc++;
+		}
+
+		memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+		txd_info.size = skb_frag_size(&sinfo->frags[index]);
+		txd_info.last = index + 1 == nr_frags;
+		data = skb_frag_address(&sinfo->frags[index]);
+		index++;
+	}
 
 	/* store xdpf for cleanup */
-	tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
-	tx_buf->data = xdpf;
+	htx_buf->data = xdpf;
 
 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		txd_pdma = qdma_to_pdma(ring, txd);
@@ -1581,7 +1608,24 @@ static int mtk_xdp_submit_frame(struct m
 		mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
 			MT7628_TX_CTX_IDX0);
 	}
-out:
+
+	spin_unlock(&eth->page_lock);
+
+	return 0;
+
+unmap:
+	while (htxd != txd) {
+		txd_pdma = qdma_to_pdma(ring, htxd);
+		tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+		mtk_tx_unmap(eth, tx_buf, false);
+
+		htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+			txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+		htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+	}
+
 	spin_unlock(&eth->page_lock);
 
 	return err;
@@ -1910,18 +1954,15 @@ static int mtk_poll_tx_qdma(struct mtk_e
 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
 			mac = 1;
 
-		if (tx_buf->type == MTK_TYPE_SKB &&
-		    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-			struct sk_buff *skb = tx_buf->data;
+		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+			if (tx_buf->type == MTK_TYPE_SKB) {
+				struct sk_buff *skb = tx_buf->data;
 
-			bytes[mac] += skb->len;
-			done[mac]++;
-			budget--;
-		} else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-			   tx_buf->type == MTK_TYPE_XDP_NDO) {
+				bytes[mac] += skb->len;
+				done[mac]++;
+			}
 			budget--;
 		}
-
 		mtk_tx_unmap(eth, tx_buf, true);
 
 		ring->last_free = desc;
@@ -1952,17 +1993,15 @@ static int mtk_poll_tx_pdma(struct mtk_e
 
 	while ((cpu != dma) && budget) {
 		tx_buf = &ring->buf[cpu];
-		if (tx_buf->type == MTK_TYPE_SKB &&
-		    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
-			struct sk_buff *skb = tx_buf->data;
-			bytes[0] += skb->len;
-			done[0]++;
-			budget--;
-		} else if (tx_buf->type == MTK_TYPE_XDP_TX ||
-			   tx_buf->type == MTK_TYPE_XDP_NDO) {
+		if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+			if (tx_buf->type == MTK_TYPE_SKB) {
+				struct sk_buff *skb = tx_buf->data;
+
+				bytes[0] += skb->len;
+				done[0]++;
+			}
 			budget--;
 		}
-
 		mtk_tx_unmap(eth, tx_buf, true);
 
 		desc = ring->dma + cpu * eth->soc->txrx.txd_size;