1 From: Lorenzo Bianconi <lorenzo@kernel.org>
2 Date: Fri, 20 May 2022 20:11:30 +0200
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on txd_size in
4  mtk_desc_to_tx_buf
6 This is a preliminary patch to add mt7986 ethernet support.
8 Tested-by: Sam Shih <sam.shih@mediatek.com>
9 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
10 Signed-off-by: David S. Miller <davem@davemloft.net>
13 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
14 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
15 @@ -891,10 +891,11 @@ static inline void *mtk_qdma_phys_to_vir
16 return ret + (desc - ring->phys);
19 -static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
20 - struct mtk_tx_dma *txd)
21 +static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
22 +					     struct mtk_tx_dma *txd,
23 +					     u32 txd_size)
24  {
25 - int idx = txd - ring->dma;
26 + int idx = ((void *)txd - (void *)ring->dma) / txd_size;
28 return &ring->buf[idx];
30 @@ -1016,6 +1017,7 @@ static int mtk_tx_map(struct sk_buff *sk
32 struct mtk_mac *mac = netdev_priv(dev);
33 struct mtk_eth *eth = mac->hw;
34 + const struct mtk_soc_data *soc = eth->soc;
35 struct mtk_tx_dma *itxd, *txd;
36 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
37 struct mtk_tx_buf *itx_buf, *tx_buf;
38 @@ -1027,7 +1029,7 @@ static int mtk_tx_map(struct sk_buff *sk
39 if (itxd == ring->last_free)
42 - itx_buf = mtk_desc_to_tx_buf(ring, itxd);
43 + itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
44 memset(itx_buf, 0, sizeof(*itx_buf));
46 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
47 @@ -1055,7 +1057,7 @@ static int mtk_tx_map(struct sk_buff *sk
51 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
52 + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
54 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
55 txd_pdma = qdma_to_pdma(ring, txd);
56 @@ -1079,7 +1081,8 @@ static int mtk_tx_map(struct sk_buff *sk
58 mtk_tx_set_dma_desc(dev, txd, &txd_info);
60 - tx_buf = mtk_desc_to_tx_buf(ring, txd);
61 + tx_buf = mtk_desc_to_tx_buf(ring, txd,
62 + soc->txrx.txd_size);
64 memset(tx_buf, 0, sizeof(*tx_buf));
65 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
66 @@ -1098,7 +1101,7 @@ static int mtk_tx_map(struct sk_buff *sk
67 /* store skb to cleanup */
70 - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
71 + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
73 txd_pdma->txd2 |= TX_DMA_LS0;
75 @@ -1116,7 +1119,7 @@ static int mtk_tx_map(struct sk_buff *sk
79 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
80 + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
81 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
83 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
84 @@ -1130,13 +1133,13 @@ static int mtk_tx_map(struct sk_buff *sk
88 - tx_buf = mtk_desc_to_tx_buf(ring, itxd);
89 + tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
92 mtk_tx_unmap(eth, tx_buf, false);
94 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
95 - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
96 + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
97 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
99 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
100 @@ -1450,7 +1453,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
101 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
104 - tx_buf = mtk_desc_to_tx_buf(ring, desc);
105 + tx_buf = mtk_desc_to_tx_buf(ring, desc,
106 + eth->soc->txrx.txd_size);
107 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)