1 From 5f6f3600a334398e27802de33a6a8726aacbe88c Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Wed, 31 Aug 2022 19:04:23 +0800
4 Subject: [PATCH 07/32] net: mediatek: stop using bitfields for DMA descriptors
6 This patch is a preparation for adding a new version of PDMA of which the
7 DMA descriptor fields have changed. Using bitfields will result in a complex
8 modification. Converting bitfields to u32 units can solve this problem easily.
10 Reviewed-by: Simon Glass <sjg@chromium.org>
11 Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
13 drivers/net/mtk_eth.c | 144 ++++++++++++++----------------------------
14 drivers/net/mtk_eth.h | 32 ++++++++++
15 2 files changed, 80 insertions(+), 96 deletions(-)
17 --- a/drivers/net/mtk_eth.c
18 +++ b/drivers/net/mtk_eth.c
20 (DP_DISCARD << MC_DP_S) | \
21 (DP_DISCARD << UN_DP_S))
23 -struct pdma_rxd_info1 {
27 -struct pdma_rxd_info2 {
36 -struct pdma_rxd_info3 {
40 -struct pdma_rxd_info4 {
54 - struct pdma_rxd_info1 rxd_info1;
55 - struct pdma_rxd_info2 rxd_info2;
56 - struct pdma_rxd_info3 rxd_info3;
57 - struct pdma_rxd_info4 rxd_info4;
60 -struct pdma_txd_info1 {
64 -struct pdma_txd_info2 {
73 -struct pdma_txd_info3 {
77 -struct pdma_txd_info4 {
88 - struct pdma_txd_info1 txd_info1;
89 - struct pdma_txd_info2 txd_info2;
90 - struct pdma_txd_info3 txd_info3;
91 - struct pdma_txd_info4 txd_info4;
97 @@ -151,13 +80,15 @@ enum mtk_switch {
105 struct mtk_eth_priv {
106 char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
108 - struct pdma_txdesc *tx_ring_noc;
109 - struct pdma_rxdesc *rx_ring_noc;
113 int rx_dma_owner_idx0;
114 int tx_cpu_owner_idx0;
115 @@ -1202,14 +1133,16 @@ static void mtk_mac_init(struct mtk_eth_
116 static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
118 char *pkt_base = priv->pkt_pool;
119 + struct mtk_tx_dma *txd;
120 + struct mtk_rx_dma *rxd;
123 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
126 - memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
127 - memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
128 - memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);
129 + memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
130 + memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
131 + memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
133 flush_dcache_range((ulong)pkt_base,
134 (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
135 @@ -1218,17 +1151,21 @@ static void mtk_eth_fifo_init(struct mtk
136 priv->tx_cpu_owner_idx0 = 0;
138 for (i = 0; i < NUM_TX_DESC; i++) {
139 - priv->tx_ring_noc[i].txd_info2.LS0 = 1;
140 - priv->tx_ring_noc[i].txd_info2.DDONE = 1;
141 - priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;
142 + txd = priv->tx_ring_noc + i * priv->soc->txd_size;
144 + txd->txd1 = virt_to_phys(pkt_base);
145 + txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
146 + txd->txd4 = PDMA_TXD4_FPORT_SET(priv->gmac_id + 1);
148 - priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
149 pkt_base += PKTSIZE_ALIGN;
152 for (i = 0; i < NUM_RX_DESC; i++) {
153 - priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
154 - priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
155 + rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
157 + rxd->rxd1 = virt_to_phys(pkt_base);
158 + rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
160 pkt_base += PKTSIZE_ALIGN;
163 @@ -1315,20 +1252,22 @@ static int mtk_eth_send(struct udevice *
165 struct mtk_eth_priv *priv = dev_get_priv(dev);
166 u32 idx = priv->tx_cpu_owner_idx0;
167 + struct mtk_tx_dma *txd;
170 - if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
171 + txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
173 + if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
174 debug("mtk-eth: TX DMA descriptor ring is full\n");
178 - pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
179 + pkt_base = (void *)phys_to_virt(txd->txd1);
180 memcpy(pkt_base, packet, length);
181 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
182 roundup(length, ARCH_DMA_MINALIGN));
184 - priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
185 - priv->tx_ring_noc[idx].txd_info2.DDONE = 0;
186 + txd->txd2 = PDMA_TXD2_LS0 | PDMA_TXD2_SDL0_SET(length);
188 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
189 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
190 @@ -1340,16 +1279,20 @@ static int mtk_eth_recv(struct udevice *
192 struct mtk_eth_priv *priv = dev_get_priv(dev);
193 u32 idx = priv->rx_dma_owner_idx0;
194 + struct mtk_rx_dma *rxd;
198 - if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
199 + rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
201 + if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
202 debug("mtk-eth: RX DMA descriptor ring is empty\n");
206 - length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
207 - pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
208 + length = PDMA_RXD2_PLEN0_GET(rxd->rxd2);
210 + pkt_base = (void *)phys_to_virt(rxd->rxd1);
211 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
212 roundup(length, ARCH_DMA_MINALIGN));
214 @@ -1363,10 +1306,11 @@ static int mtk_eth_free_pkt(struct udevi
216 struct mtk_eth_priv *priv = dev_get_priv(dev);
217 u32 idx = priv->rx_dma_owner_idx0;
218 + struct mtk_rx_dma *rxd;
220 + rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
222 - priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
223 - priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
224 - priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
225 + rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
227 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
228 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
229 @@ -1393,11 +1337,11 @@ static int mtk_eth_probe(struct udevice
232 /* Prepare for tx/rx rings */
233 - priv->tx_ring_noc = (struct pdma_txdesc *)
234 - noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
235 + priv->tx_ring_noc = (void *)
236 + noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
238 - priv->rx_ring_noc = (struct pdma_rxdesc *)
239 - noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
240 + priv->rx_ring_noc = (void *)
241 + noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
245 @@ -1554,18 +1498,26 @@ static int mtk_eth_of_to_plat(struct ude
247 static const struct mtk_soc_data mt7629_data = {
249 + .txd_size = sizeof(struct mtk_tx_dma),
250 + .rxd_size = sizeof(struct mtk_rx_dma),
253 static const struct mtk_soc_data mt7623_data = {
255 + .txd_size = sizeof(struct mtk_tx_dma),
256 + .rxd_size = sizeof(struct mtk_rx_dma),
259 static const struct mtk_soc_data mt7622_data = {
261 + .txd_size = sizeof(struct mtk_tx_dma),
262 + .rxd_size = sizeof(struct mtk_rx_dma),
265 static const struct mtk_soc_data mt7621_data = {
267 + .txd_size = sizeof(struct mtk_tx_dma),
268 + .rxd_size = sizeof(struct mtk_rx_dma),
271 static const struct udevice_id mtk_eth_ids[] = {
272 --- a/drivers/net/mtk_eth.h
273 +++ b/drivers/net/mtk_eth.h
277 #include <linux/bitops.h>
278 +#include <linux/bitfield.h>
280 enum mkt_eth_capabilities {
282 @@ -435,4 +436,35 @@ enum mkt_eth_capabilities {
283 #define PHY_POWER_SAVING_M 0x300
284 #define PHY_POWER_SAVING_TX 0x0
286 +/* PDMA descriptors */
292 +} __packed __aligned(4);
299 +} __packed __aligned(4);
301 +/* PDMA TXD fields */
302 +#define PDMA_TXD2_DDONE BIT(31)
303 +#define PDMA_TXD2_LS0 BIT(30)
304 +#define PDMA_TXD2_SDL0_M GENMASK(29, 16)
305 +#define PDMA_TXD2_SDL0_SET(_v) FIELD_PREP(PDMA_TXD2_SDL0_M, (_v))
307 +#define PDMA_TXD4_FPORT_M GENMASK(27, 25)
308 +#define PDMA_TXD4_FPORT_SET(_v) FIELD_PREP(PDMA_TXD4_FPORT_M, (_v))
310 +/* PDMA RXD fields */
311 +#define PDMA_RXD2_DDONE BIT(31)
312 +#define PDMA_RXD2_LS0 BIT(30)
313 +#define PDMA_RXD2_PLEN0_M GENMASK(29, 16)
314 +#define PDMA_RXD2_PLEN0_GET(_v) FIELD_GET(PDMA_RXD2_PLEN0_M, (_v))
315 +#define PDMA_RXD2_PLEN0_SET(_v) FIELD_PREP(PDMA_RXD2_PLEN0_M, (_v))
317 #endif /* _MTK_ETH_H_ */