From 25ce45fe40b574e5d7ffa407f7f2db03e7d5a910 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 22 Aug 2023 17:32:54 +0100
Subject: [PATCH 112/250] net: ethernet: mtk_eth_soc: add support for in-SoC
 SRAM

MT7981, MT7986 and MT7988 come with in-SoC SRAM dedicated to Ethernet
DMA rings. Support using the SRAM without breaking existing device tree
bindings, i.e. only new SoCs starting from MT7988 will have the SRAM
declared as an additional resource in the device tree. For MT7981 and
MT7986 an offset on top of the main I/O base is used.
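
As a rough illustration of that address selection (a stand-alone sketch,
not code from this driver; the helper name and the resource addresses
are made up for the example):

#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define MTK_ETH_SRAM_OFFSET	0x40000	/* constant introduced by this patch */

/* hypothetical helper mirroring the probe-time selection described above */
static uint64_t eth_sram_start(bool netsys_v3, uint64_t io_res_start,
			       uint64_t sram_res_start)
{
	if (netsys_v3)		/* MT7988: SRAM is its own DT resource */
		return sram_res_start;

	/* MT7981/MT7986: fixed offset on top of the main I/O base */
	return io_res_start + MTK_ETH_SRAM_OFFSET;
}

int main(void)
{
	/* example resource addresses, not taken from any real device tree */
	printf("MT7986-style SRAM start: 0x%" PRIx64 "\n",
	       eth_sram_start(false, 0x15100000, 0));
	printf("MT7988-style SRAM start: 0x%" PRIx64 "\n",
	       eth_sram_start(true, 0x15100000, 0x15400000));
	return 0;
}
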
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Link: https://lore.kernel.org/r/e45e0f230c63ad58869e8fe35b95a2fb8925b625.1692721443.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 88 ++++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 12 ++-
 2 files changed, 78 insertions(+), 22 deletions(-)
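
Note (not part of the patch, ignored by git-am): the hunks below carve
the SRAM up back to back -- the free-queue scratch ring sits at
sram_base, the TX ring directly after it, and each normal RX ring at a
further tx_ring_size * txd_size stride. The stand-alone sketch below
only replays that offset arithmetic; the base address, ring size and
descriptor size are assumed example values, not taken from this patch.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* assumed example values: I/O base + MTK_ETH_SRAM_OFFSET,
	 * a 2048-entry ring of 32-byte TX descriptors
	 */
	const uint64_t sram_base = 0x15100000 + 0x40000;
	const uint64_t ring_size = 2048;
	const uint64_t txd_size = 32;
	const uint64_t stride = ring_size * txd_size;
	int ring_no;

	printf("FQ scratch ring: 0x%" PRIx64 "\n", sram_base);
	printf("TX ring:         0x%" PRIx64 "\n", sram_base + stride);
	for (ring_no = 0; ring_no < 4; ring_no++)	/* normal RX rings */
		printf("RX ring %d:       0x%" PRIx64 "\n", ring_no,
		       sram_base + stride * (ring_no + 2));
	return 0;
}
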
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1075,10 +1075,13 @@ static int mtk_init_fq_dma(struct mtk_et
 	dma_addr_t dma_addr;
 	int i;
 
-	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
-					       cnt * soc->txrx.txd_size,
-					       &eth->phy_scratch_ring,
-					       GFP_KERNEL);
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+		eth->scratch_ring = eth->sram_base;
+	else
+		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+						       cnt * soc->txrx.txd_size,
+						       &eth->phy_scratch_ring,
+						       GFP_KERNEL);
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
@@ -2376,8 +2379,14 @@ static int mtk_tx_alloc(struct mtk_eth *
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
-				       &ring->phys, GFP_KERNEL);
+	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+		ring->dma = eth->sram_base + ring_size * sz;
+		ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+	} else {
+		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+					       &ring->phys, GFP_KERNEL);
+	}
+
 	if (!ring->dma)
 		goto no_tx_mem;
 
@@ -2476,8 +2485,7 @@ static void mtk_tx_clean(struct mtk_eth
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
-
-	if (ring->dma) {
+	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
 		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
@@ -2496,9 +2504,14 @@ static int mtk_rx_alloc(struct mtk_eth *
 {
 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
 	struct mtk_rx_ring *ring;
-	int rx_data_len, rx_dma_size;
+	int rx_data_len, rx_dma_size, tx_ring_size;
 	int i;
 
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		tx_ring_size = MTK_QDMA_RING_SIZE;
+	else
+		tx_ring_size = MTK_DMA_SIZE;
+
 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
 		if (ring_no)
 			return -EINVAL;
@@ -2533,9 +2546,20 @@ static int mtk_rx_alloc(struct mtk_eth *
 		ring->page_pool = pp;
 	}
 
-	ring->dma = dma_alloc_coherent(eth->dma_dev,
-				       rx_dma_size * eth->soc->txrx.rxd_size,
-				       &ring->phys, GFP_KERNEL);
+	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+	    rx_flag != MTK_RX_FLAGS_NORMAL) {
+		ring->dma = dma_alloc_coherent(eth->dma_dev,
+					       rx_dma_size * eth->soc->txrx.rxd_size,
+					       &ring->phys, GFP_KERNEL);
+	} else {
+		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+		ring->dma = tx_ring->dma + tx_ring_size *
+			    eth->soc->txrx.txd_size * (ring_no + 1);
+		ring->phys = tx_ring->phys + tx_ring_size *
+			     eth->soc->txrx.txd_size * (ring_no + 1);
+	}
+
 	if (!ring->dma)
 		return -ENOMEM;
@@ -2618,7 +2642,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 	return 0;
 }
 
-static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
 {
@@ -2641,7 +2665,7 @@ static void mtk_rx_clean(struct mtk_eth
 		ring->data = NULL;
 	}
 
-	if (ring->dma) {
+	if (!in_sram && ring->dma) {
 		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * eth->soc->txrx.rxd_size,
 				  ring->dma, ring->phys);
@@ -3001,7 +3025,7 @@ static void mtk_dma_free(struct mtk_eth
 	for (i = 0; i < MTK_MAX_DEVS; i++)
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
-	if (eth->scratch_ring) {
+	if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
 		dma_free_coherent(eth->dma_dev,
 				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
@@ -3009,13 +3033,13 @@ static void mtk_dma_free(struct mtk_eth
 		eth->phy_scratch_ring = 0;
 	}
 	mtk_tx_clean(eth);
-	mtk_rx_clean(eth, &eth->rx_ring[0]);
-	mtk_rx_clean(eth, &eth->rx_ring_qdma);
+	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
 
 	if (eth->hwlro) {
 		mtk_hwlro_rx_uninit(eth);
 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
-			mtk_rx_clean(eth, &eth->rx_ring[i]);
+			mtk_rx_clean(eth, &eth->rx_ring[i], false);
 	}
 
 	kfree(eth->scratch_head);
@@ -4585,7 +4609,7 @@ static int mtk_sgmii_init(struct mtk_eth
 
 static int mtk_probe(struct platform_device *pdev)
 {
-	struct resource *res = NULL;
+	struct resource *res = NULL, *res_sram;
 	struct device_node *mac_np;
 	struct mtk_eth *eth;
 	int err, i;
@@ -4605,6 +4629,20 @@ static int mtk_probe(struct platform_dev
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 		eth->ip_align = NET_IP_ALIGN;
 
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+		/* SRAM is actual memory and supports transparent access just like DRAM.
+		 * Hence we don't require __iomem being set and don't need to use accessor
+		 * functions to read from or write to SRAM.
+		 */
+		if (mtk_is_netsys_v3_or_greater(eth)) {
+			eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
+			if (IS_ERR(eth->sram_base))
+				return PTR_ERR(eth->sram_base);
+		} else {
+			eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
+		}
+	}
+
 	spin_lock_init(&eth->page_lock);
 	spin_lock_init(&eth->tx_irq_lock);
 	spin_lock_init(&eth->rx_irq_lock);
@@ -4668,6 +4706,18 @@ static int mtk_probe(struct platform_dev
 			goto err_destroy_sgmii;
 
+	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+		if (mtk_is_netsys_v3_or_greater(eth)) {
+			res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+			if (!res_sram) {
+				err = -EINVAL;
+				goto err_destroy_sgmii;
+			}
+			eth->phy_scratch_ring = res_sram->start;
+		} else {
+			eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+		}
+	}
+
 	if (eth->soc->offload_version) {
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
 #define MTK_GDMA_MAC_ADRH(x)	({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ?	\
 				   0x54C : 0x50C + (_x * 0x1000); })
 
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET	0x40000
+
 /* FE global misc reg*/
 #define MTK_FE_GLO_MISC		0x124
@@ -935,6 +938,7 @@ enum mkt_eth_capabilities {
 	MTK_RSTCTRL_PPE1_BIT,
 	MTK_RSTCTRL_PPE2_BIT,
 	MTK_U3_COPHY_V2_BIT,
+	MTK_SRAM_BIT,
 
 	/* MUX BITS*/
 	MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -970,6 +974,7 @@ enum mkt_eth_capabilities {
 #define MTK_RSTCTRL_PPE1	BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
 #define MTK_RSTCTRL_PPE2	BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
 #define MTK_U3_COPHY_V2		BIT_ULL(MTK_U3_COPHY_V2_BIT)
+#define MTK_SRAM		BIT_ULL(MTK_SRAM_BIT)
 
 #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW		\
 	BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -1045,14 +1050,14 @@ enum mkt_eth_capabilities {
 #define MT7981_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
 		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
 		      MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
-		      MTK_RSTCTRL_PPE1)
+		      MTK_RSTCTRL_PPE1 | MTK_SRAM)
 
 #define MT7986_CAPS  (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
 		      MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
-		      MTK_RSTCTRL_PPE1)
+		      MTK_RSTCTRL_PPE1 | MTK_SRAM)
 
 #define MT7988_CAPS  (MTK_GDM1_ESW | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
-		      MTK_RSTCTRL_PPE2)
+		      MTK_RSTCTRL_PPE2 | MTK_SRAM)
 
 struct mtk_tx_dma_desc_info {
 	dma_addr_t	addr;
@@ -1212,6 +1217,7 @@ struct mtk_eth {
 	struct device			*dev;
 	struct device			*dma_dev;
 	void __iomem			*base;
+	void				*sram_base;
 	spinlock_t			page_lock;
 	spinlock_t			tx_irq_lock;
 	spinlock_t			rx_irq_lock;