71bfcdbac8b41b7736838514a6fc74da1d6c24bf
[openwrt/staging/aparcar.git] /
1 From 463a71af080fbc77339bee2037fb1e081e3824f7 Mon Sep 17 00:00:00 2001
2 Message-Id: <463a71af080fbc77339bee2037fb1e081e3824f7.1662886034.git.lorenzo@kernel.org>
3 In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
4 References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
5 From: Lorenzo Bianconi <lorenzo@kernel.org>
6 Date: Sat, 27 Aug 2022 16:15:14 +0200
7 Subject: [PATCH net-next 2/5] net: ethernet: mtk_eth_wed: add wed support for
8 mt7986 chipset
9
10 Introduce Wireless Ethernet Dispatcher support on transmission side
11 for mt7986 chipset
12
13 Co-developed-by: Bo Jiao <Bo.Jiao@mediatek.com>
14 Signed-off-by: Bo Jiao <Bo.Jiao@mediatek.com>
15 Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
16 Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
17 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
18 ---
19 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34 +-
20 drivers/net/ethernet/mediatek/mtk_wed.c | 371 ++++++++++++++----
21 drivers/net/ethernet/mediatek/mtk_wed.h | 8 +-
22 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 3 +
23 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 81 +++-
24 include/linux/soc/mediatek/mtk_wed.h | 8 +
25 6 files changed, 408 insertions(+), 97 deletions(-)
26
27 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
28 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
29 @@ -3892,6 +3892,7 @@ void mtk_eth_set_dma_device(struct mtk_e
30
31 static int mtk_probe(struct platform_device *pdev)
32 {
33 + struct resource *res = NULL;
34 struct device_node *mac_np;
35 struct mtk_eth *eth;
36 int err, i;
37 @@ -3972,16 +3973,31 @@ static int mtk_probe(struct platform_dev
38 }
39 }
40
41 - for (i = 0;; i++) {
42 - struct device_node *np = of_parse_phandle(pdev->dev.of_node,
43 - "mediatek,wed", i);
44 - void __iomem *wdma;
45 -
46 - if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
47 - break;
48 -
49 - wdma = eth->base + eth->soc->reg_map->wdma_base[i];
50 - mtk_wed_add_hw(np, eth, wdma, i);
51 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
52 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
53 + if (!res)
54 + return -EINVAL;
55 + }
56 +
57 + if (eth->soc->offload_version) {
58 + for (i = 0;; i++) {
59 + struct device_node *np;
60 + phys_addr_t wdma_phy;
61 + u32 wdma_base;
62 +
63 + if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
64 + break;
65 +
66 + np = of_parse_phandle(pdev->dev.of_node,
67 + "mediatek,wed", i);
68 + if (!np)
69 + break;
70 +
71 + wdma_base = eth->soc->reg_map->wdma_base[i];
72 + wdma_phy = res ? res->start + wdma_base : 0;
73 + mtk_wed_add_hw(np, eth, eth->base + wdma_base,
74 + wdma_phy, i);
75 + }
76 }
77
78 for (i = 0; i < 3; i++) {
79 --- a/drivers/net/ethernet/mediatek/mtk_wed.c
80 +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
81 @@ -25,6 +25,11 @@
82
83 #define MTK_WED_TX_RING_SIZE 2048
84 #define MTK_WED_WDMA_RING_SIZE 1024
85 +#define MTK_WED_MAX_GROUP_SIZE 0x100
86 +#define MTK_WED_VLD_GROUP_SIZE 0x40
87 +#define MTK_WED_PER_GROUP_PKT 128
88 +
89 +#define MTK_WED_FBUF_SIZE 128
90
91 static struct mtk_wed_hw *hw_list[2];
92 static DEFINE_MUTEX(hw_lock);
93 @@ -150,10 +155,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi
94
95 desc->buf0 = cpu_to_le32(buf_phys);
96 desc->buf1 = cpu_to_le32(buf_phys + txd_size);
97 - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
98 - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
99 - MTK_WED_BUF_SIZE - txd_size) |
100 - MTK_WDMA_DESC_CTRL_LAST_SEG1;
101 +
102 + if (dev->hw->version == 1)
103 + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
104 + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
105 + MTK_WED_BUF_SIZE - txd_size) |
106 + MTK_WDMA_DESC_CTRL_LAST_SEG1;
107 + else
108 + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
109 + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
110 + MTK_WED_BUF_SIZE - txd_size) |
111 + MTK_WDMA_DESC_CTRL_LAST_SEG0;
112 desc->ctrl = cpu_to_le32(ctrl);
113 desc->info = 0;
114 desc++;
115 @@ -209,7 +221,7 @@ mtk_wed_free_ring(struct mtk_wed_device
116 if (!ring->desc)
117 return;
118
119 - dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
120 + dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
121 ring->desc, ring->desc_phys);
122 }
123
124 @@ -229,6 +241,14 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
125 {
126 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
127
128 + if (dev->hw->version == 1)
129 + mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
130 + else
131 + mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
132 + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
133 + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
134 + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
135 +
136 if (!dev->hw->num_flows)
137 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
138
139 @@ -237,6 +257,20 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
140 }
141
142 static void
143 +mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
144 +{
145 + if (enable) {
146 + wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
147 + wed_w32(dev, MTK_WED_TXP_DW1,
148 + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
149 + } else {
150 + wed_w32(dev, MTK_WED_TXP_DW1,
151 + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
152 + wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
153 + }
154 +}
155 +
156 +static void
157 mtk_wed_dma_disable(struct mtk_wed_device *dev)
158 {
159 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
160 @@ -249,12 +283,22 @@ mtk_wed_dma_disable(struct mtk_wed_devic
161 MTK_WED_GLO_CFG_TX_DMA_EN |
162 MTK_WED_GLO_CFG_RX_DMA_EN);
163
164 - regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
165 wdma_m32(dev, MTK_WDMA_GLO_CFG,
166 MTK_WDMA_GLO_CFG_TX_DMA_EN |
167 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
168 - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
169 - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
170 + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
171 +
172 + if (dev->hw->version == 1) {
173 + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
174 + wdma_m32(dev, MTK_WDMA_GLO_CFG,
175 + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
176 + } else {
177 + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
178 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
179 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
180 +
181 + mtk_wed_set_512_support(dev, false);
182 + }
183 }
184
185 static void
186 @@ -293,7 +337,7 @@ mtk_wed_detach(struct mtk_wed_device *de
187 mtk_wed_free_buffer(dev);
188 mtk_wed_free_tx_rings(dev);
189
190 - if (of_dma_is_coherent(wlan_node))
191 + if (of_dma_is_coherent(wlan_node) && hw->hifsys)
192 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
193 BIT(hw->index), BIT(hw->index));
194
195 @@ -308,14 +352,69 @@ mtk_wed_detach(struct mtk_wed_device *de
196 mutex_unlock(&hw_lock);
197 }
198
199 +#define PCIE_BASE_ADDR0 0x11280000
200 +static void
201 +mtk_wed_bus_init(struct mtk_wed_device *dev)
202 +{
203 + struct device_node *np = dev->hw->eth->dev->of_node;
204 + struct regmap *regs;
205 + u32 val;
206 +
207 + regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie");
208 + if (IS_ERR(regs))
209 + return;
210 +
211 + regmap_update_bits(regs, 0, BIT(0), BIT(0));
212 +
213 + wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
214 + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
215 +
216 + /* pcie interrupt control: pola/source selection */
217 + wed_set(dev, MTK_WED_PCIE_INT_CTRL,
218 + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
219 + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
220 + wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
221 +
222 + val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
223 + val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
224 + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
225 + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
226 +
227 + val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
228 + val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
229 +
230 + /* pcie interrupt status trigger register */
231 + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
232 + wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
233 +
234 + /* pola setting */
235 + val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
236 + wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
237 +}
238 +
239 +static void
240 +mtk_wed_set_wpdma(struct mtk_wed_device *dev)
241 +{
242 + if (dev->hw->version == 1) {
243 + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
244 + } else {
245 + mtk_wed_bus_init(dev);
246 +
247 + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
248 + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
249 + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
250 + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
251 + }
252 +}
253 +
254 static void
255 mtk_wed_hw_init_early(struct mtk_wed_device *dev)
256 {
257 u32 mask, set;
258 - u32 offset;
259
260 mtk_wed_stop(dev);
261 mtk_wed_reset(dev, MTK_WED_RESET_WED);
262 + mtk_wed_set_wpdma(dev);
263
264 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
265 MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
266 @@ -325,17 +424,33 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
267 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
268 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
269
270 - wdma_set(dev, MTK_WDMA_GLO_CFG,
271 - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
272 - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
273 - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
274 -
275 - offset = dev->hw->index ? 0x04000400 : 0;
276 - wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
277 - wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
278 + if (dev->hw->version == 1) {
279 + u32 offset = dev->hw->index ? 0x04000400 : 0;
280
281 - wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
282 - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
283 + wdma_set(dev, MTK_WDMA_GLO_CFG,
284 + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
285 + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
286 + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
287 +
288 + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
289 + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
290 + wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
291 + MTK_PCIE_BASE(dev->hw->index));
292 + } else {
293 + wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
294 + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
295 + wed_w32(dev, MTK_WED_WDMA_OFFSET0,
296 + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
297 + MTK_WDMA_INT_STATUS) |
298 + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
299 + MTK_WDMA_GLO_CFG));
300 +
301 + wed_w32(dev, MTK_WED_WDMA_OFFSET1,
302 + FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
303 + MTK_WDMA_RING_TX(0)) |
304 + FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
305 + MTK_WDMA_RING_RX(0)));
306 + }
307 }
308
309 static void
310 @@ -355,37 +470,65 @@ mtk_wed_hw_init(struct mtk_wed_device *d
311
312 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
313
314 - wed_w32(dev, MTK_WED_TX_BM_TKID,
315 - FIELD_PREP(MTK_WED_TX_BM_TKID_START,
316 - dev->wlan.token_start) |
317 - FIELD_PREP(MTK_WED_TX_BM_TKID_END,
318 - dev->wlan.token_start + dev->wlan.nbuf - 1));
319 -
320 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
321
322 - wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
323 - FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
324 - MTK_WED_TX_BM_DYN_THR_HI);
325 + if (dev->hw->version == 1) {
326 + wed_w32(dev, MTK_WED_TX_BM_TKID,
327 + FIELD_PREP(MTK_WED_TX_BM_TKID_START,
328 + dev->wlan.token_start) |
329 + FIELD_PREP(MTK_WED_TX_BM_TKID_END,
330 + dev->wlan.token_start +
331 + dev->wlan.nbuf - 1));
332 + wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
333 + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
334 + MTK_WED_TX_BM_DYN_THR_HI);
335 + } else {
336 + wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
337 + FIELD_PREP(MTK_WED_TX_BM_TKID_START,
338 + dev->wlan.token_start) |
339 + FIELD_PREP(MTK_WED_TX_BM_TKID_END,
340 + dev->wlan.token_start +
341 + dev->wlan.nbuf - 1));
342 + wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
343 + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
344 + MTK_WED_TX_BM_DYN_THR_HI_V2);
345 + wed_w32(dev, MTK_WED_TX_TKID_CTRL,
346 + MTK_WED_TX_TKID_CTRL_PAUSE |
347 + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
348 + dev->buf_ring.size / 128) |
349 + FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
350 + dev->buf_ring.size / 128));
351 + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
352 + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
353 + MTK_WED_TX_TKID_DYN_THR_HI);
354 + }
355
356 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
357
358 - wed_set(dev, MTK_WED_CTRL,
359 - MTK_WED_CTRL_WED_TX_BM_EN |
360 - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
361 + if (dev->hw->version == 1)
362 + wed_set(dev, MTK_WED_CTRL,
363 + MTK_WED_CTRL_WED_TX_BM_EN |
364 + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
365 + else
366 + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
367
368 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
369 }
370
371 static void
372 -mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
373 +mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
374 {
375 + void *head = (void *)ring->desc;
376 int i;
377
378 for (i = 0; i < size; i++) {
379 - desc[i].buf0 = 0;
380 - desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
381 - desc[i].buf1 = 0;
382 - desc[i].info = 0;
383 + struct mtk_wdma_desc *desc;
384 +
385 + desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
386 + desc->buf0 = 0;
387 + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
388 + desc->buf1 = 0;
389 + desc->info = 0;
390 }
391 }
392
393 @@ -436,12 +579,10 @@ mtk_wed_reset_dma(struct mtk_wed_device
394 int i;
395
396 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
397 - struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
398 -
399 - if (!desc)
400 + if (!dev->tx_ring[i].desc)
401 continue;
402
403 - mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
404 + mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
405 }
406
407 if (mtk_wed_poll_busy(dev))
408 @@ -498,16 +639,16 @@ mtk_wed_reset_dma(struct mtk_wed_device
409
410 static int
411 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
412 - int size)
413 + int size, u32 desc_size)
414 {
415 - ring->desc = dma_alloc_coherent(dev->hw->dev,
416 - size * sizeof(*ring->desc),
417 + ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
418 &ring->desc_phys, GFP_KERNEL);
419 if (!ring->desc)
420 return -ENOMEM;
421
422 + ring->desc_size = desc_size;
423 ring->size = size;
424 - mtk_wed_ring_reset(ring->desc, size);
425 + mtk_wed_ring_reset(ring, size);
426
427 return 0;
428 }
429 @@ -515,9 +656,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
430 static int
431 mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
432 {
433 + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
434 struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
435
436 - if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
437 + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
438 return -ENOMEM;
439
440 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
441 @@ -546,16 +688,41 @@ mtk_wed_configure_irq(struct mtk_wed_dev
442 MTK_WED_CTRL_WED_TX_BM_EN |
443 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
444
445 - wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
446 - MTK_WED_PCIE_INT_TRIGGER_STATUS);
447 + if (dev->hw->version == 1) {
448 + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
449 + MTK_WED_PCIE_INT_TRIGGER_STATUS);
450 +
451 + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
452 + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
453 + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
454
455 - wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
456 - MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
457 - MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
458 + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
459 + } else {
460 +	/* initial tx interrupt trigger */
461 + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
462 + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
463 + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
464 + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
465 + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
466 + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
467 + dev->wlan.tx_tbit[0]) |
468 + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
469 + dev->wlan.tx_tbit[1]));
470 +
471 +	/* initial txfree interrupt trigger */
472 + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
473 + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
474 + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
475 + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
476 + dev->wlan.txfree_tbit));
477 +
478 + wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
479 + wed_set(dev, MTK_WED_WDMA_INT_CTRL,
480 + FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
481 + dev->wdma_idx));
482 + }
483
484 - /* initail wdma interrupt agent */
485 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
486 - wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
487
488 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
489 wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
490 @@ -580,14 +747,28 @@ mtk_wed_dma_enable(struct mtk_wed_device
491 wdma_set(dev, MTK_WDMA_GLO_CFG,
492 MTK_WDMA_GLO_CFG_TX_DMA_EN |
493 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
494 - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
495 - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
496 + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
497 +
498 + if (dev->hw->version == 1) {
499 + wdma_set(dev, MTK_WDMA_GLO_CFG,
500 + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
501 + } else {
502 + wed_set(dev, MTK_WED_WPDMA_CTRL,
503 + MTK_WED_WPDMA_CTRL_SDL1_FIXED);
504 +
505 + wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
506 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
507 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
508 +
509 + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
510 + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
511 + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
512 + }
513 }
514
515 static void
516 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
517 {
518 - u32 val;
519 int i;
520
521 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
522 @@ -598,14 +779,17 @@ mtk_wed_start(struct mtk_wed_device *dev
523 mtk_wed_configure_irq(dev, irq_mask);
524
525 mtk_wed_set_ext_int(dev, true);
526 - val = dev->wlan.wpdma_phys |
527 - MTK_PCIE_MIRROR_MAP_EN |
528 - FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
529 -
530 - if (dev->hw->index)
531 - val |= BIT(1);
532 - val |= BIT(0);
533 - regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
534 +
535 + if (dev->hw->version == 1) {
536 + u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
537 + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
538 + dev->hw->index);
539 +
540 + val |= BIT(0) | (BIT(1) * !!dev->hw->index);
541 + regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
542 + } else {
543 + mtk_wed_set_512_support(dev, true);
544 + }
545
546 mtk_wed_dma_enable(dev);
547 dev->running = true;
548 @@ -639,7 +823,9 @@ mtk_wed_attach(struct mtk_wed_device *de
549 goto out;
550 }
551
552 - dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
553 + dev_info(&dev->wlan.pci_dev->dev,
554 + "attaching wed device %d version %d\n",
555 + hw->index, hw->version);
556
557 dev->hw = hw;
558 dev->dev = hw->dev;
559 @@ -657,7 +843,9 @@ mtk_wed_attach(struct mtk_wed_device *de
560 }
561
562 mtk_wed_hw_init_early(dev);
563 - regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
564 + if (hw->hifsys)
565 + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
566 + BIT(hw->index), 0);
567
568 out:
569 mutex_unlock(&hw_lock);
570 @@ -684,7 +872,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
571
572 BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
573
574 - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
575 + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
576 + sizeof(*ring->desc)))
577 return -ENOMEM;
578
579 if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
580 @@ -711,21 +900,21 @@ static int
581 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
582 {
583 struct mtk_wed_ring *ring = &dev->txfree_ring;
584 - int i;
585 + int i, index = dev->hw->version == 1;
586
587 /*
588 * For txfree event handling, the same DMA ring is shared between WED
589 * and WLAN. The WLAN driver accesses the ring index registers through
590 * WED
591 */
592 - ring->reg_base = MTK_WED_RING_RX(1);
593 + ring->reg_base = MTK_WED_RING_RX(index);
594 ring->wpdma = regs;
595
596 for (i = 0; i < 12; i += 4) {
597 u32 val = readl(regs + i);
598
599 - wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
600 - wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
601 + wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
602 + wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
603 }
604
605 return 0;
606 @@ -734,11 +923,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
607 static u32
608 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
609 {
610 - u32 val;
611 + u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
612 +
613 + if (dev->hw->version == 1)
614 + ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
615 + else
616 + ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
617 + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
618 + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
619 + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
620
621 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
622 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
623 - val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
624 + val &= ext_mask;
625 if (!dev->hw->num_flows)
626 val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
627 if (val && net_ratelimit())
628 @@ -813,7 +1010,8 @@ out:
629 }
630
631 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
632 - void __iomem *wdma, int index)
633 + void __iomem *wdma, phys_addr_t wdma_phy,
634 + int index)
635 {
636 static const struct mtk_wed_ops wed_ops = {
637 .attach = mtk_wed_attach,
638 @@ -860,26 +1058,33 @@ void mtk_wed_add_hw(struct device_node *
639 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
640 if (!hw)
641 goto unlock;
642 +
643 hw->node = np;
644 hw->regs = regs;
645 hw->eth = eth;
646 hw->dev = &pdev->dev;
647 + hw->wdma_phy = wdma_phy;
648 hw->wdma = wdma;
649 hw->index = index;
650 hw->irq = irq;
651 - hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
652 - "mediatek,pcie-mirror");
653 - hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
654 - "mediatek,hifsys");
655 - if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
656 - kfree(hw);
657 - goto unlock;
658 - }
659 + hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
660
661 - if (!index) {
662 - regmap_write(hw->mirror, 0, 0);
663 - regmap_write(hw->mirror, 4, 0);
664 + if (hw->version == 1) {
665 + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
666 + "mediatek,pcie-mirror");
667 + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
668 + "mediatek,hifsys");
669 + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
670 + kfree(hw);
671 + goto unlock;
672 + }
673 +
674 + if (!index) {
675 + regmap_write(hw->mirror, 0, 0);
676 + regmap_write(hw->mirror, 4, 0);
677 + }
678 }
679 +
680 mtk_wed_hw_add_debugfs(hw);
681
682 hw_list[index] = hw;
683 --- a/drivers/net/ethernet/mediatek/mtk_wed.h
684 +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
685 @@ -18,11 +18,13 @@ struct mtk_wed_hw {
686 struct regmap *hifsys;
687 struct device *dev;
688 void __iomem *wdma;
689 + phys_addr_t wdma_phy;
690 struct regmap *mirror;
691 struct dentry *debugfs_dir;
692 struct mtk_wed_device *wed_dev;
693 u32 debugfs_reg;
694 u32 num_flows;
695 + u8 version;
696 char dirname[5];
697 int irq;
698 int index;
699 @@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *
700 }
701
702 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
703 - void __iomem *wdma, int index);
704 + void __iomem *wdma, phys_addr_t wdma_phy,
705 + int index);
706 void mtk_wed_exit(void);
707 int mtk_wed_flow_add(int index);
708 void mtk_wed_flow_remove(int index);
709 #else
710 static inline void
711 mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
712 - void __iomem *wdma, int index)
713 + void __iomem *wdma, phys_addr_t wdma_phy,
714 + int index)
715 {
716 }
717 static inline void
718 --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
719 +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
720 @@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void
721 DUMP_WDMA(WDMA_GLO_CFG),
722 DUMP_WDMA_RING(WDMA_RING_RX(0)),
723 DUMP_WDMA_RING(WDMA_RING_RX(1)),
724 +
725 + DUMP_STR("TX FREE"),
726 + DUMP_WED(WED_RX_MIB(0)),
727 };
728 struct mtk_wed_hw *hw = s->private;
729 struct mtk_wed_device *dev = hw->wed_dev;
730 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
731 +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
732 @@ -5,6 +5,7 @@
733 #define __MTK_WED_REGS_H
734
735 #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
736 +#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
737 #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
738 #define MTK_WDMA_DESC_CTRL_BURST BIT(16)
739 #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
740 @@ -41,6 +42,7 @@ struct mtk_wdma_desc {
741 #define MTK_WED_CTRL_RESERVE_EN BIT(12)
742 #define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
743 #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
744 +#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
745 #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
746
747 #define MTK_WED_EXT_INT_STATUS 0x020
748 @@ -57,7 +59,8 @@ struct mtk_wdma_desc {
749 #define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
750 #define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
751 #define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
752 -#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
753 +#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
754 +#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
755 #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
756 #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
757 MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
758 @@ -65,8 +68,7 @@ struct mtk_wdma_desc {
759 MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
760 MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
761 MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
762 - MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
763 - MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
764 + MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
765
766 #define MTK_WED_EXT_INT_MASK 0x028
767
768 @@ -81,6 +83,7 @@ struct mtk_wdma_desc {
769 #define MTK_WED_TX_BM_BASE 0x084
770
771 #define MTK_WED_TX_BM_TKID 0x088
772 +#define MTK_WED_TX_BM_TKID_V2 0x0c8
773 #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
774 #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
775
776 @@ -94,7 +97,25 @@ struct mtk_wdma_desc {
777
778 #define MTK_WED_TX_BM_DYN_THR 0x0a0
779 #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
780 +#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
781 #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
782 +#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
783 +
784 +#define MTK_WED_TX_TKID_CTRL 0x0c0
785 +#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
786 +#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
787 +#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
788 +
789 +#define MTK_WED_TX_TKID_DYN_THR 0x0e0
790 +#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
791 +#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
792 +
793 +#define MTK_WED_TXP_DW0 0x120
794 +#define MTK_WED_TXP_DW1 0x124
795 +#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
796 +#define MTK_WED_TXDP_CTRL 0x130
797 +#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
798 +#define MTK_WED_RX_BM_TKID_MIB 0x1cc
799
800 #define MTK_WED_INT_STATUS 0x200
801 #define MTK_WED_INT_MASK 0x204
802 @@ -125,6 +146,7 @@ struct mtk_wdma_desc {
803 #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
804
805 #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
806 +#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
807
808 #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
809
810 @@ -155,21 +177,62 @@ struct mtk_wdma_desc {
811 #define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
812 #define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
813
814 +/* CONFIG_MEDIATEK_NETSYS_V2 */
815 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
816 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
817 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
818 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
819 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
820 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
821 +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
822 +#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
823 +#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
824 +#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
825 +
826 #define MTK_WED_WPDMA_RESET_IDX 0x50c
827 #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
828 #define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
829
830 +#define MTK_WED_WPDMA_CTRL 0x518
831 +#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
832 +
833 #define MTK_WED_WPDMA_INT_CTRL 0x520
834 #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
835
836 #define MTK_WED_WPDMA_INT_MASK 0x524
837
838 +#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
839 +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
840 +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
841 +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
842 +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
843 +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
844 +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
845 +
846 +#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
847 +
848 +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
849 +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
850 +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
851 +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
852 +
853 #define MTK_WED_PCIE_CFG_BASE 0x560
854
855 +#define MTK_WED_PCIE_CFG_BASE 0x560
856 +#define MTK_WED_PCIE_CFG_INTM 0x564
857 +#define MTK_WED_PCIE_CFG_MSIS 0x568
858 #define MTK_WED_PCIE_INT_TRIGGER 0x570
859 #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
860
861 +#define MTK_WED_PCIE_INT_CTRL 0x57c
862 +#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
863 +#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
864 +#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
865 +
866 #define MTK_WED_WPDMA_CFG_BASE 0x580
867 +#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
868 +#define MTK_WED_WPDMA_CFG_TX 0x588
869 +#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
870
871 #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
872 #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
873 @@ -203,15 +266,24 @@ struct mtk_wdma_desc {
874 #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
875 #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
876
877 +#define MTK_WED_WDMA_INT_CLR 0xa24
878 +#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
879 +
880 #define MTK_WED_WDMA_INT_TRIGGER 0xa28
881 #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
882
883 #define MTK_WED_WDMA_INT_CTRL 0xa2c
884 #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
885
886 +#define MTK_WED_WDMA_CFG_BASE 0xaa0
887 #define MTK_WED_WDMA_OFFSET0 0xaa4
888 #define MTK_WED_WDMA_OFFSET1 0xaa8
889
890 +#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
891 +#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
892 +#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
893 +#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
894 +
895 #define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
896 #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
897 #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
898 @@ -221,6 +293,7 @@ struct mtk_wdma_desc {
899 #define MTK_WED_RING_OFS_CPU_IDX 0x08
900 #define MTK_WED_RING_OFS_DMA_IDX 0x0c
901
902 +#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
903 #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
904
905 #define MTK_WDMA_GLO_CFG 0x204
906 @@ -234,6 +307,8 @@ struct mtk_wdma_desc {
907 #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
908 #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
909
910 +#define MTK_WDMA_INT_STATUS 0x220
911 +
912 #define MTK_WDMA_INT_MASK 0x228
913 #define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
914 #define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
915 --- a/include/linux/soc/mediatek/mtk_wed.h
916 +++ b/include/linux/soc/mediatek/mtk_wed.h
917 @@ -14,6 +14,7 @@ struct mtk_wdma_desc;
918 struct mtk_wed_ring {
919 struct mtk_wdma_desc *desc;
920 dma_addr_t desc_phys;
921 + u32 desc_size;
922 int size;
923
924 u32 reg_base;
925 @@ -45,10 +46,17 @@ struct mtk_wed_device {
926 struct pci_dev *pci_dev;
927
928 u32 wpdma_phys;
929 + u32 wpdma_int;
930 + u32 wpdma_mask;
931 + u32 wpdma_tx;
932 + u32 wpdma_txfree;
933
934 u16 token_start;
935 unsigned int nbuf;
936
937 + u8 tx_tbit[MTK_WED_TX_QUEUES];
938 + u8 txfree_tbit;
939 +
940 u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
941 int (*offload_enable)(struct mtk_wed_device *wed);
942 void (*offload_disable)(struct mtk_wed_device *wed);