From 2205fea513d857e25614db8367c21477f24f3050 Mon Sep 17 00:00:00 2001
1 From: Lorenzo Bianconi <lorenzo@kernel.org>
2 Date: Thu, 24 Nov 2022 16:22:54 +0100
3 Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_rx_reset routine
4
5 Introduce mtk_wed_rx_reset routine in order to reset rx DMA for Wireless
6 Ethernet Dispatcher available on MT7986 SoC.
7
8 Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
9 Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
10 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
11 Signed-off-by: Paolo Abeni <pabeni@redhat.com>
12 ---
13
14 --- a/drivers/net/ethernet/mediatek/mtk_wed.c
15 +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
16 @@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
17 }
18
19 static u32
20 -mtk_wed_check_busy(struct mtk_wed_device *dev)
21 +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
22 {
23 - if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
24 - return true;
25 -
26 - if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
27 - MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
28 - return true;
29 -
30 - if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
31 - return true;
32 -
33 - if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
34 - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
35 - return true;
36 -
37 - if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
38 - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
39 - return true;
40 -
41 - if (wed_r32(dev, MTK_WED_CTRL) &
42 - (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
43 - return true;
44 -
45 - return false;
46 + return !!(wed_r32(dev, reg) & mask);
47 }
48
49 static int
50 -mtk_wed_poll_busy(struct mtk_wed_device *dev)
51 +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
52 {
53 int sleep = 15000;
54 int timeout = 100 * sleep;
55 u32 val;
56
57 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
58 - timeout, false, dev);
59 + timeout, false, dev, reg, mask);
60 +}
61 +
62 +static int
63 +mtk_wed_rx_reset(struct mtk_wed_device *dev)
64 +{
65 + struct mtk_wed_wo *wo = dev->hw->wed_wo;
66 + u8 val = MTK_WED_WO_STATE_SER_RESET;
67 + int i, ret;
68 +
69 + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
70 + MTK_WED_WO_CMD_CHANGE_STATE, &val,
71 + sizeof(val), true);
72 + if (ret)
73 + return ret;
74 +
75 + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
76 + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
77 + MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
78 + if (ret) {
79 + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
80 + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
81 + } else {
82 + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
83 + MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
84 + MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
85 +
86 + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
87 + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
88 + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
89 + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
90 + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
91 + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
92 +
93 + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
94 + }
95 +
96 + /* reset rro qm */
97 + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
98 + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
99 + MTK_WED_CTRL_RX_RRO_QM_BUSY);
100 + if (ret) {
101 + mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
102 + } else {
103 + wed_set(dev, MTK_WED_RROQM_RST_IDX,
104 + MTK_WED_RROQM_RST_IDX_MIOD |
105 + MTK_WED_RROQM_RST_IDX_FDBK);
106 + wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
107 + }
108 +
109 + /* reset route qm */
110 + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
111 + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
112 + MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
113 + if (ret)
114 + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
115 + else
116 + wed_set(dev, MTK_WED_RTQM_GLO_CFG,
117 + MTK_WED_RTQM_Q_RST);
118 +
119 + /* reset tx wdma */
120 + mtk_wdma_tx_reset(dev);
121 +
122 + /* reset tx wdma drv */
123 + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
124 + mtk_wed_poll_busy(dev, MTK_WED_CTRL,
125 + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
126 + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
127 +
128 + /* reset wed rx dma */
129 + ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
130 + MTK_WED_GLO_CFG_RX_DMA_BUSY);
131 + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
132 + if (ret) {
133 + mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
134 + } else {
135 + struct mtk_eth *eth = dev->hw->eth;
136 +
137 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
138 + wed_set(dev, MTK_WED_RESET_IDX,
139 + MTK_WED_RESET_IDX_RX_V2);
140 + else
141 + wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
142 + wed_w32(dev, MTK_WED_RESET_IDX, 0);
143 + }
144 +
145 + /* reset rx bm */
146 + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
147 + mtk_wed_poll_busy(dev, MTK_WED_CTRL,
148 + MTK_WED_CTRL_WED_RX_BM_BUSY);
149 + mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
150 +
151 + /* wo change to enable state */
152 + val = MTK_WED_WO_STATE_ENABLE;
153 + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
154 + MTK_WED_WO_CMD_CHANGE_STATE, &val,
155 + sizeof(val), true);
156 + if (ret)
157 + return ret;
158 +
159 + /* wed_rx_ring_reset */
160 + for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
161 + if (!dev->rx_ring[i].desc)
162 + continue;
163 +
164 + mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
165 + false);
166 + }
167 + mtk_wed_free_rx_buffer(dev);
168 +
169 + return 0;
170 }
171
172 static void
173 @@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
174 true);
175 }
176
177 - if (mtk_wed_poll_busy(dev))
178 - busy = mtk_wed_check_busy(dev);
179 -
180 + /* 1. reset WED tx DMA */
181 + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
182 + busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
183 + MTK_WED_GLO_CFG_TX_DMA_BUSY);
184 if (busy) {
185 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
186 } else {
187 - wed_w32(dev, MTK_WED_RESET_IDX,
188 - MTK_WED_RESET_IDX_TX |
189 - MTK_WED_RESET_IDX_RX);
190 + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
191 wed_w32(dev, MTK_WED_RESET_IDX, 0);
192 }
193
194 - mtk_wdma_rx_reset(dev);
195 + /* 2. reset WDMA rx DMA */
196 + busy = !!mtk_wdma_rx_reset(dev);
197 + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
198 + if (!busy)
199 + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
200 + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
201
202 if (busy) {
203 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
204 @@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
205 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
206 }
207
208 + /* 3. reset WED WPDMA tx */
209 + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
210 +
211 for (i = 0; i < 100; i++) {
212 val = wed_r32(dev, MTK_WED_TX_BM_INTF);
213 if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
214 @@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
215 }
216
217 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
218 + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
219 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
220
221 + /* 4. reset WED WPDMA tx */
222 + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
223 + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
224 + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
225 + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
226 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
227 + if (!busy)
228 + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
229 + MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
230 +
231 if (busy) {
232 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
233 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
234 @@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
235 MTK_WED_WPDMA_RESET_IDX_RX);
236 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
237 }
238 +
239 + dev->init_done = false;
240 + if (dev->hw->version == 1)
241 + return;
242 +
243 + if (!busy) {
244 + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
245 + wed_w32(dev, MTK_WED_RESET_IDX, 0);
246 + }
247 +
248 + mtk_wed_rx_reset(dev);
249 }
250
251 static int
252 @@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev
253 {
254 int i;
255
256 + if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
257 + return;
258 +
259 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
260 if (!dev->rx_wdma[i].desc)
261 mtk_wed_wdma_rx_ring_setup(dev, i, 16);
262 @@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de
263 goto out;
264
265 if (mtk_wed_get_rx_capa(dev)) {
266 - ret = mtk_wed_rx_buffer_alloc(dev);
267 - if (ret)
268 - goto out;
269 -
270 ret = mtk_wed_rro_alloc(dev);
271 if (ret)
272 goto out;
273 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
274 +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
275 @@ -24,11 +24,15 @@ struct mtk_wdma_desc {
276
277 #define MTK_WED_RESET 0x008
278 #define MTK_WED_RESET_TX_BM BIT(0)
279 +#define MTK_WED_RESET_RX_BM BIT(1)
280 #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
281 #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
282 #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
283 +#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
284 #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
285 #define MTK_WED_RESET_WED_TX_DMA BIT(12)
286 +#define MTK_WED_RESET_WED_RX_DMA BIT(13)
287 +#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
288 #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
289 #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
290 #define MTK_WED_RESET_RX_RRO_QM BIT(20)
291 @@ -158,6 +162,8 @@ struct mtk_wdma_desc {
292 #define MTK_WED_RESET_IDX 0x20c
293 #define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
294 #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
295 +#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
296 +#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
297
298 #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
299 #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
300 @@ -267,6 +273,9 @@ struct mtk_wdma_desc {
301
302 #define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
303 #define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
304 +#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
305 +#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
306 +#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
307 #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
308 #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
309