be2d78911e45473ea769d990c686e5ffc1c03155
[openwrt/staging/blogic.git] /
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Thu, 27 Oct 2022 20:17:27 +0200
3 Subject: [PATCH] net: ethernet: mtk_eth_soc: implement multi-queue
4 support for per-port queues
5
6 When sending traffic to multiple ports with different link speeds, queued
7 packets to one port can drown out tx to other ports.
8 In order to better handle transmission to multiple ports, use the hardware
9 shaper feature to implement weighted fair queueing between ports.
10 Weight and maximum rate are automatically adjusted based on the link speed
11 of the port.
12 The first 3 queues are unrestricted and reserved for non-DSA direct tx on
13 GMAC ports. The following queues are automatically assigned by the MTK DSA
14 tag driver based on the target port number.
15 The PPE offload code configures the queues for offloaded traffic in the same
16 way.
17 This feature is only supported on devices supporting QDMA. All queues still
18 share the same DMA ring and descriptor pool.
19
20 Signed-off-by: Felix Fietkau <nbd@nbd.name>
21 ---
22
23 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
24 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
25 @@ -55,6 +55,7 @@ static const struct mtk_reg_map mtk_reg_
26 },
27 .qdma = {
28 .qtx_cfg = 0x1800,
29 + .qtx_sch = 0x1804,
30 .rx_ptr = 0x1900,
31 .rx_cnt_cfg = 0x1904,
32 .qcrx_ptr = 0x1908,
33 @@ -62,6 +63,7 @@ static const struct mtk_reg_map mtk_reg_
34 .rst_idx = 0x1a08,
35 .delay_irq = 0x1a0c,
36 .fc_th = 0x1a10,
37 + .tx_sch_rate = 0x1a14,
38 .int_grp = 0x1a20,
39 .hred = 0x1a44,
40 .ctx_ptr = 0x1b00,
41 @@ -117,6 +119,7 @@ static const struct mtk_reg_map mt7986_r
42 },
43 .qdma = {
44 .qtx_cfg = 0x4400,
45 + .qtx_sch = 0x4404,
46 .rx_ptr = 0x4500,
47 .rx_cnt_cfg = 0x4504,
48 .qcrx_ptr = 0x4508,
49 @@ -134,6 +137,7 @@ static const struct mtk_reg_map mt7986_r
50 .fq_tail = 0x4724,
51 .fq_count = 0x4728,
52 .fq_blen = 0x472c,
53 + .tx_sch_rate = 0x4798,
54 },
55 .gdm1_cnt = 0x1c00,
56 .gdma_to_ppe = 0x3333,
57 @@ -620,6 +624,75 @@ static void mtk_mac_link_down(struct phy
58 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
59 }
60
61 +static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
62 + int speed)
63 +{
64 + const struct mtk_soc_data *soc = eth->soc;
65 + u32 ofs, val;
66 +
67 + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
68 + return;
69 +
70 + val = MTK_QTX_SCH_MIN_RATE_EN |
71 + /* minimum: 10 Mbps */
72 + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
73 + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
74 + MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
75 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
76 + val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
77 +
78 + if (IS_ENABLED(CONFIG_SOC_MT7621)) {
79 + switch (speed) {
80 + case SPEED_10:
81 + val |= MTK_QTX_SCH_MAX_RATE_EN |
82 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
83 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
84 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
85 + break;
86 + case SPEED_100:
87 + val |= MTK_QTX_SCH_MAX_RATE_EN |
88 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
89 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
90 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
91 + break;
92 + case SPEED_1000:
93 + val |= MTK_QTX_SCH_MAX_RATE_EN |
94 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
95 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
96 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
97 + break;
98 + default:
99 + break;
100 + }
101 + } else {
102 + switch (speed) {
103 + case SPEED_10:
104 + val |= MTK_QTX_SCH_MAX_RATE_EN |
105 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
106 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
107 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
108 + break;
109 + case SPEED_100:
110 + val |= MTK_QTX_SCH_MAX_RATE_EN |
111 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
112 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
113 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
114 + break;
115 + case SPEED_1000:
116 + val |= MTK_QTX_SCH_MAX_RATE_EN |
117 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
118 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
119 + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
120 + break;
121 + default:
122 + break;
123 + }
124 + }
125 +
126 + ofs = MTK_QTX_OFFSET * idx;
127 + mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
128 +}
129 +
130 static void mtk_mac_link_up(struct phylink_config *config,
131 struct phy_device *phy,
132 unsigned int mode, phy_interface_t interface,
133 @@ -645,6 +718,8 @@ static void mtk_mac_link_up(struct phyli
134 break;
135 }
136
137 + mtk_set_queue_speed(mac->hw, mac->id, speed);
138 +
139 /* Configure duplex */
140 if (duplex == DUPLEX_FULL)
141 mcr |= MAC_MCR_FORCE_DPX;
142 @@ -1106,7 +1181,8 @@ static void mtk_tx_set_dma_desc_v1(struc
143
144 WRITE_ONCE(desc->txd1, info->addr);
145
146 - data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
147 + data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
148 + FIELD_PREP(TX_DMA_PQID, info->qid);
149 if (info->last)
150 data |= TX_DMA_LS0;
151 WRITE_ONCE(desc->txd3, data);
152 @@ -1140,9 +1216,6 @@ static void mtk_tx_set_dma_desc_v2(struc
153 data |= TX_DMA_LS0;
154 WRITE_ONCE(desc->txd3, data);
155
156 - if (!info->qid && mac->id)
157 - info->qid = MTK_QDMA_GMAC2_QID;
158 -
159 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
160 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
161 WRITE_ONCE(desc->txd4, data);
162 @@ -1186,11 +1259,12 @@ static int mtk_tx_map(struct sk_buff *sk
163 .gso = gso,
164 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
165 .vlan = skb_vlan_tag_present(skb),
166 - .qid = skb->mark & MTK_QDMA_TX_MASK,
167 + .qid = skb_get_queue_mapping(skb),
168 .vlan_tci = skb_vlan_tag_get(skb),
169 .first = true,
170 .last = !skb_is_nonlinear(skb),
171 };
172 + struct netdev_queue *txq;
173 struct mtk_mac *mac = netdev_priv(dev);
174 struct mtk_eth *eth = mac->hw;
175 const struct mtk_soc_data *soc = eth->soc;
176 @@ -1198,8 +1272,10 @@ static int mtk_tx_map(struct sk_buff *sk
177 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
178 struct mtk_tx_buf *itx_buf, *tx_buf;
179 int i, n_desc = 1;
180 + int queue = skb_get_queue_mapping(skb);
181 int k = 0;
182
183 + txq = netdev_get_tx_queue(dev, queue);
184 itxd = ring->next_free;
185 itxd_pdma = qdma_to_pdma(ring, itxd);
186 if (itxd == ring->last_free)
187 @@ -1248,7 +1324,7 @@ static int mtk_tx_map(struct sk_buff *sk
188 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
189 txd_info.size = min_t(unsigned int, frag_size,
190 soc->txrx.dma_max_len);
191 - txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
192 + txd_info.qid = queue;
193 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
194 !(frag_size - txd_info.size);
195 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
196 @@ -1287,7 +1363,7 @@ static int mtk_tx_map(struct sk_buff *sk
197 txd_pdma->txd2 |= TX_DMA_LS1;
198 }
199
200 - netdev_sent_queue(dev, skb->len);
201 + netdev_tx_sent_queue(txq, skb->len);
202 skb_tx_timestamp(skb);
203
204 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
205 @@ -1299,8 +1375,7 @@ static int mtk_tx_map(struct sk_buff *sk
206 wmb();
207
208 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
209 - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
210 - !netdev_xmit_more())
211 + if (netif_xmit_stopped(txq) || !netdev_xmit_more())
212 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
213 } else {
214 int next_idx;
215 @@ -1369,7 +1444,7 @@ static void mtk_wake_queue(struct mtk_et
216 for (i = 0; i < MTK_MAC_COUNT; i++) {
217 if (!eth->netdev[i])
218 continue;
219 - netif_wake_queue(eth->netdev[i]);
220 + netif_tx_wake_all_queues(eth->netdev[i]);
221 }
222 }
223
224 @@ -1393,7 +1468,7 @@ static netdev_tx_t mtk_start_xmit(struct
225
226 tx_num = mtk_cal_txd_req(eth, skb);
227 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
228 - netif_stop_queue(dev);
229 + netif_tx_stop_all_queues(dev);
230 netif_err(eth, tx_queued, dev,
231 "Tx Ring full when queue awake!\n");
232 spin_unlock(&eth->page_lock);
233 @@ -1419,7 +1494,7 @@ static netdev_tx_t mtk_start_xmit(struct
234 goto drop;
235
236 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
237 - netif_stop_queue(dev);
238 + netif_tx_stop_all_queues(dev);
239
240 spin_unlock(&eth->page_lock);
241
242 @@ -1586,10 +1661,12 @@ static int mtk_xdp_submit_frame(struct m
243 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
244 const struct mtk_soc_data *soc = eth->soc;
245 struct mtk_tx_ring *ring = &eth->tx_ring;
246 + struct mtk_mac *mac = netdev_priv(dev);
247 struct mtk_tx_dma_desc_info txd_info = {
248 .size = xdpf->len,
249 .first = true,
250 .last = !xdp_frame_has_frags(xdpf),
251 + .qid = mac->id,
252 };
253 int err, index = 0, n_desc = 1, nr_frags;
254 struct mtk_tx_buf *htx_buf, *tx_buf;
255 @@ -1639,6 +1716,7 @@ static int mtk_xdp_submit_frame(struct m
256 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
257 txd_info.size = skb_frag_size(&sinfo->frags[index]);
258 txd_info.last = index + 1 == nr_frags;
259 + txd_info.qid = mac->id;
260 data = skb_frag_address(&sinfo->frags[index]);
261
262 index++;
263 @@ -1993,8 +2071,46 @@ rx_done:
264 return done;
265 }
266
267 +struct mtk_poll_state {
268 + struct netdev_queue *txq;
269 + unsigned int total;
270 + unsigned int done;
271 + unsigned int bytes;
272 +};
273 +
274 +static void
275 +mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
276 + struct sk_buff *skb)
277 +{
278 + struct netdev_queue *txq;
279 + struct net_device *dev;
280 + unsigned int bytes = skb->len;
281 +
282 + state->total++;
283 + eth->tx_packets++;
284 + eth->tx_bytes += bytes;
285 +
286 + dev = eth->netdev[mac];
287 + if (!dev)
288 + return;
289 +
290 + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
291 + if (state->txq == txq) {
292 + state->done++;
293 + state->bytes += bytes;
294 + return;
295 + }
296 +
297 + if (state->txq)
298 + netdev_tx_completed_queue(state->txq, state->done, state->bytes);
299 +
300 + state->txq = txq;
301 + state->done = 1;
302 + state->bytes = bytes;
303 +}
304 +
305 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
306 - unsigned int *done, unsigned int *bytes)
307 + struct mtk_poll_state *state)
308 {
309 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
310 struct mtk_tx_ring *ring = &eth->tx_ring;
311 @@ -2026,12 +2142,9 @@ static int mtk_poll_tx_qdma(struct mtk_e
312 break;
313
314 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
315 - if (tx_buf->type == MTK_TYPE_SKB) {
316 - struct sk_buff *skb = tx_buf->data;
317 + if (tx_buf->type == MTK_TYPE_SKB)
318 + mtk_poll_tx_done(eth, state, mac, tx_buf->data);
319
320 - bytes[mac] += skb->len;
321 - done[mac]++;
322 - }
323 budget--;
324 }
325 mtk_tx_unmap(eth, tx_buf, &bq, true);
326 @@ -2050,7 +2163,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
327 }
328
329 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
330 - unsigned int *done, unsigned int *bytes)
331 + struct mtk_poll_state *state)
332 {
333 struct mtk_tx_ring *ring = &eth->tx_ring;
334 struct mtk_tx_buf *tx_buf;
335 @@ -2068,12 +2181,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
336 break;
337
338 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
339 - if (tx_buf->type == MTK_TYPE_SKB) {
340 - struct sk_buff *skb = tx_buf->data;
341 -
342 - bytes[0] += skb->len;
343 - done[0]++;
344 - }
345 + if (tx_buf->type == MTK_TYPE_SKB)
346 + mtk_poll_tx_done(eth, state, 0, tx_buf->data);
347 budget--;
348 }
349 mtk_tx_unmap(eth, tx_buf, &bq, true);
350 @@ -2095,26 +2204,15 @@ static int mtk_poll_tx(struct mtk_eth *e
351 {
352 struct mtk_tx_ring *ring = &eth->tx_ring;
353 struct dim_sample dim_sample = {};
354 - unsigned int done[MTK_MAX_DEVS];
355 - unsigned int bytes[MTK_MAX_DEVS];
356 - int total = 0, i;
357 -
358 - memset(done, 0, sizeof(done));
359 - memset(bytes, 0, sizeof(bytes));
360 + struct mtk_poll_state state = {};
361
362 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
363 - budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
364 + budget = mtk_poll_tx_qdma(eth, budget, &state);
365 else
366 - budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
367 + budget = mtk_poll_tx_pdma(eth, budget, &state);
368
369 - for (i = 0; i < MTK_MAC_COUNT; i++) {
370 - if (!eth->netdev[i] || !done[i])
371 - continue;
372 - netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
373 - total += done[i];
374 - eth->tx_packets += done[i];
375 - eth->tx_bytes += bytes[i];
376 - }
377 + if (state.txq)
378 + netdev_tx_completed_queue(state.txq, state.done, state.bytes);
379
380 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
381 &dim_sample);
382 @@ -2124,7 +2222,7 @@ static int mtk_poll_tx(struct mtk_eth *e
383 (atomic_read(&ring->free_count) > ring->thresh))
384 mtk_wake_queue(eth);
385
386 - return total;
387 + return state.total;
388 }
389
390 static void mtk_handle_status_irq(struct mtk_eth *eth)
391 @@ -2210,6 +2308,7 @@ static int mtk_tx_alloc(struct mtk_eth *
392 int i, sz = soc->txrx.txd_size;
393 struct mtk_tx_dma_v2 *txd;
394 int ring_size;
395 + u32 ofs, val;
396
397 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
398 ring_size = MTK_QDMA_RING_SIZE;
399 @@ -2277,8 +2376,25 @@ static int mtk_tx_alloc(struct mtk_eth *
400 ring->phys + ((ring_size - 1) * sz),
401 soc->reg_map->qdma.crx_ptr);
402 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
403 - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
404 - soc->reg_map->qdma.qtx_cfg);
405 +
406 + for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
407 + val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
408 + mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
409 +
410 + val = MTK_QTX_SCH_MIN_RATE_EN |
411 + /* minimum: 10 Mbps */
412 + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
413 + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
414 + MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
415 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
416 + val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
417 + mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
418 + ofs += MTK_QTX_OFFSET;
419 + }
420 + val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
421 + mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
422 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
423 + mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
424 } else {
425 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
426 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
427 @@ -2960,7 +3076,7 @@ static int mtk_start_dma(struct mtk_eth
428 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
429 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
430 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
431 - MTK_CHK_DDONE_EN;
432 + MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
433 else
434 val |= MTK_RX_BT_32DWORDS;
435 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
436 @@ -3006,6 +3122,45 @@ static void mtk_gdm_config(struct mtk_et
437 mtk_w32(eth, 0, MTK_RST_GL);
438 }
439
440 +static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
441 +{
442 + struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
443 + struct mtk_eth *eth = mac->hw;
444 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
445 + struct ethtool_link_ksettings s;
446 + struct net_device *ldev;
447 + struct list_head *iter;
448 + struct dsa_port *dp;
449 +
450 + if (event != NETDEV_CHANGE)
451 + return NOTIFY_DONE;
452 +
453 + netdev_for_each_lower_dev(dev, ldev, iter) {
454 + if (netdev_priv(ldev) == mac)
455 + goto found;
456 + }
457 +
458 + return NOTIFY_DONE;
459 +
460 +found:
461 + if (!dsa_slave_dev_check(dev))
462 + return NOTIFY_DONE;
463 +
464 + if (__ethtool_get_link_ksettings(dev, &s))
465 + return NOTIFY_DONE;
466 +
467 + if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
468 + return NOTIFY_DONE;
469 +
470 + dp = dsa_port_from_netdev(dev);
471 + if (dp->index >= MTK_QDMA_NUM_QUEUES)
472 + return NOTIFY_DONE;
473 +
474 + mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
475 +
476 + return NOTIFY_DONE;
477 +}
478 +
479 static int mtk_open(struct net_device *dev)
480 {
481 struct mtk_mac *mac = netdev_priv(dev);
482 @@ -3048,7 +3203,8 @@ static int mtk_open(struct net_device *d
483 refcount_inc(&eth->dma_refcnt);
484
485 phylink_start(mac->phylink);
486 - netif_start_queue(dev);
487 + netif_tx_start_all_queues(dev);
488 +
489 return 0;
490 }
491
492 @@ -3757,8 +3913,12 @@ static int mtk_unreg_dev(struct mtk_eth
493 int i;
494
495 for (i = 0; i < MTK_MAC_COUNT; i++) {
496 + struct mtk_mac *mac;
497 if (!eth->netdev[i])
498 continue;
499 + mac = netdev_priv(eth->netdev[i]);
500 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
501 + unregister_netdevice_notifier(&mac->device_notifier);
502 unregister_netdev(eth->netdev[i]);
503 }
504
505 @@ -3975,6 +4135,23 @@ static int mtk_set_rxnfc(struct net_devi
506 return ret;
507 }
508
509 +static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
510 + struct net_device *sb_dev)
511 +{
512 + struct mtk_mac *mac = netdev_priv(dev);
513 + unsigned int queue = 0;
514 +
515 + if (netdev_uses_dsa(dev))
516 + queue = skb_get_queue_mapping(skb) + 3;
517 + else
518 + queue = mac->id;
519 +
520 + if (queue >= dev->num_tx_queues)
521 + queue = 0;
522 +
523 + return queue;
524 +}
525 +
526 static const struct ethtool_ops mtk_ethtool_ops = {
527 .get_link_ksettings = mtk_get_link_ksettings,
528 .set_link_ksettings = mtk_set_link_ksettings,
529 @@ -4009,6 +4186,7 @@ static const struct net_device_ops mtk_n
530 .ndo_setup_tc = mtk_eth_setup_tc,
531 .ndo_bpf = mtk_xdp,
532 .ndo_xdp_xmit = mtk_xdp_xmit,
533 + .ndo_select_queue = mtk_select_queue,
534 };
535
536 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
537 @@ -4018,6 +4196,7 @@ static int mtk_add_mac(struct mtk_eth *e
538 struct phylink *phylink;
539 struct mtk_mac *mac;
540 int id, err;
541 + int txqs = 1;
542
543 if (!_id) {
544 dev_err(eth->dev, "missing mac id\n");
545 @@ -4035,7 +4214,10 @@ static int mtk_add_mac(struct mtk_eth *e
546 return -EINVAL;
547 }
548
549 - eth->netdev[id] = alloc_etherdev(sizeof(*mac));
550 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
551 + txqs = MTK_QDMA_NUM_QUEUES;
552 +
553 + eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
554 if (!eth->netdev[id]) {
555 dev_err(eth->dev, "alloc_etherdev failed\n");
556 return -ENOMEM;
557 @@ -4143,6 +4325,11 @@ static int mtk_add_mac(struct mtk_eth *e
558 else
559 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
560
561 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
562 + mac->device_notifier.notifier_call = mtk_device_event;
563 + register_netdevice_notifier(&mac->device_notifier);
564 + }
565 +
566 return 0;
567
568 free_netdev:
569 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
570 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
571 @@ -22,6 +22,7 @@
572 #include <linux/bpf_trace.h>
573 #include "mtk_ppe.h"
574
575 +#define MTK_QDMA_NUM_QUEUES 16
576 #define MTK_QDMA_PAGE_SIZE 2048
577 #define MTK_MAX_RX_LENGTH 1536
578 #define MTK_MAX_RX_LENGTH_2K 2048
579 @@ -216,8 +217,26 @@
580 #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
581
582 /* QDMA TX Queue Configuration Registers */
583 +#define MTK_QTX_OFFSET 0x10
584 #define QDMA_RES_THRES 4
585
586 +/* QDMA Tx Queue Scheduler Configuration Registers */
587 +#define MTK_QTX_SCH_TX_SEL BIT(31)
588 +#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
589 +
590 +#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
591 +#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
592 +#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
593 +#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
594 +#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
595 +#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
596 +#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
597 +#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
598 +#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
599 +
600 +/* QDMA TX Scheduler Rate Control Register */
601 +#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
602 +
603 /* QDMA Global Configuration Register */
604 #define MTK_RX_2B_OFFSET BIT(31)
605 #define MTK_RX_BT_32DWORDS (3 << 11)
606 @@ -236,6 +255,7 @@
607 #define MTK_WCOMP_EN BIT(24)
608 #define MTK_RESV_BUF (0x40 << 16)
609 #define MTK_MUTLI_CNT (0x4 << 12)
610 +#define MTK_LEAKY_BUCKET_EN BIT(11)
611
612 /* QDMA Flow Control Register */
613 #define FC_THRES_DROP_MODE BIT(20)
614 @@ -266,8 +286,6 @@
615 #define MTK_STAT_OFFSET 0x40
616
617 /* QDMA TX NUM */
618 -#define MTK_QDMA_TX_NUM 16
619 -#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
620 #define QID_BITS_V2(x) (((x) & 0x3f) << 16)
621 #define MTK_QDMA_GMAC2_QID 8
622
623 @@ -297,6 +315,7 @@
624 #define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
625 #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
626 #define TX_DMA_SWC BIT(14)
627 +#define TX_DMA_PQID GENMASK(3, 0)
628
629 /* PDMA on MT7628 */
630 #define TX_DMA_DONE BIT(31)
631 @@ -957,6 +976,7 @@ struct mtk_reg_map {
632 } pdma;
633 struct {
634 u32 qtx_cfg; /* tx queue configuration */
635 + u32 qtx_sch; /* tx queue scheduler configuration */
636 u32 rx_ptr; /* rx base pointer */
637 u32 rx_cnt_cfg; /* rx max count configuration */
638 u32 qcrx_ptr; /* rx cpu pointer */
639 @@ -974,6 +994,7 @@ struct mtk_reg_map {
640 u32 fq_tail; /* fq tail pointer */
641 u32 fq_count; /* fq free page count */
642 u32 fq_blen; /* fq free page buffer length */
643 + u32 tx_sch_rate; /* tx scheduler rate control registers */
644 } qdma;
645 u32 gdm1_cnt;
646 u32 gdma_to_ppe;
647 @@ -1177,6 +1198,7 @@ struct mtk_mac {
648 __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
649 int hwlro_ip_cnt;
650 unsigned int syscfg0;
651 + struct notifier_block device_notifier;
652 };
653
654 /* the struct describing the SoC. these are declared in the soc_xyz.c files */