From e9229ffd550b2d8c4997c67a501dbc3919fd4e26 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 22 Apr 2021 22:21:02 -0700
Subject: [PATCH] net: ethernet: mtk_eth_soc: implement dynamic interrupt
 moderation
Reduces the number of interrupts under load
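In outline, the dimlib pattern this patch follows is: the NAPI poll path feeds
per-ring counters into a dim_sample and calls net_dim(), which may schedule
the dim work item; the work item then converts the moderation profile that
dimlib suggests into the hardware delay-interrupt settings. A condensed
sketch of that pattern (events/packets/bytes, dim_ctx, my_dim_work() and
my_hw_set_coalesce() are illustrative placeholders, not identifiers from this
driver):

	/* NAPI poll path: account completed work and feed dimlib */
	struct dim_sample sample = {};

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(&dim_ctx, sample);		/* may queue dim_ctx.work */

	/* deferred work: apply the profile dimlib suggests */
	static void my_dim_work(struct work_struct *work)
	{
		struct dim *dim = container_of(work, struct dim, work);
		struct dim_cq_moder m;

		m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
		my_hw_set_coalesce(m.usec, m.pkts);	/* placeholder */
		dim->state = DIM_START_MEASURE;
	}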
Signed-off-by: Felix Fietkau <nbd@nbd.name>
[Ilya: add documentation for new struct fields]
Signed-off-by: Ilya Lipnitskiy <ilya.lipnitskiy@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/mediatek/Kconfig       |  1 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 96 +++++++++++++++++++--
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 41 +++++++--
 3 files changed, 124 insertions(+), 14 deletions(-)
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
+ select DIMLIB
This driver supports the gigabit ethernet MACs in the
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1266,12 +1266,13 @@ static void mtk_update_rx_cpu_idx(struct
static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
struct mtk_rx_dma *rxd, trxd;
+ int done = 0, bytes = 0;
while (done < budget) {
struct net_device *netdev;
@@ -1348,6 +1349,7 @@ static int mtk_poll_rx(struct napi_struc
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
+ bytes += pktlen;
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
(trxd.rxd2 & RX_DMA_VTAG))
@@ -1380,6 +1382,12 @@ rx_done:
mtk_update_rx_cpu_idx(eth);
+ eth->rx_packets += done;
+ eth->rx_bytes += bytes;
+ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
+		   &dim_sample);
+ net_dim(&eth->rx_dim, dim_sample);
@@ -1472,6 +1480,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct dim_sample dim_sample = {};
unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
@@ -1489,8 +1498,14 @@ static int mtk_poll_tx(struct mtk_eth *e
netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+ eth->tx_packets += done[i];
+ eth->tx_bytes += bytes[i];
+ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
+		   &dim_sample);
+ net_dim(&eth->tx_dim, dim_sample);
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
@@ -2171,6 +2186,7 @@ static irqreturn_t mtk_handle_irq_rx(int
struct mtk_eth *eth = _eth;
+ eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
@@ -2183,6 +2199,7 @@ static irqreturn_t mtk_handle_irq_tx(int
struct mtk_eth *eth = _eth;
+ eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
__napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
@@ -2371,6 +2388,9 @@ static int mtk_stop(struct net_device *d
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
+ cancel_work_sync(&eth->rx_dim.work);
+ cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
@@ -2423,6 +2443,64 @@ err_disable_clks:
+static void mtk_dim_rx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
+					   dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_TX_MASK;
+ val |= MTK_PDMA_DELAY_RX_EN;
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ spin_unlock_bh(&eth->dim_lock);
+ dim->state = DIM_START_MEASURE;
+}
+static void mtk_dim_tx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
+					   dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+ val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val &= MTK_PDMA_DELAY_RX_MASK;
+ val |= MTK_PDMA_DELAY_TX_EN;
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
+ mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ spin_unlock_bh(&eth->dim_lock);
+ dim->state = DIM_START_MEASURE;
+}
static int mtk_hw_init(struct mtk_eth *eth)
@@ -2444,9 +2522,6 @@ static int mtk_hw_init(struct mtk_eth *e
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2485,11 +2560,11 @@ static int mtk_hw_init(struct mtk_eth *e
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- /* enable interrupt delay for RX */
- mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
/* disable delay and normal interrupt */
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
@@ -2994,6 +3069,13 @@ static int mtk_probe(struct platform_dev
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
+ spin_lock_init(&eth->dim_lock);
+ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
#include <linux/refcount.h>
#include <linux/phylink.h>
#include <linux/rhashtable.h>
+#include <linux/dim.h>
#define MTK_QDMA_PAGE_SIZE 2048
@@ -136,13 +137,18 @@
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
-#define MTK_PDMA_DELAY_RX_PINT 4
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
-#define MTK_PDMA_DELAY_RX_PTIME 4
-#define MTK_PDMA_DELAY_RX_DELAY \
- (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
+#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
+#define MTK_PDMA_DELAY_TX_EN BIT(31)
+#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
+#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
+#define MTK_PDMA_DELAY_PINT_MASK 0x7f
+#define MTK_PDMA_DELAY_PTIME_MASK 0xff
/* PDMA Interrupt Status Register */
#define MTK_PDMA_INT_STATUS 0xa20
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
+#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
/* QDMA Interrupt grouping registers */
#define MTK_QDMA_INT_GRP1 0x1a20
@@ -864,6 +870,7 @@ struct mtk_sgmii {
* @page_lock: Make sure that register operations are atomic
* @tx_irq__lock: Make sure that IRQ register operations are atomic
* @rx_irq__lock: Make sure that IRQ register operations are atomic
+ * @dim_lock: Make sure that Net DIM operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -882,6 +889,14 @@ struct mtk_sgmii {
* @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
* @tx_napi: The TX NAPI struct
* @rx_napi: The RX NAPI struct
+ * @rx_events: Net DIM RX event counter
+ * @rx_packets: Net DIM RX packet counter
+ * @rx_bytes: Net DIM RX byte counter
+ * @rx_dim: Net DIM RX context
+ * @tx_events: Net DIM TX event counter
+ * @tx_packets: Net DIM TX packet counter
+ * @tx_bytes: Net DIM TX byte counter
+ * @tx_dim: Net DIM TX context
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
@@ -926,6 +941,18 @@ struct mtk_eth {
const struct mtk_soc_data *soc;
+ spinlock_t dim_lock;
+
+ u32 rx_events;
+ u32 rx_packets;
+ u32 rx_bytes;
+ struct dim rx_dim;
+
+ u32 tx_events;
+ u32 tx_packets;
+ u32 tx_bytes;
+ struct dim tx_dim;
+
u32 tx_int_status_reg;