From 12bb508bfe5a564c36864b12253db23cac83bfa1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Thu, 11 Mar 2021 13:35:21 +0100
Subject: [PATCH] net: broadcom: bcm4908_enet: support TX interrupt
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It appears that each DMA channel has its own interrupt and both rings
can be configured (the same way) to handle interrupts.

1. Make ring interrupts code generic (make it operate on given ring)
2. Move napi to ring (so each has its own)
3. Make IRQ handler generic (match ring against received IRQ number)
4. Add (optional) support for TX interrupt

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/broadcom/bcm4908_enet.c | 138 ++++++++++++++-----
 1 file changed, 103 insertions(+), 35 deletions(-)

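With this change the TX interrupt is optional: probe looks it up by name ("tx"),
and when it is absent the xmit path keeps scheduling the TX NAPI to reclaim
completed buffers. As a rough sketch only, a device tree node wired up for both
interrupts could look like the fragment below; the "tx" name matches the lookup
added in this patch, while the unit address, interrupt numbers and the "rx" name
are illustrative placeholders, not values taken from this patch:

	ethernet@2000 {
		compatible = "brcm,bcm4908-enet";
		reg = <0x2000 0x1000>;	/* placeholder address/size */

		/* placeholder interrupt specifiers */
		interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "rx", "tx";
	};
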
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -54,6 +54,7 @@ struct bcm4908_enet_dma_ring {
 	int length;
 	u16 cfg_block;
 	u16 st_ram_block;
+	struct napi_struct napi;
 
 	union {
 		void *cpu_addr;
@@ -67,8 +68,8 @@ struct bcm4908_enet_dma_ring
 struct bcm4908_enet {
 	struct device *dev;
 	struct net_device *netdev;
-	struct napi_struct napi;
 	void __iomem *base;
+	int irq_tx;
 
 	struct bcm4908_enet_dma_ring tx_ring;
 	struct bcm4908_enet_dma_ring rx_ring;
@@ -123,24 +124,31 @@ static void enet_umac_set(struct bcm4908
  * Helpers
  */
 
-static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
 }
 
-static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+/***
+ * DMA ring ops
+ */
+
+static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
+					   struct bcm4908_enet_dma_ring *ring)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
 }
 
-static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
+					    struct bcm4908_enet_dma_ring *ring)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
 }
 
-static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
+					    struct bcm4908_enet_dma_ring *ring)
 {
-	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
 }
 
 /***
@@ -414,11 +422,14 @@ static void bcm4908_enet_gmac_init(struc
 static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
 {
 	struct bcm4908_enet *enet = dev_id;
+	struct bcm4908_enet_dma_ring *ring;
 
-	bcm4908_enet_intrs_off(enet);
-	bcm4908_enet_intrs_ack(enet);
+	ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;
 
-	napi_schedule(&enet->napi);
+	bcm4908_enet_dma_ring_intrs_off(enet, ring);
+	bcm4908_enet_dma_ring_intrs_ack(enet, ring);
+
+	napi_schedule(&ring->napi);
 
 	return IRQ_HANDLED;
 }
@@ -426,6 +437,8 @@ static irqreturn_t bcm4908_enet_irq_hand
 static int bcm4908_enet_open(struct net_device *netdev)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
+	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
 	struct device *dev = enet->dev;
 	int err;
 
@@ -435,6 +448,17 @@ static int bcm4908_enet_open(struct net_
 		return err;
 	}
 
+	if (enet->irq_tx > 0) {
+		err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
+				  "tx", enet);
+		if (err) {
+			dev_err(dev, "Failed to request IRQ %d: %d\n",
+				enet->irq_tx, err);
+			free_irq(netdev->irq, enet);
+			return err;
+		}
+	}
+
 	bcm4908_enet_gmac_init(enet);
 	bcm4908_enet_dma_reset(enet);
 	bcm4908_enet_dma_init(enet);
@@ -443,14 +467,19 @@ static int bcm4908_enet_open(struct net_
 
 	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
 	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
-	bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
 
-	napi_enable(&enet->napi);
+	if (enet->irq_tx > 0) {
+		napi_enable(&tx_ring->napi);
+		bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
+		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+	}
+
+	bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
+	napi_enable(&rx_ring->napi);
 	netif_carrier_on(netdev);
 	netif_start_queue(netdev);
-
-	bcm4908_enet_intrs_ack(enet);
-	bcm4908_enet_intrs_on(enet);
+	bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
+	bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
 
 	return 0;
 }
@@ -458,16 +487,20 @@ static int bcm4908_enet_open(struct net_
 static int bcm4908_enet_stop(struct net_device *netdev)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
+	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
 
 	netif_stop_queue(netdev);
 	netif_carrier_off(netdev);
-	napi_disable(&enet->napi);
+	napi_disable(&rx_ring->napi);
+	napi_disable(&tx_ring->napi);
 
 	bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
 	bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
 
 	bcm4908_enet_dma_uninit(enet);
 
+	free_irq(enet->irq_tx, enet);
 	free_irq(enet->netdev->irq, enet);
 
 	return 0;
@@ -484,25 +517,19 @@ static int bcm4908_enet_start_xmit(struc
 	u32 tmp;
 
 	/* Free transmitted skbs */
-	while (ring->read_idx != ring->write_idx) {
-		buf_desc = &ring->buf_desc[ring->read_idx];
-		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
-			break;
-		slot = &ring->slots[ring->read_idx];
-
-		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
-		dev_kfree_skb(slot->skb);
-		if (++ring->read_idx == ring->length)
-			ring->read_idx = 0;
-	}
+	if (enet->irq_tx < 0 &&
+	    !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
+		napi_schedule(&enet->tx_ring.napi);
 
 	/* Don't use the last empty buf descriptor */
 	if (ring->read_idx <= ring->write_idx)
 		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
 	else
 		free_buf_descs = ring->read_idx - ring->write_idx;
-	if (free_buf_descs < 2)
+	if (free_buf_descs < 2) {
+		netif_stop_queue(netdev);
 		return NETDEV_TX_BUSY;
+	}
 
 	/* Hardware removes OWN bit after sending data */
 	buf_desc = &ring->buf_desc[ring->write_idx];
@@ -539,9 +566,10 @@ static int bcm4908_enet_start_xmit(struc
 	return NETDEV_TX_OK;
 }
 
-static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
 {
-	struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+	struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+	struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
 	struct device *dev = enet->dev;
 	int handled = 0;
 
@@ -590,7 +618,7 @@ static int bcm4908_enet_poll(struct napi
 
 	if (handled < weight) {
 		napi_complete_done(napi, handled);
-		bcm4908_enet_intrs_on(enet);
+		bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
 	}
 
 	/* Hardware could disable ring if it run out of descriptors */
@@ -599,6 +627,42 @@ static int bcm4908_enet_poll(struct napi
 	return handled;
 }
 
+static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
+{
+	struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+	struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
+	struct bcm4908_enet_dma_ring_bd *buf_desc;
+	struct bcm4908_enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	unsigned int bytes = 0;
+	int handled = 0;
+
+	while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
+		buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
+		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+			break;
+		slot = &tx_ring->slots[tx_ring->read_idx];
+
+		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+		dev_kfree_skb(slot->skb);
+		bytes += slot->len;
+		if (++tx_ring->read_idx == tx_ring->length)
+			tx_ring->read_idx = 0;
+
+		handled++;
+	}
+
+	if (handled < weight) {
+		napi_complete_done(napi, handled);
+		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+	}
+
+	if (netif_queue_stopped(enet->netdev))
+		netif_wake_queue(enet->netdev);
+
+	return handled;
+}
+
 static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
@@ -641,6 +705,8 @@ static int bcm4908_enet_probe(struct pla
 	if (netdev->irq < 0)
 		return netdev->irq;
 
+	enet->irq_tx = platform_get_irq_byname(pdev, "tx");
+
 	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 
 	err = bcm4908_enet_dma_alloc(enet);
@@ -655,7 +721,8 @@ static int bcm4908_enet_probe(struct pla
 	netdev->min_mtu = ETH_ZLEN;
 	netdev->mtu = ETH_DATA_LEN;
 	netdev->max_mtu = ENET_MTU_MAX;
-	netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+	netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
+	netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
 
 	err = register_netdev(netdev);
 	if (err) {
@@ -673,7 +740,8 @@ static int bcm4908_enet_remove(struct pl
 	struct bcm4908_enet *enet = platform_get_drvdata(pdev);
 
 	unregister_netdev(enet->netdev);
-	netif_napi_del(&enet->napi);
+	netif_napi_del(&enet->rx_ring.napi);
+	netif_napi_del(&enet->tx_ring.napi);
 	bcm4908_enet_dma_free(enet);
 
 	return 0;