From 12bb508bfe5a564c36864b12253db23cac83bfa1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Thu, 11 Mar 2021 13:35:21 +0100
Subject: [PATCH] net: broadcom: bcm4908_enet: support TX interrupt
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It appears that each DMA channel has its own interrupt and both rings
can be configured (the same way) to handle interrupts.

1. Make the ring interrupt code generic (have it operate on a given ring)
2. Move NAPI to the ring (so each ring has its own)
3. Make the IRQ handler generic (match the ring against the received IRQ number)
4. Add (optional) support for the TX interrupt

A standalone sketch of the resulting dispatch follows the "---" separator below.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
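A minimal sketch of the per-ring IRQ dispatch this patch introduces, assuming
one shared handler registered for both IRQ lines with the same dev_id. The
"demo_*" names are illustrative stand-ins, not the driver's actual types; only
the irq == irq_tx matching and the per-ring NAPI mirror the patch itself.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_ring {
	struct napi_struct napi;	/* each ring now owns its NAPI context */
};

struct demo_priv {
	int irq_tx;			/* negative when no "tx" IRQ was found */
	struct demo_ring tx_ring;
	struct demo_ring rx_ring;
};

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;
	struct demo_ring *ring;

	/* Both IRQs were requested with the same dev_id, so the IRQ
	 * number alone identifies which ring fired. */
	ring = (irq == priv->irq_tx) ? &priv->tx_ring : &priv->rx_ring;

	/* The real handler also masks and acks this ring's interrupts
	 * here before kicking its NAPI poll. */
	napi_schedule(&ring->napi);

	return IRQ_HANDLED;
}

Because request_irq() passes the same dev_id for every line, no extra lookup
state is needed, and the TX side stays optional: when the platform provides no
"tx" interrupt, irq_tx is negative, the comparison never matches, and the xmit
path schedules the TX NAPI itself.
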
 drivers/net/ethernet/broadcom/bcm4908_enet.c | 138 ++++++++++++++-----
 1 file changed, 103 insertions(+), 35 deletions(-)

--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -54,6 +54,7 @@ struct bcm4908_enet_dma_ring {
+	struct napi_struct napi;
@@ -67,8 +68,8 @@ struct bcm4908_enet_dma_ring {
 	struct net_device *netdev;
-	struct napi_struct napi;
 	void __iomem *base;
+	int irq_tx;
 
 	struct bcm4908_enet_dma_ring tx_ring;
 	struct bcm4908_enet_dma_ring rx_ring;
@@ -123,24 +124,31 @@ static void enet_umac_set(struct bcm4908
-static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
 }
 
-static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
+					   struct bcm4908_enet_dma_ring *ring)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
 }
 
-static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
+					    struct bcm4908_enet_dma_ring *ring)
 {
-	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
 }
 
-static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
+					    struct bcm4908_enet_dma_ring *ring)
 {
-	enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
 }
@@ -414,11 +422,14 @@ static void bcm4908_enet_gmac_init(struc
 static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
 {
 	struct bcm4908_enet *enet = dev_id;
+	struct bcm4908_enet_dma_ring *ring;
 
-	bcm4908_enet_intrs_off(enet);
-	bcm4908_enet_intrs_ack(enet);
+	ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;
 
-	napi_schedule(&enet->napi);
+	bcm4908_enet_dma_ring_intrs_off(enet, ring);
+	bcm4908_enet_dma_ring_intrs_ack(enet, ring);
+
+	napi_schedule(&ring->napi);
 
 	return IRQ_HANDLED;
 }
@@ -426,6 +437,8 @@ static irqreturn_t bcm4908_enet_irq_hand
 static int bcm4908_enet_open(struct net_device *netdev)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
+	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
 	struct device *dev = enet->dev;
 	int err;
@@ -435,6 +448,17 @@ static int bcm4908_enet_open(struct net_
+	if (enet->irq_tx > 0) {
+		err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
+				  "tx", enet);
+		if (err) {
+			dev_err(dev, "Failed to request IRQ %d: %d\n",
+				enet->irq_tx, err);
+			free_irq(netdev->irq, enet);
+			return err;
+		}
+	}
+
 	bcm4908_enet_gmac_init(enet);
 	bcm4908_enet_dma_reset(enet);
 	bcm4908_enet_dma_init(enet);
@@ -443,14 +467,19 @@ static int bcm4908_enet_open(struct net_
 	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
 	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
-	bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
 
-	napi_enable(&enet->napi);
+	if (enet->irq_tx > 0) {
+		napi_enable(&tx_ring->napi);
+		bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
+		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+	}
+
+	bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
+	napi_enable(&rx_ring->napi);
 	netif_carrier_on(netdev);
 	netif_start_queue(netdev);
 
-	bcm4908_enet_intrs_ack(enet);
-	bcm4908_enet_intrs_on(enet);
+	bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
+	bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
@@ -458,16 +487,20 @@ static int bcm4908_enet_open(struct net_
 static int bcm4908_enet_stop(struct net_device *netdev)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
+	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
 
 	netif_stop_queue(netdev);
 	netif_carrier_off(netdev);
-	napi_disable(&enet->napi);
+	napi_disable(&rx_ring->napi);
+	napi_disable(&tx_ring->napi);
 
 	bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
 	bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
 
 	bcm4908_enet_dma_uninit(enet);
 
+	free_irq(enet->irq_tx, enet);
 	free_irq(enet->netdev->irq, enet);
@@ -484,25 +517,19 @@ static int bcm4908_enet_start_xmit(struc
 	/* Free transmitted skbs */
-	while (ring->read_idx != ring->write_idx) {
-		buf_desc = &ring->buf_desc[ring->read_idx];
-		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
-			break;
-		slot = &ring->slots[ring->read_idx];
-
-		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
-		dev_kfree_skb(slot->skb);
-		if (++ring->read_idx == ring->length)
-			ring->read_idx = 0;
-	}
+	if (enet->irq_tx < 0 &&
+	    !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
+		napi_schedule(&enet->tx_ring.napi);
 
 	/* Don't use the last empty buf descriptor */
 	if (ring->read_idx <= ring->write_idx)
 		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
 	else
 		free_buf_descs = ring->read_idx - ring->write_idx;
-	if (free_buf_descs < 2)
+	if (free_buf_descs < 2) {
+		netif_stop_queue(netdev);
 		return NETDEV_TX_BUSY;
+	}
 
 	/* Hardware removes OWN bit after sending data */
 	buf_desc = &ring->buf_desc[ring->write_idx];
@@ -539,9 +566,10 @@ static int bcm4908_enet_start_xmit(struc
-static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
 {
-	struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+	struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+	struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
 	struct device *dev = enet->dev;
@@ -590,7 +618,7 @@ static int bcm4908_enet_poll(struct napi
 	if (handled < weight) {
 		napi_complete_done(napi, handled);
-		bcm4908_enet_intrs_on(enet);
+		bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
 	}
 
 	/* Hardware could disable ring if it run out of descriptors */
@@ -599,6 +627,42 @@ static int bcm4908_enet_poll(struct napi
+static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
+{
+	struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+	struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
+	struct bcm4908_enet_dma_ring_bd *buf_desc;
+	struct bcm4908_enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	unsigned int bytes = 0;
+	int handled = 0;
+
+	while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
+		buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
+		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+			break;
+		slot = &tx_ring->slots[tx_ring->read_idx];
+
+		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+		dev_kfree_skb(slot->skb);
+		bytes += slot->len;
+		if (++tx_ring->read_idx == tx_ring->length)
+			tx_ring->read_idx = 0;
+
+		handled++;
+	}
+
+	if (handled < weight) {
+		napi_complete_done(napi, handled);
+		bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+	}
+
+	if (netif_queue_stopped(enet->netdev))
+		netif_wake_queue(enet->netdev);
+
+	return handled;
+}
+
 static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct bcm4908_enet *enet = netdev_priv(netdev);
@@ -642,6 +706,8 @@ static int bcm4908_enet_probe(struct pla
+	enet->irq_tx = platform_get_irq_byname(pdev, "tx");
+
 	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 
 	err = bcm4908_enet_dma_alloc(enet);
@@ -658,7 +724,8 @@ static int bcm4908_enet_probe(struct pla
 	netdev->min_mtu = ETH_ZLEN;
 	netdev->mtu = ETH_DATA_LEN;
 	netdev->max_mtu = ENET_MTU_MAX;
-	netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+	netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
+	netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
 
 	err = register_netdev(netdev);
@@ -676,7 +743,8 @@ static int bcm4908_enet_remove(struct pl
 	struct bcm4908_enet *enet = platform_get_drvdata(pdev);
 
 	unregister_netdev(enet->netdev);
-	netif_napi_del(&enet->napi);
+	netif_napi_del(&enet->rx_ring.napi);
+	netif_napi_del(&enet->tx_ring.napi);
 	bcm4908_enet_dma_free(enet);