From 4feffeadbcb2e5b11cbbf191a33c245b74a5837b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
Date: Sun, 7 Feb 2021 23:26:32 +0100
Subject: [PATCH] net: broadcom: bcm4908enet: add BCM4908 controller driver
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The BCM4908 family of SoCs uses an Ethernet controller that includes
UniMAC but pairs it with a different DMA engine (than other
controllers) and so requires different programming.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 MAINTAINERS                                 |   9 +
 drivers/net/ethernet/broadcom/Kconfig       |   8 +
 drivers/net/ethernet/broadcom/Makefile      |   1 +
 drivers/net/ethernet/broadcom/bcm4908enet.c | 676 ++++++++++++++++++++
 drivers/net/ethernet/broadcom/bcm4908enet.h |  96 +++
 5 files changed, 790 insertions(+)
 create mode 100644 drivers/net/ethernet/broadcom/bcm4908enet.c
 create mode 100644 drivers/net/ethernet/broadcom/bcm4908enet.h
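Editor's note, not part of the upstream commit: the stand-alone C sketch
below models how this driver builds a DMA buffer descriptor control word
(see bcm4908enet_start_xmit() and bcm4908enet.h in the patch). The
DMA_CTL_* values are copied from the patch; the tx_desc_ctl() helper,
main() and the example frame length are illustrative only.

/* Illustrative only: compose a TX descriptor ctl word the way
 * bcm4908enet_start_xmit() does. Builds with any C compiler.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_CTL_STATUS_APPEND_CRC	0x00000100	/* HW appends the FCS */
#define DMA_CTL_STATUS_WRAP		0x00001000	/* last descriptor in ring */
#define DMA_CTL_STATUS_SOP		0x00002000	/* first buffer in packet */
#define DMA_CTL_STATUS_EOP		0x00004000	/* last buffer in packet */
#define DMA_CTL_STATUS_OWN		0x00008000	/* set by SW, cleared by DMA */
#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT	16

/* Build the ctl word for a packet that fits in a single buffer. */
static uint32_t tx_desc_ctl(uint32_t len, int last_desc_in_ring)
{
	uint32_t ctl = len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

	ctl |= DMA_CTL_STATUS_OWN;	/* hand the descriptor to the DMA engine */
	ctl |= DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP;
	ctl |= DMA_CTL_STATUS_APPEND_CRC;
	if (last_desc_in_ring)
		ctl |= DMA_CTL_STATUS_WRAP;	/* engine wraps back to descriptor 0 */

	return ctl;
}

int main(void)
{
	/* A 64-byte frame in a mid-ring descriptor: expect 0x0040e100. */
	printf("ctl = 0x%08" PRIx32 "\n", tx_desc_ctl(64, 0));
	return 0;
}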
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3208,6 +3208,15 @@ F: Documentation/devicetree/bindings/mip
 F:	arch/mips/bcm47xx/*
 F:	arch/mips/include/asm/mach-bcm47xx/*
 
+BROADCOM BCM4908 ETHERNET DRIVER
+M:	Rafał Miłecki <rafal@milecki.pl>
+M:	bcm-kernel-feedback-list@broadcom.com
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/brcm,bcm4908enet.yaml
+F:	drivers/net/ethernet/broadcom/bcm4908enet.*
+F:	drivers/net/ethernet/broadcom/unimac.h
+
 BROADCOM BCM5301X ARM ARCHITECTURE
 M:	Hauke Mehrtens <hauke@hauke-m.de>
 M:	Rafał Miłecki <zajec5@gmail.com>
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -51,6 +51,14 @@ config B44_PCI
 	depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
 	default y
 
+config BCM4908ENET
+	tristate "Broadcom BCM4908 internal mac support"
+	depends on ARCH_BCM4908 || COMPILE_TEST
+	default y if ARCH_BCM4908
+	help
+	  This driver supports the Ethernet controller integrated into
+	  Broadcom BCM4908 family SoCs.
+
 config BCM63XX_ENET
 	tristate "Broadcom 63xx internal mac support"
 	depends on BCM63XX
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
 #
 
 obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM4908ENET) += bcm4908enet.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
 obj-$(CONFIG_BCMGENET) += genet/
 obj-$(CONFIG_BNX2) += bnx2.o
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908enet.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "bcm4908enet.h"
+#include "unimac.h"
+
+#define ENET_DMA_CH_RX_CFG			ENET_DMA_CH0_CFG
+#define ENET_DMA_CH_TX_CFG			ENET_DMA_CH1_CFG
+#define ENET_DMA_CH_RX_STATE_RAM		ENET_DMA_CH0_STATE_RAM
+#define ENET_DMA_CH_TX_STATE_RAM		ENET_DMA_CH1_STATE_RAM
+
+#define ENET_TX_BDS_NUM				200
+#define ENET_RX_BDS_NUM				200
+#define ENET_RX_BDS_NUM_MAX			8192
+
+#define ENET_DMA_INT_DEFAULTS			(ENET_DMA_CH_CFG_INT_DONE | \
+						 ENET_DMA_CH_CFG_INT_NO_DESC | \
+						 ENET_DMA_CH_CFG_INT_BUFF_DONE)
+#define ENET_DMA_MAX_BURST_LEN			8 /* in 64 bit words */
+
+#define ENET_MTU_MIN				60
+#define ENET_MTU_MAX				1500 /* Is it possible to support 2044? */
+#define ENET_MTU_MAX_EXTRA_SIZE			32 /* L2 */
+
+struct bcm4908enet_dma_ring_bd {
+	__le32 ctl;
+	__le32 addr;
+} __packed;
+
+struct bcm4908enet_dma_ring_slot {
+	struct sk_buff *skb;
+	unsigned int len;
+	dma_addr_t dma_addr;
+};
+
+struct bcm4908enet_dma_ring {
+	int is_tx;
+	int read_idx;
+	int write_idx;
+	int length;
+	u16 cfg_block;
+	u16 st_ram_block;
+
+	union {
+		void *cpu_addr;
+		struct bcm4908enet_dma_ring_bd *buf_desc;
+	};
+	dma_addr_t dma_addr;
+
+	struct bcm4908enet_dma_ring_slot *slots;
+};
+
+struct bcm4908enet {
+	struct device *dev;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	void __iomem *base;
+
+	struct bcm4908enet_dma_ring tx_ring;
+	struct bcm4908enet_dma_ring rx_ring;
+};
+
+/***
+ * R/W ops
+ */
+
+static inline u32 enet_read(struct bcm4908enet *enet, u16 offset)
+{
+	return readl(enet->base + offset);
+}
+
+static inline void enet_write(struct bcm4908enet *enet, u16 offset, u32 value)
+{
+	writel(value, enet->base + offset);
+}
+
+static inline void enet_maskset(struct bcm4908enet *enet, u16 offset, u32 mask, u32 set)
+{
+	u32 val;
+
+	WARN_ON(set & ~mask);
+
+	val = enet_read(enet, offset);
+	val = (val & ~mask) | (set & mask);
+	enet_write(enet, offset, val);
+}
+
+static inline void enet_set(struct bcm4908enet *enet, u16 offset, u32 set)
+{
+	enet_maskset(enet, offset, set, set);
+}
+
+static inline u32 enet_umac_read(struct bcm4908enet *enet, u16 offset)
+{
+	return enet_read(enet, ENET_UNIMAC + offset);
+}
+
+static inline void enet_umac_write(struct bcm4908enet *enet, u16 offset, u32 value)
+{
+	enet_write(enet, ENET_UNIMAC + offset, value);
+}
+
+static inline void enet_umac_maskset(struct bcm4908enet *enet, u16 offset, u32 mask, u32 set)
+{
+	enet_maskset(enet, ENET_UNIMAC + offset, mask, set);
+}
+
+static inline void enet_umac_set(struct bcm4908enet *enet, u16 offset, u32 set)
+{
+	enet_set(enet, ENET_UNIMAC + offset, set);
+}
+
+/***
+ * Helpers
+ */
+
+static void bcm4908enet_intrs_on(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908enet_intrs_off(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+}
+
+static void bcm4908enet_intrs_ack(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+}
+
+/***
+ * DMA
+ */
+
+static int bcm4908_dma_alloc_buf_descs(struct bcm4908enet *enet, struct bcm4908enet_dma_ring *ring)
+{
+	int size = ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	struct device *dev = enet->dev;
+
+	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
+	if (!ring->cpu_addr)
+		return -ENOMEM;
+
+	if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
+		dev_err(dev, "Invalid DMA ring alignment\n");
+		goto err_free_buf_descs;
+	}
+
+	ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+	if (!ring->slots)
+		goto err_free_buf_descs;
+
+	memset(ring->cpu_addr, 0, size);
+
+	ring->read_idx = 0;
+	ring->write_idx = 0;
+
+	return 0;
+
+err_free_buf_descs:
+	dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+	return -ENOMEM;
+}
+
+static void bcm4908enet_dma_free(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int size;
+
+	size = rx_ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	if (rx_ring->cpu_addr)
+		dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
+	kfree(rx_ring->slots);
+
+	size = tx_ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	if (tx_ring->cpu_addr)
+		dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
+	kfree(tx_ring->slots);
+}
+
+static int bcm4908enet_dma_alloc(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int err;
+
+	tx_ring->length = ENET_TX_BDS_NUM;
+	tx_ring->is_tx = 1;
+	tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
+	tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
+	err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
+	if (err) {
+		dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
+		return err;
+	}
+
+	rx_ring->length = ENET_RX_BDS_NUM;
+	rx_ring->is_tx = 0;
+	rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
+	rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
+	err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
+	if (err) {
+		dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
+		bcm4908enet_dma_free(enet);
+		return err;
+	}
+
+	return 0;
+}
+
+static void bcm4908enet_dma_reset(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
+	int i;
+
+	/* Disable the DMA controller and channel */
+	for (i = 0; i < ARRAY_SIZE(rings); i++)
+		enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
+	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
+
+	/* Reset channels state */
+	for (i = 0; i < ARRAY_SIZE(rings); i++) {
+		struct bcm4908enet_dma_ring *ring = rings[i];
+
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
+	}
+}
+
+static int bcm4908enet_dma_alloc_rx_buf(struct bcm4908enet *enet, unsigned int idx)
+{
+	struct bcm4908enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
+	struct bcm4908enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
+	struct device *dev = enet->dev;
+	u32 tmp;
+	int err;
+
+	slot->len = ENET_MTU_MAX + ENET_MTU_MAX_EXTRA_SIZE;
+
+	slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
+	if (!slot->skb)
+		return -ENOMEM;
+
+	slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+	err = dma_mapping_error(dev, slot->dma_addr);
+	if (err) {
+		dev_err(dev, "Failed to map DMA buffer: %d\n", err);
+		kfree_skb(slot->skb);
+		slot->skb = NULL;
+		return err;
+	}
+
+	tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+	tmp |= DMA_CTL_STATUS_OWN;
+	if (idx == enet->rx_ring.length - 1)
+		tmp |= DMA_CTL_STATUS_WRAP;
+	buf_desc->ctl = cpu_to_le32(tmp);
+	buf_desc->addr = cpu_to_le32(slot->dma_addr);
+
+	return 0;
+}
+
+static void bcm4908enet_dma_ring_init(struct bcm4908enet *enet,
+				      struct bcm4908enet_dma_ring *ring)
+{
+	int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
+	int reset_subch = ring->is_tx ? 1 : 0;
+
+	/* Reset the DMA channel */
+	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
+	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
+
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
+
+	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+		   (uint32_t)ring->dma_addr);
+}
+
+static void bcm4908enet_dma_uninit(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct bcm4908enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	int i;
+
+	for (i = rx_ring->length - 1; i >= 0; i--) {
+		slot = &rx_ring->slots[i];
+
+		if (slot->skb) {
+			dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
+			kfree_skb(slot->skb);
+		}
+	}
+}
+
+static int bcm4908enet_dma_init(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int err;
+	int i;
+
+	for (i = 0; i < rx_ring->length; i++) {
+		err = bcm4908enet_dma_alloc_rx_buf(enet, i);
+		if (err) {
+			dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
+			bcm4908enet_dma_uninit(enet);
+			return err;
+		}
+	}
+
+	bcm4908enet_dma_ring_init(enet, &enet->tx_ring);
+	bcm4908enet_dma_ring_init(enet, &enet->rx_ring);
+
+	return 0;
+}
+
+static void bcm4908enet_dma_tx_ring_enable(struct bcm4908enet *enet,
+					   struct bcm4908enet_dma_ring *ring)
+{
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908enet_dma_tx_ring_disable(struct bcm4908enet *enet,
+					    struct bcm4908enet_dma_ring *ring)
+{
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+}
+
+static void bcm4908enet_dma_rx_ring_enable(struct bcm4908enet *enet,
+					   struct bcm4908enet_dma_ring *ring)
+{
+	enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908enet_dma_rx_ring_disable(struct bcm4908enet *enet,
+					    struct bcm4908enet_dma_ring *ring)
+{
+	unsigned long deadline;
+	u32 tmp;
+
+	enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+
+	deadline = jiffies + usecs_to_jiffies(2000);
+	do {
+		tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
+		if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
+			return;
+		enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+		usleep_range(10, 30);
+	} while (!time_after_eq(jiffies, deadline));
+
+	dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
+}
+
+/***
+ * Ethernet driver
+ */
+
+static void bcm4908enet_gmac_init(struct bcm4908enet *enet)
+{
+	u32 cmd;
+
+	cmd = enet_umac_read(enet, UMAC_CMD);
+	enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
+	enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
+
+	enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
+	enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
+
+	enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
+	enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
+
+	cmd = enet_umac_read(enet, UMAC_CMD);
+	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
+	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
+	enet_umac_write(enet, UMAC_CMD, cmd);
+
+	enet_maskset(enet, ENET_GMAC_STATUS,
+		     ENET_GMAC_STATUS_ETH_SPEED_MASK |
+		     ENET_GMAC_STATUS_HD |
+		     ENET_GMAC_STATUS_AUTO_CFG_EN |
+		     ENET_GMAC_STATUS_LINK_UP,
+		     ENET_GMAC_STATUS_ETH_SPEED_1000 |
+		     ENET_GMAC_STATUS_AUTO_CFG_EN |
+		     ENET_GMAC_STATUS_LINK_UP);
+}
+
+static irqreturn_t bcm4908enet_irq_handler(int irq, void *dev_id)
+{
+	struct bcm4908enet *enet = dev_id;
+
+	bcm4908enet_intrs_off(enet);
+	bcm4908enet_intrs_ack(enet);
+
+	napi_schedule(&enet->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm4908enet_open(struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+	struct device *dev = enet->dev;
+	int err;
+
+	err = request_irq(netdev->irq, bcm4908enet_irq_handler, 0, "enet", enet);
+	if (err) {
+		dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
+		return err;
+	}
+
+	bcm4908enet_gmac_init(enet);
+	bcm4908enet_dma_reset(enet);
+	bcm4908enet_dma_init(enet);
+
+	enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
+
+	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
+	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
+	bcm4908enet_dma_rx_ring_enable(enet, &enet->rx_ring);
+
+	napi_enable(&enet->napi);
+	netif_carrier_on(netdev);
+	netif_start_queue(netdev);
+
+	bcm4908enet_intrs_ack(enet);
+	bcm4908enet_intrs_on(enet);
+
+	return 0;
+}
+
+static int bcm4908enet_stop(struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	napi_disable(&enet->napi);
+
+	bcm4908enet_dma_rx_ring_disable(enet, &enet->rx_ring);
+	bcm4908enet_dma_tx_ring_disable(enet, &enet->tx_ring);
+
+	bcm4908enet_dma_uninit(enet);
+
+	free_irq(enet->netdev->irq, enet);
+
+	return 0;
+}
+
+static int bcm4908enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+	struct bcm4908enet_dma_ring *ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	struct bcm4908enet_dma_ring_bd *buf_desc;
+	int free_buf_descs;
+	u32 tmp;
+
+	/* Free transmitted skbs */
+	while (ring->read_idx != ring->write_idx) {
+		buf_desc = &ring->buf_desc[ring->read_idx];
+		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+			break;
+		slot = &ring->slots[ring->read_idx];
+
+		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+		dev_kfree_skb(slot->skb);
+		if (++ring->read_idx == ring->length)
+			ring->read_idx = 0;
+	}
+
+	/* Don't use the last empty buf descriptor */
+	if (ring->read_idx <= ring->write_idx)
+		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
+	else
+		free_buf_descs = ring->read_idx - ring->write_idx;
+	if (free_buf_descs < 2)
+		return NETDEV_TX_BUSY;
+
+	/* Hardware removes OWN bit after sending data */
+	buf_desc = &ring->buf_desc[ring->write_idx];
+	if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
+	slot = &ring->slots[ring->write_idx];
+	slot->skb = skb;
+	slot->len = skb->len;
+	slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
+		return NETDEV_TX_BUSY;
+
+	tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+	tmp |= DMA_CTL_STATUS_OWN;
+	tmp |= DMA_CTL_STATUS_SOP;
+	tmp |= DMA_CTL_STATUS_EOP;
+	tmp |= DMA_CTL_STATUS_APPEND_CRC;
+	if (ring->write_idx + 1 == ring->length - 1)
+		tmp |= DMA_CTL_STATUS_WRAP;
+
+	buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
+	buf_desc->ctl = cpu_to_le32(tmp);
+
+	bcm4908enet_dma_tx_ring_enable(enet, &enet->tx_ring);
+
+	if (++ring->write_idx == ring->length - 1)
+		ring->write_idx = 0;
+	enet->netdev->stats.tx_bytes += skb->len;
+	enet->netdev->stats.tx_packets++;
+
+	return NETDEV_TX_OK;
+}
+
+static int bcm4908enet_poll(struct napi_struct *napi, int weight)
+{
+	struct bcm4908enet *enet = container_of(napi, struct bcm4908enet, napi);
+	struct device *dev = enet->dev;
+	int handled = 0;
+
+	while (handled < weight) {
+		struct bcm4908enet_dma_ring_bd *buf_desc;
+		struct bcm4908enet_dma_ring_slot slot;
+		u32 ctl;
+		int len;
+		int err;
+
+		buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
+		ctl = le32_to_cpu(buf_desc->ctl);
+		if (ctl & DMA_CTL_STATUS_OWN)
+			break;
+
+		slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
+
+		/* Provide new buffer before unpinning the old one */
+		err = bcm4908enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
+		if (err)
+			break;
+
+		if (++enet->rx_ring.read_idx == enet->rx_ring.length)
+			enet->rx_ring.read_idx = 0;
+
+		len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+
+		if (len < ENET_MTU_MIN ||
+		    (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+			enet->netdev->stats.rx_dropped++;
+			break;
+		}
+
+		dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+
+		skb_put(slot.skb, len - 4 + 2);
+		slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
+		netif_receive_skb(slot.skb);
+
+		enet->netdev->stats.rx_packets++;
+		enet->netdev->stats.rx_bytes += len;
+		handled++;
+	}
+
+	if (handled < weight) {
+		napi_complete_done(napi, handled);
+		bcm4908enet_intrs_on(enet);
+	}
+
+	return handled;
+}
+
+static const struct net_device_ops bcm96xx_netdev_ops = {
+	.ndo_open = bcm4908enet_open,
+	.ndo_stop = bcm4908enet_stop,
+	.ndo_start_xmit = bcm4908enet_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+static int bcm4908enet_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *netdev;
+	struct bcm4908enet *enet;
+	int err;
+
+	netdev = devm_alloc_etherdev(dev, sizeof(*enet));
+	if (!netdev)
+		return -ENOMEM;
+
+	enet = netdev_priv(netdev);
+	enet->dev = dev;
+	enet->netdev = netdev;
+
+	enet->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(enet->base)) {
+		dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
+		return PTR_ERR(enet->base);
+	}
+
+	netdev->irq = platform_get_irq_byname(pdev, "rx");
+	if (netdev->irq < 0)
+		return netdev->irq;
+
+	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+	err = bcm4908enet_dma_alloc(enet);
+	if (err)
+		return err;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	eth_hw_addr_random(netdev);
+	netdev->netdev_ops = &bcm96xx_netdev_ops;
+	netdev->min_mtu = ETH_ZLEN;
+	netdev->mtu = ENET_MTU_MAX;
+	netdev->max_mtu = ENET_MTU_MAX;
+	netif_napi_add(netdev, &enet->napi, bcm4908enet_poll, 64);
+
+	err = register_netdev(netdev);
+	if (err) {
+		bcm4908enet_dma_free(enet);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, enet);
+
+	return 0;
+}
+
+static int bcm4908enet_remove(struct platform_device *pdev)
+{
+	struct bcm4908enet *enet = platform_get_drvdata(pdev);
+
+	unregister_netdev(enet->netdev);
+	netif_napi_del(&enet->napi);
+	bcm4908enet_dma_free(enet);
+
+	return 0;
+}
+
+static const struct of_device_id bcm4908enet_of_match[] = {
+	{ .compatible = "brcm,bcm4908enet"},
+	{},
+};
+
+static struct platform_driver bcm4908enet_driver = {
+	.driver = {
+		.name = "bcm4908enet",
+		.of_match_table = bcm4908enet_of_match,
+	},
+	.probe = bcm4908enet_probe,
+	.remove = bcm4908enet_remove,
+};
+module_platform_driver(bcm4908enet_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm4908enet_of_match);
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __BCM4908ENET_H
+#define __BCM4908ENET_H
+
+#define ENET_CONTROL 0x000
+#define ENET_MIB_CTRL 0x004
+#define ENET_MIB_CTRL_CLR_MIB 0x00000001
+#define ENET_RX_ERR_MASK 0x008
+#define ENET_MIB_MAX_PKT_SIZE 0x00C
+#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
+#define ENET_DIAG_OUT 0x01c
+#define ENET_ENABLE_DROP_PKT 0x020
+#define ENET_IRQ_ENABLE 0x024
+#define ENET_IRQ_ENABLE_OVFL 0x00000001
+#define ENET_GMAC_STATUS 0x028
+#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
+#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
+#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
+#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
+#define ENET_GMAC_STATUS_HD 0x00000004
+#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
+#define ENET_GMAC_STATUS_LINK_UP 0x00000010
+#define ENET_IRQ_STATUS 0x02c
+#define ENET_IRQ_STATUS_OVFL 0x00000001
+#define ENET_OVERFLOW_COUNTER 0x030
+#define ENET_FLUSH 0x034
+#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
+#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
+#define ENET_RSV_SELECT 0x038
+#define ENET_BP_FORCE 0x03c
+#define ENET_BP_FORCE_FORCE 0x00000001
+#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
+#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
+#define ENET_TX_CRC_CTRL 0x044
+#define ENET_MIB 0x200
+#define ENET_UNIMAC 0x400
+#define ENET_DMA 0x800
+#define ENET_DMA_CONTROLLER_CFG 0x800
+#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
+#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
+#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
+#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
+#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
+#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
+#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
+#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
+#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
+#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
+#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
+#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
+#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
+#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
+#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
+#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
+#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
+#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
+#define ENET_DMA_CH0_CFG 0xa00 /* RX */
+#define ENET_DMA_CH1_CFG 0xa10 /* TX */
+#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
+#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
+
+#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
+#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
+#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
+#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finishing the current memory burst */
+#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
+#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
+#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
+#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
+#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
+#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detected client protocol error */
+#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
+#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA descriptor size selection */
+#define ENET_DMA_CH_CFG_SIZE 0x10
+
+#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
+#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
+#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
+#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descriptor currently being processed */
+#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
+
+#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
+#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
+#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
+#define DMA_CTL_STATUS_WRAP 0x00001000 /* last descriptor in ring */
+#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
+#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
+#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
+#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
+#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
+#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
+#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
+
+#endif /* __BCM4908ENET_H */