1 From f9246c9597e89510ae016c33ffa3b367ed83cf2d Mon Sep 17 00:00:00 2001
2 From: Pavithra R <quic_pavir@quicinc.com>
3 Date: Wed, 28 Feb 2024 11:25:15 +0530
4 Subject: [PATCH 38/50] net: ethernet: qualcomm: Add EDMA support for QCOM
7 Add the supporting infrastructure such as the Makefile entries,
8 EDMA hardware configuration, and clock and IRQ initializations.
10 Change-Id: I64f65e554e70e9095b0cf3636fec421569ae6895
11 Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
12 Co-developed-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
13 Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
15 drivers/net/ethernet/qualcomm/ppe/Makefile | 3 +
16 drivers/net/ethernet/qualcomm/ppe/edma.c | 456 +++++++++++++++++++
17 drivers/net/ethernet/qualcomm/ppe/edma.h | 99 ++++
18 drivers/net/ethernet/qualcomm/ppe/ppe.c | 10 +-
19 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 253 ++++++++++
20 5 files changed, 820 insertions(+), 1 deletion(-)
21 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.c
22 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma.h
24 diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
25 index 76cdc423a8cc..7fea135ceb36 100644
26 --- a/drivers/net/ethernet/qualcomm/ppe/Makefile
27 +++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
30 obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
31 qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o
34 +qcom-ppe-objs += edma.o
35 \ No newline at end of file
36 diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
38 index 000000000000..d7bf1f39e9e1
40 +++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
42 +// SPDX-License-Identifier: GPL-2.0-only
43 + /* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
46 + /* Qualcomm Ethernet DMA driver setup, HW configuration, clocks and
47 + * interrupt initializations.
50 +#include <linux/clk.h>
51 +#include <linux/delay.h>
52 +#include <linux/kernel.h>
53 +#include <linux/module.h>
54 +#include <linux/netdevice.h>
55 +#include <linux/of_irq.h>
56 +#include <linux/platform_device.h>
57 +#include <linux/printk.h>
58 +#include <linux/regmap.h>
59 +#include <linux/reset.h>
62 +#include "ppe_regs.h"
64 +#define EDMA_IRQ_NAME_SIZE 32
66 +/* Global EDMA context. */
67 +struct edma_context *edma_ctx;
69 +/* Priority to multi-queue mapping. */
70 +static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
71 + 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7};
79 +static const char * const clock_name[EDMA_CLK_MAX] = {
80 + [EDMA_CLK] = "edma",
81 + [EDMA_CFG_CLK] = "edma-cfg",
84 +/* Rx Fill ring info for IPQ9574. */
85 +static struct edma_ring_info ipq9574_rxfill_ring_info = {
91 +/* Rx ring info for IPQ9574. */
92 +static struct edma_ring_info ipq9574_rx_ring_info = {
98 +/* Tx ring info for IPQ9574. */
99 +static struct edma_ring_info ipq9574_tx_ring_info = {
105 +/* Tx complete ring info for IPQ9574. */
106 +static struct edma_ring_info ipq9574_txcmpl_ring_info = {
112 +/* HW info for IPQ9574. */
113 +static struct edma_hw_info ipq9574_hw_info = {
114 + .rxfill = &ipq9574_rxfill_ring_info,
115 + .rx = &ipq9574_rx_ring_info,
116 + .tx = &ipq9574_tx_ring_info,
117 + .txcmpl = &ipq9574_txcmpl_ring_info,
119 + .napi_budget_rx = 128,
120 + .napi_budget_tx = 512,
123 +static int edma_clock_set_and_enable(struct device *dev,
124 + const char *id, unsigned long rate)
126 + struct device_node *edma_np;
127 + struct clk *clk = NULL;
130 + edma_np = of_get_child_by_name(dev->of_node, "edma");
132 + clk = devm_get_clk_from_child(dev, edma_np, id);
134 + dev_err(dev, "clk %s get failed\n", id);
135 + of_node_put(edma_np);
136 + return PTR_ERR(clk);
139 + ret = clk_set_rate(clk, rate);
141 + dev_err(dev, "set %lu rate for %s failed\n", rate, id);
142 + of_node_put(edma_np);
146 + ret = clk_prepare_enable(clk);
148 + dev_err(dev, "clk %s enable failed\n", id);
149 + of_node_put(edma_np);
153 + of_node_put(edma_np);
155 + dev_dbg(dev, "set %lu rate for %s\n", rate, id);
160 +static int edma_clock_init(void)
162 + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
163 + struct device *dev = ppe_dev->dev;
164 + unsigned long ppe_rate;
167 + ppe_rate = ppe_dev->clk_rate;
169 + ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CLK],
174 + ret = edma_clock_set_and_enable(dev, clock_name[EDMA_CFG_CLK],
183 + * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
185 + * Map int_priority values to priority class and initialize
186 + * unicast priority map table for default profile_id.
188 +static int edma_configure_ucast_prio_map_tbl(void)
190 + u8 pri_class, int_pri;
193 + /* Set the priority class value for every possible priority. */
194 + for (int_pri = 0; int_pri < PPE_QUEUE_INTER_PRI_NUM; int_pri++) {
195 + pri_class = edma_pri_map[int_pri];
197 + /* Priority offset should be less than maximum supported
200 + if (pri_class > EDMA_PRI_MAX_PER_CORE - 1) {
201 + pr_err("Configured incorrect priority offset: %d\n",
206 + ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
207 + PPE_QUEUE_CLASS_PRIORITY, int_pri, pri_class);
210 + pr_err("Failed with error: %d to set queue priority class for int_pri: %d for profile_id: %d\n",
215 + pr_debug("profile_id: %d, int_priority: %d, pri_class: %d\n",
216 + 0, int_pri, pri_class);
222 +static int edma_irq_init(void)
224 + struct edma_hw_info *hw_info = edma_ctx->hw_info;
225 + struct edma_ring_info *txcmpl = hw_info->txcmpl;
226 + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
227 + struct edma_ring_info *rx = hw_info->rx;
228 + char edma_irq_name[EDMA_IRQ_NAME_SIZE];
229 + struct device *dev = ppe_dev->dev;
230 + struct platform_device *pdev;
231 + struct device_node *edma_np;
234 + pdev = to_platform_device(dev);
235 + edma_np = of_get_child_by_name(dev->of_node, "edma");
236 + edma_ctx->intr_info.intr_txcmpl = kzalloc((sizeof(*edma_ctx->intr_info.intr_txcmpl) *
237 + txcmpl->num_rings), GFP_KERNEL);
238 + if (!edma_ctx->intr_info.intr_txcmpl) {
239 + of_node_put(edma_np);
243 + /* Get TXCMPL rings IRQ numbers. */
244 + for (i = 0; i < txcmpl->num_rings; i++) {
245 + snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_txcmpl_%d",
246 + txcmpl->ring_start + i);
247 + edma_ctx->intr_info.intr_txcmpl[i] = of_irq_get_byname(edma_np, edma_irq_name);
248 + if (edma_ctx->intr_info.intr_txcmpl[i] < 0) {
249 + dev_err(dev, "%s: txcmpl_info.intr[%u] irq get failed\n",
251 + of_node_put(edma_np);
252 + kfree(edma_ctx->intr_info.intr_txcmpl);
253 + return edma_ctx->intr_info.intr_txcmpl[i];
256 + dev_dbg(dev, "%s: intr_info.intr_txcmpl[%u] = %u\n",
257 + edma_np->name, i, edma_ctx->intr_info.intr_txcmpl[i]);
260 + edma_ctx->intr_info.intr_rx = kzalloc((sizeof(*edma_ctx->intr_info.intr_rx) *
261 + rx->num_rings), GFP_KERNEL);
262 + if (!edma_ctx->intr_info.intr_rx) {
263 + of_node_put(edma_np);
264 + kfree(edma_ctx->intr_info.intr_txcmpl);
268 + /* Get RXDESC rings IRQ numbers. */
269 + for (i = 0; i < rx->num_rings; i++) {
270 + snprintf(edma_irq_name, sizeof(edma_irq_name), "edma_rxdesc_%d",
271 + rx->ring_start + i);
272 + edma_ctx->intr_info.intr_rx[i] = of_irq_get_byname(edma_np, edma_irq_name);
273 + if (edma_ctx->intr_info.intr_rx[i] < 0) {
274 + dev_err(dev, "%s: rx_queue_map_info.intr[%u] irq get failed\n",
276 + of_node_put(edma_np);
277 + kfree(edma_ctx->intr_info.intr_rx);
278 + kfree(edma_ctx->intr_info.intr_txcmpl);
279 + return edma_ctx->intr_info.intr_rx[i];
282 + dev_dbg(dev, "%s: intr_info.intr_rx[%u] = %u\n",
283 + edma_np->name, i, edma_ctx->intr_info.intr_rx[i]);
286 + /* Get misc IRQ number. */
287 + edma_ctx->intr_info.intr_misc = of_irq_get_byname(edma_np, "edma_misc");
288 + if (edma_ctx->intr_info.intr_misc < 0) {
289 + dev_err(dev, "%s: misc_intr irq get failed\n", edma_np->name);
290 + of_node_put(edma_np);
291 + kfree(edma_ctx->intr_info.intr_rx);
292 + kfree(edma_ctx->intr_info.intr_txcmpl);
293 + return edma_ctx->intr_info.intr_misc;
296 + of_node_put(edma_np);
298 + dev_dbg(dev, "%s: misc IRQ:%u\n", edma_np->name,
299 + edma_ctx->intr_info.intr_misc);
304 +static int edma_hw_reset(void)
306 + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
307 + struct device *dev = ppe_dev->dev;
308 + struct reset_control *edma_hw_rst;
309 + struct device_node *edma_np;
310 + const char *reset_string;
314 + /* Count and parse reset names from DTSI. */
315 + edma_np = of_get_child_by_name(dev->of_node, "edma");
316 + count = of_property_count_strings(edma_np, "reset-names");
318 + dev_err(dev, "EDMA reset entry not found\n");
319 + of_node_put(edma_np);
323 + for (i = 0; i < count; i++) {
324 + ret = of_property_read_string_index(edma_np, "reset-names",
327 + dev_err(dev, "Error reading reset-names");
328 + of_node_put(edma_np);
332 + edma_hw_rst = of_reset_control_get_exclusive(edma_np, reset_string);
333 + if (IS_ERR(edma_hw_rst)) {
334 + of_node_put(edma_np);
335 + return PTR_ERR(edma_hw_rst);
338 + /* 100ms delay is required by hardware to reset EDMA. */
339 + reset_control_assert(edma_hw_rst);
342 + reset_control_deassert(edma_hw_rst);
345 + reset_control_put(edma_hw_rst);
346 + dev_dbg(dev, "EDMA HW reset, i:%d reset_string:%s\n", i, reset_string);
349 + of_node_put(edma_np);
354 +static int edma_hw_configure(void)
356 + struct edma_hw_info *hw_info = edma_ctx->hw_info;
357 + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
358 + struct regmap *regmap = ppe_dev->regmap;
362 + reg = EDMA_BASE_OFFSET + EDMA_REG_MAS_CTRL_ADDR;
363 + ret = regmap_read(regmap, reg, &data);
367 + pr_debug("EDMA ver %d hw init\n", data);
369 + /* Setup private data structure. */
370 + edma_ctx->intr_info.intr_mask_rx = EDMA_RXDESC_INT_MASK_PKT_INT;
371 + edma_ctx->intr_info.intr_mask_txcmpl = EDMA_TX_INT_MASK_PKT_INT;
374 + ret = edma_hw_reset();
376 + pr_err("Error in resetting the hardware. ret: %d\n", ret);
380 + /* Allocate memory for netdevices. */
381 + edma_ctx->netdev_arr = kzalloc((sizeof(**edma_ctx->netdev_arr) *
382 + hw_info->max_ports),
384 + if (!edma_ctx->netdev_arr)
387 + /* Configure DMA request priority, DMA read burst length,
388 + * and AXI write size.
390 + data = FIELD_PREP(EDMA_DMAR_BURST_LEN_MASK, EDMA_BURST_LEN_ENABLE);
391 + data |= FIELD_PREP(EDMA_DMAR_REQ_PRI_MASK, 0);
392 + data |= FIELD_PREP(EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK, 31);
393 + data |= FIELD_PREP(EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK, 7);
394 + data |= FIELD_PREP(EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK, 7);
396 + reg = EDMA_BASE_OFFSET + EDMA_REG_DMAR_CTRL_ADDR;
397 + ret = regmap_write(regmap, reg, data);
401 + /* Configure Tx Timeout Threshold. */
402 + data = EDMA_TX_TIMEOUT_THRESH_VAL;
404 + reg = EDMA_BASE_OFFSET + EDMA_REG_TX_TIMEOUT_THRESH_ADDR;
405 + ret = regmap_write(regmap, reg, data);
409 + /* Set Miscellaneous error mask. */
410 + data = EDMA_MISC_AXI_RD_ERR_MASK |
411 + EDMA_MISC_AXI_WR_ERR_MASK |
412 + EDMA_MISC_RX_DESC_FIFO_FULL_MASK |
413 + EDMA_MISC_RX_ERR_BUF_SIZE_MASK |
414 + EDMA_MISC_TX_SRAM_FULL_MASK |
415 + EDMA_MISC_TX_CMPL_BUF_FULL_MASK |
416 + EDMA_MISC_DATA_LEN_ERR_MASK;
417 + data |= EDMA_MISC_TX_TIMEOUT_MASK;
418 + edma_ctx->intr_info.intr_mask_misc = data;
420 + /* Global EDMA enable and padding enable. */
421 + data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
423 + reg = EDMA_BASE_OFFSET + EDMA_REG_PORT_CTRL_ADDR;
424 + ret = regmap_write(regmap, reg, data);
428 + /* Initialize unicast priority map table. */
429 + ret = (int)edma_configure_ucast_prio_map_tbl();
431 + pr_err("Failed to initialize unicast priority map table: %d\n",
433 + kfree(edma_ctx->netdev_arr);
441 + * edma_destroy - EDMA Destroy.
442 + * @ppe_dev: PPE device
444 + * Free the memory allocated during setup.
446 +void edma_destroy(struct ppe_device *ppe_dev)
448 + kfree(edma_ctx->intr_info.intr_rx);
449 + kfree(edma_ctx->intr_info.intr_txcmpl);
450 + kfree(edma_ctx->netdev_arr);
454 + * edma_setup - EDMA Setup.
455 + * @ppe_dev: PPE device
457 + * Configure Ethernet global ctx, clocks, hardware and interrupts.
459 + * Return 0 on success, negative error code on failure.
461 +int edma_setup(struct ppe_device *ppe_dev)
463 + struct device *dev = ppe_dev->dev;
466 + edma_ctx = devm_kzalloc(dev, sizeof(*edma_ctx), GFP_KERNEL);
470 + edma_ctx->hw_info = &ipq9574_hw_info;
471 + edma_ctx->ppe_dev = ppe_dev;
473 + /* Configure the EDMA common clocks. */
474 + ret = edma_clock_init();
476 + dev_err(dev, "Error in configuring the EDMA clocks\n");
480 + dev_dbg(dev, "QCOM EDMA common clocks are configured\n");
482 + ret = edma_hw_configure();
484 + dev_err(dev, "Error in edma configuration\n");
488 + ret = edma_irq_init();
490 + dev_err(dev, "Error in irq initialization\n");
494 + dev_info(dev, "EDMA configuration successful\n");
498 diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
500 index 000000000000..6bad51c976dd
502 +++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
504 +/* SPDX-License-Identifier: GPL-2.0-only
505 + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
508 +#ifndef __EDMA_MAIN__
509 +#define __EDMA_MAIN__
511 +#include "ppe_api.h"
513 +/* One clock cycle = 1/(EDMA clock frequency in MHz) microseconds.
515 + * One timer unit is 128 clock cycles.
517 + * Therefore, the microsecond to timer unit conversion is:
518 + * Timer unit = time in microseconds / (one clock cycle in microsecond * cycles in 1 timer unit)
519 + * = ('x' microsecond * EDMA clock frequency in MHz ('y') / 128).
522 +#define EDMA_CYCLE_PER_TIMER_UNIT 128
523 +#define EDMA_MICROSEC_TO_TIMER_UNIT(x, y) ((x) * (y) / EDMA_CYCLE_PER_TIMER_UNIT)
524 +#define MHZ 1000000UL
526 +/* EDMA profile ID. */
527 +#define EDMA_CPU_PORT_PROFILE_ID 0
529 +/* Number of PPE queue priorities supported per ARM core. */
530 +#define EDMA_PRI_MAX_PER_CORE 8
533 + * struct edma_ring_info - EDMA ring data structure.
534 + * @max_rings: Maximum number of rings
535 + * @ring_start: Ring start ID
536 + * @num_rings: Number of rings
538 +struct edma_ring_info {
545 + * struct edma_hw_info - EDMA hardware data structure.
546 + * @rxfill: Rx Fill ring information
547 + * @rx: Rx Desc ring information
548 + * @tx: Tx Desc ring information
549 + * @txcmpl: Tx complete ring information
550 + * @max_ports: Maximum number of ports
551 + * @napi_budget_rx: Rx NAPI budget
552 + * @napi_budget_tx: Tx NAPI budget
554 +struct edma_hw_info {
555 + struct edma_ring_info *rxfill;
556 + struct edma_ring_info *rx;
557 + struct edma_ring_info *tx;
558 + struct edma_ring_info *txcmpl;
560 + u32 napi_budget_rx;
561 + u32 napi_budget_tx;
565 + * struct edma_intr_info - EDMA interrupt data structure.
566 + * @intr_mask_rx: RX interrupt mask
567 + * @intr_rx: Rx interrupts
568 + * @intr_mask_txcmpl: Tx completion interrupt mask
569 + * @intr_txcmpl: Tx completion interrupts
570 + * @intr_mask_misc: Miscellaneous interrupt mask
571 + * @intr_misc: Miscellaneous interrupts
573 +struct edma_intr_info {
576 + u32 intr_mask_txcmpl;
578 + u32 intr_mask_misc;
583 + * struct edma_context - EDMA context.
584 + * @netdev_arr: Net device for each EDMA port
585 + * @ppe_dev: PPE device
586 + * @hw_info: EDMA Hardware info
587 + * @intr_info: EDMA Interrupt info
589 +struct edma_context {
590 + struct net_device **netdev_arr;
591 + struct ppe_device *ppe_dev;
592 + struct edma_hw_info *hw_info;
593 + struct edma_intr_info intr_info;
596 +/* Global EDMA context. */
597 +extern struct edma_context *edma_ctx;
599 +void edma_destroy(struct ppe_device *ppe_dev);
600 +int edma_setup(struct ppe_device *ppe_dev);
603 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
604 index bcf21c838e05..93f92be9dc41 100644
605 --- a/drivers/net/ethernet/qualcomm/ppe/ppe.c
606 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
608 #include <linux/regmap.h>
609 #include <linux/reset.h>
613 #include "ppe_config.h"
614 #include "ppe_debugfs.h"
615 @@ -208,10 +209,16 @@ static int qcom_ppe_probe(struct platform_device *pdev)
617 return dev_err_probe(dev, ret, "PPE HW config failed\n");
619 - ret = ppe_port_mac_init(ppe_dev);
620 + ret = edma_setup(ppe_dev);
622 + return dev_err_probe(dev, ret, "EDMA setup failed\n");
624 + ret = ppe_port_mac_init(ppe_dev);
626 + edma_destroy(ppe_dev);
627 return dev_err_probe(dev, ret,
628 "PPE Port MAC initialization failed\n");
631 ppe_debugfs_setup(ppe_dev);
632 platform_set_drvdata(pdev, ppe_dev);
633 @@ -226,6 +233,7 @@ static void qcom_ppe_remove(struct platform_device *pdev)
634 ppe_dev = platform_get_drvdata(pdev);
635 ppe_debugfs_teardown(ppe_dev);
636 ppe_port_mac_deinit(ppe_dev);
637 + edma_destroy(ppe_dev);
639 platform_set_drvdata(pdev, NULL);
641 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
642 index 6e6e469247c8..f2a60776a40a 100644
643 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
644 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
645 @@ -788,4 +788,257 @@
646 #define XGMAC_RXDISCARD_GB_ADDR 0x9AC
647 #define XGMAC_RXDISCARDBYTE_GB_ADDR 0x9B4
649 +#define EDMA_BASE_OFFSET 0xb00000
651 +/* EDMA register offsets */
652 +#define EDMA_REG_MAS_CTRL_ADDR 0x0
653 +#define EDMA_REG_PORT_CTRL_ADDR 0x4
654 +#define EDMA_REG_VLAN_CTRL_ADDR 0x8
655 +#define EDMA_REG_RXDESC2FILL_MAP_0_ADDR 0x14
656 +#define EDMA_REG_RXDESC2FILL_MAP_1_ADDR 0x18
657 +#define EDMA_REG_RXDESC2FILL_MAP_2_ADDR 0x1c
658 +#define EDMA_REG_TXQ_CTRL_ADDR 0x20
659 +#define EDMA_REG_TXQ_CTRL_2_ADDR 0x24
660 +#define EDMA_REG_TXQ_FC_0_ADDR 0x28
661 +#define EDMA_REG_TXQ_FC_1_ADDR 0x30
662 +#define EDMA_REG_TXQ_FC_2_ADDR 0x34
663 +#define EDMA_REG_TXQ_FC_3_ADDR 0x38
664 +#define EDMA_REG_RXQ_CTRL_ADDR 0x3c
665 +#define EDMA_REG_MISC_ERR_QID_ADDR 0x40
666 +#define EDMA_REG_RXQ_FC_THRE_ADDR 0x44
667 +#define EDMA_REG_DMAR_CTRL_ADDR 0x48
668 +#define EDMA_REG_AXIR_CTRL_ADDR 0x4c
669 +#define EDMA_REG_AXIW_CTRL_ADDR 0x50
670 +#define EDMA_REG_MIN_MSS_ADDR 0x54
671 +#define EDMA_REG_LOOPBACK_CTRL_ADDR 0x58
672 +#define EDMA_REG_MISC_INT_STAT_ADDR 0x5c
673 +#define EDMA_REG_MISC_INT_MASK_ADDR 0x60
674 +#define EDMA_REG_DBG_CTRL_ADDR 0x64
675 +#define EDMA_REG_DBG_DATA_ADDR 0x68
676 +#define EDMA_REG_TX_TIMEOUT_THRESH_ADDR 0x6c
677 +#define EDMA_REG_REQ0_FIFO_THRESH_ADDR 0x80
678 +#define EDMA_REG_WB_OS_THRESH_ADDR 0x84
679 +#define EDMA_REG_MISC_ERR_QID_REG2_ADDR 0x88
680 +#define EDMA_REG_TXDESC2CMPL_MAP_0_ADDR 0x8c
681 +#define EDMA_REG_TXDESC2CMPL_MAP_1_ADDR 0x90
682 +#define EDMA_REG_TXDESC2CMPL_MAP_2_ADDR 0x94
683 +#define EDMA_REG_TXDESC2CMPL_MAP_3_ADDR 0x98
684 +#define EDMA_REG_TXDESC2CMPL_MAP_4_ADDR 0x9c
685 +#define EDMA_REG_TXDESC2CMPL_MAP_5_ADDR 0xa0
687 +/* Tx descriptor ring configuration register addresses */
688 +#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * (n)))
689 +#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * (n)))
690 +#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * (n)))
691 +#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * (n)))
692 +#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * (n)))
693 +#define EDMA_REG_TXDESC_BA2(n) (0x1014 + (0x1000 * (n)))
695 +/* RxFill ring configuration register addresses */
696 +#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * (n)))
697 +#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * (n)))
698 +#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * (n)))
699 +#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * (n)))
700 +#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * (n)))
701 +#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * (n)))
702 +#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * (n)))
703 +#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * (n)))
704 +#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * (n)))
705 +#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * (n)))
706 +#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * (n)))
707 +#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * (n)))
709 +/* Rx descriptor ring configuration register addresses */
710 +#define EDMA_REG_RXDESC_BA(n) (0x39000 + (0x1000 * (n)))
711 +#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * (n)))
712 +#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * (n)))
713 +#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * (n)))
714 +#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * (n)))
715 +#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * (n)))
716 +#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * (n)))
717 +#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * (n)))
718 +#define EDMA_REG_RXDESC_DISABLE(n) (0x39020 + (0x1000 * (n)))
719 +#define EDMA_REG_RXDESC_DISABLE_DONE(n) (0x39024 + (0x1000 * (n)))
720 +#define EDMA_REG_RXDESC_PREHEADER_BA(n) (0x39028 + (0x1000 * (n)))
721 +#define EDMA_REG_RXDESC_INT_STAT(n) (0x59000 + (0x1000 * (n)))
722 +#define EDMA_REG_RXDESC_INT_MASK(n) (0x59004 + (0x1000 * (n)))
724 +#define EDMA_REG_RX_MOD_TIMER(n) (0x59008 + (0x1000 * (n)))
725 +#define EDMA_REG_RX_INT_CTRL(n) (0x5900c + (0x1000 * (n)))
727 +/* Tx completion ring configuration register addresses */
728 +#define EDMA_REG_TXCMPL_BA(n) (0x79000 + (0x1000 * (n)))
729 +#define EDMA_REG_TXCMPL_PROD_IDX(n) (0x79004 + (0x1000 * (n)))
730 +#define EDMA_REG_TXCMPL_CONS_IDX(n) (0x79008 + (0x1000 * (n)))
731 +#define EDMA_REG_TXCMPL_RING_SIZE(n) (0x7900c + (0x1000 * (n)))
732 +#define EDMA_REG_TXCMPL_UGT_THRE(n) (0x79010 + (0x1000 * (n)))
733 +#define EDMA_REG_TXCMPL_CTRL(n) (0x79014 + (0x1000 * (n)))
734 +#define EDMA_REG_TXCMPL_BPC(n) (0x79018 + (0x1000 * (n)))
736 +#define EDMA_REG_TX_INT_STAT(n) (0x99000 + (0x1000 * (n)))
737 +#define EDMA_REG_TX_INT_MASK(n) (0x99004 + (0x1000 * (n)))
738 +#define EDMA_REG_TX_MOD_TIMER(n) (0x99008 + (0x1000 * (n)))
739 +#define EDMA_REG_TX_INT_CTRL(n) (0x9900c + (0x1000 * (n)))
741 +/* EDMA_QID2RID_TABLE_MEM register field masks */
742 +#define EDMA_RX_RING_ID_QUEUE0_MASK GENMASK(7, 0)
743 +#define EDMA_RX_RING_ID_QUEUE1_MASK GENMASK(15, 8)
744 +#define EDMA_RX_RING_ID_QUEUE2_MASK GENMASK(23, 16)
745 +#define EDMA_RX_RING_ID_QUEUE3_MASK GENMASK(31, 24)
747 +/* EDMA_REG_PORT_CTRL register bit definitions */
748 +#define EDMA_PORT_PAD_EN 0x1
749 +#define EDMA_PORT_EDMA_EN 0x2
751 +/* EDMA_REG_DMAR_CTRL register field masks */
752 +#define EDMA_DMAR_REQ_PRI_MASK GENMASK(2, 0)
753 +#define EDMA_DMAR_BURST_LEN_MASK BIT(3)
754 +#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK GENMASK(8, 4)
755 +#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK GENMASK(11, 9)
756 +#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK GENMASK(14, 12)
758 +#define EDMA_BURST_LEN_ENABLE 0
760 +/* Tx timeout threshold */
761 +#define EDMA_TX_TIMEOUT_THRESH_VAL 0xFFFF
763 +/* Rx descriptor ring base address mask */
764 +#define EDMA_RXDESC_BA_MASK 0xffffffff
766 +/* Rx Descriptor ring pre-header base address mask */
767 +#define EDMA_RXDESC_PREHEADER_BA_MASK 0xffffffff
769 +/* Tx descriptor prod ring index mask */
770 +#define EDMA_TXDESC_PROD_IDX_MASK 0xffff
772 +/* Tx descriptor consumer ring index mask */
773 +#define EDMA_TXDESC_CONS_IDX_MASK 0xffff
775 +/* Tx descriptor ring size mask */
776 +#define EDMA_TXDESC_RING_SIZE_MASK 0xffff
778 +/* Tx descriptor ring enable */
779 +#define EDMA_TXDESC_TX_ENABLE 0x1
781 +#define EDMA_TXDESC_CTRL_TXEN_MASK BIT(0)
782 +#define EDMA_TXDESC_CTRL_FC_GRP_ID_MASK GENMASK(3, 1)
784 +/* Tx completion ring prod index mask */
785 +#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff
787 +/* Tx completion ring urgent threshold mask */
788 +#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff
789 +#define EDMA_TXCMPL_LOW_THRE_SHIFT 0
791 +/* EDMA_REG_TX_MOD_TIMER mask */
792 +#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
793 +#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0
795 +/* Rx fill ring prod index mask */
796 +#define EDMA_RXFILL_PROD_IDX_MASK 0xffff
798 +/* Rx fill ring consumer index mask */
799 +#define EDMA_RXFILL_CONS_IDX_MASK 0xffff
801 +/* Rx fill ring size mask */
802 +#define EDMA_RXFILL_RING_SIZE_MASK 0xffff
804 +/* Rx fill ring flow control threshold masks */
805 +#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff
806 +#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12
807 +#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff
808 +#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0
810 +/* Rx fill ring enable bit */
811 +#define EDMA_RXFILL_RING_EN 0x1
813 +/* Rx desc ring prod index mask */
814 +#define EDMA_RXDESC_PROD_IDX_MASK 0xffff
816 +/* Rx descriptor ring cons index mask */
817 +#define EDMA_RXDESC_CONS_IDX_MASK 0xffff
819 +/* Rx descriptor ring size masks */
820 +#define EDMA_RXDESC_RING_SIZE_MASK 0xffff
821 +#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff
822 +#define EDMA_RXDESC_PL_OFFSET_SHIFT 16
823 +#define EDMA_RXDESC_PL_DEFAULT_VALUE 0
825 +/* Rx descriptor ring flow control threshold masks */
826 +#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff
827 +#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12
828 +#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff
829 +#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0
831 +/* Rx descriptor ring urgent threshold mask */
832 +#define EDMA_RXDESC_LOW_THRE_MASK 0xffff
833 +#define EDMA_RXDESC_LOW_THRE_SHIFT 0
835 +/* Rx descriptor ring enable bit */
836 +#define EDMA_RXDESC_RX_EN 0x1
838 +/* Tx interrupt status bit */
839 +#define EDMA_TX_INT_MASK_PKT_INT 0x1
841 +/* Rx interrupt mask */
842 +#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1
844 +#define EDMA_MASK_INT_DISABLE 0x0
845 +#define EDMA_MASK_INT_CLEAR 0x0
847 +/* EDMA_REG_RX_MOD_TIMER register field masks */
848 +#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
849 +#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0
851 +/* EDMA Ring mask */
852 +#define EDMA_RING_DMA_MASK 0xffffffff
854 +/* RXDESC threshold interrupt. */
855 +#define EDMA_RXDESC_UGT_INT_STAT 0x2
857 +/* RXDESC timer interrupt */
858 +#define EDMA_RXDESC_PKT_INT_STAT 0x1
860 +/* RXDESC Interrupt status mask */
861 +#define EDMA_RXDESC_RING_INT_STATUS_MASK \
862 + (EDMA_RXDESC_UGT_INT_STAT | EDMA_RXDESC_PKT_INT_STAT)
864 +/* TXCMPL threshold interrupt. */
865 +#define EDMA_TXCMPL_UGT_INT_STAT 0x2
867 +/* TXCMPL timer interrupt */
868 +#define EDMA_TXCMPL_PKT_INT_STAT 0x1
870 +/* TXCMPL Interrupt status mask */
871 +#define EDMA_TXCMPL_RING_INT_STATUS_MASK \
872 + (EDMA_TXCMPL_UGT_INT_STAT | EDMA_TXCMPL_PKT_INT_STAT)
874 +#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0
876 +#define EDMA_RXDESC_LOW_THRE 0
877 +#define EDMA_RX_MOD_TIMER_INIT 1000
878 +#define EDMA_RX_NE_INT_EN 0x2
880 +#define EDMA_TX_MOD_TIMER 150
882 +#define EDMA_TX_INITIAL_PROD_IDX 0x0
883 +#define EDMA_TX_NE_INT_EN 0x2
885 +/* EDMA misc error mask */
886 +#define EDMA_MISC_AXI_RD_ERR_MASK BIT(0)
887 +#define EDMA_MISC_AXI_WR_ERR_MASK BIT(1)
888 +#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK BIT(2)
889 +#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK BIT(3)
890 +#define EDMA_MISC_TX_SRAM_FULL_MASK BIT(4)
891 +#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK BIT(5)
893 +#define EDMA_MISC_DATA_LEN_ERR_MASK BIT(6)
894 +#define EDMA_MISC_TX_TIMEOUT_MASK BIT(7)
896 +/* EDMA txdesc2cmpl map */
897 +#define EDMA_TXDESC2CMPL_MAP_TXDESC_MASK 0x1F
899 +/* EDMA rxdesc2fill map */
900 +#define EDMA_RXDESC2FILL_MAP_RXDESC_MASK 0x7