From 4dfbbaa1e9ab01f1126c9e7a89583aad0b6600da Mon Sep 17 00:00:00 2001
From: Suruchi Agarwal <quic_suruchia@quicinc.com>
Date: Thu, 21 Mar 2024 16:31:04 -0700
Subject: [PATCH 42/50] net: ethernet: qualcomm: Add miscellaneous error
 interrupts and counters

Add support for the EDMA miscellaneous error interrupts, and expose the
EDMA Tx/Rx ring and error counters through the debugfs framework.
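
For example, with debugfs mounted at the usual /sys/kernel/debug, the
new counters can be read from user space as follows (illustrative
paths, derived from the directory and file names created below):

  cat /sys/kernel/debug/ppe/edma/stats/rx_ring_stats
  cat /sys/kernel/debug/ppe/edma/stats/tx_ring_stats
  cat /sys/kernel/debug/ppe/edma/stats/err_stats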

Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
---
 drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
 drivers/net/ethernet/qualcomm/ppe/edma.c      | 162 ++++++++
 drivers/net/ethernet/qualcomm/ppe/edma.h      |  31 +-
 .../net/ethernet/qualcomm/ppe/edma_debugfs.c  | 370 ++++++++++++++++++
 .../net/ethernet/qualcomm/ppe/ppe_debugfs.c   |  17 +
 5 files changed, 580 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c

diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
index b358bfd781fb..45e1b103ec7a 100644
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
 qcom-ppe-objs := ppe.o ppe_config.o ppe_api.o ppe_debugfs.o ppe_port.o

 #EDMA
-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.c b/drivers/net/ethernet/qualcomm/ppe/edma.c
index 739fcfbde0f9..0e16f8ab545f 100644
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -151,6 +151,42 @@ static int edma_clock_init(void)
 	return 0;
 }

+/**
+ * edma_err_stats_alloc - Allocate stats memory
+ *
+ * Allocate memory for per-CPU error stats.
+ */
+int edma_err_stats_alloc(void)
+{
+	u32 i;
+
+	edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
+	if (!edma_ctx->err_stats)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct edma_err_stats *stats;
+
+		stats = per_cpu_ptr(edma_ctx->err_stats, i);
+		u64_stats_init(&stats->syncp);
+	}
+
+	return 0;
+}
+
+/**
+ * edma_err_stats_free - Free stats memory
+ *
+ * Free memory of per-CPU error stats.
+ */
+void edma_err_stats_free(void)
+{
+	if (edma_ctx->err_stats) {
+		free_percpu(edma_ctx->err_stats);
+		edma_ctx->err_stats = NULL;
+	}
+}
+
 /**
  * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
  *
@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map_tbl(void)
 	return ret;
 }

+static void edma_disable_misc_interrupt(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 reg;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+	regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
+}
+
+static void edma_enable_misc_interrupt(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 reg;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+	regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
+}
+
+static irqreturn_t edma_misc_handle_irq(int irq,
+					__maybe_unused void *ctx)
+{
+	struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 misc_intr_status, data, reg;
+
+	/* Read Misc intr status */
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
+	regmap_read(regmap, reg, &data);
+	misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
+
+	pr_debug("Received misc irq %d, status: %u\n", irq, misc_intr_status);
+
+	if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
+		pr_err("MISC AXI read error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_axi_read_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
+		pr_err("MISC AXI write error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_axi_write_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Rx descriptor fifo full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_rxdesc_fifo_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Rx buffer size error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_rx_buf_size_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx SRAM full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_sram_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx complete buffer full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_txcmpl_buf_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC data length error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_data_len_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx timeout error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_timeout;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int edma_irq_register(void)
 {
 	struct edma_hw_info *hw_info = edma_ctx->hw_info;
 	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
 	struct edma_ring_info *rx = hw_info->rx;
+	struct device *dev = ppe_dev->dev;
 	int ret;
 	u32 i;

@@ -270,8 +408,25 @@ static int edma_irq_register(void)
 			 edma_rxdesc_irq_name[i]);
 	}

+	/* Request Misc IRQ */
+	ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
+			  IRQF_SHARED, "edma_misc",
+			  (void *)dev);
+	if (ret) {
+		pr_err("MISC IRQ:%d request failed\n",
+		       edma_ctx->intr_info.intr_misc);
+		goto misc_intr_req_fail;
+	}
+
 	return 0;

+misc_intr_req_fail:
+	/* Free IRQ for RXDESC rings */
+	for (i = 0; i < rx->num_rings; i++) {
+		synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
+		free_irq(edma_ctx->intr_info.intr_rx[i],
+			 (void *)&edma_ctx->rx_rings[i]);
+	}
 rx_desc_ring_intr_req_fail:
 	for (i = 0; i < rx->num_rings; i++)
 		kfree(edma_rxdesc_irq_name[i]);
@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
 		edma_cfg_tx_disable_interrupts(i);

 	edma_cfg_rx_disable_interrupts();
+	edma_disable_misc_interrupt();

 	edma_cfg_rx_rings_disable();

@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe_dev)
 		edma_cfg_tx_disable_interrupts(i);

 	edma_cfg_rx_disable_interrupts();
+	edma_disable_misc_interrupt();

 	/* Free IRQ for TXCMPL rings. */
 	for (i = 0; i < txcmpl->num_rings; i++) {
@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe_dev)
 	}
 	kfree(edma_rxdesc_irq_name);

+	/* Free Misc IRQ */
+	synchronize_irq(edma_ctx->intr_info.intr_misc);
+	free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
+
 	kfree(edma_ctx->intr_info.intr_rx);
 	kfree(edma_ctx->intr_info.intr_txcmpl);

@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_dev)
 	}

 	edma_cfg_rx_enable_interrupts();
+	edma_enable_misc_interrupt();

 	dev_info(dev, "EDMA configuration successful\n");

diff --git a/drivers/net/ethernet/qualcomm/ppe/edma.h b/drivers/net/ethernet/qualcomm/ppe/edma.h
index fb8ccbfbaf41..6500d21b9eba 100644
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -36,6 +36,30 @@
 		((((head) - (tail)) + \
 		 (max)) & ((max) - 1)); })

+/**
+ * struct edma_err_stats - EDMA error stats
+ * @edma_axi_read_err: AXI read error
+ * @edma_axi_write_err: AXI write error
+ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
+ * @edma_rx_buf_size_err: Rx buffer size too small error
+ * @edma_tx_sram_full: Tx packet SRAM buffer full error
+ * @edma_tx_data_len_err: Tx data length error
+ * @edma_tx_timeout: Tx timeout error
+ * @edma_txcmpl_buf_full: Tx completion buffer full error
+ * @syncp: Synchronization point for the 64-bit counters
+ */
+struct edma_err_stats {
+	u64 edma_axi_read_err;
+	u64 edma_axi_write_err;
+	u64 edma_rxdesc_fifo_full;
+	u64 edma_rx_buf_size_err;
+	u64 edma_tx_sram_full;
+	u64 edma_tx_data_len_err;
+	u64 edma_tx_timeout;
+	u64 edma_txcmpl_buf_full;
+	struct u64_stats_sync syncp;
+};
+
 /**
  * struct edma_ring_info - EDMA ring data structure.
  * @max_rings: Maximum number of rings
@@ -97,6 +121,7 @@ struct edma_intr_info {
  * @rx_rings: Rx Desc Rings, SW is consumer
  * @tx_rings: Tx Descriptor Ring, SW is producer
  * @txcmpl_rings: Tx complete Ring, SW is consumer
+ * @err_stats: Per CPU error statistics
  * @rx_page_mode: Page mode enabled or disabled
  * @rx_buf_size: Rx buffer size for Jumbo MRU
  * @tx_requeue_stop: Tx requeue stop enabled or disabled
@@ -111,6 +136,7 @@ struct edma_context {
 	struct edma_rxdesc_ring *rx_rings;
 	struct edma_txdesc_ring *tx_rings;
 	struct edma_txcmpl_ring *txcmpl_rings;
+	struct edma_err_stats __percpu *err_stats;
 	u32 rx_page_mode;
 	u32 rx_buf_size;
 	bool tx_requeue_stop;
@@ -119,7 +145,10 @@
 /* Global EDMA context */
 extern struct edma_context *edma_ctx;

+int edma_err_stats_alloc(void);
+void edma_err_stats_free(void);
 void edma_destroy(struct ppe_device *ppe_dev);
 int edma_setup(struct ppe_device *ppe_dev);
-
+void edma_debugfs_teardown(void);
+int edma_debugfs_setup(struct ppe_device *ppe_dev);
 #endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
new file mode 100644
index 000000000000..671062d4ee72
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* EDMA debugfs routines for display of Tx/Rx counters. */
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+
+#include "edma.h"
+
+#define EDMA_STATS_BANNER_MAX_LEN	80
+#define EDMA_RX_RING_STATS_NODE_NAME	"EDMA_RX"
+#define EDMA_TX_RING_STATS_NODE_NAME	"EDMA_TX"
+#define EDMA_ERR_STATS_NODE_NAME	"EDMA_ERR"
+
+static struct dentry *edma_dentry;
+static struct dentry *stats_dentry;
+
+static void edma_debugfs_print_banner(struct seq_file *m, char *node)
+{
+	u32 banner_char_len, i;
+
+	for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+		seq_puts(m, "_");
+	banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
+	seq_puts(m, "\n\n");
+
+	for (i = 0; i < banner_char_len; i++)
+		seq_puts(m, "<");
+	seq_printf(m, " %s ", node);
+
+	for (i = 0; i < banner_char_len; i++)
+		seq_puts(m, ">");
+	seq_puts(m, "\n");
+
+	for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+		seq_puts(m, "_");
+	seq_puts(m, "\n\n");
+}
+
+static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
+					    void __maybe_unused *p)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *rxfill = hw_info->rxfill;
+	struct edma_rxfill_stats *rxfill_stats;
+	struct edma_rxdesc_stats *rxdesc_stats;
+	struct edma_ring_info *rx = hw_info->rx;
+	unsigned int start;
+	u32 i;
+
+	rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
+	if (!rxfill_stats)
+		return -ENOMEM;
+
+	rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
+	if (!rxdesc_stats) {
+		kfree(rxfill_stats);
+		return -ENOMEM;
+	}
+
+	/* Get stats for Rx fill rings. */
+	for (i = 0; i < rxfill->num_rings; i++) {
+		struct edma_rxfill_ring *rxfill_ring;
+		struct edma_rxfill_stats *stats;
+
+		rxfill_ring = &edma_ctx->rxfill_rings[i];
+		stats = &rxfill_ring->rxfill_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			rxfill_stats[i].alloc_failed = stats->alloc_failed;
+			rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	/* Get stats for Rx Desc rings. */
+	for (i = 0; i < rx->num_rings; i++) {
+		struct edma_rxdesc_ring *rxdesc_ring;
+		struct edma_rxdesc_stats *stats;
+
+		rxdesc_ring = &edma_ctx->rx_rings[i];
+		stats = &rxdesc_ring->rxdesc_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			rxdesc_stats[i].src_port_inval = stats->src_port_inval;
+			rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
+			rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
+	for (i = 0; i < rx->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
+			   i + rx->ring_start, rxdesc_stats[i].src_port_inval);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
+			   i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
+			   i + rx->ring_start,
+			   rxdesc_stats[i].src_port_inval_netdev);
+		seq_puts(m, "\n");
+	}
+
+	seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
+	for (i = 0; i < rxfill->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
+		seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
+			   i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
+		seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
+			   i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
+		seq_puts(m, "\n");
+	}
+
+	kfree(rxfill_stats);
+	kfree(rxdesc_stats);
+	return 0;
+}
+
+static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
+					    void __maybe_unused *p)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct edma_ring_info *tx = hw_info->tx;
+	struct edma_txcmpl_stats *txcmpl_stats;
+	struct edma_txdesc_stats *txdesc_stats;
+	unsigned int start;
+	u32 i;
+
+	txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
+	if (!txcmpl_stats)
+		return -ENOMEM;
+
+	txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
+	if (!txdesc_stats) {
+		kfree(txcmpl_stats);
+		return -ENOMEM;
+	}
+
+	/* Get stats for Tx desc rings. */
+	for (i = 0; i < tx->num_rings; i++) {
+		struct edma_txdesc_ring *txdesc_ring;
+		struct edma_txdesc_stats *stats;
+
+		txdesc_ring = &edma_ctx->tx_rings[i];
+		stats = &txdesc_ring->txdesc_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
+			txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	/* Get stats for Tx Complete rings. */
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		struct edma_txcmpl_ring *txcmpl_ring;
+		struct edma_txcmpl_stats *stats;
+
+		txcmpl_ring = &edma_ctx->txcmpl_rings[i];
+		stats = &txcmpl_ring->txcmpl_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
+			txcmpl_stats[i].errors = stats->errors;
+			txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
+			txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
+		seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
+		seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].errors);
+		seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
+		seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
+		seq_puts(m, "\n");
+	}
+
+	seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
+	for (i = 0; i < tx->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
+		seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
+			   i + tx->ring_start, txdesc_stats[i].no_desc_avail);
+		seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
+			   i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
+		seq_puts(m, "\n");
+	}
+
+	kfree(txcmpl_stats);
+	kfree(txdesc_stats);
+	return 0;
+}
+
+static int edma_debugfs_err_stats_show(struct seq_file *m,
+				       void __maybe_unused *p)
+{
+	struct edma_err_stats *err_stats, *pcpu_err_stats;
+	unsigned int start;
+	u32 cpu;
+
+	err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
+	if (!err_stats)
+		return -ENOMEM;
+
+	/* Get percpu EDMA miscellaneous stats. */
+	for_each_possible_cpu(cpu) {
+		pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
+		do {
+			start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
+			err_stats->edma_axi_read_err +=
+				pcpu_err_stats->edma_axi_read_err;
+			err_stats->edma_axi_write_err +=
+				pcpu_err_stats->edma_axi_write_err;
+			err_stats->edma_rxdesc_fifo_full +=
+				pcpu_err_stats->edma_rxdesc_fifo_full;
+			err_stats->edma_rx_buf_size_err +=
+				pcpu_err_stats->edma_rx_buf_size_err;
+			err_stats->edma_tx_sram_full +=
+				pcpu_err_stats->edma_tx_sram_full;
+			err_stats->edma_tx_data_len_err +=
+				pcpu_err_stats->edma_tx_data_len_err;
+			err_stats->edma_tx_timeout +=
+				pcpu_err_stats->edma_tx_timeout;
+			err_stats->edma_txcmpl_buf_full +=
+				pcpu_err_stats->edma_txcmpl_buf_full;
+		} while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
+	}
+
+	edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA error stats:\n\n");
+	seq_printf(m, "\t\t axi read error = %llu\n",
+		   err_stats->edma_axi_read_err);
+	seq_printf(m, "\t\t axi write error = %llu\n",
+		   err_stats->edma_axi_write_err);
+	seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
+		   err_stats->edma_rxdesc_fifo_full);
+	seq_printf(m, "\t\t Rx buffer size error = %llu\n",
+		   err_stats->edma_rx_buf_size_err);
+	seq_printf(m, "\t\t Tx SRAM full = %llu\n",
+		   err_stats->edma_tx_sram_full);
+	seq_printf(m, "\t\t Tx data length error = %llu\n",
+		   err_stats->edma_tx_data_len_err);
+	seq_printf(m, "\t\t Tx timeout = %llu\n",
+		   err_stats->edma_tx_timeout);
+	seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
+		   err_stats->edma_txcmpl_buf_full);
+
+	kfree(err_stats);
+	return 0;
+}
+
+static int edma_debugfs_rx_rings_stats_open(struct inode *inode,
+					    struct file *file)
+{
+	return single_open(file, edma_debugfs_rx_rings_stats_show,
+			   inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_rx_rings_file_ops = {
+	.open = edma_debugfs_rx_rings_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+static int edma_debugfs_tx_rings_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_tx_rings_file_ops = {
+	.open = edma_debugfs_tx_rings_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+static int edma_debugfs_err_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_misc_file_ops = {
+	.open = edma_debugfs_err_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+/**
+ * edma_debugfs_teardown - EDMA debugfs teardown.
+ *
+ * EDMA debugfs teardown and free stats memory.
+ */
+void edma_debugfs_teardown(void)
+{
+	/* Free EDMA miscellaneous stats memory */
+	edma_err_stats_free();
+
+	debugfs_remove_recursive(edma_dentry);
+	edma_dentry = NULL;
+	stats_dentry = NULL;
+}
+
+/**
+ * edma_debugfs_setup - EDMA debugfs setup.
+ * @ppe_dev: PPE Device
+ *
+ * EDMA debugfs setup.
+ */
+int edma_debugfs_setup(struct ppe_device *ppe_dev)
+{
+	edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
+	if (IS_ERR(edma_dentry)) {
+		pr_err("Unable to create edma directory in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	stats_dentry = debugfs_create_dir("stats", edma_dentry);
+	if (IS_ERR(stats_dentry)) {
+		pr_err("Unable to create stats directory in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_rx_rings_file_ops))) {
+		pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_tx_rings_file_ops))) {
+		pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	/* Allocate memory for EDMA miscellaneous stats */
+	if (edma_err_stats_alloc() < 0) {
+		pr_err("Unable to allocate miscellaneous percpu stats\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("err_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_misc_file_ops))) {
+		pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	return 0;
+
+debugfs_dir_failed:
+	debugfs_remove_recursive(edma_dentry);
+	edma_dentry = NULL;
+	stats_dentry = NULL;
+	return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
index 1cd4c491e724..f325fcf1e17e 100644
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -6,9 +6,11 @@
 /* PPE debugfs routines for display of PPE counters useful for debug. */

 #include <linux/debugfs.h>
+#include <linux/netdevice.h>
 #include <linux/regmap.h>
 #include <linux/seq_file.h>

+#include "edma.h"
 #include "ppe.h"
 #include "ppe_config.h"
 #include "ppe_debugfs.h"
@@ -711,15 +713,30 @@ static const struct file_operations ppe_debugfs_packet_counter_fops = {

 void ppe_debugfs_setup(struct ppe_device *ppe_dev)
 {
+	int ret;
+
 	ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
 	debugfs_create_file("packet_counter", 0444,
 			    ppe_dev->debugfs_root,
 			    ppe_dev,
 			    &ppe_debugfs_packet_counter_fops);
+
+	if (IS_ERR(ppe_dev->debugfs_root)) {
+		dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
+		return;
+	}
+
+	ret = edma_debugfs_setup(ppe_dev);
+	if (ret) {
+		dev_err(ppe_dev->dev, "Error in EDMA debugfs setup, ret: %d\n", ret);
+		debugfs_remove_recursive(ppe_dev->debugfs_root);
+		ppe_dev->debugfs_root = NULL;
+	}
 }

 void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
 {
+	edma_debugfs_teardown();
 	debugfs_remove_recursive(ppe_dev->debugfs_root);
 	ppe_dev->debugfs_root = NULL;
 }
-- 
2.45.2
