1 From 87db029b2576ccae40dcf8173e2fbb84fdbec1a5 Mon Sep 17 00:00:00 2001
2 From: Peng Ma <peng.ma@nxp.com>
3 Date: Mon, 4 Mar 2019 15:45:56 +0800
4 Subject: [PATCH] dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA controller
5 driver for Layerscape SoCs
7 DPAA2 (Data Path Acceleration Architecture 2) qDMA supports
8 virtualized channel by allowing DMA jobs to be enqueued into
9 different work queues. Core can initiate a DMA transaction by
10 preparing a frame descriptor(FD) for each DMA job and enqueuing
11 this job through a hardware portal. DPAA2 components can also
12 prepare a FD and enqueue a DMA job through a hardware portal.
13 The qDMA prefetches DMA jobs through DPAA2 hardware portal. It
14 then schedules and dispatches to internal DMA hardware engines,
15 which generate read and write requests. Both qDMA source data and
16 destination data can be either contiguous or non-contiguous using
17 one or more scatter/gather tables.
18 The qDMA supports global bandwidth flow control where all DMA
19 transactions are stalled if the bandwidth threshold has been reached.
20 Also supported is transaction-based read throttling.
22 Add the NXP dpaa2 qDMA driver to support some Layerscape SoCs,
23 such as: LS1088A, LS208xA, LX2, etc.
25 Signed-off-by: Peng Ma <peng.ma@nxp.com>
27 drivers/dma/Kconfig | 2 +
28 drivers/dma/Makefile | 1 +
29 drivers/dma/fsl-dpaa2-qdma/Kconfig | 9 +
30 drivers/dma/fsl-dpaa2-qdma/Makefile | 3 +
31 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 825 ++++++++++++++++++++++++++++++++
32 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 153 ++++++
33 6 files changed, 993 insertions(+)
34 create mode 100644 drivers/dma/fsl-dpaa2-qdma/Kconfig
35 create mode 100644 drivers/dma/fsl-dpaa2-qdma/Makefile
36 create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
37 create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
39 --- a/drivers/dma/Kconfig
40 +++ b/drivers/dma/Kconfig
41 @@ -669,6 +669,8 @@ source "drivers/dma/sh/Kconfig"
43 source "drivers/dma/ti/Kconfig"
45 +source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
50 --- a/drivers/dma/Makefile
51 +++ b/drivers/dma/Makefile
52 @@ -75,6 +75,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier
53 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
54 obj-$(CONFIG_ZX_DMA) += zx_dma.o
55 obj-$(CONFIG_ST_FDMA) += st_fdma.o
56 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
61 +++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig
63 +menuconfig FSL_DPAA2_QDMA
64 + tristate "NXP DPAA2 QDMA"
66 + depends on FSL_MC_BUS && FSL_MC_DPIO
68 + select DMA_VIRTUAL_CHANNELS
70 + NXP Data Path Acceleration Architecture 2 QDMA driver,
71 + using the NXP MC bus driver.
73 +++ b/drivers/dma/fsl-dpaa2-qdma/Makefile
75 +# SPDX-License-Identifier: GPL-2.0
76 +# Makefile for the NXP DPAA2 qDMA controllers
77 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
79 +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
81 +// SPDX-License-Identifier: GPL-2.0
82 +// Copyright 2019 NXP
84 +#include <linux/init.h>
85 +#include <linux/module.h>
86 +#include <linux/dmapool.h>
87 +#include <linux/of_irq.h>
88 +#include <linux/iommu.h>
89 +#include <linux/sys_soc.h>
90 +#include <linux/fsl/mc.h>
91 +#include <soc/fsl/dpaa2-io.h>
93 +#include "../virt-dma.h"
95 +#include "dpaa2-qdma.h"
97 +static bool smmu_disable = true;
99 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
101 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
104 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
106 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
109 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
111 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
112 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
113 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
115 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
116 + sizeof(struct dpaa2_fd),
117 + sizeof(struct dpaa2_fd), 0);
118 + if (!dpaa2_chan->fd_pool)
121 + dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
122 + sizeof(struct dpaa2_fl_entry),
123 + sizeof(struct dpaa2_fl_entry), 0);
124 + if (!dpaa2_chan->fl_pool)
127 + dpaa2_chan->sdd_pool =
128 + dma_pool_create("sdd_pool", dev,
129 + sizeof(struct dpaa2_qdma_sd_d),
130 + sizeof(struct dpaa2_qdma_sd_d), 0);
131 + if (!dpaa2_chan->sdd_pool)
134 + return dpaa2_qdma->desc_allocated++;
136 + dma_pool_destroy(dpaa2_chan->fl_pool);
138 + dma_pool_destroy(dpaa2_chan->fd_pool);
143 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
145 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
146 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
147 + unsigned long flags;
151 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
152 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
153 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
155 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
157 + dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
158 + dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
160 + dma_pool_destroy(dpaa2_chan->fd_pool);
161 + dma_pool_destroy(dpaa2_chan->fl_pool);
162 + dma_pool_destroy(dpaa2_chan->sdd_pool);
163 + dpaa2_qdma->desc_allocated--;
167 + * Request a command descriptor for enqueue.
169 +static struct dpaa2_qdma_comp *
170 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
172 + struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
173 + struct device *dev = &qdma_priv->dpdmai_dev->dev;
174 + struct dpaa2_qdma_comp *comp_temp = NULL;
175 + unsigned long flags;
177 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
178 + if (list_empty(&dpaa2_chan->comp_free)) {
179 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
180 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
183 + comp_temp->fd_virt_addr =
184 + dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
185 + &comp_temp->fd_bus_addr);
186 + if (!comp_temp->fd_virt_addr)
189 + comp_temp->fl_virt_addr =
190 + dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
191 + &comp_temp->fl_bus_addr);
192 + if (!comp_temp->fl_virt_addr)
195 + comp_temp->desc_virt_addr =
196 + dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
197 + &comp_temp->desc_bus_addr);
198 + if (!comp_temp->desc_virt_addr)
201 + comp_temp->qchan = dpaa2_chan;
205 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
206 + struct dpaa2_qdma_comp, list);
207 + list_del(&comp_temp->list);
208 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
210 + comp_temp->qchan = dpaa2_chan;
215 + dma_pool_free(dpaa2_chan->fl_pool,
216 + comp_temp->fl_virt_addr,
217 + comp_temp->fl_bus_addr);
219 + dma_pool_free(dpaa2_chan->fd_pool,
220 + comp_temp->fd_virt_addr,
221 + comp_temp->fd_bus_addr);
225 + dev_err(dev, "Failed to request descriptor\n");
230 +dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
232 + struct dpaa2_fd *fd;
234 + fd = dpaa2_comp->fd_virt_addr;
235 + memset(fd, 0, sizeof(struct dpaa2_fd));
238 + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
241 + * Bypass memory translation, frame list format, short length disable;
242 + * we need to disable BMT if fsl-mc uses an IOVA address
245 + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
246 + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
248 + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
251 +/* first frame list for descriptor buffer */
253 +dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
254 + struct dpaa2_qdma_comp *dpaa2_comp,
257 + struct dpaa2_qdma_sd_d *sdd;
259 + sdd = dpaa2_comp->desc_virt_addr;
260 + memset(sdd, 0, 2 * (sizeof(*sdd)));
262 + /* source descriptor CMD */
263 + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
266 + /* dest descriptor CMD */
268 + sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
270 + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
272 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
274 + /* first frame list to source descriptor */
275 + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
276 + dpaa2_fl_set_len(f_list, 0x20);
277 + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
279 + /* bypass memory translation */
281 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
284 +/* source and destination frame list */
286 +dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
287 + dma_addr_t dst, dma_addr_t src,
288 + size_t len, uint8_t fmt)
290 + /* source frame list to source buffer */
291 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
293 + dpaa2_fl_set_addr(f_list, src);
294 + dpaa2_fl_set_len(f_list, len);
296 + /* single buffer frame or scatter gather frame */
297 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
299 + /* bypass memory translation */
301 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
305 + /* destination frame list to destination buffer */
306 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
308 + dpaa2_fl_set_addr(f_list, dst);
309 + dpaa2_fl_set_len(f_list, len);
310 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
311 + /* single buffer frame or scatter gather frame */
312 + dpaa2_fl_set_final(f_list, QDMA_FL_F);
313 + /* bypass memory translation */
315 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
318 +static struct dma_async_tx_descriptor
319 +*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
320 + dma_addr_t src, size_t len, ulong flags)
322 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
323 + struct dpaa2_qdma_engine *dpaa2_qdma;
324 + struct dpaa2_qdma_comp *dpaa2_comp;
325 + struct dpaa2_fl_entry *f_list;
328 + dpaa2_qdma = dpaa2_chan->qdma;
329 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
333 + wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
335 + /* populate Frame descriptor */
336 + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
338 + f_list = dpaa2_comp->fl_virt_addr;
340 + /* first frame list for descriptor buffer (long format) */
341 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
345 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
347 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
350 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
352 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
353 + struct dpaa2_qdma_comp *dpaa2_comp;
354 + struct virt_dma_desc *vdesc;
355 + struct dpaa2_fd *fd;
356 + unsigned long flags;
359 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
360 + spin_lock(&dpaa2_chan->vchan.lock);
361 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
362 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
365 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
367 + fd = dpaa2_comp->fd_virt_addr;
369 + list_del(&vdesc->node);
370 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
372 + err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
374 + list_del(&dpaa2_comp->list);
375 + list_add_tail(&dpaa2_comp->list,
376 + &dpaa2_chan->comp_free);
380 + spin_unlock(&dpaa2_chan->vchan.lock);
381 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
384 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
386 + struct dpaa2_qdma_priv_per_prio *ppriv;
387 + struct device *dev = &ls_dev->dev;
388 + struct dpaa2_qdma_priv *priv;
389 + u8 prio_def = DPDMAI_PRIO_NUM;
393 + priv = dev_get_drvdata(dev);
396 + priv->dpqdma_id = ls_dev->obj_desc.id;
398 + /* Get the handle for the DPDMAI this interface is associated with */
399 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
401 + dev_err(dev, "dpdmai_open() failed\n");
405 + dev_dbg(dev, "Opened dpdmai object successfully\n");
407 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
408 + &priv->dpdmai_attr);
410 + dev_err(dev, "dpdmai_get_attributes() failed\n");
414 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
415 + dev_err(dev, "DPDMAI major version mismatch\n"
416 + "Found %u.%u, supported version is %u.%u\n",
417 + priv->dpdmai_attr.version.major,
418 + priv->dpdmai_attr.version.minor,
419 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
423 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
424 + dev_err(dev, "DPDMAI minor version mismatch\n"
425 + "Found %u.%u, supported version is %u.%u\n",
426 + priv->dpdmai_attr.version.major,
427 + priv->dpdmai_attr.version.minor,
428 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
432 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
433 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
438 + priv->ppriv = ppriv;
440 + for (i = 0; i < priv->num_pairs; i++) {
441 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
442 + i, &priv->rx_queue_attr[i]);
444 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
447 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
449 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
450 + i, &priv->tx_fqid[i]);
452 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
455 + ppriv->req_fqid = priv->tx_fqid[i];
457 + ppriv->priv = priv;
463 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
467 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
469 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
470 + struct dpaa2_qdma_priv_per_prio, nctx);
471 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
472 + struct dpaa2_qdma_priv *priv = ppriv->priv;
473 + u32 n_chans = priv->dpaa2_qdma->n_chans;
474 + struct dpaa2_qdma_chan *qchan;
475 + const struct dpaa2_fd *fd_eq;
476 + const struct dpaa2_fd *fd;
477 + struct dpaa2_dq *dq;
485 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
491 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
492 + } while (!is_last && !dq);
494 + dev_err(priv->dev, "FQID returned no valid frames!\n");
498 + /* obtain FD and process the error */
499 + fd = dpaa2_dq_fd(dq);
501 + status = dpaa2_fd_get_ctrl(fd) & 0xff;
503 + dev_err(priv->dev, "FD error occurred\n");
505 + for (i = 0; i < n_chans; i++) {
506 + qchan = &priv->dpaa2_qdma->chans[i];
507 + spin_lock(&qchan->queue_lock);
508 + if (list_empty(&qchan->comp_used)) {
509 + spin_unlock(&qchan->queue_lock);
512 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
513 + &qchan->comp_used, list) {
514 + fd_eq = dpaa2_comp->fd_virt_addr;
516 + if (le64_to_cpu(fd_eq->simple.addr) ==
517 + le64_to_cpu(fd->simple.addr)) {
518 + spin_lock(&qchan->vchan.lock);
519 + vchan_cookie_complete(&
520 + dpaa2_comp->vdesc);
521 + spin_unlock(&qchan->vchan.lock);
526 + spin_unlock(&qchan->queue_lock);
532 + dpaa2_io_service_rearm(NULL, ctx);
535 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
537 + struct dpaa2_qdma_priv_per_prio *ppriv;
538 + struct device *dev = priv->dev;
542 + num = priv->num_pairs;
543 + ppriv = priv->ppriv;
544 + for (i = 0; i < num; i++) {
545 + ppriv->nctx.is_cdan = 0;
546 + ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
547 + ppriv->nctx.id = ppriv->rsp_fqid;
548 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
549 + err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
551 + dev_err(dev, "Notification register failed\n");
556 + dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
557 + if (!ppriv->store) {
558 + dev_err(dev, "dpaa2_io_store_create() failed\n");
567 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
570 + while (ppriv >= priv->ppriv) {
571 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
572 + dpaa2_io_store_destroy(ppriv->store);
578 +static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
580 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
583 + for (i = 0; i < priv->num_pairs; i++) {
584 + dpaa2_io_store_destroy(ppriv->store);
589 +static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
591 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
592 + struct device *dev = priv->dev;
595 + for (i = 0; i < priv->num_pairs; i++) {
596 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
601 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
603 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
604 + struct dpaa2_qdma_priv_per_prio *ppriv;
605 + struct device *dev = priv->dev;
606 + struct fsl_mc_device *ls_dev;
610 + ls_dev = to_fsl_mc_device(dev);
611 + num = priv->num_pairs;
612 + ppriv = priv->ppriv;
613 + for (i = 0; i < num; i++) {
614 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
615 + DPDMAI_QUEUE_OPT_DEST;
616 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
617 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
618 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
619 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
620 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
621 + rx_queue_cfg.dest_cfg.priority,
624 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
634 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
636 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
637 + struct device *dev = priv->dev;
638 + struct fsl_mc_device *ls_dev;
642 + ls_dev = to_fsl_mc_device(dev);
644 + for (i = 0; i < priv->num_pairs; i++) {
645 + ppriv->nctx.qman64 = 0;
646 + ppriv->nctx.dpio_id = 0;
650 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
652 + dev_err(dev, "dpdmai_reset() failed\n");
657 +static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
658 + struct list_head *head)
660 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
661 + unsigned long flags;
663 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
665 + spin_lock_irqsave(&qchan->queue_lock, flags);
666 + list_del(&comp_tmp->list);
667 + spin_unlock_irqrestore(&qchan->queue_lock, flags);
668 + dma_pool_free(qchan->fd_pool,
669 + comp_tmp->fd_virt_addr,
670 + comp_tmp->fd_bus_addr);
671 + dma_pool_free(qchan->fl_pool,
672 + comp_tmp->fl_virt_addr,
673 + comp_tmp->fl_bus_addr);
674 + dma_pool_free(qchan->sdd_pool,
675 + comp_tmp->desc_virt_addr,
676 + comp_tmp->desc_bus_addr);
681 +static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
683 + struct dpaa2_qdma_chan *qchan;
686 + num = dpaa2_qdma->n_chans;
687 + for (i = 0; i < num; i++) {
688 + qchan = &dpaa2_qdma->chans[i];
689 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
690 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
691 + dma_pool_destroy(qchan->fd_pool);
692 + dma_pool_destroy(qchan->fl_pool);
693 + dma_pool_destroy(qchan->sdd_pool);
697 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
699 + struct dpaa2_qdma_comp *dpaa2_comp;
700 + struct dpaa2_qdma_chan *qchan;
701 + unsigned long flags;
703 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
704 + qchan = dpaa2_comp->qchan;
705 + spin_lock_irqsave(&qchan->queue_lock, flags);
706 + list_del(&dpaa2_comp->list);
707 + list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
708 + spin_unlock_irqrestore(&qchan->queue_lock, flags);
711 +static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
713 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
714 + struct dpaa2_qdma_chan *dpaa2_chan;
715 + int num = priv->num_pairs;
718 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
719 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
720 + dpaa2_chan = &dpaa2_qdma->chans[i];
721 + dpaa2_chan->qdma = dpaa2_qdma;
722 + dpaa2_chan->fqid = priv->tx_fqid[i % num];
723 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
724 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
725 + spin_lock_init(&dpaa2_chan->queue_lock);
726 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
727 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
732 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
734 + struct device *dev = &dpdmai_dev->dev;
735 + struct dpaa2_qdma_engine *dpaa2_qdma;
736 + struct dpaa2_qdma_priv *priv;
739 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
742 + dev_set_drvdata(dev, priv);
743 + priv->dpdmai_dev = dpdmai_dev;
745 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
746 + if (priv->iommu_domain)
747 + smmu_disable = false;
749 + /* obtain a MC portal */
750 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
753 + err = -EPROBE_DEFER;
755 + dev_err(dev, "MC portal allocation failed\n");
759 + /* DPDMAI initialization */
760 + err = dpaa2_qdma_setup(dpdmai_dev);
762 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
763 + goto err_dpdmai_setup;
767 + err = dpaa2_qdma_dpio_setup(priv);
769 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
770 + goto err_dpio_setup;
773 + /* DPDMAI binding to DPIO */
774 + err = dpaa2_dpdmai_bind(priv);
776 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
780 + /* DPDMAI enable */
781 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
783 + dev_err(dev, "dpdmai_enable() faile\n");
787 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
793 + priv->dpaa2_qdma = dpaa2_qdma;
794 + dpaa2_qdma->priv = priv;
796 + dpaa2_qdma->desc_allocated = 0;
797 + dpaa2_qdma->n_chans = NUM_CH;
799 + dpaa2_dpdmai_init_channels(dpaa2_qdma);
801 + if (soc_device_match(soc_fixup_tuning))
802 + dpaa2_qdma->qdma_wrtype_fixup = true;
804 + dpaa2_qdma->qdma_wrtype_fixup = false;
806 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
807 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
808 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
810 + dpaa2_qdma->dma_dev.dev = dev;
811 + dpaa2_qdma->dma_dev.device_alloc_chan_resources =
812 + dpaa2_qdma_alloc_chan_resources;
813 + dpaa2_qdma->dma_dev.device_free_chan_resources =
814 + dpaa2_qdma_free_chan_resources;
815 + dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
816 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
817 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
819 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
821 + dev_err(dev, "Can't register NXP QDMA engine.\n");
822 + goto err_dpaa2_qdma;
830 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
832 + dpaa2_dpdmai_dpio_unbind(priv);
834 + dpaa2_dpmai_store_free(priv);
835 + dpaa2_dpdmai_dpio_free(priv);
837 + kfree(priv->ppriv);
838 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
840 + fsl_mc_portal_free(priv->mc_io);
843 + dev_set_drvdata(dev, NULL);
847 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
849 + struct dpaa2_qdma_engine *dpaa2_qdma;
850 + struct dpaa2_qdma_priv *priv;
851 + struct device *dev;
853 + dev = &ls_dev->dev;
854 + priv = dev_get_drvdata(dev);
855 + dpaa2_qdma = priv->dpaa2_qdma;
857 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
858 + dpaa2_dpdmai_dpio_unbind(priv);
859 + dpaa2_dpmai_store_free(priv);
860 + dpaa2_dpdmai_dpio_free(priv);
861 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
862 + fsl_mc_portal_free(priv->mc_io);
863 + dev_set_drvdata(dev, NULL);
864 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
866 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
873 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
875 + .vendor = FSL_MC_VENDOR_FREESCALE,
876 + .obj_type = "dpdmai",
881 +static struct fsl_mc_driver dpaa2_qdma_driver = {
883 + .name = "dpaa2-qdma",
884 + .owner = THIS_MODULE,
886 + .probe = dpaa2_qdma_probe,
887 + .remove = dpaa2_qdma_remove,
888 + .match_id_table = dpaa2_qdma_id_table
891 +static int __init dpaa2_qdma_driver_init(void)
893 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
895 +late_initcall(dpaa2_qdma_driver_init);
897 +static void __exit fsl_qdma_exit(void)
899 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
901 +module_exit(fsl_qdma_exit);
903 +MODULE_ALIAS("platform:fsl-dpaa2-qdma");
904 +MODULE_LICENSE("GPL v2");
905 +MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
907 +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
909 +/* SPDX-License-Identifier: GPL-2.0 */
910 +/* Copyright 2019 NXP */
912 +#ifndef __DPAA2_QDMA_H
913 +#define __DPAA2_QDMA_H
915 +#define DPAA2_QDMA_STORE_SIZE 16
918 +struct dpaa2_qdma_sd_d {
922 + u32 ssd:12; /* source stride distance */
923 + u32 sss:12; /* source stride size */
927 + u32 dsd:12; /* Destination stride distance */
928 + u32 dss:12; /* Destination stride size */
932 + u32 rbpcmd; /* Route-by-port command */
934 +} __attribute__((__packed__));
936 +/* Source descriptor command read transaction type for RBP=0: */
937 +/* coherent copy of cacheable memory */
938 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
939 +/* Destination descriptor command write transaction type for RBP=0: */
940 +/* coherent copy of cacheable memory */
941 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
942 +#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
944 +#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */
945 +#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */
946 +#define QMAN_FD_BMT_DISABLE (0) /* bypass memory translation */
947 +#define QMAN_FD_SL_DISABLE (0) /* short length disabled */
948 +#define QMAN_FD_SL_ENABLE BIT(14) /* short length enabled */
950 +#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
951 +#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */
953 +#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */
954 +#define QDMA_FD_LONG_FORMAT (0) /* long format */
955 +#define QDMA_SER_DISABLE (8) /* no notification */
956 +#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
957 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
958 +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
959 +#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
961 +#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
962 +#define QMAN_FD_VA_DISABLE (0)/* Address used is a real address */
963 +/* Flow Context: 49bit physical address */
964 +#define QMAN_FD_CBMT_ENABLE BIT(15)
965 +#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64bit virtual address */
966 +#define QMAN_FD_SC_DISABLE (0) /* stashing control */
968 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
969 +#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */
970 +#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */
971 +#define QDMA_FL_BMT_DISABLE (0x0) /* enable bypass memory translation */
972 +#define QDMA_FL_SL_LONG (0x0)/* long length */
973 +#define QDMA_FL_SL_SHORT (0x1) /* short length */
974 +#define QDMA_FL_F (0x1)/* last frame list bit */
976 +/*Description of Frame list table structure*/
977 +struct dpaa2_qdma_chan {
978 + struct dpaa2_qdma_engine *qdma;
979 + struct virt_dma_chan vchan;
980 + struct virt_dma_desc vdesc;
981 + enum dma_status status;
984 + /* spinlock used by dpaa2 qdma driver */
985 + spinlock_t queue_lock;
986 + struct dma_pool *fd_pool;
987 + struct dma_pool *fl_pool;
988 + struct dma_pool *sdd_pool;
990 + struct list_head comp_used;
991 + struct list_head comp_free;
995 +struct dpaa2_qdma_comp {
996 + dma_addr_t fd_bus_addr;
997 + dma_addr_t fl_bus_addr;
998 + dma_addr_t desc_bus_addr;
999 + struct dpaa2_fd *fd_virt_addr;
1000 + struct dpaa2_fl_entry *fl_virt_addr;
1001 + struct dpaa2_qdma_sd_d *desc_virt_addr;
1002 + struct dpaa2_qdma_chan *qchan;
1003 + struct virt_dma_desc vdesc;
1004 + struct list_head list;
1007 +struct dpaa2_qdma_engine {
1008 + struct dma_device dma_dev;
1010 + struct dpaa2_qdma_chan chans[NUM_CH];
1011 + int qdma_wrtype_fixup;
1012 + int desc_allocated;
1014 + struct dpaa2_qdma_priv *priv;
1018 + * dpaa2_qdma_priv - driver private data
1020 +struct dpaa2_qdma_priv {
1023 + struct iommu_domain *iommu_domain;
1024 + struct dpdmai_attr dpdmai_attr;
1025 + struct device *dev;
1026 + struct fsl_mc_io *mc_io;
1027 + struct fsl_mc_device *dpdmai_dev;
1030 + struct dpaa2_qdma_engine *dpaa2_qdma;
1031 + struct dpaa2_qdma_priv_per_prio *ppriv;
1033 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1034 + u32 tx_fqid[DPDMAI_PRIO_NUM];
1037 +struct dpaa2_qdma_priv_per_prio {
1042 + struct dpaa2_io_store *store;
1043 + struct dpaa2_io_notification_ctx nctx;
1045 + struct dpaa2_qdma_priv *priv;
1048 +static struct soc_device_attribute soc_fixup_tuning[] = {
1049 + { .family = "QorIQ LX2160A"},
1053 +/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
1054 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1055 + sizeof(struct dpaa2_fl_entry) * 3 + \
1056 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1058 +static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
1059 +static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1060 + struct list_head *head);
1061 +#endif /* __DPAA2_QDMA_H */