1 From 88476b7fb025afdf9cc2247e8d04ab8e027ce9cb Mon Sep 17 00:00:00 2001
2 From: Robin Gong <yibin.gong@nxp.com>
3 Date: Fri, 31 Mar 2017 15:53:39 +0800
4 Subject: [PATCH] MLK-14610 DMA: fsl-edma-v3: add fsl-edma-v3 support
6 Add edma-v3 driver on i.mx8qm.
8 Signed-off-by: Robin Gong <yibin.gong@nxp.com>
9 (cherry picked from commit d0ac0971c2e637ebddc853f12f71d130f5df4f91)
11 .../devicetree/bindings/dma/fsl-edma-v3.txt | 64 ++
12 drivers/dma/Kconfig | 11 +
13 drivers/dma/Makefile | 1 +
14 drivers/dma/fsl-edma-v3.c | 890 +++++++++++++++++++++
15 4 files changed, 966 insertions(+)
16 create mode 100644 Documentation/devicetree/bindings/dma/fsl-edma-v3.txt
17 create mode 100644 drivers/dma/fsl-edma-v3.c
20 +++ b/Documentation/devicetree/bindings/dma/fsl-edma-v3.txt
22 +* Freescale enhanced Direct Memory Access(eDMA-v3) Controller
24 + The eDMA-v3 controller is inherited from FSL eDMA, and is first integrated
25 + on Freescale i.MX8QM SOC chip. The eDMA channels have multiplex capability by
26 + programmable memory-mapped registers. Each specific DMA request source has a fixed channel.
31 + - "fsl,imx8qm-edma" for eDMA used similar to that on i.MX8QM SoC
32 + - "fsl,imx8qm-adma" for audio eDMA used on i.MX8QM
33 +- reg : Specifies base physical address(s) and size of the eDMA channel registers.
34 + Each eDMA channel has a separate register address range and size.
35 +- interrupts : A list of interrupt-specifiers, each channel has one interrupt.
36 +- interrupt-names : Should contain:
37 + "edma-chan12-tx" - the channel12 transmission interrupt
38 +- #dma-cells : Must be <3>.
39 + The 1st cell specifies the channel ID.
40 + The 2nd cell specifies the channel priority.
41 + The 3rd cell specifies the channel type like for transmit or receive:
42 + 0: transmit, 1: receive.
43 + See the SoC's reference manual for all the supported request sources.
44 +- dma-channels : Number of channels supported by the controller
47 +edma0: dma-controller@40018000 {
48 + compatible = "fsl,imx8qm-edma";
49 + reg = <0x0 0x5a2c0000 0x0 0x10000>, /* channel12 UART0 rx */
50 + <0x0 0x5a2d0000 0x0 0x10000>, /* channel13 UART0 tx */
51 + <0x0 0x5a2e0000 0x0 0x10000>, /* channel14 UART1 rx */
52 + <0x0 0x5a2f0000 0x0 0x10000>; /* channel15 UART1 tx */
55 + interrupts = <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
56 + <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
57 + <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
58 + <GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>;
59 + interrupt-names = "edma-chan12-tx", "edma-chan13-tx",
60 + "edma-chan14-tx", "edma-chan15-tx";
65 +DMA client drivers that use the DMA function must use the format described
66 +in the dma.txt file, using a three-cell specifier for each channel: the 1st
67 +specifies the channel number, the 2nd specifies the priority, and the 3rd
68 +specifies the channel type is for transmit or receive: 0: transmit, 1: receive.
71 +lpuart1: serial@5a070000 {
72 + compatible = "fsl,imx8qm-lpuart";
73 + reg = <0x0 0x5a070000 0x0 0x1000>;
74 + interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
75 + interrupt-parent = <&gic>;
76 + clocks = <&clk IMX8QM_UART1_CLK>;
77 + clock-names = "ipg";
78 + assigned-clocks = <&clk IMX8QM_UART1_CLK>;
79 + assigned-clock-rates = <80000000>;
80 + power-domains = <&pd_dma_lpuart1>;
81 + dma-names = "tx","rx";
82 + dmas = <&edma0 15 0 0>,
84 + status = "disabled";
86 --- a/drivers/dma/Kconfig
87 +++ b/drivers/dma/Kconfig
88 @@ -227,6 +227,17 @@ config FSL_QDMA
89 or dequeuing DMA jobs from, different work queues.
90 This module can be found on NXP Layerscape SoCs.
91 The qdma driver only work on SoCs with a DPAA hardware block.
93 + tristate "Freescale eDMA v3 engine support"
96 + select DMA_VIRTUAL_CHANNELS
98 + Support the Freescale eDMA v3 engine with programmable channel.
99 + This driver is based on FSL_EDMA but big changes come such as
100 + different interrupt for different channel, different register
101 + scope for different channel.
102 + This module can be found on Freescale i.MX8QM.
105 tristate "Freescale RAID engine Support"
106 --- a/drivers/dma/Makefile
107 +++ b/drivers/dma/Makefile
108 @@ -32,6 +32,7 @@ obj-$(CONFIG_DW_EDMA) += dw-edma/
109 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
110 obj-$(CONFIG_FSL_DMA) += fsldma.o
111 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
112 +obj-$(CONFIG_FSL_EDMA_V3) += fsl-edma-v3.o
113 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
114 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
115 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
117 +++ b/drivers/dma/fsl-edma-v3.c
120 + * drivers/dma/fsl-edma-v3.c
122 + * Copyright 2017 NXP.
124 + * Driver for the Freescale eDMA engine v3. This driver is based on fsl-edma.c
125 + * but changed to meet the IP change on i.MX8QM: every dma channel is specific
126 + * to hardware. For example, channel 14 for LPUART1 receive request and channel
127 + * 13 for transmit request. The eDMA block can be found on i.MX8QM
129 + * This program is free software; you can redistribute it and/or modify it
130 + * under the terms of the GNU General Public License as published by the
131 + * Free Software Foundation; either version 2 of the License, or (at your
132 + * option) any later version.
135 +#include <linux/init.h>
136 +#include <linux/module.h>
137 +#include <linux/interrupt.h>
138 +#include <linux/clk.h>
139 +#include <linux/dma-mapping.h>
140 +#include <linux/dmapool.h>
141 +#include <linux/slab.h>
142 +#include <linux/spinlock.h>
143 +#include <linux/of.h>
144 +#include <linux/of_device.h>
145 +#include <linux/of_address.h>
146 +#include <linux/of_irq.h>
147 +#include <linux/of_dma.h>
149 +#include "virt-dma.h"
151 +#define EDMA_CH_CSR 0x00
152 +#define EDMA_CH_ES 0x04
153 +#define EDMA_CH_INT 0x08
154 +#define EDMA_CH_SBR 0x0C
155 +#define EDMA_CH_PRI 0x10
156 +#define EDMA_TCD_SADDR 0x20
157 +#define EDMA_TCD_SOFF 0x24
158 +#define EDMA_TCD_ATTR 0x26
159 +#define EDMA_TCD_NBYTES 0x28
160 +#define EDMA_TCD_SLAST 0x2C
161 +#define EDMA_TCD_DADDR 0x30
162 +#define EDMA_TCD_DOFF 0x34
163 +#define EDMA_TCD_CITER_ELINK 0x36
164 +#define EDMA_TCD_CITER 0x36
165 +#define EDMA_TCD_DLAST_SGA 0x38
166 +#define EDMA_TCD_CSR 0x3C
167 +#define EDMA_TCD_BITER_ELINK 0x3E
168 +#define EDMA_TCD_BITER 0x3E
170 +#define EDMA_CH_SBR_RD BIT(22)
171 +#define EDMA_CH_SBR_WR BIT(21)
172 +#define EDMA_CH_CSR_ERQ BIT(0)
173 +#define EDMA_CH_CSR_EARQ BIT(1)
174 +#define EDMA_CH_CSR_EEI BIT(2)
175 +#define EDMA_CH_CSR_DONE BIT(30)
176 +#define EDMA_CH_CSR_ACTIVE BIT(31)
178 +#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
179 +#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
180 +#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
181 +#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
182 +#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
183 +#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
184 +#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
185 +#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
186 +#define EDMA_TCD_ATTR_SSIZE_16BYTE (0x0400)
187 +#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
188 +#define EDMA_TCD_ATTR_SSIZE_64BYTE (0x0600)
189 +#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
190 +#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
191 +#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
192 +#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
193 +#define EDMA_TCD_ATTR_DSIZE_16BYTE (0x0004)
194 +#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
195 +#define EDMA_TCD_ATTR_DSIZE_64BYTE (0x0006)
197 +#define EDMA_TCD_SOFF_SOFF(x) (x)
198 +#define EDMA_TCD_NBYTES_NBYTES(x) (x)
199 +#define EDMA_TCD_SLAST_SLAST(x) (x)
200 +#define EDMA_TCD_DADDR_DADDR(x) (x)
201 +#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
202 +#define EDMA_TCD_DOFF_DOFF(x) (x)
203 +#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
204 +#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
206 +#define EDMA_TCD_CSR_START BIT(0)
207 +#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
208 +#define EDMA_TCD_CSR_INT_HALF BIT(2)
209 +#define EDMA_TCD_CSR_D_REQ BIT(3)
210 +#define EDMA_TCD_CSR_E_SG BIT(4)
211 +#define EDMA_TCD_CSR_E_LINK BIT(5)
212 +#define EDMA_TCD_CSR_ACTIVE BIT(6)
213 +#define EDMA_TCD_CSR_DONE BIT(7)
215 +#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
216 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
217 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
218 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
219 + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES))
221 +struct fsl_edma3_hw_tcd {
235 +struct fsl_edma3_sw_tcd {
237 + struct fsl_edma3_hw_tcd *vtcd;
240 +struct fsl_edma3_slave_config {
241 + enum dma_transfer_direction dir;
242 + enum dma_slave_buswidth addr_width;
244 + u32 dev2_addr; /* source addr for dev2dev */
249 +struct fsl_edma3_chan {
250 + struct virt_dma_chan vchan;
251 + enum dma_status status;
252 + struct fsl_edma3_engine *edma3;
253 + struct fsl_edma3_desc *edesc;
254 + struct fsl_edma3_slave_config fsc;
255 + void __iomem *membase;
260 + struct dma_pool *tcd_pool;
261 + u32 chn_real_count;
262 + char txirq_name[32];
265 +struct fsl_edma3_desc {
266 + struct virt_dma_desc vdesc;
267 + struct fsl_edma3_chan *echan;
269 + unsigned int n_tcds;
270 + struct fsl_edma3_sw_tcd tcd[];
273 +struct fsl_edma3_engine {
274 + struct dma_device dma_dev;
275 + struct mutex fsl_edma3_mutex;
278 + bool swap; /* remote/local swapped on Audio edma */
279 + struct fsl_edma3_chan chans[];
282 +static struct fsl_edma3_chan *to_fsl_edma3_chan(struct dma_chan *chan)
284 + return container_of(chan, struct fsl_edma3_chan, vchan.chan);
287 +static struct fsl_edma3_desc *to_fsl_edma3_desc(struct virt_dma_desc *vd)
289 + return container_of(vd, struct fsl_edma3_desc, vdesc);
292 +static void fsl_edma3_enable_request(struct fsl_edma3_chan *fsl_chan)
294 + void __iomem *addr = fsl_chan->membase;
297 + val = readl(addr + EDMA_CH_SBR);
298 + /* Remote/local swapped wrongly on iMX8 QM Audio edma */
299 + if (fsl_chan->edma3->swap) {
300 + if (!fsl_chan->is_rxchan)
301 + val |= EDMA_CH_SBR_RD;
303 + val |= EDMA_CH_SBR_WR;
305 + if (fsl_chan->is_rxchan)
306 + val |= EDMA_CH_SBR_RD;
308 + val |= EDMA_CH_SBR_WR;
310 + writel(val, addr + EDMA_CH_SBR);
312 + val = readl(addr + EDMA_CH_CSR);
314 + val |= EDMA_CH_CSR_ERQ;
315 + writel(val, addr + EDMA_CH_CSR);
318 +static void fsl_edma3_disable_request(struct fsl_edma3_chan *fsl_chan)
320 + void __iomem *addr = fsl_chan->membase;
321 + u32 val = readl(addr + EDMA_CH_CSR);
323 + val &= ~EDMA_CH_CSR_ERQ;
324 + writel(val, addr + EDMA_CH_CSR);
327 +static unsigned int fsl_edma3_get_tcd_attr(enum dma_slave_buswidth addr_width)
329 + switch (addr_width) {
331 + return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
333 + return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
335 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
337 + return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
339 + return EDMA_TCD_ATTR_SSIZE_16BYTE | EDMA_TCD_ATTR_DSIZE_16BYTE;
341 + return EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE;
343 + return EDMA_TCD_ATTR_SSIZE_64BYTE | EDMA_TCD_ATTR_DSIZE_64BYTE;
345 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
349 +static void fsl_edma3_free_desc(struct virt_dma_desc *vdesc)
351 + struct fsl_edma3_desc *fsl_desc;
354 + fsl_desc = to_fsl_edma3_desc(vdesc);
355 + for (i = 0; i < fsl_desc->n_tcds; i++)
356 + dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
357 + fsl_desc->tcd[i].ptcd);
361 +static int fsl_edma3_terminate_all(struct dma_chan *chan)
363 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
364 + unsigned long flags;
367 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
368 + fsl_edma3_disable_request(fsl_chan);
369 + fsl_chan->edesc = NULL;
370 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
371 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
372 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
376 +static int fsl_edma3_pause(struct dma_chan *chan)
378 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
379 + unsigned long flags;
381 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
382 + if (fsl_chan->edesc) {
383 + fsl_edma3_disable_request(fsl_chan);
384 + fsl_chan->status = DMA_PAUSED;
386 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
390 +static int fsl_edma3_resume(struct dma_chan *chan)
392 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
393 + unsigned long flags;
395 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
396 + if (fsl_chan->edesc) {
397 + fsl_edma3_enable_request(fsl_chan);
398 + fsl_chan->status = DMA_IN_PROGRESS;
400 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
404 +static int fsl_edma3_slave_config(struct dma_chan *chan,
405 + struct dma_slave_config *cfg)
407 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
409 + fsl_chan->fsc.dir = cfg->direction;
410 + if (cfg->direction == DMA_DEV_TO_MEM) {
411 + fsl_chan->fsc.dev_addr = cfg->src_addr;
412 + fsl_chan->fsc.addr_width = cfg->src_addr_width;
413 + fsl_chan->fsc.burst = cfg->src_maxburst;
414 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
415 + (cfg->src_addr_width);
416 + } else if (cfg->direction == DMA_MEM_TO_DEV) {
417 + fsl_chan->fsc.dev_addr = cfg->dst_addr;
418 + fsl_chan->fsc.addr_width = cfg->dst_addr_width;
419 + fsl_chan->fsc.burst = cfg->dst_maxburst;
420 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
421 + (cfg->dst_addr_width);
422 + } else if (cfg->direction == DMA_DEV_TO_DEV) {
423 + fsl_chan->fsc.dev2_addr = cfg->src_addr;
424 + fsl_chan->fsc.dev_addr = cfg->dst_addr;
425 + fsl_chan->fsc.addr_width = cfg->dst_addr_width;
426 + fsl_chan->fsc.burst = cfg->dst_maxburst;
427 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
428 + (cfg->dst_addr_width);
435 +static size_t fsl_edma3_desc_residue(struct fsl_edma3_chan *fsl_chan,
436 + struct virt_dma_desc *vdesc, bool in_progress)
438 + struct fsl_edma3_desc *edesc = fsl_chan->edesc;
439 + void __iomem *addr = fsl_chan->membase;
440 + enum dma_transfer_direction dir = fsl_chan->fsc.dir;
441 + dma_addr_t cur_addr, dma_addr;
445 + /* calculate the total size in this desc */
446 + for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
447 + len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
448 + * le16_to_cpu(edesc->tcd[i].vtcd->biter);
453 + if (dir == DMA_MEM_TO_DEV)
454 + cur_addr = readl(addr + EDMA_TCD_SADDR);
456 + cur_addr = readl(addr + EDMA_TCD_DADDR);
458 + /* figure out the finished and calculate the residue */
459 + for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
460 + size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
461 + * le16_to_cpu(edesc->tcd[i].vtcd->biter);
462 + if (dir == DMA_MEM_TO_DEV)
463 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
465 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
468 + if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
469 + len += dma_addr + size - cur_addr;
477 +static enum dma_status fsl_edma3_tx_status(struct dma_chan *chan,
478 + dma_cookie_t cookie, struct dma_tx_state *txstate)
480 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
481 + struct virt_dma_desc *vdesc;
482 + enum dma_status status;
483 + unsigned long flags;
485 + status = dma_cookie_status(chan, cookie, txstate);
486 + if (status == DMA_COMPLETE) {
487 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
488 + txstate->residue = fsl_chan->chn_real_count;
489 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
494 + return fsl_chan->status;
496 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
497 + vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
498 + if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
499 + txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
502 + txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
505 + txstate->residue = 0;
507 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
509 + return fsl_chan->status;
512 +static void fsl_edma3_set_tcd_regs(struct fsl_edma3_chan *fsl_chan,
513 + struct fsl_edma3_hw_tcd *tcd)
515 + void __iomem *addr = fsl_chan->membase;
517 + * TCD parameters are stored in struct fsl_edma3_hw_tcd in little
518 + * endian format. However, we need to load the TCD registers in
519 + * big- or little-endian obeying the eDMA engine model endian.
521 + writew(0, addr + EDMA_TCD_CSR);
522 + writel(le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR);
523 + writel(le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR);
525 + writew(le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR);
526 + writew(le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF);
528 + writel(le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES);
529 + writel(le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST);
531 + writew(le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER);
532 + writew(le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER);
533 + writew(le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF);
535 + writel(le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA);
537 + writew(le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR);
541 +void fsl_edma3_fill_tcd(struct fsl_edma3_chan *fsl_chan,
542 + struct fsl_edma3_hw_tcd *tcd, u32 src, u32 dst,
543 + u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
544 + u16 biter, u16 doff, u32 dlast_sga, bool major_int,
545 + bool disable_req, bool enable_sg)
550 + * eDMA hardware SGs require the TCDs to be stored in little
551 + * endian format irrespective of the register endian model.
552 + * So we put the value in little endian in memory, waiting
553 + * for fsl_edma3_set_tcd_regs doing the swap.
555 + tcd->saddr = cpu_to_le32(src);
556 + tcd->daddr = cpu_to_le32(dst);
558 + tcd->attr = cpu_to_le16(attr);
560 + tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
562 + tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
563 + tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
565 + tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
566 + tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
568 + tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
570 + tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
572 + csr |= EDMA_TCD_CSR_INT_MAJOR;
575 + csr |= EDMA_TCD_CSR_D_REQ;
578 + csr |= EDMA_TCD_CSR_E_SG;
580 + if (fsl_chan->is_rxchan)
581 + csr |= EDMA_TCD_CSR_ACTIVE;
583 + tcd->csr = cpu_to_le16(csr);
586 +static struct fsl_edma3_desc *fsl_edma3_alloc_desc(struct fsl_edma3_chan
587 + *fsl_chan, int sg_len)
589 + struct fsl_edma3_desc *fsl_desc;
592 + fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma3_sw_tcd)
593 + * sg_len, GFP_ATOMIC);
597 + fsl_desc->echan = fsl_chan;
598 + fsl_desc->n_tcds = sg_len;
599 + for (i = 0; i < sg_len; i++) {
600 + fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
601 + GFP_ATOMIC, &fsl_desc->tcd[i].ptcd);
602 + if (!fsl_desc->tcd[i].vtcd)
609 + dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
610 + fsl_desc->tcd[i].ptcd);
615 +static struct dma_async_tx_descriptor *fsl_edma3_prep_dma_cyclic(
616 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
617 + size_t period_len, enum dma_transfer_direction direction,
618 + unsigned long flags)
620 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
621 + struct fsl_edma3_desc *fsl_desc;
622 + dma_addr_t dma_buf_next;
624 + u32 src_addr, dst_addr, last_sg, nbytes;
625 + u16 soff, doff, iter;
627 + sg_len = buf_len / period_len;
628 + fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
631 + fsl_desc->iscyclic = true;
633 + dma_buf_next = dma_addr;
634 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
635 + iter = period_len / nbytes;
637 + for (i = 0; i < sg_len; i++) {
638 + if (dma_buf_next >= dma_addr + buf_len)
639 + dma_buf_next = dma_addr;
641 + /* get next sg's physical address */
642 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
644 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
645 + src_addr = dma_buf_next;
646 + dst_addr = fsl_chan->fsc.dev_addr;
647 + soff = fsl_chan->fsc.addr_width;
649 + } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
650 + src_addr = fsl_chan->fsc.dev_addr;
651 + dst_addr = dma_buf_next;
653 + doff = fsl_chan->fsc.addr_width;
655 + /* DMA_DEV_TO_DEV */
656 + src_addr = fsl_chan->fsc.dev2_addr;
657 + dst_addr = fsl_chan->fsc.dev_addr;
662 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
663 + dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
664 + iter, iter, doff, last_sg, true, false, true);
665 + dma_buf_next += period_len;
668 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
671 +static struct dma_async_tx_descriptor *fsl_edma3_prep_slave_sg(
672 + struct dma_chan *chan, struct scatterlist *sgl,
673 + unsigned int sg_len, enum dma_transfer_direction direction,
674 + unsigned long flags, void *context)
676 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
677 + struct fsl_edma3_desc *fsl_desc;
678 + struct scatterlist *sg;
679 + u32 src_addr, dst_addr, last_sg, nbytes;
680 + u16 soff, doff, iter;
683 + if (!is_slave_direction(fsl_chan->fsc.dir))
686 + fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
689 + fsl_desc->iscyclic = false;
691 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
692 + for_each_sg(sgl, sg, sg_len, i) {
693 + /* get next sg's physical address */
694 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
696 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
697 + src_addr = sg_dma_address(sg);
698 + dst_addr = fsl_chan->fsc.dev_addr;
699 + soff = fsl_chan->fsc.addr_width;
701 + } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
702 + src_addr = fsl_chan->fsc.dev_addr;
703 + dst_addr = sg_dma_address(sg);
705 + doff = fsl_chan->fsc.addr_width;
707 + /* DMA_DEV_TO_DEV */
708 + src_addr = fsl_chan->fsc.dev2_addr;
709 + dst_addr = fsl_chan->fsc.dev_addr;
714 + iter = sg_dma_len(sg) / nbytes;
715 + if (i < sg_len - 1) {
716 + last_sg = fsl_desc->tcd[(i + 1)].ptcd;
717 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd,
718 + src_addr, dst_addr, fsl_chan->fsc.attr,
719 + soff, nbytes, 0, iter, iter, doff,
720 + last_sg, false, false, true);
723 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd,
724 + src_addr, dst_addr, fsl_chan->fsc.attr,
725 + soff, nbytes, 0, iter, iter, doff,
726 + last_sg, true, true, false);
730 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
733 +static void fsl_edma3_xfer_desc(struct fsl_edma3_chan *fsl_chan)
735 + struct virt_dma_desc *vdesc;
737 + vdesc = vchan_next_desc(&fsl_chan->vchan);
740 + fsl_chan->edesc = to_fsl_edma3_desc(vdesc);
741 + fsl_edma3_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
742 + fsl_edma3_enable_request(fsl_chan);
743 + fsl_chan->status = DMA_IN_PROGRESS;
746 +static size_t fsl_edma3_desc_residue(struct fsl_edma3_chan *fsl_chan,
747 + struct virt_dma_desc *vdesc, bool in_progress);
749 +static void fsl_edma3_get_realcnt(struct fsl_edma3_chan *fsl_chan)
751 + fsl_chan->chn_real_count = fsl_edma3_desc_residue(fsl_chan, NULL, true);
754 +static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
756 + struct fsl_edma3_chan *fsl_chan = dev_id;
758 + void __iomem *base_addr;
760 + base_addr = fsl_chan->membase;
762 + intr = readl(base_addr + EDMA_CH_INT);
766 + writel(1, base_addr + EDMA_CH_INT);
768 + spin_lock(&fsl_chan->vchan.lock);
769 + if (!fsl_chan->edesc->iscyclic) {
770 + fsl_edma3_get_realcnt(fsl_chan);
771 + list_del(&fsl_chan->edesc->vdesc.node);
772 + vchan_cookie_complete(&fsl_chan->edesc->vdesc);
773 + fsl_chan->edesc = NULL;
774 + fsl_chan->status = DMA_COMPLETE;
776 + vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
779 + if (!fsl_chan->edesc)
780 + fsl_edma3_xfer_desc(fsl_chan);
782 + spin_unlock(&fsl_chan->vchan.lock);
784 + return IRQ_HANDLED;
787 +static void fsl_edma3_issue_pending(struct dma_chan *chan)
789 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
790 + unsigned long flags;
792 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
794 + if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
795 + fsl_edma3_xfer_desc(fsl_chan);
797 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
800 +static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
801 + struct of_dma *ofdma)
803 + struct fsl_edma3_engine *fsl_edma3 = ofdma->of_dma_data;
804 + struct dma_chan *chan, *_chan;
805 + struct fsl_edma3_chan *fsl_chan;
807 + if (dma_spec->args_count != 3)
810 + mutex_lock(&fsl_edma3->fsl_edma3_mutex);
811 + list_for_each_entry_safe(chan, _chan, &fsl_edma3->dma_dev.channels,
813 + if (chan->client_count)
816 + fsl_chan = to_fsl_edma3_chan(chan);
817 + if (fsl_chan->hw_chanid == dma_spec->args[0]) {
818 + chan = dma_get_slave_channel(chan);
819 + chan->device->privatecnt++;
820 + fsl_chan->priority = dma_spec->args[1];
821 + fsl_chan->is_rxchan = dma_spec->args[2];
822 + mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
826 + mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
830 +static int fsl_edma3_alloc_chan_resources(struct dma_chan *chan)
832 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
834 + fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
835 + sizeof(struct fsl_edma3_hw_tcd),
840 +static void fsl_edma3_free_chan_resources(struct dma_chan *chan)
842 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
843 + unsigned long flags;
846 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
847 + fsl_edma3_disable_request(fsl_chan);
848 + fsl_chan->edesc = NULL;
849 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
850 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
852 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
853 + dma_pool_destroy(fsl_chan->tcd_pool);
854 + fsl_chan->tcd_pool = NULL;
857 +static int fsl_edma3_probe(struct platform_device *pdev)
859 + struct device_node *np = pdev->dev.of_node;
860 + struct fsl_edma3_engine *fsl_edma3;
861 + struct fsl_edma3_chan *fsl_chan;
862 + struct resource *res;
865 + unsigned long irqflag = 0;
867 + ret = of_property_read_u32(np, "dma-channels", &chans);
869 + dev_err(&pdev->dev, "Can't get dma-channels.\n");
873 + len = sizeof(*fsl_edma3) + sizeof(*fsl_chan) * chans;
874 + fsl_edma3 = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
878 + /* Audio edma rx/tx channel shared interrupt */
879 + if (of_property_read_bool(np, "shared-interrupt"))
880 + irqflag = IRQF_SHARED;
882 + fsl_edma3->swap = of_device_is_compatible(np, "fsl,imx8qm-adma");
883 + fsl_edma3->n_chans = chans;
885 + INIT_LIST_HEAD(&fsl_edma3->dma_dev.channels);
886 + for (i = 0; i < fsl_edma3->n_chans; i++) {
887 + struct fsl_edma3_chan *fsl_chan = &fsl_edma3->chans[i];
888 + char *txirq_name = fsl_chan->txirq_name;
890 + fsl_chan->edma3 = fsl_edma3;
891 + /* Get per channel membase */
892 + res = platform_get_resource(pdev, IORESOURCE_MEM, i);
893 + fsl_chan->membase = devm_ioremap_resource(&pdev->dev, res);
894 + if (IS_ERR(fsl_chan->membase))
895 + return PTR_ERR(fsl_chan->membase);
897 + /* Get the hardware channel id by the channel membase
898 + * channel0:0x10000, channel1:0x20000... total 32 channels
900 + fsl_chan->hw_chanid = (res->start >> 16) & 0x1f;
901 + sprintf(txirq_name, "edma-chan%d-tx", fsl_chan->hw_chanid);
903 + /* request channel irq */
904 + fsl_chan->txirq = platform_get_irq_byname(pdev, txirq_name);
905 + if (fsl_chan->txirq < 0) {
906 + dev_err(&pdev->dev, "Can't get %s irq.\n", txirq_name);
907 + return fsl_chan->txirq;
910 + ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
911 + fsl_edma3_tx_handler, irqflag, txirq_name,
914 + dev_err(&pdev->dev, "Can't register %s IRQ.\n",
919 + fsl_chan->vchan.desc_free = fsl_edma3_free_desc;
920 + vchan_init(&fsl_chan->vchan, &fsl_edma3->dma_dev);
923 + mutex_init(&fsl_edma3->fsl_edma3_mutex);
925 + dma_cap_set(DMA_PRIVATE, fsl_edma3->dma_dev.cap_mask);
926 + dma_cap_set(DMA_SLAVE, fsl_edma3->dma_dev.cap_mask);
927 + dma_cap_set(DMA_CYCLIC, fsl_edma3->dma_dev.cap_mask);
929 + fsl_edma3->dma_dev.dev = &pdev->dev;
930 + fsl_edma3->dma_dev.device_alloc_chan_resources
931 + = fsl_edma3_alloc_chan_resources;
932 + fsl_edma3->dma_dev.device_free_chan_resources
933 + = fsl_edma3_free_chan_resources;
934 + fsl_edma3->dma_dev.device_tx_status = fsl_edma3_tx_status;
935 + fsl_edma3->dma_dev.device_prep_slave_sg = fsl_edma3_prep_slave_sg;
936 + fsl_edma3->dma_dev.device_prep_dma_cyclic = fsl_edma3_prep_dma_cyclic;
937 + fsl_edma3->dma_dev.device_config = fsl_edma3_slave_config;
938 + fsl_edma3->dma_dev.device_pause = fsl_edma3_pause;
939 + fsl_edma3->dma_dev.device_resume = fsl_edma3_resume;
940 + fsl_edma3->dma_dev.device_terminate_all = fsl_edma3_terminate_all;
941 + fsl_edma3->dma_dev.device_issue_pending = fsl_edma3_issue_pending;
943 + fsl_edma3->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
944 + fsl_edma3->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
945 + fsl_edma3->dma_dev.directions = BIT(DMA_DEV_TO_MEM) |
946 + BIT(DMA_MEM_TO_DEV) |
947 + BIT(DMA_DEV_TO_DEV);
949 + platform_set_drvdata(pdev, fsl_edma3);
951 + ret = dma_async_device_register(&fsl_edma3->dma_dev);
953 + dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
957 + ret = of_dma_controller_register(np, fsl_edma3_xlate, fsl_edma3);
959 + dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
960 + dma_async_device_unregister(&fsl_edma3->dma_dev);
967 +static int fsl_edma3_remove(struct platform_device *pdev)
969 + struct device_node *np = pdev->dev.of_node;
970 + struct fsl_edma3_engine *fsl_edma3 = platform_get_drvdata(pdev);
972 + of_dma_controller_free(np);
973 + dma_async_device_unregister(&fsl_edma3->dma_dev);
978 +static const struct of_device_id fsl_edma3_dt_ids[] = {
979 + { .compatible = "fsl,imx8qm-edma", },
980 + { .compatible = "fsl,imx8qm-adma", },
983 +MODULE_DEVICE_TABLE(of, fsl_edma3_dt_ids);
985 +static struct platform_driver fsl_edma3_driver = {
987 + .name = "fsl-edma-v3",
988 + .of_match_table = fsl_edma3_dt_ids,
990 + .probe = fsl_edma3_probe,
991 + .remove = fsl_edma3_remove,
994 +static int __init fsl_edma3_init(void)
996 + return platform_driver_register(&fsl_edma3_driver);
998 +subsys_initcall(fsl_edma3_init);
1000 +static void __exit fsl_edma3_exit(void)
1002 + platform_driver_unregister(&fsl_edma3_driver);
1004 +module_exit(fsl_edma3_exit);
1006 +MODULE_ALIAS("platform:fsl-edma3");
1007 +MODULE_DESCRIPTION("Freescale eDMA-V3 engine driver");
1008 +MODULE_LICENSE("GPL v2");