1 From 88476b7fb025afdf9cc2247e8d04ab8e027ce9cb Mon Sep 17 00:00:00 2001
2 From: Robin Gong <yibin.gong@nxp.com>
3 Date: Fri, 31 Mar 2017 15:53:39 +0800
4 Subject: [PATCH] MLK-14610 DMA: fsl-edma-v3: add fsl-edma-v3 support
5
6 Add the eDMA v3 driver for i.MX8QM.
7
8 Signed-off-by: Robin Gong <yibin.gong@nxp.com>
9 (cherry picked from commit d0ac0971c2e637ebddc853f12f71d130f5df4f91)
10 ---
11 .../devicetree/bindings/dma/fsl-edma-v3.txt | 64 ++
12 drivers/dma/Kconfig | 11 +
13 drivers/dma/Makefile | 1 +
14 drivers/dma/fsl-edma-v3.c | 890 +++++++++++++++++++++
15 4 files changed, 966 insertions(+)
16 create mode 100644 Documentation/devicetree/bindings/dma/fsl-edma-v3.txt
17 create mode 100644 drivers/dma/fsl-edma-v3.c
18
19 --- /dev/null
20 +++ b/Documentation/devicetree/bindings/dma/fsl-edma-v3.txt
21 @@ -0,0 +1,64 @@
22 +* Freescale enhanced Direct Memory Access (eDMA-v3) Controller
23 +
24 + The eDMA-v3 controller is derived from the FSL eDMA controller and was first
25 + integrated on the Freescale i.MX8QM SoC. The eDMA channels can be multiplexed by
26 + programmable memory-mapped registers; each DMA request source has a fixed channel.
27 +
28 +* eDMA Controller
29 +Required properties:
30 +- compatible :
31 + - "fsl,imx8qm-edma" for eDMA used similar to that on i.MX8QM SoC
32 + - "fsl,imx8qm-adma" for audio eDMA used on i.MX8QM
33 +- reg : Specifies the base physical address(es) and size of the eDMA channel
34 + registers. Each eDMA channel has its own register address range.
35 +- interrupts : A list of interrupt specifiers; each channel has one interrupt.
36 +- interrupt-names : Should contain entries of the form:
37 + "edma-chan12-tx" - the channel 12 transfer interrupt
38 +- #dma-cells : Must be <3>.
39 + The 1st cell specifies the channel ID.
40 + The 2nd cell specifies the channel priority.
41 + The 3rd cell specifies the channel type, transmit or receive:
42 + 0: transmit, 1: receive.
43 + See the SoC's reference manual for all the supported request sources.
44 +- dma-channels : Number of channels supported by the controller
45 +
46 +Examples:
47 +edma0: dma-controller@5a2c0000 {
48 + compatible = "fsl,imx8qm-edma";
49 + reg = <0x0 0x5a2c0000 0x0 0x10000>, /* channel12 UART0 rx */
50 + <0x0 0x5a2d0000 0x0 0x10000>, /* channel13 UART0 tx */
51 + <0x0 0x5a2e0000 0x0 0x10000>, /* channel14 UART1 rx */
52 + <0x0 0x5a2f0000 0x0 0x10000>; /* channel15 UART1 tx */
53 + #dma-cells = <3>;
54 + dma-channels = <4>;
55 + interrupts = <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
56 + <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
57 + <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
58 + <GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>;
59 + interrupt-names = "edma-chan12-tx", "edma-chan13-tx",
60 + "edma-chan14-tx", "edma-chan15-tx";
61 + status = "okay";
62 +};
63 +
64 +* DMA clients
65 +DMA client drivers that use the DMA function must use the format described
66 +in the dma.txt file, using a three-cell specifier for each channel: the 1st
67 +cell specifies the channel number, the 2nd the priority, and the 3rd whether
68 +the channel is for transmit or receive: 0: transmit, 1: receive.
69 +
70 +Examples:
71 +lpuart1: serial@5a070000 {
72 + compatible = "fsl,imx8qm-lpuart";
73 + reg = <0x0 0x5a070000 0x0 0x1000>;
74 + interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
75 + interrupt-parent = <&gic>;
76 + clocks = <&clk IMX8QM_UART1_CLK>;
77 + clock-names = "ipg";
78 + assigned-clocks = <&clk IMX8QM_UART1_CLK>;
79 + assigned-clock-rates = <80000000>;
80 + power-domains = <&pd_dma_lpuart1>;
81 + dma-names = "tx", "rx";
82 + dmas = <&edma0 15 0 0>,
83 + <&edma0 14 0 1>;
84 + status = "disabled";
85 +};
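
For context, a client picks such a channel up through the standard dmaengine
API; all three cells of the specifier are decoded by the controller driver's
xlate callback, so the consumer only refers to the channel by name. A minimal
sketch, assuming a hypothetical consumer bound to the lpuart1 node above (the
function name and FIFO address parameter are illustrative, not part of this
patch):

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Start a cyclic DEV_TO_MEM transfer on the "rx" channel, i.e. the
     * <&edma0 14 0 1> specifier from the example node. */
    static int example_start_rx_dma(struct device *dev, dma_addr_t buf,
                                    size_t buf_len, size_t period_len,
                                    dma_addr_t fifo_addr)
    {
            struct dma_async_tx_descriptor *desc;
            struct dma_slave_config cfg = {
                    .direction = DMA_DEV_TO_MEM,
                    .src_addr = fifo_addr, /* peripheral data register */
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                    .src_maxburst = 1,
            };
            struct dma_chan *chan;
            int ret;

            chan = dma_request_chan(dev, "rx"); /* matches dma-names */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = dmaengine_slave_config(chan, &cfg);
            if (ret)
                    goto err;

            desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                             DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT);
            if (!desc) {
                    ret = -EINVAL;
                    goto err;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;

    err:
            dma_release_channel(chan);
            return ret;
    }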
86 --- a/drivers/dma/Kconfig
87 +++ b/drivers/dma/Kconfig
88 @@ -227,6 +227,17 @@ config FSL_QDMA
89 or dequeuing DMA jobs from, different work queues.
90 This module can be found on NXP Layerscape SoCs.
91 The qdma driver only work on SoCs with a DPAA hardware block.
92 +config FSL_EDMA_V3
93 + tristate "Freescale eDMA v3 engine support"
94 + depends on OF
95 + select DMA_ENGINE
96 + select DMA_VIRTUAL_CHANNELS
97 + help
98 +	  Support the Freescale eDMA v3 engine with programmable channels.
99 +	  This driver is based on FSL_EDMA, but the IP has changed in big
100 +	  ways: each channel has its own interrupt and its own register
101 +	  region.
102 +	  This module can be found on Freescale i.MX8QM SoCs.
103
104 config FSL_RAID
105 tristate "Freescale RAID engine Support"
106 --- a/drivers/dma/Makefile
107 +++ b/drivers/dma/Makefile
108 @@ -32,6 +32,7 @@ obj-$(CONFIG_DW_EDMA) += dw-edma/
109 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
110 obj-$(CONFIG_FSL_DMA) += fsldma.o
111 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
112 +obj-$(CONFIG_FSL_EDMA_V3) += fsl-edma-v3.o
113 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
114 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
115 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
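
With the Kconfig and Makefile hunks above applied, the driver builds like any
other dmaengine provider. A defconfig fragment for enabling it (FSL_EDMA_V3 is
tristate, so =m also works):

    CONFIG_DMADEVICES=y
    CONFIG_FSL_EDMA_V3=y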
116 --- /dev/null
117 +++ b/drivers/dma/fsl-edma-v3.c
118 @@ -0,0 +1,890 @@
119 +/*
120 + * drivers/dma/fsl-edma-v3.c
121 + *
122 + * Copyright 2017 NXP.
123 + *
124 + * Driver for the Freescale eDMA engine v3. This driver is based on fsl-edma.c
125 + * but changed to match the IP changes on i.MX8QM: every DMA channel is tied to
126 + * specific hardware. For example, channel 14 serves the LPUART1 receive request
127 + * and channel 13 the transmit request. The eDMA block can be found on i.MX8QM.
128 + *
129 + * This program is free software; you can redistribute it and/or modify it
130 + * under the terms of the GNU General Public License as published by the
131 + * Free Software Foundation; either version 2 of the License, or (at your
132 + * option) any later version.
133 + */
134 +
135 +#include <linux/init.h>
136 +#include <linux/module.h>
137 +#include <linux/interrupt.h>
138 +#include <linux/clk.h>
139 +#include <linux/dma-mapping.h>
140 +#include <linux/dmapool.h>
141 +#include <linux/slab.h>
142 +#include <linux/spinlock.h>
143 +#include <linux/of.h>
144 +#include <linux/of_device.h>
145 +#include <linux/of_address.h>
146 +#include <linux/of_irq.h>
147 +#include <linux/of_dma.h>
148 +
149 +#include "virt-dma.h"
150 +
151 +#define EDMA_CH_CSR 0x00
152 +#define EDMA_CH_ES 0x04
153 +#define EDMA_CH_INT 0x08
154 +#define EDMA_CH_SBR 0x0C
155 +#define EDMA_CH_PRI 0x10
156 +#define EDMA_TCD_SADDR 0x20
157 +#define EDMA_TCD_SOFF 0x24
158 +#define EDMA_TCD_ATTR 0x26
159 +#define EDMA_TCD_NBYTES 0x28
160 +#define EDMA_TCD_SLAST 0x2C
161 +#define EDMA_TCD_DADDR 0x30
162 +#define EDMA_TCD_DOFF 0x34
163 +#define EDMA_TCD_CITER_ELINK 0x36
164 +#define EDMA_TCD_CITER 0x36
165 +#define EDMA_TCD_DLAST_SGA 0x38
166 +#define EDMA_TCD_CSR 0x3C
167 +#define EDMA_TCD_BITER_ELINK 0x3E
168 +#define EDMA_TCD_BITER 0x3E
169 +
170 +#define EDMA_CH_SBR_RD BIT(22)
171 +#define EDMA_CH_SBR_WR BIT(21)
172 +#define EDMA_CH_CSR_ERQ BIT(0)
173 +#define EDMA_CH_CSR_EARQ BIT(1)
174 +#define EDMA_CH_CSR_EEI BIT(2)
175 +#define EDMA_CH_CSR_DONE BIT(30)
176 +#define EDMA_CH_CSR_ACTIVE BIT(31)
177 +
178 +#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
179 +#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
180 +#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
181 +#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
182 +#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
183 +#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
184 +#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
185 +#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
186 +#define EDMA_TCD_ATTR_SSIZE_16BYTE (0x0400)
187 +#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
188 +#define EDMA_TCD_ATTR_SSIZE_64BYTE (0x0600)
189 +#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
190 +#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
191 +#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
192 +#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
193 +#define EDMA_TCD_ATTR_DSIZE_16BYTE (0x0004)
194 +#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
195 +#define EDMA_TCD_ATTR_DSIZE_64BYTE (0x0006)
196 +
197 +#define EDMA_TCD_SOFF_SOFF(x) (x)
198 +#define EDMA_TCD_NBYTES_NBYTES(x) (x)
199 +#define EDMA_TCD_SLAST_SLAST(x) (x)
200 +#define EDMA_TCD_DADDR_DADDR(x) (x)
201 +#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
202 +#define EDMA_TCD_DOFF_DOFF(x) (x)
203 +#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
204 +#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
205 +
206 +#define EDMA_TCD_CSR_START BIT(0)
207 +#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
208 +#define EDMA_TCD_CSR_INT_HALF BIT(2)
209 +#define EDMA_TCD_CSR_D_REQ BIT(3)
210 +#define EDMA_TCD_CSR_E_SG BIT(4)
211 +#define EDMA_TCD_CSR_E_LINK BIT(5)
212 +#define EDMA_TCD_CSR_ACTIVE BIT(6)
213 +#define EDMA_TCD_CSR_DONE BIT(7)
214 +
215 +#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
216 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
217 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
218 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
219 + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES))
220 +
221 +struct fsl_edma3_hw_tcd {
222 + __le32 saddr;
223 + __le16 soff;
224 + __le16 attr;
225 + __le32 nbytes;
226 + __le32 slast;
227 + __le32 daddr;
228 + __le16 doff;
229 + __le16 citer;
230 + __le32 dlast_sga;
231 + __le16 csr;
232 + __le16 biter;
233 +};
234 +
235 +struct fsl_edma3_sw_tcd {
236 + dma_addr_t ptcd;
237 + struct fsl_edma3_hw_tcd *vtcd;
238 +};
239 +
240 +struct fsl_edma3_slave_config {
241 + enum dma_transfer_direction dir;
242 + enum dma_slave_buswidth addr_width;
243 + u32 dev_addr;
244 + u32 dev2_addr; /* source addr for dev2dev */
245 + u32 burst;
246 + u32 attr;
247 +};
248 +
249 +struct fsl_edma3_chan {
250 + struct virt_dma_chan vchan;
251 + enum dma_status status;
252 + struct fsl_edma3_engine *edma3;
253 + struct fsl_edma3_desc *edesc;
254 + struct fsl_edma3_slave_config fsc;
255 + void __iomem *membase;
256 + int txirq;
257 + int hw_chanid;
258 + int priority;
259 + int is_rxchan;
260 + struct dma_pool *tcd_pool;
261 + u32 chn_real_count;
262 + char txirq_name[32];
263 +};
264 +
265 +struct fsl_edma3_desc {
266 + struct virt_dma_desc vdesc;
267 + struct fsl_edma3_chan *echan;
268 + bool iscyclic;
269 + unsigned int n_tcds;
270 + struct fsl_edma3_sw_tcd tcd[];
271 +};
272 +
273 +struct fsl_edma3_engine {
274 + struct dma_device dma_dev;
275 + struct mutex fsl_edma3_mutex;
276 + u32 n_chans;
277 + int errirq;
278 + bool swap; /* remote/local swapped on Audio edma */
279 + struct fsl_edma3_chan chans[];
280 +};
281 +
282 +static struct fsl_edma3_chan *to_fsl_edma3_chan(struct dma_chan *chan)
283 +{
284 + return container_of(chan, struct fsl_edma3_chan, vchan.chan);
285 +}
286 +
287 +static struct fsl_edma3_desc *to_fsl_edma3_desc(struct virt_dma_desc *vd)
288 +{
289 + return container_of(vd, struct fsl_edma3_desc, vdesc);
290 +}
291 +
292 +static void fsl_edma3_enable_request(struct fsl_edma3_chan *fsl_chan)
293 +{
294 + void __iomem *addr = fsl_chan->membase;
295 + u32 val;
296 +
297 + val = readl(addr + EDMA_CH_SBR);
298 +	/* Remote/local are wrongly swapped on the i.MX8QM Audio eDMA */
299 + if (fsl_chan->edma3->swap) {
300 + if (!fsl_chan->is_rxchan)
301 + val |= EDMA_CH_SBR_RD;
302 + else
303 + val |= EDMA_CH_SBR_WR;
304 + } else {
305 + if (fsl_chan->is_rxchan)
306 + val |= EDMA_CH_SBR_RD;
307 + else
308 + val |= EDMA_CH_SBR_WR;
309 + }
310 + writel(val, addr + EDMA_CH_SBR);
311 +
312 + val = readl(addr + EDMA_CH_CSR);
313 +
314 + val |= EDMA_CH_CSR_ERQ;
315 + writel(val, addr + EDMA_CH_CSR);
316 +}
317 +
318 +static void fsl_edma3_disable_request(struct fsl_edma3_chan *fsl_chan)
319 +{
320 + void __iomem *addr = fsl_chan->membase;
321 + u32 val = readl(addr + EDMA_CH_CSR);
322 +
323 + val &= ~EDMA_CH_CSR_ERQ;
324 + writel(val, addr + EDMA_CH_CSR);
325 +}
326 +
327 +static unsigned int fsl_edma3_get_tcd_attr(enum dma_slave_buswidth addr_width)
328 +{
329 + switch (addr_width) {
330 + case 1:
331 + return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
332 + case 2:
333 + return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
334 + case 4:
335 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
336 + case 8:
337 + return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
338 + case 16:
339 + return EDMA_TCD_ATTR_SSIZE_16BYTE | EDMA_TCD_ATTR_DSIZE_16BYTE;
340 + case 32:
341 + return EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE;
342 + case 64:
343 + return EDMA_TCD_ATTR_SSIZE_64BYTE | EDMA_TCD_ATTR_DSIZE_64BYTE;
344 + default:
345 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
346 + }
347 +}
348 +
349 +static void fsl_edma3_free_desc(struct virt_dma_desc *vdesc)
350 +{
351 + struct fsl_edma3_desc *fsl_desc;
352 + int i;
353 +
354 + fsl_desc = to_fsl_edma3_desc(vdesc);
355 + for (i = 0; i < fsl_desc->n_tcds; i++)
356 + dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
357 + fsl_desc->tcd[i].ptcd);
358 + kfree(fsl_desc);
359 +}
360 +
361 +static int fsl_edma3_terminate_all(struct dma_chan *chan)
362 +{
363 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
364 + unsigned long flags;
365 + LIST_HEAD(head);
366 +
367 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
368 + fsl_edma3_disable_request(fsl_chan);
369 + fsl_chan->edesc = NULL;
370 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
371 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
372 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
373 + return 0;
374 +}
375 +
376 +static int fsl_edma3_pause(struct dma_chan *chan)
377 +{
378 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
379 + unsigned long flags;
380 +
381 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
382 + if (fsl_chan->edesc) {
383 + fsl_edma3_disable_request(fsl_chan);
384 + fsl_chan->status = DMA_PAUSED;
385 + }
386 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
387 + return 0;
388 +}
389 +
390 +static int fsl_edma3_resume(struct dma_chan *chan)
391 +{
392 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
393 + unsigned long flags;
394 +
395 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
396 + if (fsl_chan->edesc) {
397 + fsl_edma3_enable_request(fsl_chan);
398 + fsl_chan->status = DMA_IN_PROGRESS;
399 + }
400 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
401 + return 0;
402 +}
403 +
404 +static int fsl_edma3_slave_config(struct dma_chan *chan,
405 + struct dma_slave_config *cfg)
406 +{
407 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
408 +
409 + fsl_chan->fsc.dir = cfg->direction;
410 + if (cfg->direction == DMA_DEV_TO_MEM) {
411 + fsl_chan->fsc.dev_addr = cfg->src_addr;
412 + fsl_chan->fsc.addr_width = cfg->src_addr_width;
413 + fsl_chan->fsc.burst = cfg->src_maxburst;
414 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
415 + (cfg->src_addr_width);
416 + } else if (cfg->direction == DMA_MEM_TO_DEV) {
417 + fsl_chan->fsc.dev_addr = cfg->dst_addr;
418 + fsl_chan->fsc.addr_width = cfg->dst_addr_width;
419 + fsl_chan->fsc.burst = cfg->dst_maxburst;
420 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
421 + (cfg->dst_addr_width);
422 + } else if (cfg->direction == DMA_DEV_TO_DEV) {
423 + fsl_chan->fsc.dev2_addr = cfg->src_addr;
424 + fsl_chan->fsc.dev_addr = cfg->dst_addr;
425 + fsl_chan->fsc.addr_width = cfg->dst_addr_width;
426 + fsl_chan->fsc.burst = cfg->dst_maxburst;
427 + fsl_chan->fsc.attr = fsl_edma3_get_tcd_attr
428 + (cfg->dst_addr_width);
429 + } else {
430 + return -EINVAL;
431 + }
432 + return 0;
433 +}
434 +
435 +static size_t fsl_edma3_desc_residue(struct fsl_edma3_chan *fsl_chan,
436 + struct virt_dma_desc *vdesc, bool in_progress)
437 +{
438 + struct fsl_edma3_desc *edesc = fsl_chan->edesc;
439 + void __iomem *addr = fsl_chan->membase;
440 + enum dma_transfer_direction dir = fsl_chan->fsc.dir;
441 + dma_addr_t cur_addr, dma_addr;
442 + size_t len, size;
443 + int i;
444 +
445 + /* calculate the total size in this desc */
446 + for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
447 + len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
448 + * le16_to_cpu(edesc->tcd[i].vtcd->biter);
449 +
450 + if (!in_progress)
451 + return len;
452 +
453 + if (dir == DMA_MEM_TO_DEV)
454 + cur_addr = readl(addr + EDMA_TCD_SADDR);
455 + else
456 + cur_addr = readl(addr + EDMA_TCD_DADDR);
457 +
458 +	/* figure out which TCDs have finished and calculate the residue */
459 + for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
460 + size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
461 + * le16_to_cpu(edesc->tcd[i].vtcd->biter);
462 + if (dir == DMA_MEM_TO_DEV)
463 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
464 + else
465 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
466 +
467 + len -= size;
468 + if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
469 + len += dma_addr + size - cur_addr;
470 + break;
471 + }
472 + }
473 +
474 + return len;
475 +}
476 +
477 +static enum dma_status fsl_edma3_tx_status(struct dma_chan *chan,
478 + dma_cookie_t cookie, struct dma_tx_state *txstate)
479 +{
480 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
481 + struct virt_dma_desc *vdesc;
482 + enum dma_status status;
483 + unsigned long flags;
484 +
485 +	status = dma_cookie_status(chan, cookie, txstate);
486 +	if (!txstate)
487 +		return status == DMA_COMPLETE ? status : fsl_chan->status;
488 +
489 +	if (status == DMA_COMPLETE) {
490 +		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
491 +		txstate->residue = fsl_chan->chn_real_count;
492 +		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
493 +		return status;
494 +	}
495 +
496 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
497 + vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
498 + if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
499 + txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
500 + true);
501 + else if (vdesc)
502 + txstate->residue = fsl_edma3_desc_residue(fsl_chan, vdesc,
503 + false);
504 + else
505 + txstate->residue = 0;
506 +
507 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
508 +
509 + return fsl_chan->status;
510 +}
511 +
512 +static void fsl_edma3_set_tcd_regs(struct fsl_edma3_chan *fsl_chan,
513 + struct fsl_edma3_hw_tcd *tcd)
514 +{
515 + void __iomem *addr = fsl_chan->membase;
516 + /*
517 + * TCD parameters are stored in struct fsl_edma3_hw_tcd in little
518 + * endian format. However, we need to load the TCD registers in
519 +	 * big or little endian, matching the endianness of the eDMA engine.
520 + */
521 + writew(0, addr + EDMA_TCD_CSR);
522 + writel(le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR);
523 + writel(le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR);
524 +
525 + writew(le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR);
526 + writew(le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF);
527 +
528 + writel(le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES);
529 + writel(le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST);
530 +
531 + writew(le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER);
532 + writew(le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER);
533 + writew(le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF);
534 +
535 + writel(le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA);
536 +
537 + writew(le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR);
538 +}
539 +
540 +static inline
541 +void fsl_edma3_fill_tcd(struct fsl_edma3_chan *fsl_chan,
542 + struct fsl_edma3_hw_tcd *tcd, u32 src, u32 dst,
543 + u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
544 + u16 biter, u16 doff, u32 dlast_sga, bool major_int,
545 + bool disable_req, bool enable_sg)
546 +{
547 + u16 csr = 0;
548 +
549 + /*
550 + * eDMA hardware SGs require the TCDs to be stored in little
551 + * endian format irrespective of the register endian model.
552 +	 * So we store the values in little endian in memory and let
553 +	 * fsl_edma3_set_tcd_regs do the swap when loading the registers.
554 + */
555 + tcd->saddr = cpu_to_le32(src);
556 + tcd->daddr = cpu_to_le32(dst);
557 +
558 + tcd->attr = cpu_to_le16(attr);
559 +
560 + tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
561 +
562 + tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
563 + tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
564 +
565 + tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
566 + tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
567 +
568 + tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
569 +
570 + tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
571 + if (major_int)
572 + csr |= EDMA_TCD_CSR_INT_MAJOR;
573 +
574 + if (disable_req)
575 + csr |= EDMA_TCD_CSR_D_REQ;
576 +
577 + if (enable_sg)
578 + csr |= EDMA_TCD_CSR_E_SG;
579 +
580 + if (fsl_chan->is_rxchan)
581 + csr |= EDMA_TCD_CSR_ACTIVE;
582 +
583 + tcd->csr = cpu_to_le16(csr);
584 +}
585 +
586 +static struct fsl_edma3_desc *fsl_edma3_alloc_desc(struct fsl_edma3_chan
587 + *fsl_chan, int sg_len)
588 +{
589 + struct fsl_edma3_desc *fsl_desc;
590 + int i;
591 +
592 + fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma3_sw_tcd)
593 + * sg_len, GFP_ATOMIC);
594 + if (!fsl_desc)
595 + return NULL;
596 +
597 + fsl_desc->echan = fsl_chan;
598 + fsl_desc->n_tcds = sg_len;
599 + for (i = 0; i < sg_len; i++) {
600 + fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
601 + GFP_ATOMIC, &fsl_desc->tcd[i].ptcd);
602 + if (!fsl_desc->tcd[i].vtcd)
603 + goto err;
604 + }
605 + return fsl_desc;
606 +
607 +err:
608 + while (--i >= 0)
609 + dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
610 + fsl_desc->tcd[i].ptcd);
611 + kfree(fsl_desc);
612 + return NULL;
613 +}
614 +
615 +static struct dma_async_tx_descriptor *fsl_edma3_prep_dma_cyclic(
616 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
617 + size_t period_len, enum dma_transfer_direction direction,
618 + unsigned long flags)
619 +{
620 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
621 + struct fsl_edma3_desc *fsl_desc;
622 + dma_addr_t dma_buf_next;
623 + int sg_len, i;
624 + u32 src_addr, dst_addr, last_sg, nbytes;
625 + u16 soff, doff, iter;
626 +
627 + sg_len = buf_len / period_len;
628 + fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
629 + if (!fsl_desc)
630 + return NULL;
631 + fsl_desc->iscyclic = true;
632 +
633 + dma_buf_next = dma_addr;
634 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
635 + iter = period_len / nbytes;
636 +
637 + for (i = 0; i < sg_len; i++) {
638 + if (dma_buf_next >= dma_addr + buf_len)
639 + dma_buf_next = dma_addr;
640 +
641 + /* get next sg's physical address */
642 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
643 +
644 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
645 + src_addr = dma_buf_next;
646 + dst_addr = fsl_chan->fsc.dev_addr;
647 + soff = fsl_chan->fsc.addr_width;
648 + doff = 0;
649 + } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
650 + src_addr = fsl_chan->fsc.dev_addr;
651 + dst_addr = dma_buf_next;
652 + soff = 0;
653 + doff = fsl_chan->fsc.addr_width;
654 + } else {
655 + /* DMA_DEV_TO_DEV */
656 + src_addr = fsl_chan->fsc.dev2_addr;
657 + dst_addr = fsl_chan->fsc.dev_addr;
658 + soff = 0;
659 + doff = 0;
660 + }
661 +
662 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
663 + dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
664 + iter, iter, doff, last_sg, true, false, true);
665 + dma_buf_next += period_len;
666 + }
667 +
668 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
669 +}
670 +
671 +static struct dma_async_tx_descriptor *fsl_edma3_prep_slave_sg(
672 + struct dma_chan *chan, struct scatterlist *sgl,
673 + unsigned int sg_len, enum dma_transfer_direction direction,
674 + unsigned long flags, void *context)
675 +{
676 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
677 + struct fsl_edma3_desc *fsl_desc;
678 + struct scatterlist *sg;
679 + u32 src_addr, dst_addr, last_sg, nbytes;
680 + u16 soff, doff, iter;
681 + int i;
682 +
683 + if (!is_slave_direction(fsl_chan->fsc.dir))
684 + return NULL;
685 +
686 + fsl_desc = fsl_edma3_alloc_desc(fsl_chan, sg_len);
687 + if (!fsl_desc)
688 + return NULL;
689 + fsl_desc->iscyclic = false;
690 +
691 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
692 + for_each_sg(sgl, sg, sg_len, i) {
693 + /* get next sg's physical address */
694 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
695 +
696 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
697 + src_addr = sg_dma_address(sg);
698 + dst_addr = fsl_chan->fsc.dev_addr;
699 + soff = fsl_chan->fsc.addr_width;
700 + doff = 0;
701 + } else if (fsl_chan->fsc.dir == DMA_DEV_TO_MEM) {
702 + src_addr = fsl_chan->fsc.dev_addr;
703 + dst_addr = sg_dma_address(sg);
704 + soff = 0;
705 + doff = fsl_chan->fsc.addr_width;
706 + } else {
707 + /* DMA_DEV_TO_DEV */
708 + src_addr = fsl_chan->fsc.dev2_addr;
709 + dst_addr = fsl_chan->fsc.dev_addr;
710 + soff = 0;
711 + doff = 0;
712 + }
713 +
714 + iter = sg_dma_len(sg) / nbytes;
715 + if (i < sg_len - 1) {
716 + last_sg = fsl_desc->tcd[(i + 1)].ptcd;
717 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd,
718 + src_addr, dst_addr, fsl_chan->fsc.attr,
719 + soff, nbytes, 0, iter, iter, doff,
720 + last_sg, false, false, true);
721 + } else {
722 + last_sg = 0;
723 + fsl_edma3_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd,
724 + src_addr, dst_addr, fsl_chan->fsc.attr,
725 + soff, nbytes, 0, iter, iter, doff,
726 + last_sg, true, true, false);
727 + }
728 + }
729 +
730 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
731 +}
732 +
733 +static void fsl_edma3_xfer_desc(struct fsl_edma3_chan *fsl_chan)
734 +{
735 + struct virt_dma_desc *vdesc;
736 +
737 + vdesc = vchan_next_desc(&fsl_chan->vchan);
738 + if (!vdesc)
739 + return;
740 + fsl_chan->edesc = to_fsl_edma3_desc(vdesc);
741 + fsl_edma3_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
742 + fsl_edma3_enable_request(fsl_chan);
743 + fsl_chan->status = DMA_IN_PROGRESS;
744 +}
745 +
746 +static size_t fsl_edma3_desc_residue(struct fsl_edma3_chan *fsl_chan,
747 + struct virt_dma_desc *vdesc, bool in_progress);
748 +
749 +static void fsl_edma3_get_realcnt(struct fsl_edma3_chan *fsl_chan)
750 +{
751 + fsl_chan->chn_real_count = fsl_edma3_desc_residue(fsl_chan, NULL, true);
752 +}
753 +
754 +static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
755 +{
756 + struct fsl_edma3_chan *fsl_chan = dev_id;
757 + unsigned int intr;
758 + void __iomem *base_addr;
759 +
760 + base_addr = fsl_chan->membase;
761 +
762 + intr = readl(base_addr + EDMA_CH_INT);
763 + if (!intr)
764 + return IRQ_NONE;
765 +
766 + writel(1, base_addr + EDMA_CH_INT);
767 +
768 + spin_lock(&fsl_chan->vchan.lock);
769 +	if (fsl_chan->edesc && !fsl_chan->edesc->iscyclic) {
770 +		fsl_edma3_get_realcnt(fsl_chan);
771 +		list_del(&fsl_chan->edesc->vdesc.node);
772 +		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
773 +		fsl_chan->edesc = NULL;
774 +		fsl_chan->status = DMA_COMPLETE;
775 +	} else if (fsl_chan->edesc) {
776 +		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
777 +	}
778 +
779 + if (!fsl_chan->edesc)
780 + fsl_edma3_xfer_desc(fsl_chan);
781 +
782 + spin_unlock(&fsl_chan->vchan.lock);
783 +
784 + return IRQ_HANDLED;
785 +}
786 +
787 +static void fsl_edma3_issue_pending(struct dma_chan *chan)
788 +{
789 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
790 + unsigned long flags;
791 +
792 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
793 +
794 + if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
795 + fsl_edma3_xfer_desc(fsl_chan);
796 +
797 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
798 +}
799 +
800 +static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
801 + struct of_dma *ofdma)
802 +{
803 + struct fsl_edma3_engine *fsl_edma3 = ofdma->of_dma_data;
804 + struct dma_chan *chan, *_chan;
805 + struct fsl_edma3_chan *fsl_chan;
806 +
807 + if (dma_spec->args_count != 3)
808 + return NULL;
809 +
810 + mutex_lock(&fsl_edma3->fsl_edma3_mutex);
811 + list_for_each_entry_safe(chan, _chan, &fsl_edma3->dma_dev.channels,
812 + device_node) {
813 + if (chan->client_count)
814 + continue;
815 +
816 + fsl_chan = to_fsl_edma3_chan(chan);
817 + if (fsl_chan->hw_chanid == dma_spec->args[0]) {
818 + chan = dma_get_slave_channel(chan);
819 + chan->device->privatecnt++;
820 + fsl_chan->priority = dma_spec->args[1];
821 + fsl_chan->is_rxchan = dma_spec->args[2];
822 + mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
823 + return chan;
824 + }
825 + }
826 + mutex_unlock(&fsl_edma3->fsl_edma3_mutex);
827 + return NULL;
828 +}
829 +
830 +static int fsl_edma3_alloc_chan_resources(struct dma_chan *chan)
831 +{
832 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
833 +
834 +	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
835 +				sizeof(struct fsl_edma3_hw_tcd),
836 +				32, 0);
837 +	return fsl_chan->tcd_pool ? 0 : -ENOMEM;
838 +}
839 +
840 +static void fsl_edma3_free_chan_resources(struct dma_chan *chan)
841 +{
842 + struct fsl_edma3_chan *fsl_chan = to_fsl_edma3_chan(chan);
843 + unsigned long flags;
844 + LIST_HEAD(head);
845 +
846 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
847 + fsl_edma3_disable_request(fsl_chan);
848 + fsl_chan->edesc = NULL;
849 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
850 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
851 +
852 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
853 + dma_pool_destroy(fsl_chan->tcd_pool);
854 + fsl_chan->tcd_pool = NULL;
855 +}
856 +
857 +static int fsl_edma3_probe(struct platform_device *pdev)
858 +{
859 + struct device_node *np = pdev->dev.of_node;
860 + struct fsl_edma3_engine *fsl_edma3;
861 + struct fsl_edma3_chan *fsl_chan;
862 + struct resource *res;
863 + int len, chans;
864 + int ret, i;
865 + unsigned long irqflag = 0;
866 +
867 + ret = of_property_read_u32(np, "dma-channels", &chans);
868 + if (ret) {
869 + dev_err(&pdev->dev, "Can't get dma-channels.\n");
870 + return ret;
871 + }
872 +
873 + len = sizeof(*fsl_edma3) + sizeof(*fsl_chan) * chans;
874 + fsl_edma3 = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
875 + if (!fsl_edma3)
876 + return -ENOMEM;
877 +
878 +	/* Audio eDMA rx/tx channels share one interrupt */
879 + if (of_property_read_bool(np, "shared-interrupt"))
880 + irqflag = IRQF_SHARED;
881 +
882 + fsl_edma3->swap = of_device_is_compatible(np, "fsl,imx8qm-adma");
883 + fsl_edma3->n_chans = chans;
884 +
885 + INIT_LIST_HEAD(&fsl_edma3->dma_dev.channels);
886 + for (i = 0; i < fsl_edma3->n_chans; i++) {
887 + struct fsl_edma3_chan *fsl_chan = &fsl_edma3->chans[i];
888 + char *txirq_name = fsl_chan->txirq_name;
889 +
890 + fsl_chan->edma3 = fsl_edma3;
891 + /* Get per channel membase */
892 + res = platform_get_resource(pdev, IORESOURCE_MEM, i);
893 + fsl_chan->membase = devm_ioremap_resource(&pdev->dev, res);
894 + if (IS_ERR(fsl_chan->membase))
895 + return PTR_ERR(fsl_chan->membase);
896 +
897 +		/* Get the hardware channel id from the channel membase:
898 +		 * channel0:0x10000, channel1:0x20000... total 32 channels
899 +		 */
900 + fsl_chan->hw_chanid = (res->start >> 16) & 0x1f;
901 + sprintf(txirq_name, "edma-chan%d-tx", fsl_chan->hw_chanid);
902 +
903 + /* request channel irq */
904 + fsl_chan->txirq = platform_get_irq_byname(pdev, txirq_name);
905 + if (fsl_chan->txirq < 0) {
906 + dev_err(&pdev->dev, "Can't get %s irq.\n", txirq_name);
907 + return fsl_chan->txirq;
908 + }
909 +
910 + ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
911 + fsl_edma3_tx_handler, irqflag, txirq_name,
912 + fsl_chan);
913 + if (ret) {
914 + dev_err(&pdev->dev, "Can't register %s IRQ.\n",
915 + txirq_name);
916 + return ret;
917 + }
918 +
919 + fsl_chan->vchan.desc_free = fsl_edma3_free_desc;
920 + vchan_init(&fsl_chan->vchan, &fsl_edma3->dma_dev);
921 + }
922 +
923 + mutex_init(&fsl_edma3->fsl_edma3_mutex);
924 +
925 + dma_cap_set(DMA_PRIVATE, fsl_edma3->dma_dev.cap_mask);
926 + dma_cap_set(DMA_SLAVE, fsl_edma3->dma_dev.cap_mask);
927 + dma_cap_set(DMA_CYCLIC, fsl_edma3->dma_dev.cap_mask);
928 +
929 + fsl_edma3->dma_dev.dev = &pdev->dev;
930 + fsl_edma3->dma_dev.device_alloc_chan_resources
931 + = fsl_edma3_alloc_chan_resources;
932 + fsl_edma3->dma_dev.device_free_chan_resources
933 + = fsl_edma3_free_chan_resources;
934 + fsl_edma3->dma_dev.device_tx_status = fsl_edma3_tx_status;
935 + fsl_edma3->dma_dev.device_prep_slave_sg = fsl_edma3_prep_slave_sg;
936 + fsl_edma3->dma_dev.device_prep_dma_cyclic = fsl_edma3_prep_dma_cyclic;
937 + fsl_edma3->dma_dev.device_config = fsl_edma3_slave_config;
938 + fsl_edma3->dma_dev.device_pause = fsl_edma3_pause;
939 + fsl_edma3->dma_dev.device_resume = fsl_edma3_resume;
940 + fsl_edma3->dma_dev.device_terminate_all = fsl_edma3_terminate_all;
941 + fsl_edma3->dma_dev.device_issue_pending = fsl_edma3_issue_pending;
942 +
943 + fsl_edma3->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
944 + fsl_edma3->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
945 + fsl_edma3->dma_dev.directions = BIT(DMA_DEV_TO_MEM) |
946 + BIT(DMA_MEM_TO_DEV) |
947 + BIT(DMA_DEV_TO_DEV);
948 +
949 + platform_set_drvdata(pdev, fsl_edma3);
950 +
951 + ret = dma_async_device_register(&fsl_edma3->dma_dev);
952 + if (ret) {
953 + dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
954 + return ret;
955 + }
956 +
957 + ret = of_dma_controller_register(np, fsl_edma3_xlate, fsl_edma3);
958 + if (ret) {
959 + dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
960 + dma_async_device_unregister(&fsl_edma3->dma_dev);
961 + return ret;
962 + }
963 +
964 + return 0;
965 +}
966 +
967 +static int fsl_edma3_remove(struct platform_device *pdev)
968 +{
969 + struct device_node *np = pdev->dev.of_node;
970 + struct fsl_edma3_engine *fsl_edma3 = platform_get_drvdata(pdev);
971 +
972 + of_dma_controller_free(np);
973 + dma_async_device_unregister(&fsl_edma3->dma_dev);
974 +
975 + return 0;
976 +}
977 +
978 +static const struct of_device_id fsl_edma3_dt_ids[] = {
979 + { .compatible = "fsl,imx8qm-edma", },
980 + { .compatible = "fsl,imx8qm-adma", },
981 + { /* sentinel */ }
982 +};
983 +MODULE_DEVICE_TABLE(of, fsl_edma3_dt_ids);
984 +
985 +static struct platform_driver fsl_edma3_driver = {
986 + .driver = {
987 + .name = "fsl-edma-v3",
988 + .of_match_table = fsl_edma3_dt_ids,
989 + },
990 + .probe = fsl_edma3_probe,
991 + .remove = fsl_edma3_remove,
992 +};
993 +
994 +static int __init fsl_edma3_init(void)
995 +{
996 + return platform_driver_register(&fsl_edma3_driver);
997 +}
998 +subsys_initcall(fsl_edma3_init);
999 +
1000 +static void __exit fsl_edma3_exit(void)
1001 +{
1002 + platform_driver_unregister(&fsl_edma3_driver);
1003 +}
1004 +module_exit(fsl_edma3_exit);
1005 +
1006 +MODULE_ALIAS("platform:fsl-edma3");
1007 +MODULE_DESCRIPTION("Freescale eDMA-V3 engine driver");
1008 +MODULE_LICENSE("GPL v2");
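
A detail worth noting from the probe path: the hardware channel id is never
passed in the device tree; it is recovered from bits 16..20 of each channel's
register base ((res->start >> 16) & 0x1f), and the per-channel interrupt name
is built from it. A standalone sketch of that arithmetic, using the register
bases from the binding example (ordinary userspace C, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Channel register bases from the binding example. */
            const uint64_t bases[] = {
                    0x5a2c0000, 0x5a2d0000, 0x5a2e0000, 0x5a2f0000,
            };
            size_t i;

            for (i = 0; i < sizeof(bases) / sizeof(bases[0]); i++) {
                    /* Same derivation as the driver's probe(). */
                    unsigned int hw_chanid = (bases[i] >> 16) & 0x1f;

                    /* Prints edma-chan12-tx ... edma-chan15-tx, matching
                     * the interrupt-names in the example node. */
                    printf("base 0x%llx -> edma-chan%u-tx\n",
                           (unsigned long long)bases[i], hw_chanid);
            }
            return 0;
    }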