static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
- void __iomem *addr = fsl_chan->edma->membase;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
+ edma_writeb(fsl_chan->edma, ch, regs->serq);
}
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
- void __iomem *addr = fsl_chan->edma->membase;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
- edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
+ edma_writeb(fsl_chan->edma, ch, regs->cerq);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
- void __iomem *addr = fsl_chan->edma->membase;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = fsl_chan->fsc.dir;
dma_addr_t cur_addr, dma_addr;
return len;
if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(
- fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
else
- cur_addr = edma_readl(
- fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
struct fsl_edma_hw_tcd *tcd)
{
struct fsl_edma_engine *edma = fsl_chan->edma;
- void __iomem *addr = fsl_chan->edma->membase;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in
* big- or little-endian obeying the eDMA engine model endian.
*/
- edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
- edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
- edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
+ edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
+ edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
- edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
- edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
+ edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
+ edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
- edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
- edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
+ edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
- edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
+ edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
+ edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
+ edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
- edma_writel(edma,
- le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ &regs->tcd[ch].dlast_sga);
- edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
+ edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}
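For context on the endian handling above: the in-memory TCD image is little-endian (hence the le16_to_cpu/le32_to_cpu conversions), while the edma_write*/edma_read* accessors are expected to honour the engine model endianness via the driver's big_endian flag. Their bodies are not part of this excerpt; a plausible sketch, assuming they simply switch between the big- and little-endian MMIO helpers, would be:

static void edma_writel(struct fsl_edma_engine *edma, u32 val,
			void __iomem *addr)
{
	/* sketch only: pick the accessor matching the engine model endian */
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

static void edma_writew(struct fsl_edma_engine *edma, u16 val,
			void __iomem *addr)
{
	if (edma->big_endian)
		iowrite16be(val, addr);
	else
		iowrite16(val, addr);
}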
static inline
tcd->attr = cpu_to_le16(attr);
- tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
+ tcd->soff = cpu_to_le16(soff);
- tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
- tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
+ tcd->nbytes = cpu_to_le32(nbytes);
+ tcd->slast = cpu_to_le32(slast);
tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
+ tcd->doff = cpu_to_le16(doff);
- tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
+ tcd->dlast_sga = cpu_to_le32(dlast_sga);
tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
if (major_int)
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
+/*
+ * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
+ * register offsets differ from those of the 64-channel ColdFire mcf5441x
+ * eDMA (here called "v2").
+ *
+ * This function sets up the register offsets as per the declared version,
+ * so it must be called in xxx_edma_probe() just after setting the edma
+ * "version" and "membase" fields.
+ */
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
+{
+ edma->regs.cr = edma->membase + EDMA_CR;
+ edma->regs.es = edma->membase + EDMA_ES;
+ edma->regs.erql = edma->membase + EDMA_ERQ;
+ edma->regs.eeil = edma->membase + EDMA_EEI;
+
+ edma->regs.serq = edma->membase + ((edma->version == v1) ?
+ EDMA_SERQ : EDMA64_SERQ);
+ edma->regs.cerq = edma->membase + ((edma->version == v1) ?
+ EDMA_CERQ : EDMA64_CERQ);
+ edma->regs.seei = edma->membase + ((edma->version == v1) ?
+ EDMA_SEEI : EDMA64_SEEI);
+ edma->regs.ceei = edma->membase + ((edma->version == v1) ?
+ EDMA_CEEI : EDMA64_CEEI);
+ edma->regs.cint = edma->membase + ((edma->version == v1) ?
+ EDMA_CINT : EDMA64_CINT);
+ edma->regs.cerr = edma->membase + ((edma->version == v1) ?
+ EDMA_CERR : EDMA64_CERR);
+ edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
+ EDMA_SSRT : EDMA64_SSRT);
+ edma->regs.cdne = edma->membase + ((edma->version == v1) ?
+ EDMA_CDNE : EDMA64_CDNE);
+ edma->regs.intl = edma->membase + ((edma->version == v1) ?
+ EDMA_INTR : EDMA64_INTL);
+ edma->regs.errl = edma->membase + ((edma->version == v1) ?
+ EDMA_ERR : EDMA64_ERRL);
+
+ if (edma->version == v2) {
+ edma->regs.erqh = edma->membase + EDMA64_ERQH;
+ edma->regs.eeih = edma->membase + EDMA64_EEIH;
+ edma->regs.errh = edma->membase + EDMA64_ERRH;
+ edma->regs.inth = edma->membase + EDMA64_INTH;
+ }
+
+ edma->regs.tcd = edma->membase + EDMA_TCD;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
+
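As a rough usage sketch: a hypothetical 64-channel (v2) ColdFire probe would set "version" and "membase" before calling fsl_edma_setup_regs() and could then use the precomputed pointers, including the high-half registers. The mcf_edma_probe() name and the details below are assumptions for illustration, not code from this series.

static int mcf_edma_probe(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma;
	struct resource *res;

	/* engine allocation; channel and dma_device setup elided */
	mcf_edma = devm_kzalloc(&pdev->dev, sizeof(*mcf_edma), GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->version = v2;		/* 64-channel register layout */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	/* must come after the version and membase assignments above */
	fsl_edma_setup_regs(mcf_edma);

	/* v2 also has the high halves, e.g. to ack pending interrupts */
	edma_writel(mcf_edma, ~0, mcf_edma->regs.intl);
	edma_writel(mcf_edma, ~0, mcf_edma->regs.inth);

	return 0;
}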
MODULE_LICENSE("GPL v2");
#include "virt-dma.h"
-#define EDMA_CR 0x00
-#define EDMA_ES 0x04
-#define EDMA_ERQ 0x0C
-#define EDMA_EEI 0x14
-#define EDMA_SERQ 0x1B
-#define EDMA_CERQ 0x1A
-#define EDMA_SEEI 0x19
-#define EDMA_CEEI 0x18
-#define EDMA_CINT 0x1F
-#define EDMA_CERR 0x1E
-#define EDMA_SSRT 0x1D
-#define EDMA_CDNE 0x1C
-#define EDMA_INTR 0x24
-#define EDMA_ERR 0x2C
-
-#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
-#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
-#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
-#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
-#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
-#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
-#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
-#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
-#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
-#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
-#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
-
#define EDMA_CR_EDBG BIT(1)
#define EDMA_CR_ERCA BIT(2)
#define EDMA_CR_ERGA BIT(3)
__le16 biter;
};
+/*
+ * These are iomem pointers, for both v1 (32-channel) and v2 (64-channel).
+ */
+struct edma_regs {
+ void __iomem *cr;
+ void __iomem *es;
+ void __iomem *erqh;
+ void __iomem *erql; /* aka erq on v1 */
+ void __iomem *eeih;
+ void __iomem *eeil; /* aka eei on v1 */
+ void __iomem *seei;
+ void __iomem *ceei;
+ void __iomem *serq;
+ void __iomem *cerq;
+ void __iomem *cint;
+ void __iomem *cerr;
+ void __iomem *ssrt;
+ void __iomem *cdne;
+ void __iomem *inth;
+ void __iomem *intl;
+ void __iomem *errh;
+ void __iomem *errl;
+ struct fsl_edma_hw_tcd __iomem *tcd;
+};
+
struct fsl_edma_sw_tcd {
dma_addr_t ptcd;
struct fsl_edma_hw_tcd *vtcd;
struct fsl_edma_sw_tcd tcd[];
};
+enum edma_version {
+ v1, /* 32ch, Vybrid, mpc57x, etc */
+ v2, /* 64ch ColdFire */
+};
+
struct fsl_edma_engine {
struct dma_device dma_dev;
void __iomem *membase;
int txirq;
int errirq;
bool big_endian;
+ enum edma_version version;
+ struct edma_regs regs;
struct fsl_edma_chan chans[];
};
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
#endif /* _FSL_EDMA_COMMON_H_ */
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
- void __iomem *base_addr;
+ struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;
- base_addr = fsl_edma->membase;
-
- intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
- edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
- base_addr + EDMA_CINT);
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
fsl_chan = &fsl_edma->chans[ch];
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int err, ch;
+ struct edma_regs *regs = &fsl_edma->regs;
- err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ err = edma_readl(fsl_edma, regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
- edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
- fsl_edma->membase + EDMA_CERR);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
}
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->version = v1;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+
for (i = 0; i < DMAMUX_NR; i++) {
char clkname[32];
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
+ edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
if (ret)
return ret;
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
{
struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs = &fsl_edma->regs;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
- fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}