1 From 8d6b6d7e135e9bbfc923d34a45cb0e72695e63ed Mon Sep 17 00:00:00 2001
2 From: Abhishek Sahu <absahu@codeaurora.org>
3 Date: Mon, 25 Sep 2017 13:21:26 +0530
4 Subject: [PATCH 3/7] mtd: nand: qcom: support for command descriptor formation
6 1. Add the function for command descriptor preparation which will
7 be used only by BAM DMA and it will form the DMA descriptors
8 containing command elements
9 2. DMA_PREP_CMD flag should be used for forming command DMA descriptors
12 Reviewed-by: Archit Taneja <architt@codeaurora.org>
13 Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
14 Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
16 drivers/mtd/nand/qcom_nandc.c | 108 +++++++++++++++++++++++++++++++++++-------
17 1 file changed, 92 insertions(+), 16 deletions(-)
19 --- a/drivers/mtd/nand/qcom_nandc.c
20 +++ b/drivers/mtd/nand/qcom_nandc.c
21 @@ -200,6 +200,14 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
23 #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
25 +/* Returns the NAND register physical address */
26 +#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
28 +/* Returns the dma address for reg read buffer */
29 +#define reg_buf_dma_addr(chip, vaddr) \
30 + ((chip)->reg_read_dma + \
31 + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
33 #define QPIC_PER_CW_CMD_ELEMENTS 32
34 #define QPIC_PER_CW_CMD_SGL 32
35 #define QPIC_PER_CW_DATA_SGL 8
36 @@ -317,7 +325,8 @@ struct nandc_regs {
40 - * @base_dma: physical base address of controller registers
41 + * @base_phys: physical base address of controller registers
42 + * @base_dma: dma base address of controller registers
43 * @core_clk: controller clock
44 * @aon_clk: another controller clock
46 @@ -350,6 +359,7 @@ struct qcom_nand_controller {
50 + phys_addr_t base_phys;
54 @@ -751,6 +761,66 @@ static int prepare_bam_async_desc(struct
58 + * Prepares the command descriptor for BAM DMA which will be used for NAND
59 + * register reads and writes. The command descriptor requires the command
60 + * to be formed in command element type so this function uses the command
61 + * element from bam transaction ce array and fills the same with required
62 + * data. A single SGL can contain multiple command elements so
63 + * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
64 + * after the current command element.
66 +static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
67 + int reg_off, const void *vaddr,
68 + int size, unsigned int flags)
72 + struct bam_cmd_element *bam_ce_buffer;
73 + struct bam_transaction *bam_txn = nandc->bam_txn;
75 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
77 + /* fill the command desc */
78 + for (i = 0; i < size; i++) {
80 + bam_prep_ce(&bam_ce_buffer[i],
81 + nandc_reg_phys(nandc, reg_off + 4 * i),
83 + reg_buf_dma_addr(nandc,
84 + (__le32 *)vaddr + i));
86 + bam_prep_ce_le32(&bam_ce_buffer[i],
87 + nandc_reg_phys(nandc, reg_off + 4 * i),
89 + *((__le32 *)vaddr + i));
92 + bam_txn->bam_ce_pos += size;
94 + /* use the separate sgl after this command */
95 + if (flags & NAND_BAM_NEXT_SGL) {
96 + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
97 + bam_ce_size = (bam_txn->bam_ce_pos -
98 + bam_txn->bam_ce_start) *
99 + sizeof(struct bam_cmd_element);
100 + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
101 + bam_ce_buffer, bam_ce_size);
102 + bam_txn->cmd_sgl_pos++;
103 + bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
105 + if (flags & NAND_BAM_NWD) {
106 + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
118 * Prepares the data descriptor for BAM DMA which will be used for NAND
119 * data reads and writes.
121 @@ -868,19 +938,22 @@ static int read_reg_dma(struct qcom_nand
123 bool flow_control = false;
127 - if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
128 - flow_control = true;
129 + vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
130 + nandc->reg_read_pos += num_regs;
132 if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
133 first = dev_cmd_reg_addr(nandc, first);
135 - size = num_regs * sizeof(u32);
136 - vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
137 - nandc->reg_read_pos += num_regs;
138 + if (nandc->props->is_bam)
139 + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
142 + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
143 + flow_control = true;
145 - return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
146 + return prep_adm_dma_desc(nandc, true, first, vaddr,
147 + num_regs * sizeof(u32), flow_control);
151 @@ -897,13 +970,9 @@ static int write_reg_dma(struct qcom_nan
152 bool flow_control = false;
153 struct nandc_regs *regs = nandc->regs;
157 vaddr = offset_to_nandc_reg(regs, first);
159 - if (first == NAND_FLASH_CMD)
160 - flow_control = true;
162 if (first == NAND_ERASED_CW_DETECT_CFG) {
163 if (flags & NAND_ERASED_CW_SET)
164 + vaddr = &regs->erased_cw_detect_cfg_set;
165 @@ -920,10 +989,15 @@ static int write_reg_dma(struct qcom_nan
166 if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
167 first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
169 - size = num_regs * sizeof(u32);
170 + if (nandc->props->is_bam)
171 + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
174 + if (first == NAND_FLASH_CMD)
175 + flow_control = true;
177 - return prep_adm_dma_desc(nandc, false, first, vaddr, size,
179 + return prep_adm_dma_desc(nandc, false, first, vaddr,
180 + num_regs * sizeof(u32), flow_control);
184 @@ -1187,7 +1261,8 @@ static int submit_descs(struct qcom_nand
187 if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
188 - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
189 + r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
194 @@ -2722,6 +2797,7 @@ static int qcom_nandc_probe(struct platf
195 if (IS_ERR(nandc->base))
196 return PTR_ERR(nandc->base);
198 + nandc->base_phys = res->start;
199 nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
201 nandc->core_clk = devm_clk_get(dev, "core");