From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Wed, 20 Nov 2024 14:45:01 +0530
Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common APIs

Add the qcom prefix to all the APIs that will be shared by the
SPI NAND driver and the raw NAND driver.

Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
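The rename is purely mechanical: every helper keeps its signature and
behaviour and only gains a qcom_ prefix. A minimal sketch of the
resulting pattern at a hypothetical call site (illustrative only, not
taken from this patch):

    /* before: helper name private to the raw NAND driver */
    ret = submit_descs(nandc);
    if (ret)
        dev_err(nandc->dev, "failure to submit descriptors\n");

    /* after: qcom_-prefixed name, shareable with the SPI NAND driver */
    ret = qcom_submit_descs(nandc);
    if (ret)
        dev_err(nandc->dev, "failure to submit descriptors\n");
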
 drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
 1 file changed, 160 insertions(+), 160 deletions(-)

diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 9ae8c9f2ab55..6da5d23d2c8b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -53,7 +53,7 @@
 #define NAND_READ_LOCATION_LAST_CW_2 0xf48
 #define NAND_READ_LOCATION_LAST_CW_3 0xf4c

-/* dummy register offsets, used by write_reg_dma */
+/* dummy register offsets, used by qcom_write_reg_dma */
 #define NAND_DEV_CMD1_RESTORE 0xdead
 #define NAND_DEV_CMD_VLD_RESTORE 0xbeef

@@ -211,7 +211,7 @@

 /*
  * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
  */
 /* Don't set the EOT in current tx BAM sgl */
 #define NAND_BAM_NO_EOT BIT(0)
@@ -550,7 +550,7 @@ struct qcom_nandc_props {
 };

 /* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn = nandc->bam_txn;

@@ -559,7 +559,7 @@ static void free_bam_transaction(struct qcom_nand_controller *nandc)

 /* Allocates and Initializes the BAM transaction */
 static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn;
 size_t bam_txn_size;
@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
 }

 /* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn = nandc->bam_txn;

@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
 }

 /* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
+static void qcom_qpic_bam_dma_done(void *data)
 {
 struct bam_transaction *bam_txn = data;

@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand_controller *nandc, int offset,
 iowrite32(val, nandc->base + offset);
 }

-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
 {
 if (!nandc->props->supports_bam)
 return;
@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
  * which will be submitted to DMA engine.
  */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ unsigned long flags)
 {
 struct desc_info *desc;
 struct scatterlist *sgl;
@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
  * after the current command element.
  */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
 {
 int bam_ce_size;
 int i, ret;
@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
 bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

 if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
 if (ret)
 return ret;
 }
@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
  * Prepares the data descriptor for BAM DMA which will be used for NAND
  * data reads and writes.
  */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
 {
 int ret;
 struct bam_transaction *bam_txn = nandc->bam_txn;
@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
  * is not set, form the DMA descriptor
  */
 if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
 if (ret)
 return ret;
 }
@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
 return 0;
 }

-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
 {
 struct desc_info *desc;
 struct dma_async_tx_descriptor *dma_desc;
@@ -1069,15 +1068,15 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
 }

 /*
- * read_reg_dma: prepares a descriptor to read a given number of
+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
  * contiguous registers to the reg_read_buf pointer
  *
  * @first: offset of the first register in the contiguous block
  * @num_regs: number of registers to read
  * @flags: flags to control DMA descriptor preparation
  */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
 {
 bool flow_control = false;
 void *vaddr;
@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
 first = dev_cmd_reg_addr(nandc, first);

 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
 num_regs, flags);

 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
 flow_control = true;

- return prep_adm_dma_desc(nandc, true, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
 num_regs * sizeof(u32), flow_control);
 }

 /*
- * write_reg_dma: prepares a descriptor to write a given number of
+ * qcom_write_reg_dma: prepares a descriptor to write a given number of
  * contiguous registers
  *
  * @vaddr: contiguous memory from where register value will
@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
  * @num_regs: number of registers to write
  * @flags: flags to control DMA descriptor preparation
  */
-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
- int first, int num_regs, unsigned int flags)
+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
 {
 bool flow_control = false;

@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
 first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
 num_regs, flags);

 if (first == NAND_FLASH_CMD)
 flow_control = true;

- return prep_adm_dma_desc(nandc, false, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
 num_regs * sizeof(u32), flow_control);
 }

 /*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
  * controller's internal buffer to the buffer 'vaddr'
  *
  * @reg_off: offset within the controller's data buffer
@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
  * @size: DMA transaction size in bytes
  * @flags: flags to control DMA descriptor preparation
  */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
 {
 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
 }

 /*
- * write_data_dma: prepares a DMA descriptor to transfer data from
+ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
  * 'vaddr' to the controller's internal buffer
  *
  * @reg_off: offset within the controller's data buffer
@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
  * @size: DMA transaction size in bytes
  * @flags: flags to control DMA descriptor preparation
  */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
 {
 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
 }

 /*
@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct nand_chip *chip)
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
 if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
- NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
 }

 /*
@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
 reg = &nandc->regs->read_location_last0;

 if (nandc->props->supports_bam)
- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

 if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
 } else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
 }
 }

@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struct nand_chip *chip)
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
 if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
 }

 /*
@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct nand_chip *chip)
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
 }

 /* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
 {
 struct desc_info *desc, *n;
 dma_cookie_t cookie = 0;
@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand_controller *nandc)

 if (nandc->props->supports_bam) {
 if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
 if (ret)
 goto err_unmap_free_desc;
 }

 if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
 if (ret)
 goto err_unmap_free_desc;
 }

 if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
 if (ret)
 goto err_unmap_free_desc;
 }
@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 cookie = dmaengine_submit(desc->dma_desc);

 if (nandc->props->supports_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
 bam_txn->last_cmd_desc->callback_param = bam_txn;

 dma_async_issue_pending(nandc->tx_chan);
@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 err_unmap_free_desc:
 /*
  * Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
  */
 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
 list_del(&desc->node);
@@ -1333,10 +1333,10 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 }

 /* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
 {
 nandc->reg_read_pos = 0;
- nandc_dev_to_mem(nandc, false);
+ qcom_nandc_dev_to_mem(nandc, false);
 }

 /*
@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 int i;

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < cw_cnt; i++) {
 u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
 nand_read_page_op(chip, page, 0, NULL, 0);
 nandc->buf_count = 0;
 nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
 host->use_ecc = false;

 if (nandc->props->qpic_version2)
 raw_cw = ecc->steps - 1;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
 set_address(host, host->cw_size * cw, page);
 update_rw_regs(host, 1, true, raw_cw);
 config_nand_page_read(chip);
@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,

 config_nand_cw_read(chip, false, raw_cw);

- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
 reg_off += data_size1;

- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
 reg_off += oob_size1;

- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
 reg_off += data_size2;

- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
 return ret;
@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
 u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

 buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < ecc->steps; i++, buf++) {
 u32 flash, buffer, erased_cw;
@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
 config_nand_cw_read(chip, true, i);

 if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);

 /*
  * when ecc is enabled, the controller doesn't read the real
@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
 for (j = 0; j < host->bbm_size; j++)
 *oob_buf++ = 0xff;

- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
 }

 if (data_buf)
@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
 oob_buf += oob_size;
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to read page/oob\n");
 return ret;
@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
 int size;
 int ret;

- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);

 size = host->use_ecc ? host->cw_data : host->cw_size;

@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)

 config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);

- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret)
 dev_err(nandc->dev, "failed to copy last codeword\n");

@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
 nandc->buf_count = 0;
 nandc->buf_start = 0;
 host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
 set_address(host, 0, page);
 update_rw_regs(host, ecc->steps, true, 0);

 data_buf = buf;
 oob_buf = oob_required ? chip->oob_poi : NULL;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);

 return read_page_ecc(host, data_buf, oob_buf, page);
 }
@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
 if (host->nr_boot_partitions)
 qcom_nandc_codeword_fixup(host, page);

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 host->use_ecc = true;
 set_address(host, 0, page);
@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
 set_address(host, 0, page);
 nandc->buf_count = 0;
 nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
 oob_size = ecc->bytes;
 }

- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

 /*
  * when ECC is enabled, we don't really need to write anything
@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
 if (qcom_nandc_is_last_cw(ecc, i)) {
 oob_buf += host->bbm_size;

- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
 }

 config_nand_cw_write(chip);
@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
 oob_buf += oob_size;
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write page\n");
 return ret;
@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
 qcom_nandc_codeword_fixup(host, page);

 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
 }

- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
 reg_off += data_size1;
 data_buf += data_size1;

- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
 reg_off += oob_size1;
 oob_buf += oob_size1;

- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
 reg_off += data_size2;
 data_buf += data_size2;

- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
 oob_buf += oob_size2;

 config_nand_cw_write(chip);
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write raw page\n");
 return ret;
@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
 qcom_nandc_codeword_fixup(host, page);

 host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);

 /* calculate the data and oob size for the last codeword/step */
 data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
 update_rw_regs(host, 1, false, 0);

 config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
 config_nand_cw_write(chip);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write oob\n");
 return ret;
@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
  */
 host->use_ecc = false;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
 ret = copy_last_cw(host, page);
 if (ret)
 goto err;
@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
 struct nand_ecc_ctrl *ecc = &chip->ecc;
 int page, ret;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 /*
  * to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
 update_rw_regs(host, 1, false, ecc->steps - 1);

 config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
 config_nand_cw_write(chip);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to update BBM\n");
 return ret;
@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
 /* Free the initially allocated BAM transaction for reading the ONFI params */
 if (nandc->props->supports_bam)
- free_bam_transaction(nandc);
+ qcom_free_bam_transaction(nandc);

 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
 cwperpage);

 /* Now allocate the BAM transaction based on updated max_cwperpage */
 if (nandc->props->supports_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
 if (!nandc->bam_txn) {
 dev_err(nandc->dev,
 "failed to allocate bam transaction\n");
@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
 unsigned long start = jiffies + msecs_to_jiffies(time_ms);
 u32 flash;

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 do {
 flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting status descriptor\n");
 goto err_out;
 }

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < num_cw; i++) {
 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->addr0 = q_op.addr1_reg;
@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
 nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting read id descriptor\n");
 goto err_out;
@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
 op_id = q_op.data_instr_idx;
 len = nand_subop_get_data_len(subop, op_id);

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
 memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);

 err_out:
@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
 if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting misc descriptor\n");
 goto err_out;
@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
 nandc->buf_count = 0;
 nandc->buf_start = 0;
 host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->addr0 = 0;
@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
 nandc_set_read_loc(chip, 0, 0, 0, len, 1);

 if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
 }

 nandc->buf_count = len;
@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_

 config_nand_single_cw_page_read(chip, false, 0);

- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);

 /* restore CMD1 and VLD regs */
 if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting param page descriptor\n");
 goto err_out;
@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
  * maximum codeword size
  */
 nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
 if (!nandc->bam_txn) {
 dev_err(nandc->dev,
 "failed to allocate bam transaction\n");
-- 
2.47.1
