--- /dev/null
+From 8c52932da5e6756fa66f52f0720da283fba13aa6 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:00 +0530
+Subject: [PATCH 1/4] mtd: rawnand: qcom: cleanup qcom_nandc driver
+
+Perform a global cleanup of the Qualcomm NAND
+controller driver with the following improvements:
+
+- Remove register value indirection API
+
+- Remove set_reg() API
+
+- Convert read_loc_first & read_loc_last macros to functions
+
+- Rename multiple variables
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 516 ++++++++++++++----------------
+ 1 file changed, 234 insertions(+), 282 deletions(-)
+
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 636bba2528bf..9ae8c9f2ab55 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -189,17 +189,6 @@
+ #define ECC_BCH_4BIT BIT(2)
+ #define ECC_BCH_8BIT BIT(3)
+
+-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
+-nandc_set_reg(chip, reg, \
+- ((cw_offset) << READ_LOCATION_OFFSET) | \
+- ((read_size) << READ_LOCATION_SIZE) | \
+- ((is_last_read_loc) << READ_LOCATION_LAST))
+-
+-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
+-nandc_set_reg(chip, reg, \
+- ((cw_offset) << READ_LOCATION_OFFSET) | \
+- ((read_size) << READ_LOCATION_SIZE) | \
+- ((is_last_read_loc) << READ_LOCATION_LAST))
+ /*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg, \
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+- * @wait_second_completion - wait for second DMA desc completion before making
+- * the NAND transfer completion.
+ */
+ struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+@@ -275,7 +262,6 @@ struct bam_transaction {
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+- bool wait_second_completion;
+ };
+
+ /*
+@@ -471,9 +457,9 @@ struct qcom_op {
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+- u32 addr1_reg;
+- u32 addr2_reg;
+- u32 cmd_reg;
++ __le32 addr1_reg;
++ __le32 addr2_reg;
++ __le32 cmd_reg;
+ u8 flag;
+ };
+
+@@ -549,17 +535,17 @@ struct qcom_nand_host {
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @is_bam - whether NAND controller is using BAM
+- * @is_qpic - whether NAND CTRL is part of qpic IP
+- * @qpic_v2 - flag to indicate QPIC IP version 2
++ * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+ struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+- bool is_bam;
+- bool is_qpic;
+- bool qpic_v2;
++ bool supports_bam;
++ bool nandc_part_of_qpic;
++ bool qpic_version2;
+ bool use_codeword_fixup;
+ };
+
+@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+- if (!nandc->props->is_bam)
++ if (!nandc->props->supports_bam)
+ return;
+
+- bam_txn->bam_ce_pos = 0;
+- bam_txn->bam_ce_start = 0;
+- bam_txn->cmd_sgl_pos = 0;
+- bam_txn->cmd_sgl_start = 0;
+- bam_txn->tx_sgl_pos = 0;
+- bam_txn->tx_sgl_start = 0;
+- bam_txn->rx_sgl_pos = 0;
+- bam_txn->rx_sgl_start = 0;
++ memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+ bam_txn->last_data_desc = NULL;
+- bam_txn->wait_second_completion = false;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+@@ -640,46 +618,35 @@ static void qpic_bam_dma_done(void *data)
+ {
+ struct bam_transaction *bam_txn = data;
+
+- /*
+- * In case of data transfer with NAND, 2 callbacks will be generated.
+- * One for command channel and another one for data channel.
+- * If current transaction has data descriptors
+- * (i.e. wait_second_completion is true), then set this to false
+- * and wait for second DMA descriptor completion.
+- */
+- if (bam_txn->wait_second_completion)
+- bam_txn->wait_second_completion = false;
+- else
+- complete(&bam_txn->txn_done);
++ complete(&bam_txn->txn_done);
+ }
+
+-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
++static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+ return container_of(chip, struct qcom_nand_host, chip);
+ }
+
+-static inline struct qcom_nand_controller *
++static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+ }
+
+-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
++static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+ {
+ return ioread32(nandc->base + offset);
+ }
+
+-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+- u32 val)
++static void nandc_write(struct qcom_nand_controller *nandc, int offset,
++ u32 val)
+ {
+ iowrite32(val, nandc->base + offset);
+ }
+
+-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+- bool is_cpu)
++static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+- if (!nandc->props->is_bam)
++ if (!nandc->props->supports_bam)
+ return;
+
+ if (is_cpu)
+@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+ DMA_FROM_DEVICE);
+ }
+
+-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+-{
+- switch (offset) {
+- case NAND_FLASH_CMD:
+- return ®s->cmd;
+- case NAND_ADDR0:
+- return ®s->addr0;
+- case NAND_ADDR1:
+- return ®s->addr1;
+- case NAND_FLASH_CHIP_SELECT:
+- return ®s->chip_sel;
+- case NAND_EXEC_CMD:
+- return ®s->exec;
+- case NAND_FLASH_STATUS:
+- return ®s->clrflashstatus;
+- case NAND_DEV0_CFG0:
+- return ®s->cfg0;
+- case NAND_DEV0_CFG1:
+- return ®s->cfg1;
+- case NAND_DEV0_ECC_CFG:
+- return ®s->ecc_bch_cfg;
+- case NAND_READ_STATUS:
+- return ®s->clrreadstatus;
+- case NAND_DEV_CMD1:
+- return ®s->cmd1;
+- case NAND_DEV_CMD1_RESTORE:
+- return ®s->orig_cmd1;
+- case NAND_DEV_CMD_VLD:
+- return ®s->vld;
+- case NAND_DEV_CMD_VLD_RESTORE:
+- return ®s->orig_vld;
+- case NAND_EBI2_ECC_BUF_CFG:
+- return ®s->ecc_buf_cfg;
+- case NAND_READ_LOCATION_0:
+- return ®s->read_location0;
+- case NAND_READ_LOCATION_1:
+- return ®s->read_location1;
+- case NAND_READ_LOCATION_2:
+- return ®s->read_location2;
+- case NAND_READ_LOCATION_3:
+- return ®s->read_location3;
+- case NAND_READ_LOCATION_LAST_CW_0:
+- return ®s->read_location_last0;
+- case NAND_READ_LOCATION_LAST_CW_1:
+- return ®s->read_location_last1;
+- case NAND_READ_LOCATION_LAST_CW_2:
+- return ®s->read_location_last2;
+- case NAND_READ_LOCATION_LAST_CW_3:
+- return ®s->read_location_last3;
+- default:
+- return NULL;
+- }
++/* Helper to check whether this is the last CW or not */
++static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
++{
++ return cw == (ecc->steps - 1);
+ }
+
+-static void nandc_set_reg(struct nand_chip *chip, int offset,
+- u32 val)
++/**
++ * nandc_set_read_loc_first() - to set read location first register
++ * @chip: NAND Private Flash Chip Data
++ * @reg_base: location register base
++ * @cw_offset: code word offset
++ * @read_size: code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location register value
++ */
++static void nandc_set_read_loc_first(struct nand_chip *chip,
++ int reg_base, u32 cw_offset,
++ u32 read_size, u32 is_last_read_loc)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+- struct nandc_regs *regs = nandc->regs;
+- __le32 *reg;
+-
+- reg = offset_to_nandc_reg(regs, offset);
++ __le32 locreg_val;
++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++ ((read_size) << READ_LOCATION_SIZE) |
++ ((is_last_read_loc) << READ_LOCATION_LAST));
++
++ locreg_val = cpu_to_le32(val);
++
++ if (reg_base == NAND_READ_LOCATION_0)
++ nandc->regs->read_location0 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_1)
++ nandc->regs->read_location1 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_2)
++ nandc->regs->read_location2 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_3)
++ nandc->regs->read_location3 = locreg_val;
++}
++
++/**
++ * nandc_set_read_loc_last - to set read location last register
++ * @chip: NAND Private Flash Chip Data
++ * @reg_base: location register base
++ * @cw_offset: code word offset
++ * @read_size: code word read length
++ * @is_last_read_loc: is this the last read location
++ *
++ * This function will set location last register value
++ */
++static void nandc_set_read_loc_last(struct nand_chip *chip,
++ int reg_base, u32 cw_offset,
++ u32 read_size, u32 is_last_read_loc)
++{
++ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++ __le32 locreg_val;
++ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++ ((read_size) << READ_LOCATION_SIZE) |
++ ((is_last_read_loc) << READ_LOCATION_LAST));
+
+- if (reg)
+- *reg = cpu_to_le32(val);
+-}
++ locreg_val = cpu_to_le32(val);
+
+-/* Helper to check the code word, whether it is last cw or not */
+-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+-{
+- return cw == (ecc->steps - 1);
++ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
++ nandc->regs->read_location_last0 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
++ nandc->regs->read_location_last1 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
++ nandc->regs->read_location_last2 = locreg_val;
++ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
++ nandc->regs->read_location_last3 = locreg_val;
+ }
+
+ /* helper to configure location register values */
+ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+- int cw_offset, int read_size, int is_last_read_loc)
++ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int reg_base = NAND_READ_LOCATION_0;
+
+- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ reg_base += reg * 4;
+
+- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ return nandc_set_read_loc_last(chip, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+ static void set_address(struct qcom_nand_host *host, u16 column, int page)
+ {
+ struct nand_chip *chip = &host->chip;
++ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+
+- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
+- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
++ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
++ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
+ }
+
+ /*
+@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
+ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
+ {
+ struct nand_chip *chip = &host->chip;
+- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
++ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (read) {
+ if (host->use_ecc)
+- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
++ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
+ else
+- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
++ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+ } else {
+- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
++ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
+ }
+
+ if (host->use_ecc) {
+- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+- (num_cw - 1) << CW_PER_PAGE;
++ cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
++ (num_cw - 1) << CW_PER_PAGE);
+
+- cfg1 = host->cfg1;
+- ecc_bch_cfg = host->ecc_bch_cfg;
++ cfg1 = cpu_to_le32(host->cfg1);
++ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
+ } else {
+- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+- (num_cw - 1) << CW_PER_PAGE;
++ cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++ (num_cw - 1) << CW_PER_PAGE);
+
+- cfg1 = host->cfg1_raw;
+- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++ cfg1 = cpu_to_le32(host->cfg1_raw);
++ ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+ }
+
+- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
+- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
+- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
+- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+- if (!nandc->props->qpic_v2)
+- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
+- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
+- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++ nandc->regs->cmd = cmd;
++ nandc->regs->cfg0 = cfg0;
++ nandc->regs->cfg1 = cfg1;
++ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
++
++ if (!nandc->props->qpic_version2)
++ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
++
++ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
++ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
++ nandc->regs->exec = cpu_to_le32(1);
+
+ if (read)
+ nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
+@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ * write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
++ * @vaddr: contiguous memory from where register value will
++ * be written
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+- int num_regs, unsigned int flags)
++static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++ int first, int num_regs, unsigned int flags)
+ {
+ bool flow_control = false;
+- struct nandc_regs *regs = nandc->regs;
+- void *vaddr;
+-
+- vaddr = offset_to_nandc_reg(regs, first);
+-
+- if (first == NAND_ERASED_CW_DETECT_CFG) {
+- if (flags & NAND_ERASED_CW_SET)
+- vaddr = ®s->erased_cw_detect_cfg_set;
+- else
+- vaddr = ®s->erased_cw_detect_cfg_clr;
+- }
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+ {
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+ {
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+- if (!nandc->props->qpic_v2)
+- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++ if (!nandc->props->qpic_version2)
++ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++ NAND_ERASED_CW_DETECT_CFG, 1, 0);
++ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+
+ /*
+@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+- int reg = NAND_READ_LOCATION_0;
++ __le32 *reg = &nandc->regs->read_location0;
+
+- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+- reg = NAND_READ_LOCATION_LAST_CW_0;
++ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
++ reg = &nandc->regs->read_location_last0;
+
+- if (nandc->props->is_bam)
+- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
++ if (nandc->props->supports_bam)
++ write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ if (use_ecc) {
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+- if (!nandc->props->qpic_v2)
+- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
++ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++ if (!nandc->props->qpic_version2)
++ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
+ }
+
+@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++ write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ /* helpers to submit/free our list of dma descriptors */
+@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+- if (bam_txn->last_data_desc) {
+- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+- bam_txn->last_data_desc->callback_param = bam_txn;
+- bam_txn->wait_second_completion = true;
+- }
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+@@ -1365,7 +1319,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+@@ -1382,7 +1336,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ static void clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+ nandc->reg_read_pos = 0;
+- nandc_read_buffer_sync(nandc, false);
++ nandc_dev_to_mem(nandc, false);
+ }
+
+ /*
+@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+- nandc_read_buffer_sync(nandc, true);
++ nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ clear_read_regs(nandc);
+ host->use_ecc = false;
+
+- if (nandc->props->qpic_v2)
++ if (nandc->props->qpic_version2)
+ raw_cw = ecc->steps - 1;
+
+ clear_bam_transaction(nandc);
+@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+- nandc_read_buffer_sync(nandc, true);
++ nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ if (data_buf && oob_buf) {
+ nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
+ nandc_set_read_loc(chip, i, 1, data_size,
+@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
+
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ /* Free the initially allocated BAM transaction for reading the ONFI params */
+- if (nandc->props->is_bam)
++ if (nandc->props->supports_bam)
+ free_bam_transaction(nandc);
+
+ nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ cwperpage);
+
+ /* Now allocate the BAM transaction based on updated max_cwperpage */
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
+ | ecc_mode << ECC_MODE
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+- if (!nandc->props->qpic_v2)
++ if (!nandc->props->qpic_version2)
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+ host->clrflashstatus = FS_READY_BSY_N;
+@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
+ cmd = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+- if (nandc->props->qpic_v2)
++ if (nandc->props->qpic_version2)
+ cmd = OP_PAGE_READ_ONFI_READ;
+ else
+ cmd = OP_PAGE_READ;
+@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struct nand_chip *chip,
+ if (ret < 0)
+ return ret;
+
+- q_op->cmd_reg = ret;
++ q_op->cmd_reg = cpu_to_le32(ret);
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struct nand_chip *chip,
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+- q_op->addr1_reg |= addrs[i] << (i * 8);
++ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
+
+ if (naddrs > 4)
+- q_op->addr2_reg |= addrs[4];
++ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
+
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+- nandc_read_buffer_sync(nandc, true);
++ nandc_dev_to_mem(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct nand_chip *chip,
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++ nandc->regs->cmd = q_op.cmd_reg;
++ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct nand_chip *chip,
+ goto err_out;
+ }
+
+- nandc_read_buffer_sync(nandc, true);
++ nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+- nandc->props->is_bam ? 0 : DM_EN);
+-
+- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++ nandc->regs->cmd = q_op.cmd_reg;
++ nandc->regs->addr0 = q_op.addr1_reg;
++ nandc->regs->addr1 = q_op.addr2_reg;
++ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
++ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+- nandc_read_buffer_sync(nandc, true);
++ nandc_dev_to_mem(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+ err_out:
+@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
+
+ if (q_op.flag == OP_PROGRAM_PAGE) {
+ goto wait_rdy;
+- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+- nandc_set_reg(chip, NAND_DEV0_CFG0,
+- host->cfg0_raw & ~(7 << CW_PER_PAGE));
+- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
++ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
++ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
++ nandc->regs->addr0 = q_op.addr1_reg;
++ nandc->regs->addr1 = q_op.addr2_reg;
++ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
++ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
+ instrs = 3;
+- } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
++ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
+ return 0;
+ }
+
+@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++ nandc->regs->cmd = q_op.cmd_reg;
++ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+- if (q_op.cmd_reg == OP_BLOCK_ERASE)
+- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
++ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ if (ret)
+ return ret;
+
+- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
++ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-
+- nandc_set_reg(chip, NAND_ADDR0, 0);
+- nandc_set_reg(chip, NAND_ADDR1, 0);
+- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+- | 512 << UD_SIZE_BYTES
+- | 5 << NUM_ADDR_CYCLES
+- | 0 << SPARE_SIZE_BYTES);
+- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+- | 0 << CS_ACTIVE_BSY
+- | 17 << BAD_BLOCK_BYTE_NUM
+- | 1 << BAD_BLOCK_IN_SPARE_AREA
+- | 2 << WR_RD_BSY_GAP
+- | 0 << WIDE_FLASH
+- | 1 << DEV0_CFG1_ECC_DISABLE);
+- if (!nandc->props->qpic_v2)
+- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
++ nandc->regs->cmd = q_op.cmd_reg;
++ nandc->regs->addr0 = 0;
++ nandc->regs->addr1 = 0;
++
++ nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
++ 512 << UD_SIZE_BYTES |
++ 5 << NUM_ADDR_CYCLES |
++ 0 << SPARE_SIZE_BYTES);
++
++ nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
++ 0 << CS_ACTIVE_BSY |
++ 17 << BAD_BLOCK_BYTE_NUM |
++ 1 << BAD_BLOCK_IN_SPARE_AREA |
++ 2 << WR_RD_BSY_GAP |
++ 0 << WIDE_FLASH |
++ 1 << DEV0_CFG1_ECC_DISABLE);
++
++ if (!nandc->props->qpic_version2)
++ nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+- if (!nandc->props->qpic_v2) {
+- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+- (nandc->vld & ~READ_START_VLD));
+- nandc_set_reg(chip, NAND_DEV_CMD1,
+- (nandc->cmd1 & ~(0xFF << READ_ADDR))
+- | NAND_CMD_PARAM << READ_ADDR);
++ if (!nandc->props->qpic_version2) {
++ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
++ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
++ | NAND_CMD_PARAM << READ_ADDR);
+ }
+
+- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++ nandc->regs->exec = cpu_to_le32(1);
+
+- if (!nandc->props->qpic_v2) {
+- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
++ if (!nandc->props->qpic_version2) {
++ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
++ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
+ }
+
+ instr = q_op.data_instr;
+@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+
+ nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+
+- if (!nandc->props->qpic_v2) {
+- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++ if (!nandc->props->qpic_version2) {
++ write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++ write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = len;
+@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+- if (!nandc->props->qpic_v2) {
+- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
++ if (!nandc->props->qpic_version2) {
++ write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++ write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++ NAND_BAM_NEXT_SGL);
+ }
+
+ ret = submit_descs(nandc);
+@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops qcom_nandc_ops = {
+
+ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ {
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ u32 nand_ctrl;
+
+ /* kill onenand */
+- if (!nandc->props->is_qpic)
++ if (!nandc->props->nandc_part_of_qpic)
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+
+- if (!nandc->props->qpic_v2)
++ if (!nandc->props->qpic_version2)
+ nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ NAND_DEV_CMD_VLD_VAL);
+
+ /* enable ADM or BAM DMA */
+- if (nandc->props->is_bam) {
++ if (nandc->props->supports_bam) {
+ nand_ctrl = nandc_read(nandc, NAND_CTRL);
+
+ /*
+@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ }
+
+ /* save the original values of these registers */
+- if (!nandc->props->qpic_v2) {
++ if (!nandc->props->qpic_version2) {
+ nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+ nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ }
+@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
+ struct device_node *np = nandc->dev->of_node;
+ int ret;
+
+- if (!nandc->props->is_bam) {
++ if (!nandc->props->supports_bam) {
+ ret = of_property_read_u32(np, "qcom,cmd-crci",
+ &nandc->cmd_crci);
+ if (ret) {
+@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct platform_device *pdev)
+
+ static const struct qcom_nandc_props ipq806x_nandc_props = {
+ .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+- .is_bam = false,
++ .supports_bam = false,
+ .use_codeword_fixup = true,
+ .dev_cmd_reg_start = 0x0,
+ };
+
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+- .is_bam = true,
+- .is_qpic = true,
++ .supports_bam = true,
++ .nandc_part_of_qpic = true,
+ .dev_cmd_reg_start = 0x0,
+ };
+
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+- .is_bam = true,
+- .is_qpic = true,
++ .supports_bam = true,
++ .nandc_part_of_qpic = true,
+ .dev_cmd_reg_start = 0x7000,
+ };
+
+ static const struct qcom_nandc_props sdx55_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+- .is_bam = true,
+- .is_qpic = true,
+- .qpic_v2 = true,
++ .supports_bam = true,
++ .nandc_part_of_qpic = true,
++ .qpic_version2 = true,
+ .dev_cmd_reg_start = 0x7000,
+ };
+
+--
+2.47.1
+
--- /dev/null
+From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:01 +0530
+Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
+
+Add the qcom prefix to all the APIs which will be commonly
+used by the SPI NAND driver and the raw NAND driver.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
+ 1 file changed, 160 insertions(+), 160 deletions(-)
+
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 9ae8c9f2ab55..6da5d23d2c8b 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -53,7 +53,7 @@
+ #define NAND_READ_LOCATION_LAST_CW_2 0xf48
+ #define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+-/* dummy register offsets, used by write_reg_dma */
++/* dummy register offsets, used by qcom_write_reg_dma */
+ #define NAND_DEV_CMD1_RESTORE 0xdead
+ #define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+@@ -211,7 +211,7 @@
+
+ /*
+ * Flags used in DMA descriptor preparation helper functions
+- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+ */
+ /* Don't set the EOT in current tx BAM sgl */
+ #define NAND_BAM_NO_EOT BIT(0)
+@@ -550,7 +550,7 @@ struct qcom_nandc_props {
+ };
+
+ /* Frees the BAM transaction memory */
+-static void free_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+@@ -559,7 +559,7 @@ static void free_bam_transaction(struct qcom_nand_controller *nandc)
+
+ /* Allocates and Initializes the BAM transaction */
+ static struct bam_transaction *
+-alloc_bam_transaction(struct qcom_nand_controller *nandc)
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
+ }
+
+ /* Clears the BAM transaction indexes */
+-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+ }
+
+ /* Callback for DMA descriptor completion */
+-static void qpic_bam_dma_done(void *data)
++static void qcom_qpic_bam_dma_done(void *data)
+ {
+ struct bam_transaction *bam_txn = data;
+
+@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ iowrite32(val, nandc->base + offset);
+ }
+
+-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+ if (!nandc->props->supports_bam)
+ return;
+@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
+ * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+ * which will be submitted to DMA engine.
+ */
+-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+- struct dma_chan *chan,
+- unsigned long flags)
++static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++ struct dma_chan *chan,
++ unsigned long flags)
+ {
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+ * after the current command element.
+ */
+-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+- int reg_off, const void *vaddr,
+- int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++ int reg_off, const void *vaddr,
++ int size, unsigned int flags)
+ {
+ int bam_ce_size;
+ int i, ret;
+@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+- DMA_PREP_FENCE |
+- DMA_PREP_CMD);
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++ DMA_PREP_FENCE |
++ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ * Prepares the data descriptor for BAM DMA which will be used for NAND
+ * data reads and writes.
+ */
+-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+- const void *vaddr,
+- int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++ const void *vaddr, int size, unsigned int flags)
+ {
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ * is not set, form the DMA descriptor
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+- DMA_PREP_INTERRUPT);
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ return 0;
+ }
+
+-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+- int reg_off, const void *vaddr, int size,
+- bool flow_control)
++static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++ int reg_off, const void *vaddr, int size,
++ bool flow_control)
+ {
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+@@ -1069,15 +1068,15 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ }
+
+ /*
+- * read_reg_dma: prepares a descriptor to read a given number of
++ * qcom_read_reg_dma: prepares a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ */
+-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+- int num_regs, unsigned int flags)
++static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++ int num_regs, unsigned int flags)
+ {
+ bool flow_control = false;
+ void *vaddr;
+@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->supports_bam)
+- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+- return prep_adm_dma_desc(nandc, true, first, vaddr,
++ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+ }
+
+ /*
+- * write_reg_dma: prepares a descriptor to write a given number of
++ * qcom_write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
+ * @vaddr: contiguous memory from where register value will
+@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+- int first, int num_regs, unsigned int flags)
++static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++ int first, int num_regs, unsigned int flags)
+ {
+ bool flow_control = false;
+
+@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->supports_bam)
+- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+- return prep_adm_dma_desc(nandc, false, first, vaddr,
++ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+ }
+
+ /*
+- * read_data_dma: prepares a DMA descriptor to transfer data from the
++ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off: offset within the controller's data buffer
+@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+- const u8 *vaddr, int size, unsigned int flags)
++static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++ const u8 *vaddr, int size, unsigned int flags)
+ {
+ if (nandc->props->supports_bam)
+- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ }
+
+ /*
+- * write_data_dma: prepares a DMA descriptor to transfer data from
++ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ *
+ * @reg_off: offset within the controller's data buffer
+@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ */
+-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+- const u8 *vaddr, int size, unsigned int flags)
++static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++ const u8 *vaddr, int size, unsigned int flags)
+ {
+ if (nandc->props->supports_bam)
+- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ }
+
+ /*
+@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+- NAND_ERASED_CW_DETECT_CFG, 1, 0);
+- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++ NAND_ERASED_CW_DETECT_CFG, 1, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+
+ /*
+@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
+ reg = &nandc->regs->read_location_last0;
+
+ if (nandc->props->supports_bam)
+- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ if (use_ecc) {
+- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+- NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++ NAND_BAM_NEXT_SGL);
+ } else {
+- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+ }
+
+@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+- NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
++ NAND_BAM_NEXT_SGL);
+ }
+
+ /*
+@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct nand_chip *chip)
+ {
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++ NAND_BAM_NEXT_SGL);
+ }
+
+ /* helpers to submit/free our list of dma descriptors */
+-static int submit_descs(struct qcom_nand_controller *nandc)
++static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+ {
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+
+ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+- DMA_PREP_INTERRUPT);
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+- DMA_PREP_CMD);
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->supports_bam) {
+- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
++ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+
+ dma_async_issue_pending(nandc->tx_chan);
+@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
++ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+@@ -1333,10 +1333,10 @@ static int submit_descs(struct qcom_nand_controller *nandc)
+ }
+
+ /* reset the register read buffer for next NAND operation */
+-static void clear_read_regs(struct qcom_nand_controller *nandc)
++static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+ nandc->reg_read_pos = 0;
+- nandc_dev_to_mem(nandc, false);
++ qcom_nandc_dev_to_mem(nandc, false);
+ }
+
+ /*
+@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int i;
+
+- nandc_dev_to_mem(nandc, true);
++ qcom_nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+- clear_read_regs(nandc);
++ qcom_clear_read_regs(nandc);
+ host->use_ecc = false;
+
+ if (nandc->props->qpic_version2)
+ raw_cw = ecc->steps - 1;
+
+- clear_bam_transaction(nandc);
++ qcom_clear_bam_transaction(nandc);
+ set_address(host, host->cw_size * cw, page);
+ update_rw_regs(host, 1, true, raw_cw);
+ config_nand_page_read(chip);
+@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
+
+ config_nand_cw_read(chip, false, raw_cw);
+
+- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
++ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
++ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
++ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+- nandc_dev_to_mem(nandc, true);
++ qcom_nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ config_nand_cw_read(chip, true, i);
+
+ if (data_buf)
+- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+- data_size, 0);
++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
++ data_size, 0);
+
+ /*
+ * when ecc is enabled, the controller doesn't read the real
+@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ for (j = 0; j < host->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+- oob_buf, oob_size, 0);
++ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
++ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ oob_buf += oob_size;
+ }
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page/oob\n");
+ return ret;
+@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
+ int size;
+ int ret;
+
+- clear_read_regs(nandc);
++ qcom_clear_read_regs(nandc);
+
+ size = host->use_ecc ? host->cw_data : host->cw_size;
+
+@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
+
+ config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
+
+- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failed to copy last codeword\n");
+
+@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = true;
+- clear_read_regs(nandc);
++ qcom_clear_read_regs(nandc);
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
+ data_buf = buf;
+ oob_buf = oob_required ? chip->oob_poi : NULL;
+
+- clear_bam_transaction(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ return read_page_ecc(host, data_buf, oob_buf, page);
+ }
+@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
+ if (host->nr_boot_partitions)
+ qcom_nandc_codeword_fixup(host, page);
+
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
+ set_address(host, 0, page);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
+ oob_size = ecc->bytes;
+ }
+
+- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
++ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
++ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
+ if (qcom_nandc_is_last_cw(ecc, i)) {
+ oob_buf += host->bbm_size;
+
+- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+- oob_buf, oob_size, 0);
++ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
++ oob_buf, oob_size, 0);
+ }
+
+ config_nand_cw_write(chip);
+@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
+ oob_buf += oob_size;
+ }
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write page\n");
+ return ret;
+@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
+ qcom_nandc_codeword_fixup(host, page);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+- write_data_dma(nandc, reg_off, data_buf, data_size1,
+- NAND_BAM_NO_EOT);
++ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
++ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+- NAND_BAM_NO_EOT);
++ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
++ NAND_BAM_NO_EOT);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+- write_data_dma(nandc, reg_off, data_buf, data_size2,
+- NAND_BAM_NO_EOT);
++ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
++ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
++ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ config_nand_cw_write(chip);
+ }
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write raw page\n");
+ return ret;
+@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
+ qcom_nandc_codeword_fixup(host, page);
+
+ host->use_ecc = true;
+- clear_bam_transaction(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
+ update_rw_regs(host, 1, false, 0);
+
+ config_nand_page_write(chip);
+- write_data_dma(nandc, FLASH_BUF_ACC,
+- nandc->data_buffer, data_size + oob_size, 0);
++ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++ nandc->data_buffer, data_size + oob_size, 0);
+ config_nand_cw_write(chip);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to write oob\n");
+ return ret;
+@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
+ */
+ host->use_ecc = false;
+
+- clear_bam_transaction(nandc);
++ qcom_clear_bam_transaction(nandc);
+ ret = copy_last_cw(host, page);
+ if (ret)
+ goto err;
+@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret;
+
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ /*
+ * to mark the BBM as bad, we flash the entire last codeword with 0s.
+@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
+ update_rw_regs(host, 1, false, ecc->steps - 1);
+
+ config_nand_page_write(chip);
+- write_data_dma(nandc, FLASH_BUF_ACC,
+- nandc->data_buffer, host->cw_size, 0);
++ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++ nandc->data_buffer, host->cw_size, 0);
+ config_nand_cw_write(chip);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure to update BBM\n");
+ return ret;
+@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ /* Free the initially allocated BAM transaction for reading the ONFI params */
+ if (nandc->props->supports_bam)
+- free_bam_transaction(nandc);
++ qcom_free_bam_transaction(nandc);
+
+ nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ cwperpage);
+
+ /* Now allocate the BAM transaction based on updated max_cwperpage */
+ if (nandc->props->supports_bam) {
+- nandc->bam_txn = alloc_bam_transaction(nandc);
++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+- nandc_dev_to_mem(nandc, true);
++ qcom_nandc_dev_to_mem(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ goto err_out;
+ }
+
+- nandc_dev_to_mem(nandc, true);
++ qcom_nandc_dev_to_mem(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = q_op.addr1_reg;
+@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ goto err_out;
+@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+- nandc_dev_to_mem(nandc, true);
++ qcom_nandc_dev_to_mem(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+ err_out:
+@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
+
+- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+
+- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ goto err_out;
+@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+- clear_read_regs(nandc);
+- clear_bam_transaction(nandc);
++ qcom_clear_read_regs(nandc);
++ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = 0;
+@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+
+ if (!nandc->props->qpic_version2) {
+- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = len;
+@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+
+ config_nand_single_cw_page_read(chip, false, 0);
+
+- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+- nandc->buf_count, 0);
++ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
++ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ if (!nandc->props->qpic_version2) {
+- write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+- NAND_BAM_NEXT_SGL);
++ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++ NAND_BAM_NEXT_SGL);
+ }
+
+- ret = submit_descs(nandc);
++ ret = qcom_submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ goto err_out;
+@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+- nandc->bam_txn = alloc_bam_transaction(nandc);
++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+--
+2.47.1
+
--- /dev/null
+From fdf3ee5c6e5278dab4f60b998b47ed2d510bf80f Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:02 +0530
+Subject: [PATCH 3/4] mtd: nand: Add qpic_common API file
+
+Add qpic_common.c file which holds all the common
+qpic APIs which will be used by both qpic raw nand
+driver and qpic spi nand driver.
+
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/Makefile | 2 +-
+ drivers/mtd/nand/qpic_common.c | 759 ++++++++++++++++++
+ drivers/mtd/nand/raw/qcom_nandc.c | 1092 +-------------------------
+ include/linux/mtd/nand-qpic-common.h | 468 +++++++++++
+ 4 files changed, 1240 insertions(+), 1081 deletions(-)
+ create mode 100644 drivers/mtd/nand/qpic_common.c
+ create mode 100644 include/linux/mtd/nand-qpic-common.h
+
+diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
+index 19e1291ac4d5..da1586a36574 100644
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -3,7 +3,7 @@
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
+-
++obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+ obj-y += onenand/
+ obj-y += raw/
+ obj-y += spi/
+diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
+new file mode 100644
+index 000000000000..8abbb960a7ce
+--- /dev/null
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -0,0 +1,759 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++
++/**
++ * qcom_free_bam_transaction() - Frees the BAM transaction memory
++ * @nandc: qpic nand controller
++ *
++ * This function frees the bam transaction memory
++ */
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
++{
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++
++ kfree(bam_txn);
++}
++EXPORT_SYMBOL(qcom_free_bam_transaction);
++
++/**
++ * qcom_alloc_bam_transaction() - allocate BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate and initialize the BAM transaction structure
++ */
++struct bam_transaction *
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
++{
++ struct bam_transaction *bam_txn;
++ size_t bam_txn_size;
++ unsigned int num_cw = nandc->max_cwperpage;
++ void *bam_txn_buf;
++
++ bam_txn_size =
++ sizeof(*bam_txn) + num_cw *
++ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
++ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
++ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
++
++ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
++ if (!bam_txn_buf)
++ return NULL;
++
++ bam_txn = bam_txn_buf;
++ bam_txn_buf += sizeof(*bam_txn);
++
++ bam_txn->bam_ce = bam_txn_buf;
++ bam_txn_buf +=
++ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
++
++ bam_txn->cmd_sgl = bam_txn_buf;
++ bam_txn_buf +=
++ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
++
++ bam_txn->data_sgl = bam_txn_buf;
++
++ init_completion(&bam_txn->txn_done);
++
++ return bam_txn;
++}
++EXPORT_SYMBOL(qcom_alloc_bam_transaction);
++
++/**
++ * qcom_clear_bam_transaction() - Clears the BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will clear the BAM transaction indexes.
++ */
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
++{
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++
++ if (!nandc->props->supports_bam)
++ return;
++
++ memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++ bam_txn->last_data_desc = NULL;
++
++ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
++ QPIC_PER_CW_CMD_SGL);
++ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
++ QPIC_PER_CW_DATA_SGL);
++
++ reinit_completion(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_clear_bam_transaction);
++
++/**
++ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
++ * @data: data pointer
++ *
++ * This function is a callback for DMA descriptor completion
++ */
++void qcom_qpic_bam_dma_done(void *data)
++{
++ struct bam_transaction *bam_txn = data;
++
++ complete(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
++
++/**
++ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
++ * @nandc: qpic nand controller
++ * @is_cpu: cpu or Device
++ *
++ * This function will check for dma sync for cpu or device
++ */
++inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++{
++ if (!nandc->props->supports_bam)
++ return;
++
++ if (is_cpu)
++ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
++ MAX_REG_RD *
++ sizeof(*nandc->reg_read_buf),
++ DMA_FROM_DEVICE);
++ else
++ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
++ MAX_REG_RD *
++ sizeof(*nandc->reg_read_buf),
++ DMA_FROM_DEVICE);
++}
++EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
++
++/**
++ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
++ * @nandc: qpic nand controller
++ * @chan: dma channel
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function maps the scatter gather list for DMA transfer and forms the
++ * DMA descriptor for BAM. This descriptor will be added in the NAND DMA
++ * descriptor queue which will be submitted to DMA engine.
++ */
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++ struct dma_chan *chan, unsigned long flags)
++{
++ struct desc_info *desc;
++ struct scatterlist *sgl;
++ unsigned int sgl_cnt;
++ int ret;
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++ enum dma_transfer_direction dir_eng;
++ struct dma_async_tx_descriptor *dma_desc;
++
++ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++
++ if (chan == nandc->cmd_chan) {
++ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
++ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
++ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
++ dir_eng = DMA_MEM_TO_DEV;
++ desc->dir = DMA_TO_DEVICE;
++ } else if (chan == nandc->tx_chan) {
++ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
++ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
++ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
++ dir_eng = DMA_MEM_TO_DEV;
++ desc->dir = DMA_TO_DEVICE;
++ } else {
++ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
++ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
++ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
++ dir_eng = DMA_DEV_TO_MEM;
++ desc->dir = DMA_FROM_DEVICE;
++ }
++
++ sg_mark_end(sgl + sgl_cnt - 1);
++ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++ if (ret == 0) {
++ dev_err(nandc->dev, "failure in mapping desc\n");
++ kfree(desc);
++ return -ENOMEM;
++ }
++
++ desc->sgl_cnt = sgl_cnt;
++ desc->bam_sgl = sgl;
++
++ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
++ flags);
++
++ if (!dma_desc) {
++ dev_err(nandc->dev, "failure in prep desc\n");
++ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++ kfree(desc);
++ return -EINVAL;
++ }
++
++ desc->dma_desc = dma_desc;
++
++ /* update last data/command descriptor */
++ if (chan == nandc->cmd_chan)
++ bam_txn->last_cmd_desc = dma_desc;
++ else
++ bam_txn->last_data_desc = dma_desc;
++
++ list_add_tail(&desc->node, &nandc->desc_list);
++
++ return 0;
++}
++EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
++
++/**
++ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare the command descriptor for BAM DMA
++ * which will be used for NAND register reads and writes.
++ */
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++ int reg_off, const void *vaddr,
++ int size, unsigned int flags)
++{
++ int bam_ce_size;
++ int i, ret;
++ struct bam_cmd_element *bam_ce_buffer;
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++
++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
++
++ /* fill the command desc */
++ for (i = 0; i < size; i++) {
++ if (read)
++ bam_prep_ce(&bam_ce_buffer[i],
++ nandc_reg_phys(nandc, reg_off + 4 * i),
++ BAM_READ_COMMAND,
++ reg_buf_dma_addr(nandc,
++ (__le32 *)vaddr + i));
++ else
++ bam_prep_ce_le32(&bam_ce_buffer[i],
++ nandc_reg_phys(nandc, reg_off + 4 * i),
++ BAM_WRITE_COMMAND,
++ *((__le32 *)vaddr + i));
++ }
++
++ bam_txn->bam_ce_pos += size;
++
++ /* use the separate sgl after this command */
++ if (flags & NAND_BAM_NEXT_SGL) {
++ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
++ bam_ce_size = (bam_txn->bam_ce_pos -
++ bam_txn->bam_ce_start) *
++ sizeof(struct bam_cmd_element);
++ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
++ bam_ce_buffer, bam_ce_size);
++ bam_txn->cmd_sgl_pos++;
++ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
++
++ if (flags & NAND_BAM_NWD) {
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++ DMA_PREP_FENCE | DMA_PREP_CMD);
++ if (ret)
++ return ret;
++ }
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
++
++/**
++ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare the data descriptor for BAM DMA which
++ * will be used for NAND data reads and writes.
++ */
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++ const void *vaddr, int size, unsigned int flags)
++{
++ int ret;
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++
++ if (read) {
++ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
++ vaddr, size);
++ bam_txn->rx_sgl_pos++;
++ } else {
++ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
++ vaddr, size);
++ bam_txn->tx_sgl_pos++;
++
++ /*
++ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
++ * is not set, form the DMA descriptor
++ */
++ if (!(flags & NAND_BAM_NO_EOT)) {
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++ DMA_PREP_INTERRUPT);
++ if (ret)
++ return ret;
++ }
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
++
++/**
++ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: adm dma transaction size in bytes
++ * @flow_control: flow controller
++ *
++ * This function will prepare descriptor for adma
++ */
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++ int reg_off, const void *vaddr, int size,
++ bool flow_control)
++{
++ struct qcom_adm_peripheral_config periph_conf = {};
++ struct dma_async_tx_descriptor *dma_desc;
++ struct dma_slave_config slave_conf = {0};
++ enum dma_transfer_direction dir_eng;
++ struct desc_info *desc;
++ struct scatterlist *sgl;
++ int ret;
++
++ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++
++ sgl = &desc->adm_sgl;
++
++ sg_init_one(sgl, vaddr, size);
++
++ if (read) {
++ dir_eng = DMA_DEV_TO_MEM;
++ desc->dir = DMA_FROM_DEVICE;
++ } else {
++ dir_eng = DMA_MEM_TO_DEV;
++ desc->dir = DMA_TO_DEVICE;
++ }
++
++ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
++ if (!ret) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ slave_conf.device_fc = flow_control;
++ if (read) {
++ slave_conf.src_maxburst = 16;
++ slave_conf.src_addr = nandc->base_dma + reg_off;
++ if (nandc->data_crci) {
++ periph_conf.crci = nandc->data_crci;
++ slave_conf.peripheral_config = &periph_conf;
++ slave_conf.peripheral_size = sizeof(periph_conf);
++ }
++ } else {
++ slave_conf.dst_maxburst = 16;
++ slave_conf.dst_addr = nandc->base_dma + reg_off;
++ if (nandc->cmd_crci) {
++ periph_conf.crci = nandc->cmd_crci;
++ slave_conf.peripheral_config = &periph_conf;
++ slave_conf.peripheral_size = sizeof(periph_conf);
++ }
++ }
++
++ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
++ if (ret) {
++ dev_err(nandc->dev, "failed to configure dma channel\n");
++ goto err;
++ }
++
++ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
++ if (!dma_desc) {
++ dev_err(nandc->dev, "failed to prepare desc\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
++ desc->dma_desc = dma_desc;
++
++ list_add_tail(&desc->node, &nandc->desc_list);
++
++ return 0;
++err:
++ kfree(desc);
++
++ return ret;
++}
++EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
++
++/**
++ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
++ * @nandc: qpic nand controller
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to read
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare a descriptor to read a given number of
++ * contiguous registers to the reg_read_buf pointer.
++ */
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++ int num_regs, unsigned int flags)
++{
++ bool flow_control = false;
++ void *vaddr;
++
++ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
++ nandc->reg_read_pos += num_regs;
++
++ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
++ first = dev_cmd_reg_addr(nandc, first);
++
++ if (nandc->props->supports_bam)
++ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++ num_regs, flags);
++
++ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
++ flow_control = true;
++
++ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
++ num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_read_reg_dma);
++
++/**
++ * qcom_write_reg_dma() - write a given number of registers
++ * @nandc: qpic nand controller
++ * @vaddr: contiguous memory from where register value will
++ * be written
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to write
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare a descriptor to write a given number of
++ * contiguous registers
++ */
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++ int first, int num_regs, unsigned int flags)
++{
++ bool flow_control = false;
++
++ if (first == NAND_EXEC_CMD)
++ flags |= NAND_BAM_NWD;
++
++ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
++ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
++
++ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
++ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
++
++ if (nandc->props->supports_bam)
++ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++ num_regs, flags);
++
++ if (first == NAND_FLASH_CMD)
++ flow_control = true;
++
++ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
++ num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_write_reg_dma);
++
++/**
++ * qcom_read_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare a DMA descriptor to transfer data from the
++ * controller's internal buffer to the buffer 'vaddr'
++ */
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++ const u8 *vaddr, int size, unsigned int flags)
++{
++ if (nandc->props->supports_bam)
++ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++
++ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_read_data_dma);
++
++/**
++ * qcom_write_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to read from
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepare a DMA descriptor to transfer data from
++ * 'vaddr' to the controller's internal buffer
++ */
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++ const u8 *vaddr, int size, unsigned int flags)
++{
++ if (nandc->props->supports_bam)
++ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++
++ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_write_data_dma);
++
++/**
++ * qcom_submit_descs() - submit dma descriptor
++ * @nandc: qpic nand controller
++ *
++ * This function will submit all the prepared dma descriptors
++ * (cmd or data descriptors)
++ */
++int qcom_submit_descs(struct qcom_nand_controller *nandc)
++{
++ struct desc_info *desc, *n;
++ dma_cookie_t cookie = 0;
++ struct bam_transaction *bam_txn = nandc->bam_txn;
++ int ret = 0;
++
++ if (nandc->props->supports_bam) {
++ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++ if (ret)
++ goto err_unmap_free_desc;
++ }
++
++ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++ DMA_PREP_INTERRUPT);
++ if (ret)
++ goto err_unmap_free_desc;
++ }
++
++ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
++ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++ DMA_PREP_CMD);
++ if (ret)
++ goto err_unmap_free_desc;
++ }
++ }
++
++ list_for_each_entry(desc, &nandc->desc_list, node)
++ cookie = dmaengine_submit(desc->dma_desc);
++
++ if (nandc->props->supports_bam) {
++ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
++ bam_txn->last_cmd_desc->callback_param = bam_txn;
++
++ dma_async_issue_pending(nandc->tx_chan);
++ dma_async_issue_pending(nandc->rx_chan);
++ dma_async_issue_pending(nandc->cmd_chan);
++
++ if (!wait_for_completion_timeout(&bam_txn->txn_done,
++ QPIC_NAND_COMPLETION_TIMEOUT))
++ ret = -ETIMEDOUT;
++ } else {
++ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
++ ret = -ETIMEDOUT;
++ }
++
++err_unmap_free_desc:
++ /*
++ * Unmap the dma sg_list and free the desc allocated by both
++ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
++ */
++ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
++ list_del(&desc->node);
++
++ if (nandc->props->supports_bam)
++ dma_unmap_sg(nandc->dev, desc->bam_sgl,
++ desc->sgl_cnt, desc->dir);
++ else
++ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
++ desc->dir);
++
++ kfree(desc);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(qcom_submit_descs);
++
++/**
++ * qcom_clear_read_regs() - reset the read register buffer
++ * @nandc: qpic nand controller
++ *
++ * This function resets the register read buffer for the next NAND operation
++ */
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
++{
++ nandc->reg_read_pos = 0;
++ qcom_nandc_dev_to_mem(nandc, false);
++}
++EXPORT_SYMBOL(qcom_clear_read_regs);
++
++/**
++ * qcom_nandc_unalloc() - unallocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will unallocate memory allocated for qpic nand controller
++ */
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
++{
++ if (nandc->props->supports_bam) {
++ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
++ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
++ MAX_REG_RD *
++ sizeof(*nandc->reg_read_buf),
++ DMA_FROM_DEVICE);
++
++ if (nandc->tx_chan)
++ dma_release_channel(nandc->tx_chan);
++
++ if (nandc->rx_chan)
++ dma_release_channel(nandc->rx_chan);
++
++ if (nandc->cmd_chan)
++ dma_release_channel(nandc->cmd_chan);
++ } else {
++ if (nandc->chan)
++ dma_release_channel(nandc->chan);
++ }
++}
++EXPORT_SYMBOL(qcom_nandc_unalloc);
++
++/**
++ * qcom_nandc_alloc() - Allocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate memory for qpic nand controller
++ */
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
++{
++ int ret;
++
++ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
++ if (ret) {
++ dev_err(nandc->dev, "failed to set DMA mask\n");
++ return ret;
++ }
++
++ /*
++ * we use the internal buffer for reading ONFI params, reading small
++ * data like ID and status, and performing read-copy-write operations
++ * when writing to a codeword partially. 532 is the maximum possible
++ * size of a codeword for our nand controller
++ */
++ nandc->buf_size = 532;
++
++ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
++ if (!nandc->data_buffer)
++ return -ENOMEM;
++
++ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
++ if (!nandc->regs)
++ return -ENOMEM;
++
++ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
++ sizeof(*nandc->reg_read_buf),
++ GFP_KERNEL);
++ if (!nandc->reg_read_buf)
++ return -ENOMEM;
++
++ if (nandc->props->supports_bam) {
++ nandc->reg_read_dma =
++ dma_map_single(nandc->dev, nandc->reg_read_buf,
++ MAX_REG_RD *
++ sizeof(*nandc->reg_read_buf),
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
++ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
++ return -EIO;
++ }
++
++ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
++ if (IS_ERR(nandc->tx_chan)) {
++ ret = PTR_ERR(nandc->tx_chan);
++ nandc->tx_chan = NULL;
++ dev_err_probe(nandc->dev, ret,
++ "tx DMA channel request failed\n");
++ goto unalloc;
++ }
++
++ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
++ if (IS_ERR(nandc->rx_chan)) {
++ ret = PTR_ERR(nandc->rx_chan);
++ nandc->rx_chan = NULL;
++ dev_err_probe(nandc->dev, ret,
++ "rx DMA channel request failed\n");
++ goto unalloc;
++ }
++
++ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
++ if (IS_ERR(nandc->cmd_chan)) {
++ ret = PTR_ERR(nandc->cmd_chan);
++ nandc->cmd_chan = NULL;
++ dev_err_probe(nandc->dev, ret,
++ "cmd DMA channel request failed\n");
++ goto unalloc;
++ }
++
++ /*
++ * Initially allocate BAM transaction to read ONFI param page.
++ * After detecting all the devices, this BAM transaction will
++ * be freed and the next BAM transaction will be allocated with
++ * maximum codeword size
++ */
++ nandc->max_cwperpage = 1;
++ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
++ if (!nandc->bam_txn) {
++ dev_err(nandc->dev,
++ "failed to allocate bam transaction\n");
++ ret = -ENOMEM;
++ goto unalloc;
++ }
++ } else {
++ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
++ if (IS_ERR(nandc->chan)) {
++ ret = PTR_ERR(nandc->chan);
++ nandc->chan = NULL;
++ dev_err_probe(nandc->dev, ret,
++ "rxtx DMA channel request failed\n");
++ return ret;
++ }
++ }
++
++ INIT_LIST_HEAD(&nandc->desc_list);
++ INIT_LIST_HEAD(&nandc->host_list);
++
++ return 0;
++unalloc:
++ qcom_nandc_unalloc(nandc);
++ return ret;
++}
++EXPORT_SYMBOL(qcom_nandc_alloc);
++
++MODULE_DESCRIPTION("QPIC controller common api");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 6da5d23d2c8b..dcb62fd19dd7 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -15,417 +15,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+-
+-/* NANDc reg offsets */
+-#define NAND_FLASH_CMD 0x00
+-#define NAND_ADDR0 0x04
+-#define NAND_ADDR1 0x08
+-#define NAND_FLASH_CHIP_SELECT 0x0c
+-#define NAND_EXEC_CMD 0x10
+-#define NAND_FLASH_STATUS 0x14
+-#define NAND_BUFFER_STATUS 0x18
+-#define NAND_DEV0_CFG0 0x20
+-#define NAND_DEV0_CFG1 0x24
+-#define NAND_DEV0_ECC_CFG 0x28
+-#define NAND_AUTO_STATUS_EN 0x2c
+-#define NAND_DEV1_CFG0 0x30
+-#define NAND_DEV1_CFG1 0x34
+-#define NAND_READ_ID 0x40
+-#define NAND_READ_STATUS 0x44
+-#define NAND_DEV_CMD0 0xa0
+-#define NAND_DEV_CMD1 0xa4
+-#define NAND_DEV_CMD2 0xa8
+-#define NAND_DEV_CMD_VLD 0xac
+-#define SFLASHC_BURST_CFG 0xe0
+-#define NAND_ERASED_CW_DETECT_CFG 0xe8
+-#define NAND_ERASED_CW_DETECT_STATUS 0xec
+-#define NAND_EBI2_ECC_BUF_CFG 0xf0
+-#define FLASH_BUF_ACC 0x100
+-
+-#define NAND_CTRL 0xf00
+-#define NAND_VERSION 0xf08
+-#define NAND_READ_LOCATION_0 0xf20
+-#define NAND_READ_LOCATION_1 0xf24
+-#define NAND_READ_LOCATION_2 0xf28
+-#define NAND_READ_LOCATION_3 0xf2c
+-#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+-#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+-#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+-
+-/* dummy register offsets, used by qcom_write_reg_dma */
+-#define NAND_DEV_CMD1_RESTORE 0xdead
+-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+-
+-/* NAND_FLASH_CMD bits */
+-#define PAGE_ACC BIT(4)
+-#define LAST_PAGE BIT(5)
+-
+-/* NAND_FLASH_CHIP_SELECT bits */
+-#define NAND_DEV_SEL 0
+-#define DM_EN BIT(2)
+-
+-/* NAND_FLASH_STATUS bits */
+-#define FS_OP_ERR BIT(4)
+-#define FS_READY_BSY_N BIT(5)
+-#define FS_MPU_ERR BIT(8)
+-#define FS_DEVICE_STS_ERR BIT(16)
+-#define FS_DEVICE_WP BIT(23)
+-
+-/* NAND_BUFFER_STATUS bits */
+-#define BS_UNCORRECTABLE_BIT BIT(8)
+-#define BS_CORRECTABLE_ERR_MSK 0x1f
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define DISABLE_STATUS_AFTER_WRITE 4
+-#define CW_PER_PAGE 6
+-#define UD_SIZE_BYTES 9
+-#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+-#define ECC_PARITY_SIZE_BYTES_RS 19
+-#define SPARE_SIZE_BYTES 23
+-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+-#define NUM_ADDR_CYCLES 27
+-#define STATUS_BFR_READ 30
+-#define SET_RD_MODE_AFTER_STATUS 31
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define DEV0_CFG1_ECC_DISABLE 0
+-#define WIDE_FLASH 1
+-#define NAND_RECOVERY_CYCLES 2
+-#define CS_ACTIVE_BSY 5
+-#define BAD_BLOCK_BYTE_NUM 6
+-#define BAD_BLOCK_IN_SPARE_AREA 16
+-#define WR_RD_BSY_GAP 17
+-#define ENABLE_BCH_ECC 27
+-
+-/* NAND_DEV0_ECC_CFG bits */
+-#define ECC_CFG_ECC_DISABLE 0
+-#define ECC_SW_RESET 1
+-#define ECC_MODE 4
+-#define ECC_PARITY_SIZE_BYTES_BCH 8
+-#define ECC_NUM_DATA_BYTES 16
+-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+-#define ECC_FORCE_CLK_OPEN 30
+-
+-/* NAND_DEV_CMD1 bits */
+-#define READ_ADDR 0
+-
+-/* NAND_DEV_CMD_VLD bits */
+-#define READ_START_VLD BIT(0)
+-#define READ_STOP_VLD BIT(1)
+-#define WRITE_START_VLD BIT(2)
+-#define ERASE_START_VLD BIT(3)
+-#define SEQ_READ_START_VLD BIT(4)
+-
+-/* NAND_EBI2_ECC_BUF_CFG bits */
+-#define NUM_STEPS 0
+-
+-/* NAND_ERASED_CW_DETECT_CFG bits */
+-#define ERASED_CW_ECC_MASK 1
+-#define AUTO_DETECT_RES 0
+-#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+-#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+-
+-/* NAND_ERASED_CW_DETECT_STATUS bits */
+-#define PAGE_ALL_ERASED BIT(7)
+-#define CODEWORD_ALL_ERASED BIT(6)
+-#define PAGE_ERASED BIT(5)
+-#define CODEWORD_ERASED BIT(4)
+-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+-
+-/* NAND_READ_LOCATION_n bits */
+-#define READ_LOCATION_OFFSET 0
+-#define READ_LOCATION_SIZE 16
+-#define READ_LOCATION_LAST 31
+-
+-/* Version Mask */
+-#define NAND_VERSION_MAJOR_MASK 0xf0000000
+-#define NAND_VERSION_MAJOR_SHIFT 28
+-#define NAND_VERSION_MINOR_MASK 0x0fff0000
+-#define NAND_VERSION_MINOR_SHIFT 16
+-
+-/* NAND OP_CMDs */
+-#define OP_PAGE_READ 0x2
+-#define OP_PAGE_READ_WITH_ECC 0x3
+-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+-#define OP_PAGE_READ_ONFI_READ 0x5
+-#define OP_PROGRAM_PAGE 0x6
+-#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+-#define OP_PROGRAM_PAGE_SPARE 0x9
+-#define OP_BLOCK_ERASE 0xa
+-#define OP_CHECK_STATUS 0xc
+-#define OP_FETCH_ID 0xb
+-#define OP_RESET_DEVICE 0xd
+-
+-/* Default Value for NAND_DEV_CMD_VLD */
+-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+- ERASE_START_VLD | SEQ_READ_START_VLD)
+-
+-/* NAND_CTRL bits */
+-#define BAM_MODE_EN BIT(0)
+-
+-/*
+- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+- * the driver calls the chunks 'step' or 'codeword' interchangeably
+- */
+-#define NANDC_STEP_SIZE 512
+-
+-/*
+- * the largest page size we support is 8K, this will have 16 steps/codewords
+- * of 512 bytes each
+- */
+-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+-
+-/* we read at most 3 registers per codeword scan */
+-#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+-
+-/* ECC modes supported by the controller */
+-#define ECC_NONE BIT(0)
+-#define ECC_RS_4BIT BIT(1)
+-#define ECC_BCH_4BIT BIT(2)
+-#define ECC_BCH_8BIT BIT(3)
+-
+-/*
+- * Returns the actual register address for all NAND_DEV_ registers
+- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+- */
+-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+-
+-/* Returns the NAND register physical address */
+-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+-
+-/* Returns the dma address for reg read buffer */
+-#define reg_buf_dma_addr(chip, vaddr) \
+- ((chip)->reg_read_dma + \
+- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+-
+-#define QPIC_PER_CW_CMD_ELEMENTS 32
+-#define QPIC_PER_CW_CMD_SGL 32
+-#define QPIC_PER_CW_DATA_SGL 8
+-
+-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+-
+-/*
+- * Flags used in DMA descriptor preparation helper functions
+- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+- */
+-/* Don't set the EOT in current tx BAM sgl */
+-#define NAND_BAM_NO_EOT BIT(0)
+-/* Set the NWD flag in current BAM sgl */
+-#define NAND_BAM_NWD BIT(1)
+-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+-#define NAND_BAM_NEXT_SGL BIT(2)
+-/*
+- * Erased codeword status is being used two times in single transfer so this
+- * flag will determine the current value of erased codeword status register
+- */
+-#define NAND_ERASED_CW_SET BIT(4)
+-
+-#define MAX_ADDRESS_CYCLE 5
+-
+-/*
+- * This data type corresponds to the BAM transaction which will be used for all
+- * NAND transfers.
+- * @bam_ce - the array of BAM command elements
+- * @cmd_sgl - sgl for NAND BAM command pipe
+- * @data_sgl - sgl for NAND BAM consumer/producer pipe
+- * @last_data_desc - last DMA desc in data channel (tx/rx).
+- * @last_cmd_desc - last DMA desc in command channel.
+- * @txn_done - completion for NAND transfer.
+- * @bam_ce_pos - the index in bam_ce which is available for next sgl
+- * @bam_ce_start - the index in bam_ce which marks the start position ce
+- * for current sgl. It will be used for size calculation
+- * for current sgl
+- * @cmd_sgl_pos - current index in command sgl.
+- * @cmd_sgl_start - start index in command sgl.
+- * @tx_sgl_pos - current index in data sgl for tx.
+- * @tx_sgl_start - start index in data sgl for tx.
+- * @rx_sgl_pos - current index in data sgl for rx.
+- * @rx_sgl_start - start index in data sgl for rx.
+- */
+-struct bam_transaction {
+- struct bam_cmd_element *bam_ce;
+- struct scatterlist *cmd_sgl;
+- struct scatterlist *data_sgl;
+- struct dma_async_tx_descriptor *last_data_desc;
+- struct dma_async_tx_descriptor *last_cmd_desc;
+- struct completion txn_done;
+- u32 bam_ce_pos;
+- u32 bam_ce_start;
+- u32 cmd_sgl_pos;
+- u32 cmd_sgl_start;
+- u32 tx_sgl_pos;
+- u32 tx_sgl_start;
+- u32 rx_sgl_pos;
+- u32 rx_sgl_start;
+-};
+-
+-/*
+- * This data type corresponds to the nand dma descriptor
+- * @dma_desc - low level DMA engine descriptor
+- * @list - list for desc_info
+- *
+- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+- * ADM
+- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+- * @dir - DMA transfer direction
+- */
+-struct desc_info {
+- struct dma_async_tx_descriptor *dma_desc;
+- struct list_head node;
+-
+- union {
+- struct scatterlist adm_sgl;
+- struct {
+- struct scatterlist *bam_sgl;
+- int sgl_cnt;
+- };
+- };
+- enum dma_data_direction dir;
+-};
+-
+-/*
+- * holds the current register values that we want to write. acts as a contiguous
+- * chunk of memory which we use to write the controller registers through DMA.
+- */
+-struct nandc_regs {
+- __le32 cmd;
+- __le32 addr0;
+- __le32 addr1;
+- __le32 chip_sel;
+- __le32 exec;
+-
+- __le32 cfg0;
+- __le32 cfg1;
+- __le32 ecc_bch_cfg;
+-
+- __le32 clrflashstatus;
+- __le32 clrreadstatus;
+-
+- __le32 cmd1;
+- __le32 vld;
+-
+- __le32 orig_cmd1;
+- __le32 orig_vld;
+-
+- __le32 ecc_buf_cfg;
+- __le32 read_location0;
+- __le32 read_location1;
+- __le32 read_location2;
+- __le32 read_location3;
+- __le32 read_location_last0;
+- __le32 read_location_last1;
+- __le32 read_location_last2;
+- __le32 read_location_last3;
+-
+- __le32 erased_cw_detect_cfg_clr;
+- __le32 erased_cw_detect_cfg_set;
+-};
+-
+-/*
+- * NAND controller data struct
+- *
+- * @dev: parent device
+- *
+- * @base: MMIO base
+- *
+- * @core_clk: controller clock
+- * @aon_clk: another controller clock
+- *
+- * @regs: a contiguous chunk of memory for DMA register
+- * writes. contains the register values to be
+- * written to controller
+- *
+- * @props: properties of current NAND controller,
+- * initialized via DT match data
+- *
+- * @controller: base controller structure
+- * @host_list: list containing all the chips attached to the
+- * controller
+- *
+- * @chan: dma channel
+- * @cmd_crci: ADM DMA CRCI for command flow control
+- * @data_crci: ADM DMA CRCI for data flow control
+- *
+- * @desc_list: DMA descriptor list (list of desc_infos)
+- *
+- * @data_buffer: our local DMA buffer for page read/writes,
+- * used when we can't use the buffer provided
+- * by upper layers directly
+- * @reg_read_buf: local buffer for reading back registers via DMA
+- *
+- * @base_phys: physical base address of controller registers
+- * @base_dma: dma base address of controller registers
+- * @reg_read_dma: contains dma address for register read buffer
+- *
+- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+- * functions
+- * @max_cwperpage: maximum QPIC codewords required. calculated
+- * from all connected NAND devices pagesize
+- *
+- * @reg_read_pos: marker for data read in reg_read_buf
+- *
+- * @cmd1/vld: some fixed controller register values
+- *
+- * @exec_opwrite: flag to select correct number of code word
+- * while reading status
+- */
+-struct qcom_nand_controller {
+- struct device *dev;
+-
+- void __iomem *base;
+-
+- struct clk *core_clk;
+- struct clk *aon_clk;
+-
+- struct nandc_regs *regs;
+- struct bam_transaction *bam_txn;
+-
+- const struct qcom_nandc_props *props;
+-
+- struct nand_controller controller;
+- struct list_head host_list;
+-
+- union {
+- /* will be used only by QPIC for BAM DMA */
+- struct {
+- struct dma_chan *tx_chan;
+- struct dma_chan *rx_chan;
+- struct dma_chan *cmd_chan;
+- };
+-
+- /* will be used only by EBI2 for ADM DMA */
+- struct {
+- struct dma_chan *chan;
+- unsigned int cmd_crci;
+- unsigned int data_crci;
+- };
+- };
+-
+- struct list_head desc_list;
+-
+- u8 *data_buffer;
+- __le32 *reg_read_buf;
+-
+- phys_addr_t base_phys;
+- dma_addr_t base_dma;
+- dma_addr_t reg_read_dma;
+-
+- int buf_size;
+- int buf_count;
+- int buf_start;
+- unsigned int max_cwperpage;
+-
+- int reg_read_pos;
+-
+- u32 cmd1, vld;
+- bool exec_opwrite;
+-};
++#include <linux/mtd/nand-qpic-common.h>
+
+ /*
+ * NAND special boot partitions
+@@ -530,97 +120,6 @@ struct qcom_nand_host {
+ bool bch_enabled;
+ };
+
+-/*
+- * This data type corresponds to the NAND controller properties which varies
+- * among different NAND controllers.
+- * @ecc_modes - ecc mode for NAND
+- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
+- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+- * @qpic_version2 - flag to indicate QPIC IP version 2
+- * @use_codeword_fixup - whether NAND has different layout for boot partitions
+- */
+-struct qcom_nandc_props {
+- u32 ecc_modes;
+- u32 dev_cmd_reg_start;
+- bool supports_bam;
+- bool nandc_part_of_qpic;
+- bool qpic_version2;
+- bool use_codeword_fixup;
+-};
+-
+-/* Frees the BAM transaction memory */
+-static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+- devm_kfree(nandc->dev, bam_txn);
+-}
+-
+-/* Allocates and Initializes the BAM transaction */
+-static struct bam_transaction *
+-qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+- struct bam_transaction *bam_txn;
+- size_t bam_txn_size;
+- unsigned int num_cw = nandc->max_cwperpage;
+- void *bam_txn_buf;
+-
+- bam_txn_size =
+- sizeof(*bam_txn) + num_cw *
+- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+-
+- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+- if (!bam_txn_buf)
+- return NULL;
+-
+- bam_txn = bam_txn_buf;
+- bam_txn_buf += sizeof(*bam_txn);
+-
+- bam_txn->bam_ce = bam_txn_buf;
+- bam_txn_buf +=
+- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+-
+- bam_txn->cmd_sgl = bam_txn_buf;
+- bam_txn_buf +=
+- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+-
+- bam_txn->data_sgl = bam_txn_buf;
+-
+- init_completion(&bam_txn->txn_done);
+-
+- return bam_txn;
+-}
+-
+-/* Clears the BAM transaction indexes */
+-static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+- if (!nandc->props->supports_bam)
+- return;
+-
+- memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+- bam_txn->last_data_desc = NULL;
+-
+- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+- QPIC_PER_CW_CMD_SGL);
+- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+- QPIC_PER_CW_DATA_SGL);
+-
+- reinit_completion(&bam_txn->txn_done);
+-}
+-
+-/* Callback for DMA descriptor completion */
+-static void qcom_qpic_bam_dma_done(void *data)
+-{
+- struct bam_transaction *bam_txn = data;
+-
+- complete(&bam_txn->txn_done);
+-}
+-
+ static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+ return container_of(chip, struct qcom_nand_host, chip);
+@@ -629,8 +128,8 @@ static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+- return container_of(chip->controller, struct qcom_nand_controller,
+- controller);
++ return (struct qcom_nand_controller *)
++ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
+ }
+
+ static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+@@ -644,23 +143,6 @@ static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ iowrite32(val, nandc->base + offset);
+ }
+
+-static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+-{
+- if (!nandc->props->supports_bam)
+- return;
+-
+- if (is_cpu)
+- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+- MAX_REG_RD *
+- sizeof(*nandc->reg_read_buf),
+- DMA_FROM_DEVICE);
+- else
+- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+- MAX_REG_RD *
+- sizeof(*nandc->reg_read_buf),
+- DMA_FROM_DEVICE);
+-}
+-
+ /* Helper to check whether this is the last CW or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+@@ -819,356 +301,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
+ host->cw_data : host->cw_size, 1);
+ }
+
+-/*
+- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
+- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+- * which will be submitted to DMA engine.
+- */
+-static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+- struct dma_chan *chan,
+- unsigned long flags)
+-{
+- struct desc_info *desc;
+- struct scatterlist *sgl;
+- unsigned int sgl_cnt;
+- int ret;
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+- enum dma_transfer_direction dir_eng;
+- struct dma_async_tx_descriptor *dma_desc;
+-
+- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+- if (!desc)
+- return -ENOMEM;
+-
+- if (chan == nandc->cmd_chan) {
+- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+- dir_eng = DMA_MEM_TO_DEV;
+- desc->dir = DMA_TO_DEVICE;
+- } else if (chan == nandc->tx_chan) {
+- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+- dir_eng = DMA_MEM_TO_DEV;
+- desc->dir = DMA_TO_DEVICE;
+- } else {
+- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+- dir_eng = DMA_DEV_TO_MEM;
+- desc->dir = DMA_FROM_DEVICE;
+- }
+-
+- sg_mark_end(sgl + sgl_cnt - 1);
+- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+- if (ret == 0) {
+- dev_err(nandc->dev, "failure in mapping desc\n");
+- kfree(desc);
+- return -ENOMEM;
+- }
+-
+- desc->sgl_cnt = sgl_cnt;
+- desc->bam_sgl = sgl;
+-
+- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+- flags);
+-
+- if (!dma_desc) {
+- dev_err(nandc->dev, "failure in prep desc\n");
+- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+- kfree(desc);
+- return -EINVAL;
+- }
+-
+- desc->dma_desc = dma_desc;
+-
+- /* update last data/command descriptor */
+- if (chan == nandc->cmd_chan)
+- bam_txn->last_cmd_desc = dma_desc;
+- else
+- bam_txn->last_data_desc = dma_desc;
+-
+- list_add_tail(&desc->node, &nandc->desc_list);
+-
+- return 0;
+-}
+-
+-/*
+- * Prepares the command descriptor for BAM DMA which will be used for NAND
+- * register reads and writes. The command descriptor requires the command
+- * to be formed in command element type so this function uses the command
+- * element from bam transaction ce array and fills the same with required
+- * data. A single SGL can contain multiple command elements so
+- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+- * after the current command element.
+- */
+-static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+- int reg_off, const void *vaddr,
+- int size, unsigned int flags)
+-{
+- int bam_ce_size;
+- int i, ret;
+- struct bam_cmd_element *bam_ce_buffer;
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+-
+- /* fill the command desc */
+- for (i = 0; i < size; i++) {
+- if (read)
+- bam_prep_ce(&bam_ce_buffer[i],
+- nandc_reg_phys(nandc, reg_off + 4 * i),
+- BAM_READ_COMMAND,
+- reg_buf_dma_addr(nandc,
+- (__le32 *)vaddr + i));
+- else
+- bam_prep_ce_le32(&bam_ce_buffer[i],
+- nandc_reg_phys(nandc, reg_off + 4 * i),
+- BAM_WRITE_COMMAND,
+- *((__le32 *)vaddr + i));
+- }
+-
+- bam_txn->bam_ce_pos += size;
+-
+- /* use the separate sgl after this command */
+- if (flags & NAND_BAM_NEXT_SGL) {
+- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+- bam_ce_size = (bam_txn->bam_ce_pos -
+- bam_txn->bam_ce_start) *
+- sizeof(struct bam_cmd_element);
+- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+- bam_ce_buffer, bam_ce_size);
+- bam_txn->cmd_sgl_pos++;
+- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+-
+- if (flags & NAND_BAM_NWD) {
+- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+- DMA_PREP_FENCE |
+- DMA_PREP_CMD);
+- if (ret)
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * Prepares the data descriptor for BAM DMA which will be used for NAND
+- * data reads and writes.
+- */
+-static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+- const void *vaddr, int size, unsigned int flags)
+-{
+- int ret;
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+- if (read) {
+- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+- vaddr, size);
+- bam_txn->rx_sgl_pos++;
+- } else {
+- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+- vaddr, size);
+- bam_txn->tx_sgl_pos++;
+-
+- /*
+- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+- * is not set, form the DMA descriptor
+- */
+- if (!(flags & NAND_BAM_NO_EOT)) {
+- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+- DMA_PREP_INTERRUPT);
+- if (ret)
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+- int reg_off, const void *vaddr, int size,
+- bool flow_control)
+-{
+- struct desc_info *desc;
+- struct dma_async_tx_descriptor *dma_desc;
+- struct scatterlist *sgl;
+- struct dma_slave_config slave_conf;
+- struct qcom_adm_peripheral_config periph_conf = {};
+- enum dma_transfer_direction dir_eng;
+- int ret;
+-
+- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+- if (!desc)
+- return -ENOMEM;
+-
+- sgl = &desc->adm_sgl;
+-
+- sg_init_one(sgl, vaddr, size);
+-
+- if (read) {
+- dir_eng = DMA_DEV_TO_MEM;
+- desc->dir = DMA_FROM_DEVICE;
+- } else {
+- dir_eng = DMA_MEM_TO_DEV;
+- desc->dir = DMA_TO_DEVICE;
+- }
+-
+- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+- if (ret == 0) {
+- ret = -ENOMEM;
+- goto err;
+- }
+-
+- memset(&slave_conf, 0x00, sizeof(slave_conf));
+-
+- slave_conf.device_fc = flow_control;
+- if (read) {
+- slave_conf.src_maxburst = 16;
+- slave_conf.src_addr = nandc->base_dma + reg_off;
+- if (nandc->data_crci) {
+- periph_conf.crci = nandc->data_crci;
+- slave_conf.peripheral_config = &periph_conf;
+- slave_conf.peripheral_size = sizeof(periph_conf);
+- }
+- } else {
+- slave_conf.dst_maxburst = 16;
+- slave_conf.dst_addr = nandc->base_dma + reg_off;
+- if (nandc->cmd_crci) {
+- periph_conf.crci = nandc->cmd_crci;
+- slave_conf.peripheral_config = &periph_conf;
+- slave_conf.peripheral_size = sizeof(periph_conf);
+- }
+- }
+-
+- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+- if (ret) {
+- dev_err(nandc->dev, "failed to configure dma channel\n");
+- goto err;
+- }
+-
+- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+- if (!dma_desc) {
+- dev_err(nandc->dev, "failed to prepare desc\n");
+- ret = -EINVAL;
+- goto err;
+- }
+-
+- desc->dma_desc = dma_desc;
+-
+- list_add_tail(&desc->node, &nandc->desc_list);
+-
+- return 0;
+-err:
+- kfree(desc);
+-
+- return ret;
+-}
+-
+-/*
+- * qcom_read_reg_dma: prepares a descriptor to read a given number of
+- * contiguous registers to the reg_read_buf pointer
+- *
+- * @first: offset of the first register in the contiguous block
+- * @num_regs: number of registers to read
+- * @flags: flags to control DMA descriptor preparation
+- */
+-static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+- int num_regs, unsigned int flags)
+-{
+- bool flow_control = false;
+- void *vaddr;
+-
+- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+- nandc->reg_read_pos += num_regs;
+-
+- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+- first = dev_cmd_reg_addr(nandc, first);
+-
+- if (nandc->props->supports_bam)
+- return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+- num_regs, flags);
+-
+- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+- flow_control = true;
+-
+- return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+- num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_write_reg_dma: prepares a descriptor to write a given number of
+- * contiguous registers
+- *
+- * @vaddr: contiguous memory from where register value will
+- * be written
+- * @first: offset of the first register in the contiguous block
+- * @num_regs: number of registers to write
+- * @flags: flags to control DMA descriptor preparation
+- */
+-static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+- int first, int num_regs, unsigned int flags)
+-{
+- bool flow_control = false;
+-
+- if (first == NAND_EXEC_CMD)
+- flags |= NAND_BAM_NWD;
+-
+- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+-
+- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-
+- if (nandc->props->supports_bam)
+- return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+- num_regs, flags);
+-
+- if (first == NAND_FLASH_CMD)
+- flow_control = true;
+-
+- return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+- num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
+- * controller's internal buffer to the buffer 'vaddr'
+- *
+- * @reg_off: offset within the controller's data buffer
+- * @vaddr: virtual address of the buffer we want to write to
+- * @size: DMA transaction size in bytes
+- * @flags: flags to control DMA descriptor preparation
+- */
+-static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+- const u8 *vaddr, int size, unsigned int flags)
+-{
+- if (nandc->props->supports_bam)
+- return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-
+- return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+-}
+-
+-/*
+- * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
+- * 'vaddr' to the controller's internal buffer
+- *
+- * @reg_off: offset within the controller's data buffer
+- * @vaddr: virtual address of the buffer we want to read from
+- * @size: DMA transaction size in bytes
+- * @flags: flags to control DMA descriptor preparation
+- */
+-static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+- const u8 *vaddr, int size, unsigned int flags)
+-{
+- if (nandc->props->supports_bam)
+- return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-
+- return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+-}
+-
+ /*
+ * Helper to prepare DMA descriptors for configuring registers
+ * before reading a NAND page.
+@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct nand_chip *chip)
+ NAND_BAM_NEXT_SGL);
+ }
+
+-/* helpers to submit/free our list of dma descriptors */
+-static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+-{
+- struct desc_info *desc, *n;
+- dma_cookie_t cookie = 0;
+- struct bam_transaction *bam_txn = nandc->bam_txn;
+- int ret = 0;
+-
+- if (nandc->props->supports_bam) {
+- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+- ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+- if (ret)
+- goto err_unmap_free_desc;
+- }
+-
+- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+- DMA_PREP_INTERRUPT);
+- if (ret)
+- goto err_unmap_free_desc;
+- }
+-
+- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+- DMA_PREP_CMD);
+- if (ret)
+- goto err_unmap_free_desc;
+- }
+- }
+-
+- list_for_each_entry(desc, &nandc->desc_list, node)
+- cookie = dmaengine_submit(desc->dma_desc);
+-
+- if (nandc->props->supports_bam) {
+- bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+- bam_txn->last_cmd_desc->callback_param = bam_txn;
+-
+- dma_async_issue_pending(nandc->tx_chan);
+- dma_async_issue_pending(nandc->rx_chan);
+- dma_async_issue_pending(nandc->cmd_chan);
+-
+- if (!wait_for_completion_timeout(&bam_txn->txn_done,
+- QPIC_NAND_COMPLETION_TIMEOUT))
+- ret = -ETIMEDOUT;
+- } else {
+- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+- ret = -ETIMEDOUT;
+- }
+-
+-err_unmap_free_desc:
+- /*
+- * Unmap the dma sg_list and free the desc allocated by both
+- * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+- */
+- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+- list_del(&desc->node);
+-
+- if (nandc->props->supports_bam)
+- dma_unmap_sg(nandc->dev, desc->bam_sgl,
+- desc->sgl_cnt, desc->dir);
+- else
+- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+- desc->dir);
+-
+- kfree(desc);
+- }
+-
+- return ret;
+-}
+-
+-/* reset the register read buffer for next NAND operation */
+-static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+-{
+- nandc->reg_read_pos = 0;
+- qcom_nandc_dev_to_mem(nandc, false);
+-}
+-
+ /*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops qcom_nandc_ops = {
+ .exec_op = qcom_nand_exec_op,
+ };
+
+-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+-{
+- if (nandc->props->supports_bam) {
+- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+- MAX_REG_RD *
+- sizeof(*nandc->reg_read_buf),
+- DMA_FROM_DEVICE);
+-
+- if (nandc->tx_chan)
+- dma_release_channel(nandc->tx_chan);
+-
+- if (nandc->rx_chan)
+- dma_release_channel(nandc->rx_chan);
+-
+- if (nandc->cmd_chan)
+- dma_release_channel(nandc->cmd_chan);
+- } else {
+- if (nandc->chan)
+- dma_release_channel(nandc->chan);
+- }
+-}
+-
+-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+-{
+- int ret;
+-
+- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+- if (ret) {
+- dev_err(nandc->dev, "failed to set DMA mask\n");
+- return ret;
+- }
+-
+- /*
+- * we use the internal buffer for reading ONFI params, reading small
+- * data like ID and status, and preforming read-copy-write operations
+- * when writing to a codeword partially. 532 is the maximum possible
+- * size of a codeword for our nand controller
+- */
+- nandc->buf_size = 532;
+-
+- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+- if (!nandc->data_buffer)
+- return -ENOMEM;
+-
+- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+- if (!nandc->regs)
+- return -ENOMEM;
+-
+- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+- sizeof(*nandc->reg_read_buf),
+- GFP_KERNEL);
+- if (!nandc->reg_read_buf)
+- return -ENOMEM;
+-
+- if (nandc->props->supports_bam) {
+- nandc->reg_read_dma =
+- dma_map_single(nandc->dev, nandc->reg_read_buf,
+- MAX_REG_RD *
+- sizeof(*nandc->reg_read_buf),
+- DMA_FROM_DEVICE);
+- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+- return -EIO;
+- }
+-
+- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+- if (IS_ERR(nandc->tx_chan)) {
+- ret = PTR_ERR(nandc->tx_chan);
+- nandc->tx_chan = NULL;
+- dev_err_probe(nandc->dev, ret,
+- "tx DMA channel request failed\n");
+- goto unalloc;
+- }
+-
+- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+- if (IS_ERR(nandc->rx_chan)) {
+- ret = PTR_ERR(nandc->rx_chan);
+- nandc->rx_chan = NULL;
+- dev_err_probe(nandc->dev, ret,
+- "rx DMA channel request failed\n");
+- goto unalloc;
+- }
+-
+- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+- if (IS_ERR(nandc->cmd_chan)) {
+- ret = PTR_ERR(nandc->cmd_chan);
+- nandc->cmd_chan = NULL;
+- dev_err_probe(nandc->dev, ret,
+- "cmd DMA channel request failed\n");
+- goto unalloc;
+- }
+-
+- /*
+- * Initially allocate BAM transaction to read ONFI param page.
+- * After detecting all the devices, this BAM transaction will
+- * be freed and the next BAM transaction will be allocated with
+- * maximum codeword size
+- */
+- nandc->max_cwperpage = 1;
+- nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+- if (!nandc->bam_txn) {
+- dev_err(nandc->dev,
+- "failed to allocate bam transaction\n");
+- ret = -ENOMEM;
+- goto unalloc;
+- }
+- } else {
+- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+- if (IS_ERR(nandc->chan)) {
+- ret = PTR_ERR(nandc->chan);
+- nandc->chan = NULL;
+- dev_err_probe(nandc->dev, ret,
+- "rxtx DMA channel request failed\n");
+- return ret;
+- }
+- }
+-
+- INIT_LIST_HEAD(&nandc->desc_list);
+- INIT_LIST_HEAD(&nandc->host_list);
+-
+- nand_controller_init(&nandc->controller);
+- nandc->controller.ops = &qcom_nandc_ops;
+-
+- return 0;
+-unalloc:
+- qcom_nandc_unalloc(nandc);
+- return ret;
+-}
+-
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+ u32 nand_ctrl;
+
++ nand_controller_init(nandc->controller);
++ nandc->controller->ops = &qcom_nandc_ops;
++
+ /* kill onenand */
+ if (!nandc->props->nandc_part_of_qpic)
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ chip->legacy.block_bad = qcom_nandc_block_bad;
+ chip->legacy.block_markbad = qcom_nandc_block_markbad;
+
+- chip->controller = &nandc->controller;
++ chip->controller = nandc->controller;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+ NAND_SKIP_BBTSCAN;
+
+@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
+ static int qcom_nandc_probe(struct platform_device *pdev)
+ {
+ struct qcom_nand_controller *nandc;
++ struct nand_controller *controller;
+ const void *dev_data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
++ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
++ GFP_KERNEL);
+ if (!nandc)
+ return -ENOMEM;
++ controller = (struct nand_controller *)&nandc[1];
+
+ platform_set_drvdata(pdev, nandc);
+ nandc->dev = dev;
++ nandc->controller = controller;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
+new file mode 100644
+index 000000000000..425994429387
+--- /dev/null
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -0,0 +1,468 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * QCOM QPIC common APIs header file
++ *
++ * Copyright (c) 2023 Qualcomm Inc.
++ * Authors: Md Sadre Alam <quic_mdalam@quicinc.com>
++ *
++ */
++#ifndef __MTD_NAND_QPIC_COMMON_H__
++#define __MTD_NAND_QPIC_COMMON_H__
++
++/* NANDc reg offsets */
++#define NAND_FLASH_CMD 0x00
++#define NAND_ADDR0 0x04
++#define NAND_ADDR1 0x08
++#define NAND_FLASH_CHIP_SELECT 0x0c
++#define NAND_EXEC_CMD 0x10
++#define NAND_FLASH_STATUS 0x14
++#define NAND_BUFFER_STATUS 0x18
++#define NAND_DEV0_CFG0 0x20
++#define NAND_DEV0_CFG1 0x24
++#define NAND_DEV0_ECC_CFG 0x28
++#define NAND_AUTO_STATUS_EN 0x2c
++#define NAND_DEV1_CFG0 0x30
++#define NAND_DEV1_CFG1 0x34
++#define NAND_READ_ID 0x40
++#define NAND_READ_STATUS 0x44
++#define NAND_DEV_CMD0 0xa0
++#define NAND_DEV_CMD1 0xa4
++#define NAND_DEV_CMD2 0xa8
++#define NAND_DEV_CMD_VLD 0xac
++#define SFLASHC_BURST_CFG 0xe0
++#define NAND_ERASED_CW_DETECT_CFG 0xe8
++#define NAND_ERASED_CW_DETECT_STATUS 0xec
++#define NAND_EBI2_ECC_BUF_CFG 0xf0
++#define FLASH_BUF_ACC 0x100
++
++#define NAND_CTRL 0xf00
++#define NAND_VERSION 0xf08
++#define NAND_READ_LOCATION_0 0xf20
++#define NAND_READ_LOCATION_1 0xf24
++#define NAND_READ_LOCATION_2 0xf28
++#define NAND_READ_LOCATION_3 0xf2c
++#define NAND_READ_LOCATION_LAST_CW_0 0xf40
++#define NAND_READ_LOCATION_LAST_CW_1 0xf44
++#define NAND_READ_LOCATION_LAST_CW_2 0xf48
++#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
++
++/* dummy register offsets, used by qcom_write_reg_dma */
++#define NAND_DEV_CMD1_RESTORE 0xdead
++#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
++
++/* NAND_FLASH_CMD bits */
++#define PAGE_ACC BIT(4)
++#define LAST_PAGE BIT(5)
++
++/* NAND_FLASH_CHIP_SELECT bits */
++#define NAND_DEV_SEL 0
++#define DM_EN BIT(2)
++
++/* NAND_FLASH_STATUS bits */
++#define FS_OP_ERR BIT(4)
++#define FS_READY_BSY_N BIT(5)
++#define FS_MPU_ERR BIT(8)
++#define FS_DEVICE_STS_ERR BIT(16)
++#define FS_DEVICE_WP BIT(23)
++
++/* NAND_BUFFER_STATUS bits */
++#define BS_UNCORRECTABLE_BIT BIT(8)
++#define BS_CORRECTABLE_ERR_MSK 0x1f
++
++/* NAND_DEVn_CFG0 bits */
++#define DISABLE_STATUS_AFTER_WRITE 4
++#define CW_PER_PAGE 6
++#define UD_SIZE_BYTES 9
++#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
++#define ECC_PARITY_SIZE_BYTES_RS 19
++#define SPARE_SIZE_BYTES 23
++#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
++#define NUM_ADDR_CYCLES 27
++#define STATUS_BFR_READ 30
++#define SET_RD_MODE_AFTER_STATUS 31
++
++/* NAND_DEVn_CFG0 bits */
++#define DEV0_CFG1_ECC_DISABLE 0
++#define WIDE_FLASH 1
++#define NAND_RECOVERY_CYCLES 2
++#define CS_ACTIVE_BSY 5
++#define BAD_BLOCK_BYTE_NUM 6
++#define BAD_BLOCK_IN_SPARE_AREA 16
++#define WR_RD_BSY_GAP 17
++#define ENABLE_BCH_ECC 27
++
++/* NAND_DEV0_ECC_CFG bits */
++#define ECC_CFG_ECC_DISABLE 0
++#define ECC_SW_RESET 1
++#define ECC_MODE 4
++#define ECC_PARITY_SIZE_BYTES_BCH 8
++#define ECC_NUM_DATA_BYTES 16
++#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
++#define ECC_FORCE_CLK_OPEN 30
++
++/* NAND_DEV_CMD1 bits */
++#define READ_ADDR 0
++
++/* NAND_DEV_CMD_VLD bits */
++#define READ_START_VLD BIT(0)
++#define READ_STOP_VLD BIT(1)
++#define WRITE_START_VLD BIT(2)
++#define ERASE_START_VLD BIT(3)
++#define SEQ_READ_START_VLD BIT(4)
++
++/* NAND_EBI2_ECC_BUF_CFG bits */
++#define NUM_STEPS 0
++
++/* NAND_ERASED_CW_DETECT_CFG bits */
++#define ERASED_CW_ECC_MASK 1
++#define AUTO_DETECT_RES 0
++#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
++#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
++#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
++#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
++#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
++
++/* NAND_ERASED_CW_DETECT_STATUS bits */
++#define PAGE_ALL_ERASED BIT(7)
++#define CODEWORD_ALL_ERASED BIT(6)
++#define PAGE_ERASED BIT(5)
++#define CODEWORD_ERASED BIT(4)
++#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
++#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
++
++/* NAND_READ_LOCATION_n bits */
++#define READ_LOCATION_OFFSET 0
++#define READ_LOCATION_SIZE 16
++#define READ_LOCATION_LAST 31
++
++/* Version Mask */
++#define NAND_VERSION_MAJOR_MASK 0xf0000000
++#define NAND_VERSION_MAJOR_SHIFT 28
++#define NAND_VERSION_MINOR_MASK 0x0fff0000
++#define NAND_VERSION_MINOR_SHIFT 16
++
++/* NAND OP_CMDs */
++#define OP_PAGE_READ 0x2
++#define OP_PAGE_READ_WITH_ECC 0x3
++#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
++#define OP_PAGE_READ_ONFI_READ 0x5
++#define OP_PROGRAM_PAGE 0x6
++#define OP_PAGE_PROGRAM_WITH_ECC 0x7
++#define OP_PROGRAM_PAGE_SPARE 0x9
++#define OP_BLOCK_ERASE 0xa
++#define OP_CHECK_STATUS 0xc
++#define OP_FETCH_ID 0xb
++#define OP_RESET_DEVICE 0xd
++
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
++ ERASE_START_VLD | SEQ_READ_START_VLD)
++
++/* NAND_CTRL bits */
++#define BAM_MODE_EN BIT(0)
++
++/*
++ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
++ * the driver calls the chunks 'step' or 'codeword' interchangeably
++ */
++#define NANDC_STEP_SIZE 512
++
++/*
++ * the largest page size we support is 8K, this will have 16 steps/codewords
++ * of 512 bytes each
++ */
++#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
++
++/* we read at most 3 registers per codeword scan */
++#define MAX_REG_RD (3 * MAX_NUM_STEPS)
++
++/* ECC modes supported by the controller */
++#define ECC_NONE BIT(0)
++#define ECC_RS_4BIT BIT(1)
++#define ECC_BCH_4BIT BIT(2)
++#define ECC_BCH_8BIT BIT(3)
++
++/*
++ * Returns the actual register address for all NAND_DEV_ registers
++ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
++ */
++#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++
++/* Returns the NAND register physical address */
++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
++
++/* Returns the dma address for reg read buffer */
++#define reg_buf_dma_addr(chip, vaddr) \
++ ((chip)->reg_read_dma + \
++ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
++
++#define QPIC_PER_CW_CMD_ELEMENTS 32
++#define QPIC_PER_CW_CMD_SGL 32
++#define QPIC_PER_CW_DATA_SGL 8
++
++#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
++
++/*
++ * Flags used in DMA descriptor preparation helper functions
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
++ */
++/* Don't set the EOT in current tx BAM sgl */
++#define NAND_BAM_NO_EOT BIT(0)
++/* Set the NWD flag in current BAM sgl */
++#define NAND_BAM_NWD BIT(1)
++/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
++#define NAND_BAM_NEXT_SGL BIT(2)
++/*
++ * Erased codeword status is being used two times in single transfer so this
++ * flag will determine the current value of erased codeword status register
++ */
++#define NAND_ERASED_CW_SET BIT(4)
++
++#define MAX_ADDRESS_CYCLE 5
++
++/*
++ * This data type corresponds to the BAM transaction which will be used for all
++ * NAND transfers.
++ * @bam_ce - the array of BAM command elements
++ * @cmd_sgl - sgl for NAND BAM command pipe
++ * @data_sgl - sgl for NAND BAM consumer/producer pipe
++ * @last_data_desc - last DMA desc in data channel (tx/rx).
++ * @last_cmd_desc - last DMA desc in command channel.
++ * @txn_done - completion for NAND transfer.
++ * @bam_ce_pos - the index in bam_ce which is available for next sgl
++ * @bam_ce_start - the index in bam_ce which marks the start position ce
++ * for current sgl. It will be used for size calculation
++ * for current sgl
++ * @cmd_sgl_pos - current index in command sgl.
++ * @cmd_sgl_start - start index in command sgl.
++ * @tx_sgl_pos - current index in data sgl for tx.
++ * @tx_sgl_start - start index in data sgl for tx.
++ * @rx_sgl_pos - current index in data sgl for rx.
++ * @rx_sgl_start - start index in data sgl for rx.
++ */
++struct bam_transaction {
++ struct bam_cmd_element *bam_ce;
++ struct scatterlist *cmd_sgl;
++ struct scatterlist *data_sgl;
++ struct dma_async_tx_descriptor *last_data_desc;
++ struct dma_async_tx_descriptor *last_cmd_desc;
++ struct completion txn_done;
++ u32 bam_ce_pos;
++ u32 bam_ce_start;
++ u32 cmd_sgl_pos;
++ u32 cmd_sgl_start;
++ u32 tx_sgl_pos;
++ u32 tx_sgl_start;
++ u32 rx_sgl_pos;
++ u32 rx_sgl_start;
++};
++
++/*
++ * This data type corresponds to the nand dma descriptor
++ * @dma_desc - low level DMA engine descriptor
++ * @list - list for desc_info
++ *
++ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
++ * ADM
++ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
++ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
++ * @dir - DMA transfer direction
++ */
++struct desc_info {
++ struct dma_async_tx_descriptor *dma_desc;
++ struct list_head node;
++
++ union {
++ struct scatterlist adm_sgl;
++ struct {
++ struct scatterlist *bam_sgl;
++ int sgl_cnt;
++ };
++ };
++ enum dma_data_direction dir;
++};
++
++/*
++ * holds the current register values that we want to write. acts as a contiguous
++ * chunk of memory which we use to write the controller registers through DMA.
++ */
++struct nandc_regs {
++ __le32 cmd;
++ __le32 addr0;
++ __le32 addr1;
++ __le32 chip_sel;
++ __le32 exec;
++
++ __le32 cfg0;
++ __le32 cfg1;
++ __le32 ecc_bch_cfg;
++
++ __le32 clrflashstatus;
++ __le32 clrreadstatus;
++
++ __le32 cmd1;
++ __le32 vld;
++
++ __le32 orig_cmd1;
++ __le32 orig_vld;
++
++ __le32 ecc_buf_cfg;
++ __le32 read_location0;
++ __le32 read_location1;
++ __le32 read_location2;
++ __le32 read_location3;
++ __le32 read_location_last0;
++ __le32 read_location_last1;
++ __le32 read_location_last2;
++ __le32 read_location_last3;
++
++ __le32 erased_cw_detect_cfg_clr;
++ __le32 erased_cw_detect_cfg_set;
++};
++
++/*
++ * NAND controller data struct
++ *
++ * @dev: parent device
++ *
++ * @base: MMIO base
++ *
++ * @core_clk: controller clock
++ * @aon_clk: another controller clock
++ *
++ * @regs: a contiguous chunk of memory for DMA register
++ * writes. contains the register values to be
++ * written to controller
++ *
++ * @props: properties of current NAND controller,
++ * initialized via DT match data
++ *
++ * @controller: base controller structure
++ * @host_list: list containing all the chips attached to the
++ * controller
++ *
++ * @chan: dma channel
++ * @cmd_crci: ADM DMA CRCI for command flow control
++ * @data_crci: ADM DMA CRCI for data flow control
++ *
++ * @desc_list: DMA descriptor list (list of desc_infos)
++ *
++ * @data_buffer: our local DMA buffer for page read/writes,
++ * used when we can't use the buffer provided
++ * by upper layers directly
++ * @reg_read_buf: local buffer for reading back registers via DMA
++ *
++ * @base_phys: physical base address of controller registers
++ * @base_dma: dma base address of controller registers
++ * @reg_read_dma: contains dma address for register read buffer
++ *
++ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
++ * functions
++ * @max_cwperpage: maximum QPIC codewords required. calculated
++ * from all connected NAND devices pagesize
++ *
++ * @reg_read_pos: marker for data read in reg_read_buf
++ *
++ * @cmd1/vld: some fixed controller register values
++ *
++ * @exec_opwrite: flag to select correct number of code word
++ * while reading status
++ */
++struct qcom_nand_controller {
++ struct device *dev;
++
++ void __iomem *base;
++
++ struct clk *core_clk;
++ struct clk *aon_clk;
++
++ struct nandc_regs *regs;
++ struct bam_transaction *bam_txn;
++
++ const struct qcom_nandc_props *props;
++
++ struct nand_controller *controller;
++ struct list_head host_list;
++
++ union {
++ /* will be used only by QPIC for BAM DMA */
++ struct {
++ struct dma_chan *tx_chan;
++ struct dma_chan *rx_chan;
++ struct dma_chan *cmd_chan;
++ };
++
++ /* will be used only by EBI2 for ADM DMA */
++ struct {
++ struct dma_chan *chan;
++ unsigned int cmd_crci;
++ unsigned int data_crci;
++ };
++ };
++
++ struct list_head desc_list;
++
++ u8 *data_buffer;
++ __le32 *reg_read_buf;
++
++ phys_addr_t base_phys;
++ dma_addr_t base_dma;
++ dma_addr_t reg_read_dma;
++
++ int buf_size;
++ int buf_count;
++ int buf_start;
++ unsigned int max_cwperpage;
++
++ int reg_read_pos;
++
++ u32 cmd1, vld;
++ bool exec_opwrite;
++};
++
++/*
++ * This data type corresponds to the NAND controller properties which varies
++ * among different NAND controllers.
++ * @ecc_modes - ecc mode for NAND
++ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
++ * @supports_bam - whether NAND controller is using BAM
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
++ * @use_codeword_fixup - whether NAND has different layout for boot partitions
++ */
++struct qcom_nandc_props {
++ u32 ecc_modes;
++ u32 dev_cmd_reg_start;
++ bool supports_bam;
++ bool nandc_part_of_qpic;
++ bool qpic_version2;
++ bool use_codeword_fixup;
++};
++
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
++struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_qpic_bam_dma_done(void *data);
++void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++ struct dma_chan *chan, unsigned long flags);
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++ int reg_off, const void *vaddr, int size, unsigned int flags);
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++ const void *vaddr, int size, unsigned int flags);
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
++ const void *vaddr, int size, bool flow_control);
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
++ unsigned int flags);
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
++ int num_regs, unsigned int flags);
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++ int size, unsigned int flags);
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++ int size, unsigned int flags);
++int qcom_submit_descs(struct qcom_nand_controller *nandc);
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
++#endif
++
+--
+2.47.1
+
--- /dev/null
+From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Wed, 20 Nov 2024 14:45:03 +0530
+Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
+
+Use the bitfield macro FIELD_PREP, and GENMASK to
+do the shift and mask in one go. This makes the code
+more readable.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 97 ++++++++++++++--------------
+ include/linux/mtd/nand-qpic-common.h | 31 +++++----
+ 2 files changed, 67 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index dcb62fd19dd7..d2d2aeee42a7 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
+ (num_cw - 1) << CW_PER_PAGE);
+
+ cfg1 = cpu_to_le32(host->cfg1_raw);
+- ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+ }
+
+ nandc->regs->cmd = cmd;
+@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
+ host->cw_size = host->cw_data + ecc->bytes;
+ bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+
+- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+- | host->cw_data << UD_SIZE_BYTES
+- | 0 << DISABLE_STATUS_AFTER_WRITE
+- | 5 << NUM_ADDR_CYCLES
+- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+- | 0 << STATUS_BFR_READ
+- | 1 << SET_RD_MODE_AFTER_STATUS
+- | host->spare_bytes << SPARE_SIZE_BYTES;
+-
+- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+- | 0 << CS_ACTIVE_BSY
+- | bad_block_byte << BAD_BLOCK_BYTE_NUM
+- | 0 << BAD_BLOCK_IN_SPARE_AREA
+- | 2 << WR_RD_BSY_GAP
+- | wide_bus << WIDE_FLASH
+- | host->bch_enabled << ENABLE_BCH_ECC;
+-
+- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+- | host->cw_size << UD_SIZE_BYTES
+- | 5 << NUM_ADDR_CYCLES
+- | 0 << SPARE_SIZE_BYTES;
+-
+- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+- | 0 << CS_ACTIVE_BSY
+- | 17 << BAD_BLOCK_BYTE_NUM
+- | 1 << BAD_BLOCK_IN_SPARE_AREA
+- | 2 << WR_RD_BSY_GAP
+- | wide_bus << WIDE_FLASH
+- | 1 << DEV0_CFG1_ECC_DISABLE;
+-
+- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+- | 0 << ECC_SW_RESET
+- | host->cw_data << ECC_NUM_DATA_BYTES
+- | 1 << ECC_FORCE_CLK_OPEN
+- | ecc_mode << ECC_MODE
+- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
++ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
++ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
++ FIELD_PREP(STATUS_BFR_READ, 0) |
++ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
++
++ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++ FIELD_PREP(WIDE_FLASH, wide_bus) |
++ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
++
++ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
++ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++ FIELD_PREP(CS_ACTIVE_BSY, 0) |
++ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++ FIELD_PREP(WIDE_FLASH, wide_bus) |
++ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
++ FIELD_PREP(ECC_SW_RESET, 0) |
++ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
++ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
++ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+
+ if (!nandc->props->qpic_version2)
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
+ nandc->regs->addr0 = 0;
+ nandc->regs->addr1 = 0;
+
+- nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
+- 512 << UD_SIZE_BYTES |
+- 5 << NUM_ADDR_CYCLES |
+- 0 << SPARE_SIZE_BYTES);
++	nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++			    FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+- nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
+- 0 << CS_ACTIVE_BSY |
+- 17 << BAD_BLOCK_BYTE_NUM |
+- 1 << BAD_BLOCK_IN_SPARE_AREA |
+- 2 << WR_RD_BSY_GAP |
+- 0 << WIDE_FLASH |
+- 1 << DEV0_CFG1_ECC_DISABLE);
++	nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
++			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++			    FIELD_PREP(WIDE_FLASH, 0) |
++			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
+
+ if (!nandc->props->qpic_version2)
+- nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_version2) {
+diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
+index 425994429387..e79c79775eb8 100644
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -70,35 +70,42 @@
+ #define BS_CORRECTABLE_ERR_MSK 0x1f
+
+ /* NAND_DEVn_CFG0 bits */
+-#define DISABLE_STATUS_AFTER_WRITE 4
++#define DISABLE_STATUS_AFTER_WRITE BIT(4)
+ #define CW_PER_PAGE 6
++#define CW_PER_PAGE_MASK GENMASK(8, 6)
+ #define UD_SIZE_BYTES 9
+ #define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+-#define ECC_PARITY_SIZE_BYTES_RS 19
++#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
+ #define SPARE_SIZE_BYTES 23
+ #define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+ #define NUM_ADDR_CYCLES 27
+-#define STATUS_BFR_READ 30
+-#define SET_RD_MODE_AFTER_STATUS 31
++#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
++#define STATUS_BFR_READ BIT(30)
++#define SET_RD_MODE_AFTER_STATUS BIT(31)
+
+ /* NAND_DEVn_CFG0 bits */
+-#define DEV0_CFG1_ECC_DISABLE 0
+-#define WIDE_FLASH 1
++#define DEV0_CFG1_ECC_DISABLE BIT(0)
++#define WIDE_FLASH BIT(1)
+ #define NAND_RECOVERY_CYCLES 2
+-#define CS_ACTIVE_BSY 5
++#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
++#define CS_ACTIVE_BSY BIT(5)
+ #define BAD_BLOCK_BYTE_NUM 6
+-#define BAD_BLOCK_IN_SPARE_AREA 16
++#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
++#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
+ #define WR_RD_BSY_GAP 17
+-#define ENABLE_BCH_ECC 27
++#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
++#define ENABLE_BCH_ECC BIT(27)
+
+ /* NAND_DEV0_ECC_CFG bits */
+-#define ECC_CFG_ECC_DISABLE 0
+-#define ECC_SW_RESET 1
++#define ECC_CFG_ECC_DISABLE BIT(0)
++#define ECC_SW_RESET BIT(1)
+ #define ECC_MODE 4
++#define ECC_MODE_MASK GENMASK(5, 4)
+ #define ECC_PARITY_SIZE_BYTES_BCH 8
++#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
+ #define ECC_NUM_DATA_BYTES 16
+ #define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+-#define ECC_FORCE_CLK_OPEN 30
++#define ECC_FORCE_CLK_OPEN BIT(30)
+
+ /* NAND_DEV_CMD1 bits */
+ #define READ_ADDR 0
+--
+2.47.1
+
--- /dev/null
+From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <quic_mdalam@quicinc.com>
+Date: Mon, 6 Jan 2025 18:45:58 +0530
+Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a FORTIFY_SOURCE false-positive in qcom_clear_bam_transaction: the
+memset deliberately spans eight adjacent position fields, which the
+
+On x86 architecture, the following error occurs due to warnings being
+treated as errors:
+
+In function ‘fortify_memset_chk’,
+ inlined from ‘qcom_clear_bam_transaction’ at
+drivers/mtd/nand/qpic_common.c:88:2:
+./include/linux/fortify-string.h:480:25: error: call to ‘__write_overflow_field’
+declared with attribute warning: detected write beyond size of field
+(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
+ 480 | __write_overflow_field(p_size_field, size);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ LD [M] drivers/mtd/nand/nandcore.o
+ CC [M] drivers/w1/masters/mxc_w1.o
+cc1: all warnings being treated as errors
+
+This patch addresses the issue by grouping the related fields in
+struct bam_transaction using struct_group and updating the memset call
+accordingly.
+
+Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
+Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+---
+ drivers/mtd/nand/qpic_common.c | 2 +-
+ include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
+ 2 files changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
+index 8abbb960a7ce..e0ed25b5afea 100644
+--- a/drivers/mtd/nand/qpic_common.c
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+ if (!nandc->props->supports_bam)
+ return;
+
+- memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ bam_txn->last_data_desc = NULL;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
+index e79c79775eb8..4d9b736ff8b7 100644
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -254,14 +254,17 @@ struct bam_transaction {
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+- u32 bam_ce_pos;
+- u32 bam_ce_start;
+- u32 cmd_sgl_pos;
+- u32 cmd_sgl_start;
+- u32 tx_sgl_pos;
+- u32 tx_sgl_start;
+- u32 rx_sgl_pos;
+- u32 rx_sgl_start;
++ struct_group(bam_positions,
++ u32 bam_ce_pos;
++ u32 bam_ce_start;
++ u32 cmd_sgl_pos;
++ u32 cmd_sgl_start;
++ u32 tx_sgl_pos;
++ u32 tx_sgl_start;
++ u32 rx_sgl_pos;
++ u32 rx_sgl_start;
++
++ );
+ };
+
+ /*
+--
+2.47.1
+
--- /dev/null
+From f81715a4c87c3b75ca2640bb61b6c66506061a64 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Fri, 3 Jan 2025 15:31:35 +0800
+Subject: [PATCH] clk: qcom: Add CMN PLL clock controller driver for IPQ SoC
+
+The CMN PLL clock controller supplies clocks to the hardware
+blocks that together make up the Ethernet function on Qualcomm
+IPQ SoCs and to GCC. The driver is initially supported for
+IPQ9574 SoC.
+
+The CMN PLL clock controller expects a reference input clock
+from the on-board Wi-Fi block acting as clock source. The input
+reference clock needs to be configured to one of the supported
+clock rates.
+
+The controller supplies a number of fixed-rate output clocks.
+For the IPQ9574, there is one output clock of 353 MHZ to PPE
+(Packet Process Engine) hardware block, three 50 MHZ output
+clocks and an additional 25 MHZ output clock supplied to the
+connected Ethernet devices. The PLL also supplies a 24 MHZ
+clock as XO and a 32 KHZ sleep clock to GCC, and one 31.25
+MHZ clock to PCS.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Acked-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-2-c89fb4d4849d@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ drivers/clk/qcom/Kconfig | 9 +
+ drivers/clk/qcom/Makefile | 1 +
+ drivers/clk/qcom/ipq-cmn-pll.c | 435 +++++++++++++++++++++++++++++++++
+ 3 files changed, 445 insertions(+)
+ create mode 100644 drivers/clk/qcom/ipq-cmn-pll.c
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 42c257e4c433..2daff198aeb3 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -199,6 +199,15 @@ config IPQ_APSS_6018
+ Say Y if you want to support CPU frequency scaling on
+ ipq based devices.
+
++config IPQ_CMN_PLL
++ tristate "IPQ CMN PLL Clock Controller"
++ help
++ Support for CMN PLL clock controller on IPQ platform. The
++ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
++ the output clocks to the networking hardware and GCC blocks.
++ Say Y or M if you want to support CMN PLL clock on the IPQ
++ based devices.
++
+ config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ help
+diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
+index 1b749da9c13a..6665049cb8c8 100644
+--- a/drivers/clk/qcom/Makefile
++++ b/drivers/clk/qcom/Makefile
+@@ -30,6 +30,7 @@ obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
+ obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
+ obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+ obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
++obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
+ obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
+ obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
+ obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
+diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
+new file mode 100644
+index 000000000000..432d4c4b7aa6
+--- /dev/null
++++ b/drivers/clk/qcom/ipq-cmn-pll.c
+@@ -0,0 +1,435 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++/*
++ * CMN PLL block expects the reference clock from on-board Wi-Fi block,
++ * and supplies fixed rate clocks as output to the networking hardware
++ * blocks and to GCC. The networking related blocks include PPE (packet
++ * process engine), the externally connected PHY or switch devices, and
++ * the PCS.
++ *
++ * On the IPQ9574 SoC, there are three clocks with 50 MHZ and one clock
++ * with 25 MHZ which are output from the CMN PLL to Ethernet PHY (or switch),
++ * and one clock with 353 MHZ to PPE. The other fixed rate output clocks
++ * are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
++ * with 31.25 MHZ.
++ *
++ * +---------+
++ * | GCC |
++ * +--+---+--+
++ * AHB CLK| |SYS CLK
++ * V V
++ * +-------+---+------+
++ * | +-------------> eth0-50mhz
++ * REF CLK | IPQ9574 |
++ * -------->+ +-------------> eth1-50mhz
++ * | CMN PLL block |
++ * | +-------------> eth2-50mhz
++ * | |
++ * +----+----+----+---+-------------> eth-25mhz
++ * | | |
++ * V V V
++ * GCC PCS NSS/PPE
++ */
++
++#include <linux/bitfield.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/pm_clock.h>
++#include <linux/pm_runtime.h>
++#include <linux/regmap.h>
++
++#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
++
++#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
++#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
++
++#define CMN_PLL_LOCKED 0x64
++#define CMN_PLL_CLKS_LOCKED BIT(8)
++
++#define CMN_PLL_POWER_ON_AND_RESET 0x780
++#define CMN_ANA_EN_SW_RSTN BIT(6)
++
++#define CMN_PLL_REFCLK_CONFIG 0x784
++#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
++#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
++#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
++
++#define CMN_PLL_CTRL 0x78c
++#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
++
++#define CMN_PLL_DIVIDER_CTRL 0x794
++#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
++
++/**
++ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
++ * @id: Clock specifier to be supplied
++ * @name: Clock name to be registered
++ * @rate: Clock rate
++ */
++struct cmn_pll_fixed_output_clk {
++ unsigned int id;
++ const char *name;
++ unsigned long rate;
++};
++
++/**
++ * struct clk_cmn_pll - CMN PLL hardware specific data
++ * @regmap: hardware regmap.
++ * @hw: handle between common and hardware-specific interfaces
++ */
++struct clk_cmn_pll {
++ struct regmap *regmap;
++ struct clk_hw hw;
++};
++
++#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
++ .id = _id, \
++ .name = _name, \
++ .rate = _rate, \
++}
++
++#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
++
++static const struct regmap_config ipq_cmn_pll_regmap_config = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++ .max_register = 0x7fc,
++ .fast_io = true,
++};
++
++static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
++ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
++ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
++ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
++ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
++ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
++ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
++ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
++ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
++ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
++};
++
++/*
++ * CMN PLL has the single parent clock, which supports the several
++ * possible parent clock rates, each parent clock rate is reflected
++ * by the specific reference index value in the hardware.
++ */
++static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
++{
++ int index = -EINVAL;
++
++ switch (parent_rate) {
++ case 25000000:
++ index = 3;
++ break;
++ case 31250000:
++ index = 4;
++ break;
++ case 40000000:
++ index = 6;
++ break;
++ case 48000000:
++ case 96000000:
++ /*
++ * Parent clock rate 48 MHZ and 96 MHZ take the same value
++ * of reference clock index. 96 MHZ needs the source clock
++ * divider to be programmed as 2.
++ */
++ index = 7;
++ break;
++ case 50000000:
++ index = 8;
++ break;
++ default:
++ break;
++ }
++
++ return index;
++}
++
++static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
++ u32 val, factor;
++
++ /*
++ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
++ * by HW according to the parent clock rate.
++ */
++ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
++ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
++
++ return parent_rate * 2 * factor;
++}
++
++static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
++ struct clk_rate_request *req)
++{
++ int ret;
++
++ /* Validate the rate of the single parent clock. */
++ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
++
++ return ret < 0 ? ret : 0;
++}
++
++/*
++ * This function is used to initialize the CMN PLL to enable the fixed
++ * rate output clocks. It is expected to be configured once.
++ */
++static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_rate)
++{
++ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
++ int ret, index;
++ u32 val;
++
++ /*
++ * Configure the reference input clock selection as per the given
++ * parent clock. The output clock rates are always of fixed value.
++ */
++ index = ipq_cmn_pll_find_freq_index(parent_rate);
++ if (index < 0)
++ return index;
++
++ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
++ CMN_PLL_REFCLK_INDEX,
++ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
++ if (ret)
++ return ret;
++
++ /*
++ * Update the source clock rate selection and source clock
++ * divider as 2 when the parent clock rate is 96 MHZ.
++ */
++ if (parent_rate == 96000000) {
++ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
++ CMN_PLL_REFCLK_DIV,
++ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
++ if (ret)
++ return ret;
++
++ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
++ CMN_PLL_REFCLK_SRC_DIV,
++ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
++ if (ret)
++ return ret;
++ }
++
++ /* Enable PLL locked detect. */
++ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
++ CMN_PLL_CTRL_LOCK_DETECT_EN);
++ if (ret)
++ return ret;
++
++ /*
++ * Reset the CMN PLL block to ensure the updated configurations
++ * take effect.
++ */
++ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
++ CMN_ANA_EN_SW_RSTN);
++ if (ret)
++ return ret;
++
++ usleep_range(1000, 1200);
++ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
++ CMN_ANA_EN_SW_RSTN);
++ if (ret)
++ return ret;
++
++ /* Stability check of CMN PLL output clocks. */
++ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
++ (val & CMN_PLL_CLKS_LOCKED),
++ 100, 100 * USEC_PER_MSEC);
++}
++
++static const struct clk_ops clk_cmn_pll_ops = {
++ .recalc_rate = clk_cmn_pll_recalc_rate,
++ .determine_rate = clk_cmn_pll_determine_rate,
++ .set_rate = clk_cmn_pll_set_rate,
++};
++
++static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
++{
++ struct clk_parent_data pdata = { .index = 0 };
++ struct device *dev = &pdev->dev;
++ struct clk_init_data init = {};
++ struct clk_cmn_pll *cmn_pll;
++ struct regmap *regmap;
++ void __iomem *base;
++ int ret;
++
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return ERR_CAST(base);
++
++ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
++ if (IS_ERR(regmap))
++ return ERR_CAST(regmap);
++
++ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
++ if (!cmn_pll)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = "cmn_pll";
++ init.parent_data = &pdata;
++ init.num_parents = 1;
++ init.ops = &clk_cmn_pll_ops;
++
++ cmn_pll->hw.init = &init;
++ cmn_pll->regmap = regmap;
++
++ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
++ if (ret)
++ return ERR_PTR(ret);
++
++ return &cmn_pll->hw;
++}
++
++static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
++{
++ const struct cmn_pll_fixed_output_clk *fixed_clk;
++ struct clk_hw_onecell_data *hw_data;
++ struct device *dev = &pdev->dev;
++ struct clk_hw *cmn_pll_hw;
++ unsigned int num_clks;
++ struct clk_hw *hw;
++ int ret, i;
++
++ fixed_clk = ipq9574_output_clks;
++ num_clks = ARRAY_SIZE(ipq9574_output_clks);
++
++ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
++ GFP_KERNEL);
++ if (!hw_data)
++ return -ENOMEM;
++
++ /*
++ * Register the CMN PLL clock, which is the parent clock of
++ * the fixed rate output clocks.
++ */
++ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
++ if (IS_ERR(cmn_pll_hw))
++ return PTR_ERR(cmn_pll_hw);
++
++ /* Register the fixed rate output clocks. */
++ for (i = 0; i < num_clks; i++) {
++ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
++ cmn_pll_hw, 0,
++ fixed_clk[i].rate);
++ if (IS_ERR(hw)) {
++ ret = PTR_ERR(hw);
++ goto unregister_fixed_clk;
++ }
++
++ hw_data->hws[fixed_clk[i].id] = hw;
++ }
++
++ /*
++ * Provide the CMN PLL clock. The clock rate of CMN PLL
++ * is configured to 12 GHZ by DT property assigned-clock-rates-u64.
++ */
++ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
++ hw_data->num = num_clks + 1;
++
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
++ if (ret)
++ goto unregister_fixed_clk;
++
++ platform_set_drvdata(pdev, hw_data);
++
++ return 0;
++
++unregister_fixed_clk:
++ while (i > 0)
++ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
++
++ return ret;
++}
++
++static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ int ret;
++
++ ret = devm_pm_runtime_enable(dev);
++ if (ret)
++ return ret;
++
++ ret = devm_pm_clk_create(dev);
++ if (ret)
++ return ret;
++
++ /*
++ * To access the CMN PLL registers, the GCC AHB & SYS clocks
++ * of CMN PLL block need to be enabled.
++ */
++ ret = pm_clk_add(dev, "ahb");
++ if (ret)
++ return dev_err_probe(dev, ret, "Fail to add AHB clock\n");
++
++ ret = pm_clk_add(dev, "sys");
++ if (ret)
++ return dev_err_probe(dev, ret, "Fail to add SYS clock\n");
++
++ ret = pm_runtime_resume_and_get(dev);
++ if (ret)
++ return ret;
++
++ /* Register CMN PLL clock and fixed rate output clocks. */
++ ret = ipq_cmn_pll_register_clks(pdev);
++ pm_runtime_put(dev);
++ if (ret)
++ return dev_err_probe(dev, ret,
++ "Fail to register CMN PLL clocks\n");
++
++ return 0;
++}
++
++static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
++{
++ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
++ int i;
++
++ /*
++ * The clock with index CMN_PLL_CLK is unregistered by
++ * device management.
++ */
++ for (i = 0; i < hw_data->num; i++) {
++ if (i != CMN_PLL_CLK)
++ clk_hw_unregister(hw_data->hws[i]);
++ }
++}
++
++static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
++ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
++};
++
++static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
++ { .compatible = "qcom,ipq9574-cmn-pll", },
++ { }
++};
++MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
++
++static struct platform_driver ipq_cmn_pll_clk_driver = {
++ .probe = ipq_cmn_pll_clk_probe,
++ .remove_new = ipq_cmn_pll_clk_remove,
++ .driver = {
++ .name = "ipq_cmn_pll",
++ .of_match_table = ipq_cmn_pll_clk_ids,
++ .pm = &ipq_cmn_pll_pm_ops,
++ },
++};
++module_platform_driver(ipq_cmn_pll_clk_driver);
++
++MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
++MODULE_LICENSE("GPL");
+--
+2.47.1
+
--- /dev/null
+From c0f1cbf795095c21b92a46fa1dc47a7b787ce538 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Fri, 3 Jan 2025 15:31:34 +0800
+Subject: [PATCH 1/3] dt-bindings: clock: qcom: Add CMN PLL clock controller
+ for IPQ SoC
+
+The CMN PLL controller provides clocks to networking hardware blocks
+and to GCC on Qualcomm IPQ9574 SoC. It receives input clock from the
+on-chip Wi-Fi, and produces output clocks at fixed rates. These output
+rates are predetermined, and are unrelated to the input clock rate.
+The primary purpose of CMN PLL is to supply clocks to the networking
+hardware such as PPE (packet process engine), PCS and the externally
+connected switch or PHY device. The CMN PLL block also outputs fixed
+rate clocks to GCC, such as 24 MHZ as XO clock and 32 KHZ as sleep
+clock supplied to GCC.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-1-c89fb4d4849d@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ .../bindings/clock/qcom,ipq9574-cmn-pll.yaml | 77 +++++++++++++++++++
+ include/dt-bindings/clock/qcom,ipq-cmn-pll.h | 22 ++++++
+ 2 files changed, 99 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+ create mode 100644 include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+
+diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+new file mode 100644
+index 000000000000..f869b3739be8
+--- /dev/null
++++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
+@@ -0,0 +1,77 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/clock/qcom,ipq9574-cmn-pll.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Qualcomm CMN PLL Clock Controller on IPQ SoC
++
++maintainers:
++ - Bjorn Andersson <andersson@kernel.org>
++ - Luo Jie <quic_luoj@quicinc.com>
++
++description:
++ The CMN (or common) PLL clock controller expects a reference
++ input clock. This reference clock is from the on-board Wi-Fi.
++ The CMN PLL supplies a number of fixed rate output clocks to
++ the devices providing networking functions and to GCC. These
++ networking hardware include PPE (packet process engine), PCS
++ and the externally connected switch or PHY devices. The CMN
++ PLL block also outputs fixed rate clocks to GCC. The PLL's
++ primary function is to enable fixed rate output clocks for
++ networking hardware functions used with the IPQ SoC.
++
++properties:
++ compatible:
++ enum:
++ - qcom,ipq9574-cmn-pll
++
++ reg:
++ maxItems: 1
++
++ clocks:
++ items:
++ - description: The reference clock. The supported clock rates include
++ 25000000, 31250000, 40000000, 48000000, 50000000 and 96000000 HZ.
++ - description: The AHB clock
++ - description: The SYS clock
++ description:
++ The reference clock is the source clock of CMN PLL, which is from the
++ Wi-Fi. The AHB and SYS clocks must be enabled to access CMN PLL
++ clock registers.
++
++ clock-names:
++ items:
++ - const: ref
++ - const: ahb
++ - const: sys
++
++ "#clock-cells":
++ const: 1
++
++required:
++ - compatible
++ - reg
++ - clocks
++ - clock-names
++ - "#clock-cells"
++
++additionalProperties: false
++
++examples:
++ - |
++ #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
++ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
++
++ cmn_pll: clock-controller@9b000 {
++ compatible = "qcom,ipq9574-cmn-pll";
++ reg = <0x0009b000 0x800>;
++ clocks = <&cmn_pll_ref_clk>,
++ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
++ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
++ clock-names = "ref", "ahb", "sys";
++ #clock-cells = <1>;
++ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
++ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
++ };
++...
+diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+new file mode 100644
+index 000000000000..936e92b3b62c
+--- /dev/null
++++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
++ */
++
++#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
++#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
++
++/* CMN PLL core clock. */
++#define CMN_PLL_CLK 0
++
++/* The output clocks from CMN PLL of IPQ9574. */
++#define XO_24MHZ_CLK 1
++#define SLEEP_32KHZ_CLK 2
++#define PCS_31P25MHZ_CLK 3
++#define NSS_1200MHZ_CLK 4
++#define PPE_353MHZ_CLK 5
++#define ETH0_50MHZ_CLK 6
++#define ETH1_50MHZ_CLK 7
++#define ETH2_50MHZ_CLK 8
++#define ETH_25MHZ_CLK 9
++#endif
+--
+2.47.1
+
--- /dev/null
+From 758aa2d7e3c0acfe9c952a1cbe6416ec6130c2a1 Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Fri, 3 Jan 2025 15:31:37 +0800
+Subject: [PATCH 2/3] arm64: dts: qcom: ipq9574: Add CMN PLL node
+
+The CMN PLL clock controller allows selection of an input clock rate
+from a defined set of input clock rates. It in-turn supplies fixed
+rate output clocks to the hardware blocks that provide the ethernet
+functions such as PPE (Packet Process Engine) and connected switch or
+PHY, and to GCC.
+
+The reference clock of CMN PLL is routed from XO to the CMN PLL through
+the internal WiFi block.
+XO (48 MHZ or 96 MHZ) --> WiFi (multiplier/divider) --> 48 MHZ to CMN PLL.
+
+The reference input clock from WiFi to CMN PLL is fully controlled by
+the bootstrap pins which select the XO frequency (48 MHZ or 96 MHZ).
+Based on this frequency, the divider in the internal Wi-Fi block is
+automatically configured by hardware (1 for 48 MHZ, 2 for 96 MHZ), to
+ensure output clock to CMN PLL is 48 MHZ.
+
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-4-c89fb4d4849d@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 17 +++++++++++-
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 26 ++++++++++++++++++-
+ 2 files changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+index 91e104b0f865..bb1ff79360d3 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -3,7 +3,7 @@
+ * IPQ9574 RDP board common device tree source
+ *
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ /dts-v1/;
+@@ -164,6 +164,21 @@ &usb3 {
+ status = "okay";
+ };
+
++/*
++ * The bootstrap pins for the board select the XO clock frequency
++ * (48 MHZ or 96 MHZ used for different RDP type board). This setting
++ * automatically enables the right dividers, to ensure the reference
++ * clock output from WiFi to the CMN PLL is 48 MHZ.
++ */
++&ref_48mhz_clk {
++ clock-div = <1>;
++ clock-mult = <1>;
++};
++
+ &xo_board_clk {
+ clock-frequency = <24000000>;
+ };
++
++&xo_clk {
++ clock-frequency = <48000000>;
++};
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index 00ee3290c181..c543c3492e93 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -3,10 +3,11 @@
+ * IPQ9574 SoC device tree source
+ *
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <dt-bindings/clock/qcom,apss-ipq.h>
++#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+ #include <dt-bindings/interconnect/qcom,ipq9574.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+@@ -19,6 +20,12 @@ / {
+ #size-cells = <2>;
+
+ clocks {
++ ref_48mhz_clk: ref-48mhz-clk {
++ compatible = "fixed-factor-clock";
++ clocks = <&xo_clk>;
++ #clock-cells = <0>;
++ };
++
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+@@ -28,6 +35,11 @@ xo_board_clk: xo-board-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
++
++ xo_clk: xo-clk {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ };
+ };
+
+ cpus {
+@@ -335,6 +347,18 @@ pcie1_phy: phy@fc000 {
+ status = "disabled";
+ };
+
++ cmn_pll: clock-controller@9b000 {
++ compatible = "qcom,ipq9574-cmn-pll";
++ reg = <0x0009b000 0x800>;
++ clocks = <&ref_48mhz_clk>,
++ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
++ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
++ clock-names = "ref", "ahb", "sys";
++ #clock-cells = <1>;
++ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
++ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
++ };
++
+ qfprom: efuse@a4000 {
+ compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
+ reg = <0x000a4000 0x5a1>;
+--
+2.47.1
+
--- /dev/null
+From 050b312654523aac9495eae3cf7bfa868fd981ce Mon Sep 17 00:00:00 2001
+From: Luo Jie <quic_luoj@quicinc.com>
+Date: Fri, 3 Jan 2025 15:31:38 +0800
+Subject: [PATCH 3/3] arm64: dts: qcom: ipq9574: Update xo_board_clk to use
+ fixed factor clock
+
+xo_board_clk is fixed to 24 MHZ, which is derived from the 48 MHZ WiFi
+output clock (also the reference clock of the CMN PLL) divided by 2
+through the analog block routing channel.
+
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
+Link: https://lore.kernel.org/r/20250103-qcom_ipq_cmnpll-v8-5-c89fb4d4849d@quicinc.com
+Signed-off-by: Bjorn Andersson <andersson@kernel.org>
+---
+ arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi | 7 ++++++-
+ arch/arm64/boot/dts/qcom/ipq9574.dtsi | 3 ++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+index bb1ff79360d3..ae12f069f26f 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
+@@ -175,8 +175,13 @@ &ref_48mhz_clk {
+ clock-mult = <1>;
+ };
+
++/*
++ * The frequency of xo_board_clk is fixed to 24 MHZ, which is routed
++ * from WiFi output clock 48 MHZ divided by 2.
++ */
+ &xo_board_clk {
+- clock-frequency = <24000000>;
++ clock-div = <2>;
++ clock-mult = <1>;
+ };
+
+ &xo_clk {
+diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+index c543c3492e93..3e93484e7e32 100644
+--- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
+@@ -32,7 +32,8 @@ sleep_clk: sleep-clk {
+ };
+
+ xo_board_clk: xo-board-clk {
+- compatible = "fixed-clock";
++ compatible = "fixed-factor-clock";
++ clocks = <&ref_48mhz_clk>;
+ #clock-cells = <0>;
+ };
+
+--
+2.47.1
+
+++ /dev/null
-From 1142a905d3450a40ac03cbe1426b16cd9650c5f7 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Thu, 4 Apr 2024 16:19:43 +0530
-Subject: [PATCH v10 2/8] mtd: rawnand: qcom: cleanup qcom_nandc driver
-
-cleanup qcom_nandc driver as below
-
-- Remove register value indirection api
-
-- Remove set_reg() api
-
-- Convert read_loc_first & read_loc_last macro to function
-
-- Renamed multiple variables
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* Changed type of cmd1, vld to u32 from __le32 in qcom_nand_controller
- structure
-* Changed type of cfg0, cfg1, cfg0_raw, cfg1_raw, clrflashstatus,
- ecc_buf_cfg, ecc_bch_cfg, clrreadstatus to u32 in qcom_nand_host
- structure
-* In nandc_set_read_loc_first() api added cpu_to_le32() macro to fix
- compilation warning reported by kernel test bot
-* In nandc_set_read_loc_last() api added cpu_to_le32() macro to fix
- compilation warning reported by kernel test bot
-* Changed data type of cw_offset, read_size, is_last_read_loc to
- u32 in nandc_set_read_loc() api to fix compilation warning reported
- by kernel test bot
-* In set_address() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In update_rw_regs() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In qcom_op_cmd_mapping() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In qcom_read_status_exec() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In qcom_read_id_type_exec() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In qcom_misc_cmd_type_exec() api added cpu_to_le32() macro to fix compilation
- warning reported by kernel test bot
-* In qcom_param_page_type_exec() api added cpu_to_le32() macro to fix
- compilation warning reported by kernel test bot
-
-Change in [v8]
-
-* Fixed compilation warning reported by kernel test robot
-* Added "chip" description in nandc_set_read_loc_first()
-* Added "chip" description in nandc_set_read_loc_last()
-* Changed data type of read_location0, read_location1,
- read_location2, read_location3, read_location_last0,
- read_location_last1, read_location_last2, read_location_last3,
- addr0, addr1, cmd, cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg,
- clrflashstatus, clrreadstatus, orig_cmd1, orig_vld to
- __le32 to fix compilation warning reported by kernel test robot
-
-Change in [v7]
-
-* No change
-
-Change in [v6]
-
-* No change
-
-Change in [v5]
-
-* Cleand up raw nand driver.
-
-* Removed register value indirection
-
-* Removed set_reg() api.
-
-Change in [v4]
-
-* This patch was not included in [v4]
-
-Change in [v3]
-
-* This patch was not included in [v3]
-
-Change in [v2]
-
-* This patch was not included in [v2]
-
-Change in [v1]
-
-* This patch was not included in [v1]
-
- drivers/mtd/nand/raw/qcom_nandc.c | 506 ++++++++++++++----------------
- 1 file changed, 229 insertions(+), 277 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -189,17 +189,6 @@
- #define ECC_BCH_4BIT BIT(2)
- #define ECC_BCH_8BIT BIT(3)
-
--#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
--nandc_set_reg(chip, reg, \
-- ((cw_offset) << READ_LOCATION_OFFSET) | \
-- ((read_size) << READ_LOCATION_SIZE) | \
-- ((is_last_read_loc) << READ_LOCATION_LAST))
--
--#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
--nandc_set_reg(chip, reg, \
-- ((cw_offset) << READ_LOCATION_OFFSET) | \
-- ((read_size) << READ_LOCATION_SIZE) | \
-- ((is_last_read_loc) << READ_LOCATION_LAST))
- /*
- * Returns the actual register address for all NAND_DEV_ registers
- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg, \
- * @tx_sgl_start - start index in data sgl for tx.
- * @rx_sgl_pos - current index in data sgl for rx.
- * @rx_sgl_start - start index in data sgl for rx.
-- * @wait_second_completion - wait for second DMA desc completion before making
-- * the NAND transfer completion.
- */
- struct bam_transaction {
- struct bam_cmd_element *bam_ce;
-@@ -275,7 +262,6 @@ struct bam_transaction {
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
-- bool wait_second_completion;
- };
-
- /*
-@@ -471,9 +457,9 @@ struct qcom_op {
- unsigned int data_instr_idx;
- unsigned int rdy_timeout_ms;
- unsigned int rdy_delay_ns;
-- u32 addr1_reg;
-- u32 addr2_reg;
-- u32 cmd_reg;
-+ __le32 addr1_reg;
-+ __le32 addr2_reg;
-+ __le32 cmd_reg;
- u8 flag;
- };
-
-@@ -549,17 +535,17 @@ struct qcom_nand_host {
- * among different NAND controllers.
- * @ecc_modes - ecc mode for NAND
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @is_bam - whether NAND controller is using BAM
-- * @is_qpic - whether NAND CTRL is part of qpic IP
-- * @qpic_v2 - flag to indicate QPIC IP version 2
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
- * @use_codeword_fixup - whether NAND has different layout for boot partitions
- */
- struct qcom_nandc_props {
- u32 ecc_modes;
- u32 dev_cmd_reg_start;
-- bool is_bam;
-- bool is_qpic;
-- bool qpic_v2;
-+ bool supports_bam;
-+ bool nandc_part_of_qpic;
-+ bool qpic_version2;
- bool use_codeword_fixup;
- };
-
-@@ -613,19 +599,18 @@ static void clear_bam_transaction(struct
- {
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
-- if (!nandc->props->is_bam)
-+ if (!nandc->props->supports_bam)
- return;
-
- bam_txn->bam_ce_pos = 0;
- bam_txn->bam_ce_start = 0;
- bam_txn->cmd_sgl_pos = 0;
- bam_txn->cmd_sgl_start = 0;
- bam_txn->tx_sgl_pos = 0;
- bam_txn->tx_sgl_start = 0;
- bam_txn->rx_sgl_pos = 0;
- bam_txn->rx_sgl_start = 0;
- bam_txn->last_data_desc = NULL;
-- bam_txn->wait_second_completion = false;
-
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
-@@ -640,17 +618,7 @@ static void qpic_bam_dma_done(void *data
- {
- struct bam_transaction *bam_txn = data;
-
-- /*
-- * In case of data transfer with NAND, 2 callbacks will be generated.
-- * One for command channel and another one for data channel.
-- * If current transaction has data descriptors
-- * (i.e. wait_second_completion is true), then set this to false
-- * and wait for second DMA descriptor completion.
-- */
-- if (bam_txn->wait_second_completion)
-- bam_txn->wait_second_completion = false;
-- else
-- complete(&bam_txn->txn_done);
-+ complete(&bam_txn->txn_done);
- }
-
- static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
-@@ -676,10 +644,9 @@ static inline void nandc_write(struct qc
- iowrite32(val, nandc->base + offset);
- }
-
--static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
-- bool is_cpu)
-+static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
-- if (!nandc->props->is_bam)
-+ if (!nandc->props->supports_bam)
- return;
-
- if (is_cpu)
-@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
- DMA_FROM_DEVICE);
- }
-
--static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
--{
-- switch (offset) {
-- case NAND_FLASH_CMD:
-- return ®s->cmd;
-- case NAND_ADDR0:
-- return ®s->addr0;
-- case NAND_ADDR1:
-- return ®s->addr1;
-- case NAND_FLASH_CHIP_SELECT:
-- return ®s->chip_sel;
-- case NAND_EXEC_CMD:
-- return ®s->exec;
-- case NAND_FLASH_STATUS:
-- return ®s->clrflashstatus;
-- case NAND_DEV0_CFG0:
-- return ®s->cfg0;
-- case NAND_DEV0_CFG1:
-- return ®s->cfg1;
-- case NAND_DEV0_ECC_CFG:
-- return ®s->ecc_bch_cfg;
-- case NAND_READ_STATUS:
-- return ®s->clrreadstatus;
-- case NAND_DEV_CMD1:
-- return ®s->cmd1;
-- case NAND_DEV_CMD1_RESTORE:
-- return ®s->orig_cmd1;
-- case NAND_DEV_CMD_VLD:
-- return ®s->vld;
-- case NAND_DEV_CMD_VLD_RESTORE:
-- return ®s->orig_vld;
-- case NAND_EBI2_ECC_BUF_CFG:
-- return ®s->ecc_buf_cfg;
-- case NAND_READ_LOCATION_0:
-- return ®s->read_location0;
-- case NAND_READ_LOCATION_1:
-- return ®s->read_location1;
-- case NAND_READ_LOCATION_2:
-- return ®s->read_location2;
-- case NAND_READ_LOCATION_3:
-- return ®s->read_location3;
-- case NAND_READ_LOCATION_LAST_CW_0:
-- return ®s->read_location_last0;
-- case NAND_READ_LOCATION_LAST_CW_1:
-- return ®s->read_location_last1;
-- case NAND_READ_LOCATION_LAST_CW_2:
-- return ®s->read_location_last2;
-- case NAND_READ_LOCATION_LAST_CW_3:
-- return ®s->read_location_last3;
-- default:
-- return NULL;
-- }
--}
--
--static void nandc_set_reg(struct nand_chip *chip, int offset,
-- u32 val)
--{
-- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-- struct nandc_regs *regs = nandc->regs;
-- __le32 *reg;
--
-- reg = offset_to_nandc_reg(regs, offset);
--
-- if (reg)
-- *reg = cpu_to_le32(val);
--}
--
- /* Helper to check the code word, whether it is last cw or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
- return cw == (ecc->steps - 1);
- }
-
-+/**
-+ * nandc_set_read_loc_first() - to set read location first register
-+ * @chip: NAND Private Flash Chip Data
-+ * @reg_base: location register base
-+ * @cw_offset: code word offset
-+ * @read_size: code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location register value
-+ */
-+static void nandc_set_read_loc_first(struct nand_chip *chip,
-+ int reg_base, u32 cw_offset,
-+ u32 read_size, u32 is_last_read_loc)
-+{
-+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+ __le32 locreg_val;
-+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+ ((read_size) << READ_LOCATION_SIZE) |
-+ ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+ locreg_val = cpu_to_le32(val);
-+
-+ if (reg_base == NAND_READ_LOCATION_0)
-+ nandc->regs->read_location0 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_1)
-+ nandc->regs->read_location1 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_2)
-+ nandc->regs->read_location2 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_3)
-+ nandc->regs->read_location3 = locreg_val;
-+}
-+
-+/**
-+ * nandc_set_read_loc_last - to set read location last register
-+ * @chip: NAND Private Flash Chip Data
-+ * @reg_base: location register base
-+ * @cw_offset: code word offset
-+ * @read_size: code word read length
-+ * @is_last_read_loc: is this the last read location
-+ *
-+ * This function will set location last register value
-+ */
-+static void nandc_set_read_loc_last(struct nand_chip *chip,
-+ int reg_base, u32 cw_offset,
-+ u32 read_size, u32 is_last_read_loc)
-+{
-+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+ __le32 locreg_val;
-+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+ ((read_size) << READ_LOCATION_SIZE) |
-+ ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+ locreg_val = cpu_to_le32(val);
-+
-+ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
-+ nandc->regs->read_location_last0 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
-+ nandc->regs->read_location_last1 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
-+ nandc->regs->read_location_last2 = locreg_val;
-+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
-+ nandc->regs->read_location_last3 = locreg_val;
-+}
-+
- /* helper to configure location register values */
- static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
-- int cw_offset, int read_size, int is_last_read_loc)
-+ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int reg_base = NAND_READ_LOCATION_0;
-
-- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
- reg_base = NAND_READ_LOCATION_LAST_CW_0;
-
- reg_base += reg * 4;
-
-- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
- return nandc_set_read_loc_last(chip, reg_base, cw_offset,
- read_size, is_last_read_loc);
- else
-@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
- static void set_address(struct qcom_nand_host *host, u16 column, int page)
- {
- struct nand_chip *chip = &host->chip;
-+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (chip->options & NAND_BUSWIDTH_16)
- column >>= 1;
-
-- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
-- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
-+ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
-+ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
- }
-
- /*
-@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
- static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
- {
- struct nand_chip *chip = &host->chip;
-- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
-+ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (read) {
- if (host->use_ecc)
-- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
-+ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
- else
-- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
-+ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
- } else {
-- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
-+ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
- }
-
- if (host->use_ecc) {
-- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
-- (num_cw - 1) << CW_PER_PAGE;
-+ cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
-+ (num_cw - 1) << CW_PER_PAGE);
-
-- cfg1 = host->cfg1;
-- ecc_bch_cfg = host->ecc_bch_cfg;
-+ cfg1 = cpu_to_le32(host->cfg1);
-+ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
- } else {
-- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-- (num_cw - 1) << CW_PER_PAGE;
-+ cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+ (num_cw - 1) << CW_PER_PAGE);
-
-- cfg1 = host->cfg1_raw;
-- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+ cfg1 = cpu_to_le32(host->cfg1_raw);
-+ ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
- }
-
-- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
-- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
-- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
-- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
-- if (!nandc->props->qpic_v2)
-- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
-- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
-- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
-- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+ nandc->regs->cmd = cmd;
-+ nandc->regs->cfg0 = cfg0;
-+ nandc->regs->cfg1 = cfg1;
-+ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
-+
-+ if (!nandc->props->qpic_version2)
-+ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
-+
-+ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
-+ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
-+ nandc->regs->exec = cpu_to_le32(1);
-
- if (read)
- nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
-@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, first);
-
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
-@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
- * write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
-+ * @vaddr: contnigeous memory from where register value will
-+ * be written
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
--static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
-- int num_regs, unsigned int flags)
-+static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+ int first, int num_regs, unsigned int flags)
- {
- bool flow_control = false;
-- struct nandc_regs *regs = nandc->regs;
-- void *vaddr;
--
-- vaddr = offset_to_nandc_reg(regs, first);
--
-- if (first == NAND_ERASED_CW_DETECT_CFG) {
-- if (flags & NAND_ERASED_CW_SET)
-- vaddr = ®s->erased_cw_detect_cfg_set;
-- else
-- vaddr = ®s->erased_cw_detect_cfg_clr;
-- }
-
- if (first == NAND_EXEC_CMD)
- flags |= NAND_BAM_NWD;
-@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
-@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
- static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
- {
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
- static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
- {
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
-- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
-- if (!nandc->props->qpic_v2)
-- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
-- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
-- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+ if (!nandc->props->qpic_version2)
-+ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+ write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
-
- /*
-@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-- int reg = NAND_READ_LOCATION_0;
-+ __le32 *reg = &nandc->regs->read_location0;
-
-- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-- reg = NAND_READ_LOCATION_LAST_CW_0;
-+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-+ reg = &nandc->regs->read_location_last0;
-
-- if (nandc->props->is_bam)
-- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
-+ if (nandc->props->supports_bam)
-+ write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
-- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
-- if (!nandc->props->qpic_v2)
-- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
-+ write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+ if (!nandc->props->qpic_version2)
-+ write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
- }
-
-@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
-- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+ write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
- }
-
- /* helpers to submit/free our list of dma descriptors */
-@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
- struct bam_transaction *bam_txn = nandc->bam_txn;
- int ret = 0;
-
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (ret)
-@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
- list_for_each_entry(desc, &nandc->desc_list, node)
- cookie = dmaengine_submit(desc->dma_desc);
-
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
-- if (bam_txn->last_data_desc) {
-- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
-- bam_txn->last_data_desc->callback_param = bam_txn;
-- bam_txn->wait_second_completion = true;
-- }
-
- dma_async_issue_pending(nandc->tx_chan);
- dma_async_issue_pending(nandc->rx_chan);
-@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
-
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- dma_unmap_sg(nandc->dev, desc->bam_sgl,
- desc->sgl_cnt, desc->dir);
- else
-@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
- static void clear_read_regs(struct qcom_nand_controller *nandc)
- {
- nandc->reg_read_pos = 0;
-- nandc_read_buffer_sync(nandc, false);
-+ nandc_dev_to_mem(nandc, false);
- }
-
- /*
-@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int i;
-
-- nandc_read_buffer_sync(nandc, true);
-+ nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < cw_cnt; i++) {
- u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- clear_read_regs(nandc);
- host->use_ecc = false;
-
-- if (nandc->props->qpic_v2)
-+ if (nandc->props->qpic_version2)
- raw_cw = ecc->steps - 1;
-
- clear_bam_transaction(nandc);
-@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- }
-
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
- read_loc += data_size1;
-
-@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
- u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-
- buf = (struct read_stats *)nandc->reg_read_buf;
-- nandc_read_buffer_sync(nandc, true);
-+ nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < ecc->steps; i++, buf++) {
- u32 flash, buffer, erased_cw;
-@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
- oob_size = host->ecc_bytes_hw + host->spare_bytes;
- }
-
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- if (data_buf && oob_buf) {
- nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
- nandc_set_read_loc(chip, i, 1, data_size,
-@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
-
- mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- /* Free the initially allocated BAM transaction for reading the ONFI params */
-- if (nandc->props->is_bam)
-+ if (nandc->props->supports_bam)
- free_bam_transaction(nandc);
-
- nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
- cwperpage);
-
- /* Now allocate the BAM transaction based on updated max_cwperpage */
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
-@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
-- if (!nandc->props->qpic_v2)
-+ if (!nandc->props->qpic_version2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-
- host->clrflashstatus = FS_READY_BSY_N;
-@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
- cmd = OP_FETCH_ID;
- break;
- case NAND_CMD_PARAM:
-- if (nandc->props->qpic_v2)
-+ if (nandc->props->qpic_version2)
- cmd = OP_PAGE_READ_ONFI_READ;
- else
- cmd = OP_PAGE_READ;
-@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
- if (ret < 0)
- return ret;
-
-- q_op->cmd_reg = ret;
-+ q_op->cmd_reg = cpu_to_le32(ret);
- q_op->rdy_delay_ns = instr->delay_ns;
- break;
-
-@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
- addrs = &instr->ctx.addr.addrs[offset];
-
- for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
-- q_op->addr1_reg |= addrs[i] << (i * 8);
-+ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
-
- if (naddrs > 4)
-- q_op->addr2_reg |= addrs[4];
-+ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
-
- q_op->rdy_delay_ns = instr->delay_ns;
- break;
-@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
- unsigned long start = jiffies + msecs_to_jiffies(time_ms);
- u32 flash;
-
-- nandc_read_buffer_sync(nandc, true);
-+ nandc_dev_to_mem(nandc, true);
-
- do {
- flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
-- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+ nandc->regs->cmd = q_op.cmd_reg;
-+ nandc->regs->exec = cpu_to_le32(1);
-
-- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- ret = submit_descs(nandc);
-@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
- goto err_out;
- }
-
-- nandc_read_buffer_sync(nandc, true);
-+ nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < num_cw; i++) {
- flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
-- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
-- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
-- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
-- nandc->props->is_bam ? 0 : DM_EN);
-+ nandc->regs->cmd = q_op.cmd_reg;
-+ nandc->regs->addr0 = q_op.addr1_reg;
-+ nandc->regs->addr1 = q_op.addr2_reg;
-+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-+ nandc->regs->exec = cpu_to_le32(1);
-
-- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
-- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
-@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
- op_id = q_op.data_instr_idx;
- len = nand_subop_get_data_len(subop, op_id);
-
-- nandc_read_buffer_sync(nandc, true);
-+ nandc_dev_to_mem(nandc, true);
- memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
-
- err_out:
-@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
-
- if (q_op.flag == OP_PROGRAM_PAGE) {
- goto wait_rdy;
-- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
-- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
-- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
-- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
-- nandc_set_reg(chip, NAND_DEV0_CFG0,
-- host->cfg0_raw & ~(7 << CW_PER_PAGE));
-- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
-+ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
-+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-+ nandc->regs->addr0 = q_op.addr1_reg;
-+ nandc->regs->addr1 = q_op.addr2_reg;
-+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
-+ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
- instrs = 3;
-- } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
-+ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
- return 0;
- }
-
-@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
-- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+ nandc->regs->cmd = q_op.cmd_reg;
-+ nandc->regs->exec = cpu_to_le32(1);
-
-- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-- if (q_op.cmd_reg == OP_BLOCK_ERASE)
-- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
-+ write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- ret = submit_descs(nandc);
-@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(str
- if (ret)
- return ret;
-
-- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
-+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-
- nandc->buf_count = 0;
- nandc->buf_start = 0;
-@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(str
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
-- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-+ nandc->regs->cmd = q_op.cmd_reg;
-+ nandc->regs->addr0 = 0;
-+ nandc->regs->addr1 = 0;
-+
-+ nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
-+ | 512 << UD_SIZE_BYTES
-+ | 5 << NUM_ADDR_CYCLES
-+ | 0 << SPARE_SIZE_BYTES);
-+
-+ nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
-+ | 0 << CS_ACTIVE_BSY
-+ | 17 << BAD_BLOCK_BYTE_NUM
-+ | 1 << BAD_BLOCK_IN_SPARE_AREA
-+ | 2 << WR_RD_BSY_GAP
-+ | 0 << WIDE_FLASH
-+ | 1 << DEV0_CFG1_ECC_DISABLE);
-
-- nandc_set_reg(chip, NAND_ADDR0, 0);
-- nandc_set_reg(chip, NAND_ADDR1, 0);
-- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
-- | 512 << UD_SIZE_BYTES
-- | 5 << NUM_ADDR_CYCLES
-- | 0 << SPARE_SIZE_BYTES);
-- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
-- | 0 << CS_ACTIVE_BSY
-- | 17 << BAD_BLOCK_BYTE_NUM
-- | 1 << BAD_BLOCK_IN_SPARE_AREA
-- | 2 << WR_RD_BSY_GAP
-- | 0 << WIDE_FLASH
-- | 1 << DEV0_CFG1_ECC_DISABLE);
-- if (!nandc->props->qpic_v2)
-- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-+ if (!nandc->props->qpic_version2)
-+ nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-
- /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
-- if (!nandc->props->qpic_v2) {
-- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
-- (nandc->vld & ~READ_START_VLD));
-- nandc_set_reg(chip, NAND_DEV_CMD1,
-- (nandc->cmd1 & ~(0xFF << READ_ADDR))
-- | NAND_CMD_PARAM << READ_ADDR);
-+ if (!nandc->props->qpic_version2) {
-+ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
-+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
-+ | NAND_CMD_PARAM << READ_ADDR);
- }
-
-- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
-- if (!nandc->props->qpic_v2) {
-- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
-- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
-+ nandc->regs->exec = cpu_to_le32(1);
-+
-+ if (!nandc->props->qpic_version2) {
-+ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
-+ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
- }
-
- instr = q_op.data_instr;
-@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(str
-
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
-
-- if (!nandc->props->qpic_v2) {
-- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
-- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+ if (!nandc->props->qpic_version2) {
-+ write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+ write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- }
-
- nandc->buf_count = len;
-@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(str
- nandc->buf_count, 0);
-
- /* restore CMD1 and VLD regs */
-- if (!nandc->props->qpic_v2) {
-- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
-- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
-+ if (!nandc->props->qpic_version2) {
-+ write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+ write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+ NAND_BAM_NEXT_SGL);
- }
-
- ret = submit_descs(nandc);
-@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops
-
- static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
- {
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
-@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_
- if (!nandc->reg_read_buf)
- return -ENOMEM;
-
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- nandc->reg_read_dma =
- dma_map_single(nandc->dev, nandc->reg_read_buf,
- MAX_REG_RD *
-@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_
- u32 nand_ctrl;
-
- /* kill onenand */
-- if (!nandc->props->is_qpic)
-+ if (!nandc->props->nandc_part_of_qpic)
- nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-
-- if (!nandc->props->qpic_v2)
-+ if (!nandc->props->qpic_version2)
- nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
- NAND_DEV_CMD_VLD_VAL);
-
- /* enable ADM or BAM DMA */
-- if (nandc->props->is_bam) {
-+ if (nandc->props->supports_bam) {
- nand_ctrl = nandc_read(nandc, NAND_CTRL);
-
- /*
-@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_
- }
-
- /* save the original values of these registers */
-- if (!nandc->props->qpic_v2) {
-+ if (!nandc->props->qpic_version2) {
- nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
- nandc->vld = NAND_DEV_CMD_VLD_VAL;
- }
-@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct pl
- struct device_node *np = nandc->dev->of_node;
- int ret;
-
-- if (!nandc->props->is_bam) {
-+ if (!nandc->props->supports_bam) {
- ret = of_property_read_u32(np, "qcom,cmd-crci",
- &nandc->cmd_crci);
- if (ret) {
-@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct pla
-
- static const struct qcom_nandc_props ipq806x_nandc_props = {
- .ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
-- .is_bam = false,
-+ .supports_bam = false,
- .use_codeword_fixup = true,
- .dev_cmd_reg_start = 0x0,
- };
-
- static const struct qcom_nandc_props ipq4019_nandc_props = {
- .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
-- .is_bam = true,
-- .is_qpic = true,
-+ .supports_bam = true,
-+ .nandc_part_of_qpic = true,
- .dev_cmd_reg_start = 0x0,
- };
-
- static const struct qcom_nandc_props ipq8074_nandc_props = {
- .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
-- .is_bam = true,
-- .is_qpic = true,
-+ .supports_bam = true,
-+ .nandc_part_of_qpic = true,
- .dev_cmd_reg_start = 0x7000,
- };
-
- static const struct qcom_nandc_props sdx55_nandc_props = {
- .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
-- .is_bam = true,
-- .is_qpic = true,
-- .qpic_v2 = true,
-+ .supports_bam = true,
-+ .nandc_part_of_qpic = true,
-+ .qpic_version2 = true,
- .dev_cmd_reg_start = 0x7000,
- };
-
+++ /dev/null
-From dde50ed4a7bdb79b4bb408781d3e4846d4c49f0a Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 11 Sep 2024 11:13:42 +0530
-Subject: [PATCH v10 3/8] mtd: rawnand: qcom: Add qcom prefix to common api
-
-Add qcom prefix to all the api which will be commonly
-used by spi nand driver and raw nand driver.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* No change
-
-Change in [v8]
-
-* No change
-
-Change in [v7]
-
-* No change
-
-Change in [v6]
-
-* No change
-
-Change in [v5]
-
-* Add qcom_ prefix to all common API.
-
-Change in [v4]
-
-* This patch was not included in [v4]
-
-Change in [v3]
-
-* This patch was not included in [v3]
-
-Change in [v2]
-
-* This patch was not included in [v2]
-
-Change in [v1]
-
-* This patch was not included in [v1]
-
- drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
- 1 file changed, 160 insertions(+), 160 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -53,7 +53,7 @@
- #define NAND_READ_LOCATION_LAST_CW_2 0xf48
- #define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-
--/* dummy register offsets, used by write_reg_dma */
-+/* dummy register offsets, used by qcom_write_reg_dma */
- #define NAND_DEV_CMD1_RESTORE 0xdead
- #define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-
-@@ -211,7 +211,7 @@
-
- /*
- * Flags used in DMA descriptor preparation helper functions
-- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
- */
- /* Don't set the EOT in current tx BAM sgl */
- #define NAND_BAM_NO_EOT BIT(0)
-@@ -550,7 +550,7 @@ struct qcom_nandc_props {
- };
-
- /* Frees the BAM transaction memory */
--static void free_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
- {
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
-@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
-
- /* Allocates and Initializes the BAM transaction */
- static struct bam_transaction *
--alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
- {
- struct bam_transaction *bam_txn;
- size_t bam_txn_size;
-@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
- }
-
- /* Clears the BAM transaction indexes */
--static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
- {
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
-@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
- }
-
- /* Callback for DMA descriptor completion */
--static void qpic_bam_dma_done(void *data)
-+static void qcom_qpic_bam_dma_done(void *data)
- {
- struct bam_transaction *bam_txn = data;
-
-@@ -644,7 +644,7 @@ static inline void nandc_write(struct qc
- iowrite32(val, nandc->base + offset);
- }
-
--static inline void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
- if (!nandc->props->supports_bam)
- return;
-@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
- * which will be submitted to DMA engine.
- */
--static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-- struct dma_chan *chan,
-- unsigned long flags)
-+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+ struct dma_chan *chan,
-+ unsigned long flags)
- {
- struct desc_info *desc;
- struct scatterlist *sgl;
-@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
- * after the current command element.
- */
--static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-- int reg_off, const void *vaddr,
-- int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+ int reg_off, const void *vaddr,
-+ int size, unsigned int flags)
- {
- int bam_ce_size;
- int i, ret;
-@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-
- if (flags & NAND_BAM_NWD) {
-- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
-- DMA_PREP_FENCE |
-- DMA_PREP_CMD);
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+ DMA_PREP_FENCE |
-+ DMA_PREP_CMD);
- if (ret)
- return ret;
- }
-@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
- * Prepares the data descriptor for BAM DMA which will be used for NAND
- * data reads and writes.
- */
--static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-- const void *vaddr,
-- int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+ const void *vaddr, int size, unsigned int flags)
- {
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
- * is not set, form the DMA descriptor
- */
- if (!(flags & NAND_BAM_NO_EOT)) {
-- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
-- DMA_PREP_INTERRUPT);
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+ DMA_PREP_INTERRUPT);
- if (ret)
- return ret;
- }
-@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
- return 0;
- }
-
--static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-- int reg_off, const void *vaddr, int size,
-- bool flow_control)
-+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+ int reg_off, const void *vaddr, int size,
-+ bool flow_control)
- {
- struct desc_info *desc;
- struct dma_async_tx_descriptor *dma_desc;
-@@ -1069,15 +1068,15 @@ err:
- }
-
- /*
-- * read_reg_dma: prepares a descriptor to read a given number of
-+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
- * contiguous registers to the reg_read_buf pointer
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to read
- * @flags: flags to control DMA descriptor preparation
- */
--static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-- int num_regs, unsigned int flags)
-+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+ int num_regs, unsigned int flags)
- {
- bool flow_control = false;
- void *vaddr;
-@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
- first = dev_cmd_reg_addr(nandc, first);
-
- if (nandc->props->supports_bam)
-- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
-
-- return prep_adm_dma_desc(nandc, true, first, vaddr,
-+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
- num_regs * sizeof(u32), flow_control);
- }
-
- /*
-- * write_reg_dma: prepares a descriptor to write a given number of
-+ * qcom_write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
- * @vaddr: contnigeous memory from where register value will
-@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
--static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-- int first, int num_regs, unsigned int flags)
-+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+ int first, int num_regs, unsigned int flags)
- {
- bool flow_control = false;
-
-@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
- if (nandc->props->supports_bam)
-- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
-- return prep_adm_dma_desc(nandc, false, first, vaddr,
-+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
- num_regs * sizeof(u32), flow_control);
- }
-
- /*
-- * read_data_dma: prepares a DMA descriptor to transfer data from the
-+ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
- * controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off: offset within the controller's data buffer
-@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
--static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-- const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+ const u8 *vaddr, int size, unsigned int flags)
- {
- if (nandc->props->supports_bam)
-- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
-- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
- }
-
- /*
-- * write_data_dma: prepares a DMA descriptor to transfer data from
-+ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
- * 'vaddr' to the controller's internal buffer
- *
- * @reg_off: offset within the controller's data buffer
-@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
--static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-- const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+ const u8 *vaddr, int size, unsigned int flags)
- {
- if (nandc->props->supports_bam)
-- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
-- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
- }
-
- /*
-@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_version2)
-- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-- NAND_ERASED_CW_DETECT_CFG, 1, 0);
-- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
-
- /*
-@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
- reg = &nandc->regs->read_location_last0;
-
- if (nandc->props->supports_bam)
-- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- if (use_ecc) {
-- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-- NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-+ NAND_BAM_NEXT_SGL);
- } else {
-- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- }
- }
-
-@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_version2)
-- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-- NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-+ NAND_BAM_NEXT_SGL);
- }
-
- /*
-@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
- {
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
-- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
-+ NAND_BAM_NEXT_SGL);
- }
-
- /* helpers to submit/free our list of dma descriptors */
--static int submit_descs(struct qcom_nand_controller *nandc)
-+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
- {
- struct desc_info *desc, *n;
- dma_cookie_t cookie = 0;
-@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
-
- if (nandc->props->supports_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
-- DMA_PREP_INTERRUPT);
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+ DMA_PREP_INTERRUPT);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
-- DMA_PREP_CMD);
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+ DMA_PREP_CMD);
- if (ret)
- goto err_unmap_free_desc;
- }
-@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
- cookie = dmaengine_submit(desc->dma_desc);
-
- if (nandc->props->supports_bam) {
-- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
-
- dma_async_issue_pending(nandc->tx_chan);
-@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
- err_unmap_free_desc:
- /*
- * Unmap the dma sg_list and free the desc allocated by both
-- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
-+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
- */
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
-@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
- }
-
- /* reset the register read buffer for next NAND operation */
--static void clear_read_regs(struct qcom_nand_controller *nandc)
-+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
- {
- nandc->reg_read_pos = 0;
-- nandc_dev_to_mem(nandc, false);
-+ qcom_nandc_dev_to_mem(nandc, false);
- }
-
- /*
-@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int i;
-
-- nandc_dev_to_mem(nandc, true);
-+ qcom_nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < cw_cnt; i++) {
- u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- nand_read_page_op(chip, page, 0, NULL, 0);
- nandc->buf_count = 0;
- nandc->buf_start = 0;
-- clear_read_regs(nandc);
-+ qcom_clear_read_regs(nandc);
- host->use_ecc = false;
-
- if (nandc->props->qpic_version2)
- raw_cw = ecc->steps - 1;
-
-- clear_bam_transaction(nandc);
-+ qcom_clear_bam_transaction(nandc);
- set_address(host, host->cw_size * cw, page);
- update_rw_regs(host, 1, true, raw_cw);
- config_nand_page_read(chip);
-@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
-
- config_nand_cw_read(chip, false, raw_cw);
-
-- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
- reg_off += data_size1;
-
-- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
- reg_off += oob_size1;
-
-- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
- reg_off += data_size2;
-
-- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
- return ret;
-@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
- u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
-
- buf = (struct read_stats *)nandc->reg_read_buf;
-- nandc_dev_to_mem(nandc, true);
-+ qcom_nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < ecc->steps; i++, buf++) {
- u32 flash, buffer, erased_cw;
-@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
- config_nand_cw_read(chip, true, i);
-
- if (data_buf)
-- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-- data_size, 0);
-+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-+ data_size, 0);
-
- /*
- * when ecc is enabled, the controller doesn't read the real
-@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
- for (j = 0; j < host->bbm_size; j++)
- *oob_buf++ = 0xff;
-
-- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-- oob_buf, oob_size, 0);
-+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+ oob_buf, oob_size, 0);
- }
-
- if (data_buf)
-@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
- oob_buf += oob_size;
- }
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to read page/oob\n");
- return ret;
-@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
- int size;
- int ret;
-
-- clear_read_regs(nandc);
-+ qcom_clear_read_regs(nandc);
-
- size = host->use_ecc ? host->cw_data : host->cw_size;
-
-@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
-
- config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
-
-- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
-+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev, "failed to copy last codeword\n");
-
-@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
- nandc->buf_count = 0;
- nandc->buf_start = 0;
- host->use_ecc = true;
-- clear_read_regs(nandc);
-+ qcom_clear_read_regs(nandc);
- set_address(host, 0, page);
- update_rw_regs(host, ecc->steps, true, 0);
-
- data_buf = buf;
- oob_buf = oob_required ? chip->oob_poi : NULL;
-
-- clear_bam_transaction(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- return read_page_ecc(host, data_buf, oob_buf, page);
- }
-@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
- if (host->nr_boot_partitions)
- qcom_nandc_codeword_fixup(host, page);
-
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- host->use_ecc = true;
- set_address(host, 0, page);
-@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
- set_address(host, 0, page);
- nandc->buf_count = 0;
- nandc->buf_start = 0;
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- data_buf = (u8 *)buf;
- oob_buf = chip->oob_poi;
-@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
- oob_size = ecc->bytes;
- }
-
-- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
-- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
-+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-
- /*
- * when ECC is enabled, we don't really need to write anything
-@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
- if (qcom_nandc_is_last_cw(ecc, i)) {
- oob_buf += host->bbm_size;
-
-- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-- oob_buf, oob_size, 0);
-+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+ oob_buf, oob_size, 0);
- }
-
- config_nand_cw_write(chip);
-@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
- oob_buf += oob_size;
- }
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to write page\n");
- return ret;
-@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
- qcom_nandc_codeword_fixup(host, page);
-
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- data_buf = (u8 *)buf;
- oob_buf = chip->oob_poi;
-@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
- oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- }
-
-- write_data_dma(nandc, reg_off, data_buf, data_size1,
-- NAND_BAM_NO_EOT);
-+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
-+ NAND_BAM_NO_EOT);
- reg_off += data_size1;
- data_buf += data_size1;
-
-- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
-- NAND_BAM_NO_EOT);
-+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
-+ NAND_BAM_NO_EOT);
- reg_off += oob_size1;
- oob_buf += oob_size1;
-
-- write_data_dma(nandc, reg_off, data_buf, data_size2,
-- NAND_BAM_NO_EOT);
-+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
-+ NAND_BAM_NO_EOT);
- reg_off += data_size2;
- data_buf += data_size2;
-
-- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
- oob_buf += oob_size2;
-
- config_nand_cw_write(chip);
- }
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to write raw page\n");
- return ret;
-@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
- qcom_nandc_codeword_fixup(host, page);
-
- host->use_ecc = true;
-- clear_bam_transaction(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- /* calculate the data and oob size for the last codeword/step */
- data_size = ecc->size - ((ecc->steps - 1) << 2);
-@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
- update_rw_regs(host, 1, false, 0);
-
- config_nand_page_write(chip);
-- write_data_dma(nandc, FLASH_BUF_ACC,
-- nandc->data_buffer, data_size + oob_size, 0);
-+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+ nandc->data_buffer, data_size + oob_size, 0);
- config_nand_cw_write(chip);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to write oob\n");
- return ret;
-@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
- */
- host->use_ecc = false;
-
-- clear_bam_transaction(nandc);
-+ qcom_clear_bam_transaction(nandc);
- ret = copy_last_cw(host, page);
- if (ret)
- goto err;
-@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret;
-
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- /*
- * to mark the BBM as bad, we flash the entire last codeword with 0s.
-@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
- update_rw_regs(host, 1, false, ecc->steps - 1);
-
- config_nand_page_write(chip);
-- write_data_dma(nandc, FLASH_BUF_ACC,
-- nandc->data_buffer, host->cw_size, 0);
-+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+ nandc->data_buffer, host->cw_size, 0);
- config_nand_cw_write(chip);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure to update BBM\n");
- return ret;
-@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
- mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- /* Free the initially allocated BAM transaction for reading the ONFI params */
- if (nandc->props->supports_bam)
-- free_bam_transaction(nandc);
-+ qcom_free_bam_transaction(nandc);
-
- nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
- cwperpage);
-
- /* Now allocate the BAM transaction based on updated max_cwperpage */
- if (nandc->props->supports_bam) {
-- nandc->bam_txn = alloc_bam_transaction(nandc);
-+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
-@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
- unsigned long start = jiffies + msecs_to_jiffies(time_ms);
- u32 flash;
-
-- nandc_dev_to_mem(nandc, true);
-+ qcom_nandc_dev_to_mem(nandc, true);
-
- do {
- flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
- nandc->buf_start = 0;
- host->use_ecc = false;
-
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- nandc->regs->cmd = q_op.cmd_reg;
- nandc->regs->exec = cpu_to_le32(1);
-
-- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure in submitting status descriptor\n");
- goto err_out;
- }
-
-- nandc_dev_to_mem(nandc, true);
-+ qcom_nandc_dev_to_mem(nandc, true);
-
- for (i = 0; i < num_cw; i++) {
- flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
- nandc->buf_start = 0;
- host->use_ecc = false;
-
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- nandc->regs->cmd = q_op.cmd_reg;
- nandc->regs->addr0 = q_op.addr1_reg;
-@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
- nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
- nandc->regs->exec = cpu_to_le32(1);
-
-- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
-- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure in submitting read id descriptor\n");
- goto err_out;
-@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
- op_id = q_op.data_instr_idx;
- len = nand_subop_get_data_len(subop, op_id);
-
-- nandc_dev_to_mem(nandc, true);
-+ qcom_nandc_dev_to_mem(nandc, true);
- memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
-
- err_out:
-@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
- nandc->buf_start = 0;
- host->use_ecc = false;
-
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- nandc->regs->cmd = q_op.cmd_reg;
- nandc->regs->exec = cpu_to_le32(1);
-
-- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
-- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-
-- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure in submitting misc descriptor\n");
- goto err_out;
-@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
- nandc->buf_count = 0;
- nandc->buf_start = 0;
- host->use_ecc = false;
-- clear_read_regs(nandc);
-- clear_bam_transaction(nandc);
-+ qcom_clear_read_regs(nandc);
-+ qcom_clear_bam_transaction(nandc);
-
- nandc->regs->cmd = q_op.cmd_reg;
- nandc->regs->addr0 = 0;
-@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
-
- if (!nandc->props->qpic_version2) {
-- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- }
-
- nandc->buf_count = len;
-@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
-
- config_nand_single_cw_page_read(chip, false, 0);
-
-- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-- nandc->buf_count, 0);
-+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-+ nandc->buf_count, 0);
-
- /* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_version2) {
-- write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-- NAND_BAM_NEXT_SGL);
-+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+ NAND_BAM_NEXT_SGL);
- }
-
-- ret = submit_descs(nandc);
-+ ret = qcom_submit_descs(nandc);
- if (ret) {
- dev_err(nandc->dev, "failure in submitting param page descriptor\n");
- goto err_out;
-@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
- * maximum codeword size
- */
- nandc->max_cwperpage = 1;
-- nandc->bam_txn = alloc_bam_transaction(nandc);
-+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
+++ /dev/null
-From b00c2f583e54aa8bed2044e5b1898d9accd45415 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 11 Sep 2024 17:20:22 +0530
-Subject: [PATCH v10 4/8] mtd: nand: Add qpic_common API file
-
-Add qpic_common.c file which hold all the common
-qpic APIs which will be used by both qpic raw nand
-driver and qpic spi nand driver.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* No Change
-
-Change in [v8]
-
-* Removed "inline" from qcom_nandc_dev_to_mem()
-
-Change in [v7]
-
-* Removed partition.h
-
-* Updated commit message heading
-
-* Made CONFIG_MTD_NAND_QCOM as bool
-
-Change in [v6]
-
-* made changes to select qpic_common.c based on either
- CONFIG_MTD_NAND_QCOM=y or CONFIG_SPI_QPIC_SNAND=y
-
-* Removed rawnand.h from qpic_common.c
-
-* change nand_controller variable as a pointer type.
-
-Change in [v5]
-
-* Remove multiple dma call back to avoid race condition
-
-Change in [v4]
-
-* Added kernel doc for all common api as per kernel doc
- standard
-
-* Added QPIC_COMMON config to build qpic_common.c
-
-Change in [v3]
-
-* Added original copy right
-
-* Removed all EXPORT_SYMBOL()
-
-* Made this common api file more generic
-
-* Added qcom_ prefix to all api in this file
-
-* Removed devm_kfree and added kfree
-
-* Moved to_qcom_nand_controller() to raw nand driver
- since it was only used by raw nand driver, so not needed
- as common
-
-* Added kernel doc for all api
-
-* made reverse tree of variable declaration in
- prep_adm_dma_desc() function
-
-* Added if(!ret) condition in prep_adm_dma_desc()
- function
-
-* Initialized slave_conf as 0 while declaration
-
-Change in [v2]
-
-* Posted initial support for common api file
-
-Change in [v1]
-
-* Posted as RFC patch for design review
-
- drivers/mtd/nand/Makefile | 4 +
- drivers/mtd/nand/qpic_common.c | 738 +++++++++++++++++
- drivers/mtd/nand/raw/Kconfig | 2 +-
- drivers/mtd/nand/raw/qcom_nandc.c | 1092 +-------------------------
- include/linux/mtd/nand-qpic-common.h | 468 +++++++++++
- 5 files changed, 1223 insertions(+), 1081 deletions(-)
- create mode 100644 drivers/mtd/nand/qpic_common.c
- create mode 100644 include/linux/mtd/nand-qpic-common.h
-
---- a/drivers/mtd/nand/Makefile
-+++ b/drivers/mtd/nand/Makefile
-@@ -5,6 +5,10 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.
- obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
- obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
-
-+ifeq ($(CONFIG_MTD_NAND_QCOM),y)
-+obj-y += qpic_common.o
-+endif
-+
- obj-y += onenand/
- obj-y += raw/
- obj-y += spi/
---- /dev/null
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -0,0 +1,745 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
-+ */
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma/qcom_adm.h>
-+#include <linux/dma/qcom_bam_dma.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/mtd/nand-qpic-common.h>
-+
-+/**
-+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
-+ * @nandc: qpic nand controller
-+ *
-+ * This function frees the bam transaction memory
-+ */
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+ kfree(bam_txn);
-+}
-+
-+/**
-+ * qcom_alloc_bam_transaction() - allocate BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate and initialize the BAM transaction structure
-+ */
-+struct bam_transaction *
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+ struct bam_transaction *bam_txn;
-+ size_t bam_txn_size;
-+ unsigned int num_cw = nandc->max_cwperpage;
-+ void *bam_txn_buf;
-+
-+ bam_txn_size =
-+ sizeof(*bam_txn) + num_cw *
-+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
-+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
-+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-+
-+ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
-+ if (!bam_txn_buf)
-+ return NULL;
-+
-+ bam_txn = bam_txn_buf;
-+ bam_txn_buf += sizeof(*bam_txn);
-+
-+ bam_txn->bam_ce = bam_txn_buf;
-+ bam_txn_buf +=
-+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-+
-+ bam_txn->cmd_sgl = bam_txn_buf;
-+ bam_txn_buf +=
-+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-+
-+ bam_txn->data_sgl = bam_txn_buf;
-+
-+ init_completion(&bam_txn->txn_done);
-+
-+ return bam_txn;
-+}
-+
-+/**
-+ * qcom_clear_bam_transaction() - Clears the BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will clear the BAM transaction indexes.
-+ */
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+ if (!nandc->props->supports_bam)
-+ return;
-+
-+ bam_txn->bam_ce_pos = 0;
-+ bam_txn->bam_ce_start = 0;
-+ bam_txn->cmd_sgl_pos = 0;
-+ bam_txn->cmd_sgl_start = 0;
-+ bam_txn->tx_sgl_pos = 0;
-+ bam_txn->tx_sgl_start = 0;
-+ bam_txn->rx_sgl_pos = 0;
-+ bam_txn->rx_sgl_start = 0;
-+ bam_txn->last_data_desc = NULL;
-+
-+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-+ QPIC_PER_CW_CMD_SGL);
-+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
-+ QPIC_PER_CW_DATA_SGL);
-+
-+ reinit_completion(&bam_txn->txn_done);
-+}
-+
-+/**
-+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
-+ * @data: data pointer
-+ *
-+ * This function is a callback for DMA descriptor completion
-+ */
-+void qcom_qpic_bam_dma_done(void *data)
-+{
-+ struct bam_transaction *bam_txn = data;
-+
-+ complete(&bam_txn->txn_done);
-+}
-+
-+/**
-+ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
-+ * @nandc: qpic nand controller
-+ * @is_cpu: cpu or Device
-+ *
-+ * This function will check for dma sync for cpu or device
-+ */
-+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+{
-+ if (!nandc->props->supports_bam)
-+ return;
-+
-+ if (is_cpu)
-+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
-+ MAX_REG_RD *
-+ sizeof(*nandc->reg_read_buf),
-+ DMA_FROM_DEVICE);
-+ else
-+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
-+ MAX_REG_RD *
-+ sizeof(*nandc->reg_read_buf),
-+ DMA_FROM_DEVICE);
-+}
-+
-+/**
-+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
-+ * @nandc: qpic nand controller
-+ * @chan: dma channel
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function maps the scatter gather list for DMA transfer and forms the
-+ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
-+ * descriptor queue which will be submitted to DMA engine.
-+ */
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+ struct dma_chan *chan, unsigned long flags)
-+{
-+ struct desc_info *desc;
-+ struct scatterlist *sgl;
-+ unsigned int sgl_cnt;
-+ int ret;
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+ enum dma_transfer_direction dir_eng;
-+ struct dma_async_tx_descriptor *dma_desc;
-+
-+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+ if (!desc)
-+ return -ENOMEM;
-+
-+ if (chan == nandc->cmd_chan) {
-+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
-+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
-+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
-+ dir_eng = DMA_MEM_TO_DEV;
-+ desc->dir = DMA_TO_DEVICE;
-+ } else if (chan == nandc->tx_chan) {
-+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
-+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
-+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
-+ dir_eng = DMA_MEM_TO_DEV;
-+ desc->dir = DMA_TO_DEVICE;
-+ } else {
-+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
-+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
-+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
-+ dir_eng = DMA_DEV_TO_MEM;
-+ desc->dir = DMA_FROM_DEVICE;
-+ }
-+
-+ sg_mark_end(sgl + sgl_cnt - 1);
-+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+ if (ret == 0) {
-+ dev_err(nandc->dev, "failure in mapping desc\n");
-+ kfree(desc);
-+ return -ENOMEM;
-+ }
-+
-+ desc->sgl_cnt = sgl_cnt;
-+ desc->bam_sgl = sgl;
-+
-+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
-+ flags);
-+
-+ if (!dma_desc) {
-+ dev_err(nandc->dev, "failure in prep desc\n");
-+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+ kfree(desc);
-+ return -EINVAL;
-+ }
-+
-+ desc->dma_desc = dma_desc;
-+
-+ /* update last data/command descriptor */
-+ if (chan == nandc->cmd_chan)
-+ bam_txn->last_cmd_desc = dma_desc;
-+ else
-+ bam_txn->last_data_desc = dma_desc;
-+
-+ list_add_tail(&desc->node, &nandc->desc_list);
-+
-+ return 0;
-+}
-+
-+/**
-+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the command descriptor for BAM DMA
-+ * which will be used for NAND register reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+ int reg_off, const void *vaddr,
-+ int size, unsigned int flags)
-+{
-+ int bam_ce_size;
-+ int i, ret;
-+ struct bam_cmd_element *bam_ce_buffer;
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-+
-+ /* fill the command desc */
-+ for (i = 0; i < size; i++) {
-+ if (read)
-+ bam_prep_ce(&bam_ce_buffer[i],
-+ nandc_reg_phys(nandc, reg_off + 4 * i),
-+ BAM_READ_COMMAND,
-+ reg_buf_dma_addr(nandc,
-+ (__le32 *)vaddr + i));
-+ else
-+ bam_prep_ce_le32(&bam_ce_buffer[i],
-+ nandc_reg_phys(nandc, reg_off + 4 * i),
-+ BAM_WRITE_COMMAND,
-+ *((__le32 *)vaddr + i));
-+ }
-+
-+ bam_txn->bam_ce_pos += size;
-+
-+ /* use the separate sgl after this command */
-+ if (flags & NAND_BAM_NEXT_SGL) {
-+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
-+ bam_ce_size = (bam_txn->bam_ce_pos -
-+ bam_txn->bam_ce_start) *
-+ sizeof(struct bam_cmd_element);
-+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
-+ bam_ce_buffer, bam_ce_size);
-+ bam_txn->cmd_sgl_pos++;
-+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-+
-+ if (flags & NAND_BAM_NWD) {
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+ DMA_PREP_FENCE | DMA_PREP_CMD);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the data descriptor for BAM DMA which
-+ * will be used for NAND data reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+ const void *vaddr, int size, unsigned int flags)
-+{
-+ int ret;
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+ if (read) {
-+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
-+ vaddr, size);
-+ bam_txn->rx_sgl_pos++;
-+ } else {
-+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
-+ vaddr, size);
-+ bam_txn->tx_sgl_pos++;
-+
-+ /*
-+ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
-+ * is not set, form the DMA descriptor
-+ */
-+ if (!(flags & NAND_BAM_NO_EOT)) {
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+ DMA_PREP_INTERRUPT);
-+ if (ret)
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: adm dma transaction size in bytes
-+ * @flow_control: flow controller
-+ *
-+ * This function will prepare descriptor for adma
-+ */
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+ int reg_off, const void *vaddr, int size,
-+ bool flow_control)
-+{
-+ struct qcom_adm_peripheral_config periph_conf = {};
-+ struct dma_async_tx_descriptor *dma_desc;
-+ struct dma_slave_config slave_conf = {0};
-+ enum dma_transfer_direction dir_eng;
-+ struct desc_info *desc;
-+ struct scatterlist *sgl;
-+ int ret;
-+
-+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+ if (!desc)
-+ return -ENOMEM;
-+
-+ sgl = &desc->adm_sgl;
-+
-+ sg_init_one(sgl, vaddr, size);
-+
-+ if (read) {
-+ dir_eng = DMA_DEV_TO_MEM;
-+ desc->dir = DMA_FROM_DEVICE;
-+ } else {
-+ dir_eng = DMA_MEM_TO_DEV;
-+ desc->dir = DMA_TO_DEVICE;
-+ }
-+
-+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-+ if (!ret) {
-+ ret = -ENOMEM;
-+ goto err;
-+ }
-+
-+ slave_conf.device_fc = flow_control;
-+ if (read) {
-+ slave_conf.src_maxburst = 16;
-+ slave_conf.src_addr = nandc->base_dma + reg_off;
-+ if (nandc->data_crci) {
-+ periph_conf.crci = nandc->data_crci;
-+ slave_conf.peripheral_config = &periph_conf;
-+ slave_conf.peripheral_size = sizeof(periph_conf);
-+ }
-+ } else {
-+ slave_conf.dst_maxburst = 16;
-+ slave_conf.dst_addr = nandc->base_dma + reg_off;
-+ if (nandc->cmd_crci) {
-+ periph_conf.crci = nandc->cmd_crci;
-+ slave_conf.peripheral_config = &periph_conf;
-+ slave_conf.peripheral_size = sizeof(periph_conf);
-+ }
-+ }
-+
-+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-+ if (ret) {
-+ dev_err(nandc->dev, "failed to configure dma channel\n");
-+ goto err;
-+ }
-+
-+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-+ if (!dma_desc) {
-+ dev_err(nandc->dev, "failed to prepare desc\n");
-+ ret = -EINVAL;
-+ goto err;
-+ }
-+
-+ desc->dma_desc = dma_desc;
-+
-+ list_add_tail(&desc->node, &nandc->desc_list);
-+
-+ return 0;
-+err:
-+ kfree(desc);
-+
-+ return ret;
-+}
-+
-+/**
-+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
-+ * @nandc: qpic nand controller
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to read
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to read a given number of
-+ * contiguous registers to the reg_read_buf pointer.
-+ */
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+ int num_regs, unsigned int flags)
-+{
-+ bool flow_control = false;
-+ void *vaddr;
-+
-+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-+ nandc->reg_read_pos += num_regs;
-+
-+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-+ first = dev_cmd_reg_addr(nandc, first);
-+
-+ if (nandc->props->supports_bam)
-+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+ num_regs, flags);
-+
-+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-+ flow_control = true;
-+
-+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-+ num_regs * sizeof(u32), flow_control);
-+}
-+
-+/**
-+ * qcom_write_reg_dma() - write a given number of registers
-+ * @nandc: qpic nand controller
-+ * @vaddr: contnigeous memory from where register value will
-+ * be written
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to write
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to write a given number of
-+ * contiguous registers
-+ */
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+ int first, int num_regs, unsigned int flags)
-+{
-+ bool flow_control = false;
-+
-+ if (first == NAND_EXEC_CMD)
-+ flags |= NAND_BAM_NWD;
-+
-+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
-+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-+
-+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-+
-+ if (nandc->props->supports_bam)
-+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+ num_regs, flags);
-+
-+ if (first == NAND_FLASH_CMD)
-+ flow_control = true;
-+
-+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-+ num_regs * sizeof(u32), flow_control);
-+}
-+
-+/**
-+ * qcom_read_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from the
-+ * controller's internal buffer to the buffer 'vaddr'
-+ */
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+ const u8 *vaddr, int size, unsigned int flags)
-+{
-+ if (nandc->props->supports_bam)
-+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+
-+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+}
-+
-+/**
-+ * qcom_write_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to read from
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from
-+ * 'vaddr' to the controller's internal buffer
-+ */
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+ const u8 *vaddr, int size, unsigned int flags)
-+{
-+ if (nandc->props->supports_bam)
-+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+
-+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+}
-+
-+/**
-+ * qcom_submit_descs() - submit dma descriptor
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will submit all the prepared dma descriptor
-+ * cmd or data descriptor
-+ */
-+int qcom_submit_descs(struct qcom_nand_controller *nandc)
-+{
-+ struct desc_info *desc, *n;
-+ dma_cookie_t cookie = 0;
-+ struct bam_transaction *bam_txn = nandc->bam_txn;
-+ int ret = 0;
-+
-+ if (nandc->props->supports_bam) {
-+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+ if (ret)
-+ goto err_unmap_free_desc;
-+ }
-+
-+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+ DMA_PREP_INTERRUPT);
-+ if (ret)
-+ goto err_unmap_free_desc;
-+ }
-+
-+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+ DMA_PREP_CMD);
-+ if (ret)
-+ goto err_unmap_free_desc;
-+ }
-+ }
-+
-+ list_for_each_entry(desc, &nandc->desc_list, node)
-+ cookie = dmaengine_submit(desc->dma_desc);
-+
-+ if (nandc->props->supports_bam) {
-+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-+ bam_txn->last_cmd_desc->callback_param = bam_txn;
-+
-+ dma_async_issue_pending(nandc->tx_chan);
-+ dma_async_issue_pending(nandc->rx_chan);
-+ dma_async_issue_pending(nandc->cmd_chan);
-+
-+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
-+ QPIC_NAND_COMPLETION_TIMEOUT))
-+ ret = -ETIMEDOUT;
-+ } else {
-+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-+ ret = -ETIMEDOUT;
-+ }
-+
-+err_unmap_free_desc:
-+ /*
-+ * Unmap the dma sg_list and free the desc allocated by both
-+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-+ */
-+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-+ list_del(&desc->node);
-+
-+ if (nandc->props->supports_bam)
-+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
-+ desc->sgl_cnt, desc->dir);
-+ else
-+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
-+ desc->dir);
-+
-+ kfree(desc);
-+ }
-+
-+ return ret;
-+}
-+
-+/**
-+ * qcom_clear_read_regs() - reset the read register buffer
-+ * @nandc: qpic nand controller
-+ *
-+ * This function reset the register read buffer for next NAND operation
-+ */
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
-+{
-+ nandc->reg_read_pos = 0;
-+ qcom_nandc_dev_to_mem(nandc, false);
-+}
-+
-+/**
-+ * qcom_nandc_unalloc() - unallocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will unallocate memory alloacted for qpic nand controller
-+ */
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-+{
-+ if (nandc->props->supports_bam) {
-+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-+ MAX_REG_RD *
-+ sizeof(*nandc->reg_read_buf),
-+ DMA_FROM_DEVICE);
-+
-+ if (nandc->tx_chan)
-+ dma_release_channel(nandc->tx_chan);
-+
-+ if (nandc->rx_chan)
-+ dma_release_channel(nandc->rx_chan);
-+
-+ if (nandc->cmd_chan)
-+ dma_release_channel(nandc->cmd_chan);
-+ } else {
-+ if (nandc->chan)
-+ dma_release_channel(nandc->chan);
-+ }
-+}
-+
-+/**
-+ * qcom_nandc_alloc() - Allocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate memory for qpic nand controller
-+ */
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-+{
-+ int ret;
-+
-+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-+ if (ret) {
-+ dev_err(nandc->dev, "failed to set DMA mask\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * we use the internal buffer for reading ONFI params, reading small
-+ * data like ID and status, and preforming read-copy-write operations
-+ * when writing to a codeword partially. 532 is the maximum possible
-+ * size of a codeword for our nand controller
-+ */
-+ nandc->buf_size = 532;
-+
-+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
-+ if (!nandc->data_buffer)
-+ return -ENOMEM;
-+
-+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
-+ if (!nandc->regs)
-+ return -ENOMEM;
-+
-+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
-+ sizeof(*nandc->reg_read_buf),
-+ GFP_KERNEL);
-+ if (!nandc->reg_read_buf)
-+ return -ENOMEM;
-+
-+ if (nandc->props->supports_bam) {
-+ nandc->reg_read_dma =
-+ dma_map_single(nandc->dev, nandc->reg_read_buf,
-+ MAX_REG_RD *
-+ sizeof(*nandc->reg_read_buf),
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
-+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
-+ return -EIO;
-+ }
-+
-+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
-+ if (IS_ERR(nandc->tx_chan)) {
-+ ret = PTR_ERR(nandc->tx_chan);
-+ nandc->tx_chan = NULL;
-+ dev_err_probe(nandc->dev, ret,
-+ "tx DMA channel request failed\n");
-+ goto unalloc;
-+ }
-+
-+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
-+ if (IS_ERR(nandc->rx_chan)) {
-+ ret = PTR_ERR(nandc->rx_chan);
-+ nandc->rx_chan = NULL;
-+ dev_err_probe(nandc->dev, ret,
-+ "rx DMA channel request failed\n");
-+ goto unalloc;
-+ }
-+
-+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
-+ if (IS_ERR(nandc->cmd_chan)) {
-+ ret = PTR_ERR(nandc->cmd_chan);
-+ nandc->cmd_chan = NULL;
-+ dev_err_probe(nandc->dev, ret,
-+ "cmd DMA channel request failed\n");
-+ goto unalloc;
-+ }
-+
-+ /*
-+ * Initially allocate BAM transaction to read ONFI param page.
-+ * After detecting all the devices, this BAM transaction will
-+ * be freed and the next BAM transaction will be allocated with
-+ * maximum codeword size
-+ */
-+ nandc->max_cwperpage = 1;
-+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-+ if (!nandc->bam_txn) {
-+ dev_err(nandc->dev,
-+ "failed to allocate bam transaction\n");
-+ ret = -ENOMEM;
-+ goto unalloc;
-+ }
-+ } else {
-+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
-+ if (IS_ERR(nandc->chan)) {
-+ ret = PTR_ERR(nandc->chan);
-+ nandc->chan = NULL;
-+ dev_err_probe(nandc->dev, ret,
-+ "rxtx DMA channel request failed\n");
-+ return ret;
-+ }
-+ }
-+
-+ INIT_LIST_HEAD(&nandc->desc_list);
-+ INIT_LIST_HEAD(&nandc->host_list);
-+
-+ return 0;
-+unalloc:
-+ qcom_nandc_unalloc(nandc);
-+ return ret;
-+}
---- a/drivers/mtd/nand/raw/Kconfig
-+++ b/drivers/mtd/nand/raw/Kconfig
-@@ -330,7 +330,7 @@ config MTD_NAND_HISI504
- Enables support for NAND controller on Hisilicon SoC Hip04.
-
- config MTD_NAND_QCOM
-- tristate "QCOM NAND controller"
-+ bool "QCOM NAND controller"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on HAS_IOMEM
- help
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -15,417 +15,7 @@
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
--
--/* NANDc reg offsets */
--#define NAND_FLASH_CMD 0x00
--#define NAND_ADDR0 0x04
--#define NAND_ADDR1 0x08
--#define NAND_FLASH_CHIP_SELECT 0x0c
--#define NAND_EXEC_CMD 0x10
--#define NAND_FLASH_STATUS 0x14
--#define NAND_BUFFER_STATUS 0x18
--#define NAND_DEV0_CFG0 0x20
--#define NAND_DEV0_CFG1 0x24
--#define NAND_DEV0_ECC_CFG 0x28
--#define NAND_AUTO_STATUS_EN 0x2c
--#define NAND_DEV1_CFG0 0x30
--#define NAND_DEV1_CFG1 0x34
--#define NAND_READ_ID 0x40
--#define NAND_READ_STATUS 0x44
--#define NAND_DEV_CMD0 0xa0
--#define NAND_DEV_CMD1 0xa4
--#define NAND_DEV_CMD2 0xa8
--#define NAND_DEV_CMD_VLD 0xac
--#define SFLASHC_BURST_CFG 0xe0
--#define NAND_ERASED_CW_DETECT_CFG 0xe8
--#define NAND_ERASED_CW_DETECT_STATUS 0xec
--#define NAND_EBI2_ECC_BUF_CFG 0xf0
--#define FLASH_BUF_ACC 0x100
--
--#define NAND_CTRL 0xf00
--#define NAND_VERSION 0xf08
--#define NAND_READ_LOCATION_0 0xf20
--#define NAND_READ_LOCATION_1 0xf24
--#define NAND_READ_LOCATION_2 0xf28
--#define NAND_READ_LOCATION_3 0xf2c
--#define NAND_READ_LOCATION_LAST_CW_0 0xf40
--#define NAND_READ_LOCATION_LAST_CW_1 0xf44
--#define NAND_READ_LOCATION_LAST_CW_2 0xf48
--#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
--
--/* dummy register offsets, used by qcom_write_reg_dma */
--#define NAND_DEV_CMD1_RESTORE 0xdead
--#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
--
--/* NAND_FLASH_CMD bits */
--#define PAGE_ACC BIT(4)
--#define LAST_PAGE BIT(5)
--
--/* NAND_FLASH_CHIP_SELECT bits */
--#define NAND_DEV_SEL 0
--#define DM_EN BIT(2)
--
--/* NAND_FLASH_STATUS bits */
--#define FS_OP_ERR BIT(4)
--#define FS_READY_BSY_N BIT(5)
--#define FS_MPU_ERR BIT(8)
--#define FS_DEVICE_STS_ERR BIT(16)
--#define FS_DEVICE_WP BIT(23)
--
--/* NAND_BUFFER_STATUS bits */
--#define BS_UNCORRECTABLE_BIT BIT(8)
--#define BS_CORRECTABLE_ERR_MSK 0x1f
--
--/* NAND_DEVn_CFG0 bits */
--#define DISABLE_STATUS_AFTER_WRITE 4
--#define CW_PER_PAGE 6
--#define UD_SIZE_BYTES 9
--#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
--#define ECC_PARITY_SIZE_BYTES_RS 19
--#define SPARE_SIZE_BYTES 23
--#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
--#define NUM_ADDR_CYCLES 27
--#define STATUS_BFR_READ 30
--#define SET_RD_MODE_AFTER_STATUS 31
--
--/* NAND_DEVn_CFG0 bits */
--#define DEV0_CFG1_ECC_DISABLE 0
--#define WIDE_FLASH 1
--#define NAND_RECOVERY_CYCLES 2
--#define CS_ACTIVE_BSY 5
--#define BAD_BLOCK_BYTE_NUM 6
--#define BAD_BLOCK_IN_SPARE_AREA 16
--#define WR_RD_BSY_GAP 17
--#define ENABLE_BCH_ECC 27
--
--/* NAND_DEV0_ECC_CFG bits */
--#define ECC_CFG_ECC_DISABLE 0
--#define ECC_SW_RESET 1
--#define ECC_MODE 4
--#define ECC_PARITY_SIZE_BYTES_BCH 8
--#define ECC_NUM_DATA_BYTES 16
--#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
--#define ECC_FORCE_CLK_OPEN 30
--
--/* NAND_DEV_CMD1 bits */
--#define READ_ADDR 0
--
--/* NAND_DEV_CMD_VLD bits */
--#define READ_START_VLD BIT(0)
--#define READ_STOP_VLD BIT(1)
--#define WRITE_START_VLD BIT(2)
--#define ERASE_START_VLD BIT(3)
--#define SEQ_READ_START_VLD BIT(4)
--
--/* NAND_EBI2_ECC_BUF_CFG bits */
--#define NUM_STEPS 0
--
--/* NAND_ERASED_CW_DETECT_CFG bits */
--#define ERASED_CW_ECC_MASK 1
--#define AUTO_DETECT_RES 0
--#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
--#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
--#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
--#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
--#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
--
--/* NAND_ERASED_CW_DETECT_STATUS bits */
--#define PAGE_ALL_ERASED BIT(7)
--#define CODEWORD_ALL_ERASED BIT(6)
--#define PAGE_ERASED BIT(5)
--#define CODEWORD_ERASED BIT(4)
--#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
--#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
--
--/* NAND_READ_LOCATION_n bits */
--#define READ_LOCATION_OFFSET 0
--#define READ_LOCATION_SIZE 16
--#define READ_LOCATION_LAST 31
--
--/* Version Mask */
--#define NAND_VERSION_MAJOR_MASK 0xf0000000
--#define NAND_VERSION_MAJOR_SHIFT 28
--#define NAND_VERSION_MINOR_MASK 0x0fff0000
--#define NAND_VERSION_MINOR_SHIFT 16
--
--/* NAND OP_CMDs */
--#define OP_PAGE_READ 0x2
--#define OP_PAGE_READ_WITH_ECC 0x3
--#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
--#define OP_PAGE_READ_ONFI_READ 0x5
--#define OP_PROGRAM_PAGE 0x6
--#define OP_PAGE_PROGRAM_WITH_ECC 0x7
--#define OP_PROGRAM_PAGE_SPARE 0x9
--#define OP_BLOCK_ERASE 0xa
--#define OP_CHECK_STATUS 0xc
--#define OP_FETCH_ID 0xb
--#define OP_RESET_DEVICE 0xd
--
--/* Default Value for NAND_DEV_CMD_VLD */
--#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
-- ERASE_START_VLD | SEQ_READ_START_VLD)
--
--/* NAND_CTRL bits */
--#define BAM_MODE_EN BIT(0)
--
--/*
-- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-- * the driver calls the chunks 'step' or 'codeword' interchangeably
-- */
--#define NANDC_STEP_SIZE 512
--
--/*
-- * the largest page size we support is 8K, this will have 16 steps/codewords
-- * of 512 bytes each
-- */
--#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
--
--/* we read at most 3 registers per codeword scan */
--#define MAX_REG_RD (3 * MAX_NUM_STEPS)
--
--/* ECC modes supported by the controller */
--#define ECC_NONE BIT(0)
--#define ECC_RS_4BIT BIT(1)
--#define ECC_BCH_4BIT BIT(2)
--#define ECC_BCH_8BIT BIT(3)
--
--/*
-- * Returns the actual register address for all NAND_DEV_ registers
-- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-- */
--#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
--
--/* Returns the NAND register physical address */
--#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
--
--/* Returns the dma address for reg read buffer */
--#define reg_buf_dma_addr(chip, vaddr) \
-- ((chip)->reg_read_dma + \
-- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
--
--#define QPIC_PER_CW_CMD_ELEMENTS 32
--#define QPIC_PER_CW_CMD_SGL 32
--#define QPIC_PER_CW_DATA_SGL 8
--
--#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
--
--/*
-- * Flags used in DMA descriptor preparation helper functions
-- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-- */
--/* Don't set the EOT in current tx BAM sgl */
--#define NAND_BAM_NO_EOT BIT(0)
--/* Set the NWD flag in current BAM sgl */
--#define NAND_BAM_NWD BIT(1)
--/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
--#define NAND_BAM_NEXT_SGL BIT(2)
--/*
-- * Erased codeword status is being used two times in single transfer so this
-- * flag will determine the current value of erased codeword status register
-- */
--#define NAND_ERASED_CW_SET BIT(4)
--
--#define MAX_ADDRESS_CYCLE 5
--
--/*
-- * This data type corresponds to the BAM transaction which will be used for all
-- * NAND transfers.
-- * @bam_ce - the array of BAM command elements
-- * @cmd_sgl - sgl for NAND BAM command pipe
-- * @data_sgl - sgl for NAND BAM consumer/producer pipe
-- * @last_data_desc - last DMA desc in data channel (tx/rx).
-- * @last_cmd_desc - last DMA desc in command channel.
-- * @txn_done - completion for NAND transfer.
-- * @bam_ce_pos - the index in bam_ce which is available for next sgl
-- * @bam_ce_start - the index in bam_ce which marks the start position ce
-- * for current sgl. It will be used for size calculation
-- * for current sgl
-- * @cmd_sgl_pos - current index in command sgl.
-- * @cmd_sgl_start - start index in command sgl.
-- * @tx_sgl_pos - current index in data sgl for tx.
-- * @tx_sgl_start - start index in data sgl for tx.
-- * @rx_sgl_pos - current index in data sgl for rx.
-- * @rx_sgl_start - start index in data sgl for rx.
-- */
--struct bam_transaction {
-- struct bam_cmd_element *bam_ce;
-- struct scatterlist *cmd_sgl;
-- struct scatterlist *data_sgl;
-- struct dma_async_tx_descriptor *last_data_desc;
-- struct dma_async_tx_descriptor *last_cmd_desc;
-- struct completion txn_done;
-- u32 bam_ce_pos;
-- u32 bam_ce_start;
-- u32 cmd_sgl_pos;
-- u32 cmd_sgl_start;
-- u32 tx_sgl_pos;
-- u32 tx_sgl_start;
-- u32 rx_sgl_pos;
-- u32 rx_sgl_start;
--};
--
--/*
-- * This data type corresponds to the nand dma descriptor
-- * @dma_desc - low level DMA engine descriptor
-- * @list - list for desc_info
-- *
-- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-- * ADM
-- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-- * @dir - DMA transfer direction
-- */
--struct desc_info {
-- struct dma_async_tx_descriptor *dma_desc;
-- struct list_head node;
--
-- union {
-- struct scatterlist adm_sgl;
-- struct {
-- struct scatterlist *bam_sgl;
-- int sgl_cnt;
-- };
-- };
-- enum dma_data_direction dir;
--};
--
--/*
-- * holds the current register values that we want to write. acts as a contiguous
-- * chunk of memory which we use to write the controller registers through DMA.
-- */
--struct nandc_regs {
-- __le32 cmd;
-- __le32 addr0;
-- __le32 addr1;
-- __le32 chip_sel;
-- __le32 exec;
--
-- __le32 cfg0;
-- __le32 cfg1;
-- __le32 ecc_bch_cfg;
--
-- __le32 clrflashstatus;
-- __le32 clrreadstatus;
--
-- __le32 cmd1;
-- __le32 vld;
--
-- __le32 orig_cmd1;
-- __le32 orig_vld;
--
-- __le32 ecc_buf_cfg;
-- __le32 read_location0;
-- __le32 read_location1;
-- __le32 read_location2;
-- __le32 read_location3;
-- __le32 read_location_last0;
-- __le32 read_location_last1;
-- __le32 read_location_last2;
-- __le32 read_location_last3;
--
-- __le32 erased_cw_detect_cfg_clr;
-- __le32 erased_cw_detect_cfg_set;
--};
--
--/*
-- * NAND controller data struct
-- *
-- * @dev: parent device
-- *
-- * @base: MMIO base
-- *
-- * @core_clk: controller clock
-- * @aon_clk: another controller clock
-- *
-- * @regs: a contiguous chunk of memory for DMA register
-- * writes. contains the register values to be
-- * written to controller
-- *
-- * @props: properties of current NAND controller,
-- * initialized via DT match data
-- *
-- * @controller: base controller structure
-- * @host_list: list containing all the chips attached to the
-- * controller
-- *
-- * @chan: dma channel
-- * @cmd_crci: ADM DMA CRCI for command flow control
-- * @data_crci: ADM DMA CRCI for data flow control
-- *
-- * @desc_list: DMA descriptor list (list of desc_infos)
-- *
-- * @data_buffer: our local DMA buffer for page read/writes,
-- * used when we can't use the buffer provided
-- * by upper layers directly
-- * @reg_read_buf: local buffer for reading back registers via DMA
-- *
-- * @base_phys: physical base address of controller registers
-- * @base_dma: dma base address of controller registers
-- * @reg_read_dma: contains dma address for register read buffer
-- *
-- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
-- * functions
-- * @max_cwperpage: maximum QPIC codewords required. calculated
-- * from all connected NAND devices pagesize
-- *
-- * @reg_read_pos: marker for data read in reg_read_buf
-- *
-- * @cmd1/vld: some fixed controller register values
-- *
-- * @exec_opwrite: flag to select correct number of code word
-- * while reading status
-- */
--struct qcom_nand_controller {
-- struct device *dev;
--
-- void __iomem *base;
--
-- struct clk *core_clk;
-- struct clk *aon_clk;
--
-- struct nandc_regs *regs;
-- struct bam_transaction *bam_txn;
--
-- const struct qcom_nandc_props *props;
--
-- struct nand_controller controller;
-- struct list_head host_list;
--
-- union {
-- /* will be used only by QPIC for BAM DMA */
-- struct {
-- struct dma_chan *tx_chan;
-- struct dma_chan *rx_chan;
-- struct dma_chan *cmd_chan;
-- };
--
-- /* will be used only by EBI2 for ADM DMA */
-- struct {
-- struct dma_chan *chan;
-- unsigned int cmd_crci;
-- unsigned int data_crci;
-- };
-- };
--
-- struct list_head desc_list;
--
-- u8 *data_buffer;
-- __le32 *reg_read_buf;
--
-- phys_addr_t base_phys;
-- dma_addr_t base_dma;
-- dma_addr_t reg_read_dma;
--
-- int buf_size;
-- int buf_count;
-- int buf_start;
-- unsigned int max_cwperpage;
--
-- int reg_read_pos;
--
-- u32 cmd1, vld;
-- bool exec_opwrite;
--};
-+#include <linux/mtd/nand-qpic-common.h>
-
- /*
- * NAND special boot partitions
-@@ -530,104 +120,6 @@ struct qcom_nand_host {
- bool bch_enabled;
- };
-
--/*
-- * This data type corresponds to the NAND controller properties which varies
-- * among different NAND controllers.
-- * @ecc_modes - ecc mode for NAND
-- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @supports_bam - whether NAND controller is using BAM
-- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-- * @qpic_version2 - flag to indicate QPIC IP version 2
-- * @use_codeword_fixup - whether NAND has different layout for boot partitions
-- */
--struct qcom_nandc_props {
-- u32 ecc_modes;
-- u32 dev_cmd_reg_start;
-- bool supports_bam;
-- bool nandc_part_of_qpic;
-- bool qpic_version2;
-- bool use_codeword_fixup;
--};
--
--/* Frees the BAM transaction memory */
--static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
--{
-- struct bam_transaction *bam_txn = nandc->bam_txn;
--
-- devm_kfree(nandc->dev, bam_txn);
--}
--
--/* Allocates and Initializes the BAM transaction */
--static struct bam_transaction *
--qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
--{
-- struct bam_transaction *bam_txn;
-- size_t bam_txn_size;
-- unsigned int num_cw = nandc->max_cwperpage;
-- void *bam_txn_buf;
--
-- bam_txn_size =
-- sizeof(*bam_txn) + num_cw *
-- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
-- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
-- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
--
-- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
-- if (!bam_txn_buf)
-- return NULL;
--
-- bam_txn = bam_txn_buf;
-- bam_txn_buf += sizeof(*bam_txn);
--
-- bam_txn->bam_ce = bam_txn_buf;
-- bam_txn_buf +=
-- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
--
-- bam_txn->cmd_sgl = bam_txn_buf;
-- bam_txn_buf +=
-- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
--
-- bam_txn->data_sgl = bam_txn_buf;
--
-- init_completion(&bam_txn->txn_done);
--
-- return bam_txn;
--}
--
--/* Clears the BAM transaction indexes */
--static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
--{
-- struct bam_transaction *bam_txn = nandc->bam_txn;
--
-- if (!nandc->props->supports_bam)
-- return;
--
-- bam_txn->bam_ce_pos = 0;
-- bam_txn->bam_ce_start = 0;
-- bam_txn->cmd_sgl_pos = 0;
-- bam_txn->cmd_sgl_start = 0;
-- bam_txn->tx_sgl_pos = 0;
-- bam_txn->tx_sgl_start = 0;
-- bam_txn->rx_sgl_pos = 0;
-- bam_txn->rx_sgl_start = 0;
-- bam_txn->last_data_desc = NULL;
--
-- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-- QPIC_PER_CW_CMD_SGL);
-- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
-- QPIC_PER_CW_DATA_SGL);
--
-- reinit_completion(&bam_txn->txn_done);
--}
--
--/* Callback for DMA descriptor completion */
--static void qcom_qpic_bam_dma_done(void *data)
--{
-- struct bam_transaction *bam_txn = data;
--
-- complete(&bam_txn->txn_done);
--}
--
- static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
- return container_of(chip, struct qcom_nand_host, chip);
-@@ -629,8 +128,8 @@ static inline struct qcom_nand_host *to_
- static inline struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
-- return container_of(chip->controller, struct qcom_nand_controller,
-- controller);
-+ return (struct qcom_nand_controller *)
-+ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
- }
-
- static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-@@ -644,23 +143,6 @@ static inline void nandc_write(struct qc
- iowrite32(val, nandc->base + offset);
- }
-
--static inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
--{
-- if (!nandc->props->supports_bam)
-- return;
--
-- if (is_cpu)
-- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
-- MAX_REG_RD *
-- sizeof(*nandc->reg_read_buf),
-- DMA_FROM_DEVICE);
-- else
-- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
-- MAX_REG_RD *
-- sizeof(*nandc->reg_read_buf),
-- DMA_FROM_DEVICE);
--}
--
- /* Helper to check the code word, whether it is last cw or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
-@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
- }
-
- /*
-- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
-- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-- * which will be submitted to DMA engine.
-- */
--static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-- struct dma_chan *chan,
-- unsigned long flags)
--{
-- struct desc_info *desc;
-- struct scatterlist *sgl;
-- unsigned int sgl_cnt;
-- int ret;
-- struct bam_transaction *bam_txn = nandc->bam_txn;
-- enum dma_transfer_direction dir_eng;
-- struct dma_async_tx_descriptor *dma_desc;
--
-- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-- if (!desc)
-- return -ENOMEM;
--
-- if (chan == nandc->cmd_chan) {
-- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
-- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
-- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
-- dir_eng = DMA_MEM_TO_DEV;
-- desc->dir = DMA_TO_DEVICE;
-- } else if (chan == nandc->tx_chan) {
-- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
-- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
-- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
-- dir_eng = DMA_MEM_TO_DEV;
-- desc->dir = DMA_TO_DEVICE;
-- } else {
-- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
-- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
-- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
-- dir_eng = DMA_DEV_TO_MEM;
-- desc->dir = DMA_FROM_DEVICE;
-- }
--
-- sg_mark_end(sgl + sgl_cnt - 1);
-- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-- if (ret == 0) {
-- dev_err(nandc->dev, "failure in mapping desc\n");
-- kfree(desc);
-- return -ENOMEM;
-- }
--
-- desc->sgl_cnt = sgl_cnt;
-- desc->bam_sgl = sgl;
--
-- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
-- flags);
--
-- if (!dma_desc) {
-- dev_err(nandc->dev, "failure in prep desc\n");
-- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-- kfree(desc);
-- return -EINVAL;
-- }
--
-- desc->dma_desc = dma_desc;
--
-- /* update last data/command descriptor */
-- if (chan == nandc->cmd_chan)
-- bam_txn->last_cmd_desc = dma_desc;
-- else
-- bam_txn->last_data_desc = dma_desc;
--
-- list_add_tail(&desc->node, &nandc->desc_list);
--
-- return 0;
--}
--
--/*
-- * Prepares the command descriptor for BAM DMA which will be used for NAND
-- * register reads and writes. The command descriptor requires the command
-- * to be formed in command element type so this function uses the command
-- * element from bam transaction ce array and fills the same with required
-- * data. A single SGL can contain multiple command elements so
-- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-- * after the current command element.
-- */
--static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-- int reg_off, const void *vaddr,
-- int size, unsigned int flags)
--{
-- int bam_ce_size;
-- int i, ret;
-- struct bam_cmd_element *bam_ce_buffer;
-- struct bam_transaction *bam_txn = nandc->bam_txn;
--
-- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
--
-- /* fill the command desc */
-- for (i = 0; i < size; i++) {
-- if (read)
-- bam_prep_ce(&bam_ce_buffer[i],
-- nandc_reg_phys(nandc, reg_off + 4 * i),
-- BAM_READ_COMMAND,
-- reg_buf_dma_addr(nandc,
-- (__le32 *)vaddr + i));
-- else
-- bam_prep_ce_le32(&bam_ce_buffer[i],
-- nandc_reg_phys(nandc, reg_off + 4 * i),
-- BAM_WRITE_COMMAND,
-- *((__le32 *)vaddr + i));
-- }
--
-- bam_txn->bam_ce_pos += size;
--
-- /* use the separate sgl after this command */
-- if (flags & NAND_BAM_NEXT_SGL) {
-- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
-- bam_ce_size = (bam_txn->bam_ce_pos -
-- bam_txn->bam_ce_start) *
-- sizeof(struct bam_cmd_element);
-- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
-- bam_ce_buffer, bam_ce_size);
-- bam_txn->cmd_sgl_pos++;
-- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
--
-- if (flags & NAND_BAM_NWD) {
-- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-- DMA_PREP_FENCE |
-- DMA_PREP_CMD);
-- if (ret)
-- return ret;
-- }
-- }
--
-- return 0;
--}
--
--/*
-- * Prepares the data descriptor for BAM DMA which will be used for NAND
-- * data reads and writes.
-- */
--static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-- const void *vaddr, int size, unsigned int flags)
--{
-- int ret;
-- struct bam_transaction *bam_txn = nandc->bam_txn;
--
-- if (read) {
-- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
-- vaddr, size);
-- bam_txn->rx_sgl_pos++;
-- } else {
-- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
-- vaddr, size);
-- bam_txn->tx_sgl_pos++;
--
-- /*
-- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
-- * is not set, form the DMA descriptor
-- */
-- if (!(flags & NAND_BAM_NO_EOT)) {
-- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-- DMA_PREP_INTERRUPT);
-- if (ret)
-- return ret;
-- }
-- }
--
-- return 0;
--}
--
--static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-- int reg_off, const void *vaddr, int size,
-- bool flow_control)
--{
-- struct desc_info *desc;
-- struct dma_async_tx_descriptor *dma_desc;
-- struct scatterlist *sgl;
-- struct dma_slave_config slave_conf;
-- struct qcom_adm_peripheral_config periph_conf = {};
-- enum dma_transfer_direction dir_eng;
-- int ret;
--
-- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-- if (!desc)
-- return -ENOMEM;
--
-- sgl = &desc->adm_sgl;
--
-- sg_init_one(sgl, vaddr, size);
--
-- if (read) {
-- dir_eng = DMA_DEV_TO_MEM;
-- desc->dir = DMA_FROM_DEVICE;
-- } else {
-- dir_eng = DMA_MEM_TO_DEV;
-- desc->dir = DMA_TO_DEVICE;
-- }
--
-- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-- if (ret == 0) {
-- ret = -ENOMEM;
-- goto err;
-- }
--
-- memset(&slave_conf, 0x00, sizeof(slave_conf));
--
-- slave_conf.device_fc = flow_control;
-- if (read) {
-- slave_conf.src_maxburst = 16;
-- slave_conf.src_addr = nandc->base_dma + reg_off;
-- if (nandc->data_crci) {
-- periph_conf.crci = nandc->data_crci;
-- slave_conf.peripheral_config = &periph_conf;
-- slave_conf.peripheral_size = sizeof(periph_conf);
-- }
-- } else {
-- slave_conf.dst_maxburst = 16;
-- slave_conf.dst_addr = nandc->base_dma + reg_off;
-- if (nandc->cmd_crci) {
-- periph_conf.crci = nandc->cmd_crci;
-- slave_conf.peripheral_config = &periph_conf;
-- slave_conf.peripheral_size = sizeof(periph_conf);
-- }
-- }
--
-- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-- if (ret) {
-- dev_err(nandc->dev, "failed to configure dma channel\n");
-- goto err;
-- }
--
-- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-- if (!dma_desc) {
-- dev_err(nandc->dev, "failed to prepare desc\n");
-- ret = -EINVAL;
-- goto err;
-- }
--
-- desc->dma_desc = dma_desc;
--
-- list_add_tail(&desc->node, &nandc->desc_list);
--
-- return 0;
--err:
-- kfree(desc);
--
-- return ret;
--}
--
--/*
-- * qcom_read_reg_dma: prepares a descriptor to read a given number of
-- * contiguous registers to the reg_read_buf pointer
-- *
-- * @first: offset of the first register in the contiguous block
-- * @num_regs: number of registers to read
-- * @flags: flags to control DMA descriptor preparation
-- */
--static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-- int num_regs, unsigned int flags)
--{
-- bool flow_control = false;
-- void *vaddr;
--
-- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-- nandc->reg_read_pos += num_regs;
--
-- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-- first = dev_cmd_reg_addr(nandc, first);
--
-- if (nandc->props->supports_bam)
-- return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-- num_regs, flags);
--
-- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-- flow_control = true;
--
-- return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-- num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_write_reg_dma: prepares a descriptor to write a given number of
-- * contiguous registers
-- *
-- * @vaddr: contnigeous memory from where register value will
-- * be written
-- * @first: offset of the first register in the contiguous block
-- * @num_regs: number of registers to write
-- * @flags: flags to control DMA descriptor preparation
-- */
--static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-- int first, int num_regs, unsigned int flags)
--{
-- bool flow_control = false;
--
-- if (first == NAND_EXEC_CMD)
-- flags |= NAND_BAM_NWD;
--
-- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
-- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
--
-- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--
-- if (nandc->props->supports_bam)
-- return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-- num_regs, flags);
--
-- if (first == NAND_FLASH_CMD)
-- flow_control = true;
--
-- return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-- num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
-- * controller's internal buffer to the buffer 'vaddr'
-- *
-- * @reg_off: offset within the controller's data buffer
-- * @vaddr: virtual address of the buffer we want to write to
-- * @size: DMA transaction size in bytes
-- * @flags: flags to control DMA descriptor preparation
-- */
--static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-- const u8 *vaddr, int size, unsigned int flags)
--{
-- if (nandc->props->supports_bam)
-- return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--
-- return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
--}
--
--/*
-- * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
-- * 'vaddr' to the controller's internal buffer
-- *
-- * @reg_off: offset within the controller's data buffer
-- * @vaddr: virtual address of the buffer we want to read from
-- * @size: DMA transaction size in bytes
-- * @flags: flags to control DMA descriptor preparation
-- */
--static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-- const u8 *vaddr, int size, unsigned int flags)
--{
-- if (nandc->props->supports_bam)
-- return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--
-- return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
--}
--
--/*
- * Helper to prepare DMA descriptors for configuring registers
- * before reading a NAND page.
- */
-@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
- NAND_BAM_NEXT_SGL);
- }
-
--/* helpers to submit/free our list of dma descriptors */
--static int qcom_submit_descs(struct qcom_nand_controller *nandc)
--{
-- struct desc_info *desc, *n;
-- dma_cookie_t cookie = 0;
-- struct bam_transaction *bam_txn = nandc->bam_txn;
-- int ret = 0;
--
-- if (nandc->props->supports_bam) {
-- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-- ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-- if (ret)
-- goto err_unmap_free_desc;
-- }
--
-- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-- ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-- DMA_PREP_INTERRUPT);
-- if (ret)
-- goto err_unmap_free_desc;
-- }
--
-- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-- ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-- DMA_PREP_CMD);
-- if (ret)
-- goto err_unmap_free_desc;
-- }
-- }
--
-- list_for_each_entry(desc, &nandc->desc_list, node)
-- cookie = dmaengine_submit(desc->dma_desc);
--
-- if (nandc->props->supports_bam) {
-- bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-- bam_txn->last_cmd_desc->callback_param = bam_txn;
--
-- dma_async_issue_pending(nandc->tx_chan);
-- dma_async_issue_pending(nandc->rx_chan);
-- dma_async_issue_pending(nandc->cmd_chan);
--
-- if (!wait_for_completion_timeout(&bam_txn->txn_done,
-- QPIC_NAND_COMPLETION_TIMEOUT))
-- ret = -ETIMEDOUT;
-- } else {
-- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-- ret = -ETIMEDOUT;
-- }
--
--err_unmap_free_desc:
-- /*
-- * Unmap the dma sg_list and free the desc allocated by both
-- * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-- */
-- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-- list_del(&desc->node);
--
-- if (nandc->props->supports_bam)
-- dma_unmap_sg(nandc->dev, desc->bam_sgl,
-- desc->sgl_cnt, desc->dir);
-- else
-- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
-- desc->dir);
--
-- kfree(desc);
-- }
--
-- return ret;
--}
--
--/* reset the register read buffer for next NAND operation */
--static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
--{
-- nandc->reg_read_pos = 0;
-- qcom_nandc_dev_to_mem(nandc, false);
--}
--
- /*
- * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
- * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
-@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops
- .exec_op = qcom_nand_exec_op,
- };
-
--static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
--{
-- if (nandc->props->supports_bam) {
-- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-- MAX_REG_RD *
-- sizeof(*nandc->reg_read_buf),
-- DMA_FROM_DEVICE);
--
-- if (nandc->tx_chan)
-- dma_release_channel(nandc->tx_chan);
--
-- if (nandc->rx_chan)
-- dma_release_channel(nandc->rx_chan);
--
-- if (nandc->cmd_chan)
-- dma_release_channel(nandc->cmd_chan);
-- } else {
-- if (nandc->chan)
-- dma_release_channel(nandc->chan);
-- }
--}
--
--static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
--{
-- int ret;
--
-- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-- if (ret) {
-- dev_err(nandc->dev, "failed to set DMA mask\n");
-- return ret;
-- }
--
-- /*
-- * we use the internal buffer for reading ONFI params, reading small
-- * data like ID and status, and preforming read-copy-write operations
-- * when writing to a codeword partially. 532 is the maximum possible
-- * size of a codeword for our nand controller
-- */
-- nandc->buf_size = 532;
--
-- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
-- if (!nandc->data_buffer)
-- return -ENOMEM;
--
-- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
-- if (!nandc->regs)
-- return -ENOMEM;
--
-- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
-- sizeof(*nandc->reg_read_buf),
-- GFP_KERNEL);
-- if (!nandc->reg_read_buf)
-- return -ENOMEM;
--
-- if (nandc->props->supports_bam) {
-- nandc->reg_read_dma =
-- dma_map_single(nandc->dev, nandc->reg_read_buf,
-- MAX_REG_RD *
-- sizeof(*nandc->reg_read_buf),
-- DMA_FROM_DEVICE);
-- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
-- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
-- return -EIO;
-- }
--
-- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
-- if (IS_ERR(nandc->tx_chan)) {
-- ret = PTR_ERR(nandc->tx_chan);
-- nandc->tx_chan = NULL;
-- dev_err_probe(nandc->dev, ret,
-- "tx DMA channel request failed\n");
-- goto unalloc;
-- }
--
-- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
-- if (IS_ERR(nandc->rx_chan)) {
-- ret = PTR_ERR(nandc->rx_chan);
-- nandc->rx_chan = NULL;
-- dev_err_probe(nandc->dev, ret,
-- "rx DMA channel request failed\n");
-- goto unalloc;
-- }
--
-- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
-- if (IS_ERR(nandc->cmd_chan)) {
-- ret = PTR_ERR(nandc->cmd_chan);
-- nandc->cmd_chan = NULL;
-- dev_err_probe(nandc->dev, ret,
-- "cmd DMA channel request failed\n");
-- goto unalloc;
-- }
--
-- /*
-- * Initially allocate BAM transaction to read ONFI param page.
-- * After detecting all the devices, this BAM transaction will
-- * be freed and the next BAM transaction will be allocated with
-- * maximum codeword size
-- */
-- nandc->max_cwperpage = 1;
-- nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-- if (!nandc->bam_txn) {
-- dev_err(nandc->dev,
-- "failed to allocate bam transaction\n");
-- ret = -ENOMEM;
-- goto unalloc;
-- }
-- } else {
-- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
-- if (IS_ERR(nandc->chan)) {
-- ret = PTR_ERR(nandc->chan);
-- nandc->chan = NULL;
-- dev_err_probe(nandc->dev, ret,
-- "rxtx DMA channel request failed\n");
-- return ret;
-- }
-- }
--
-- INIT_LIST_HEAD(&nandc->desc_list);
-- INIT_LIST_HEAD(&nandc->host_list);
--
-- nand_controller_init(&nandc->controller);
-- nandc->controller.ops = &qcom_nandc_ops;
--
-- return 0;
--unalloc:
-- qcom_nandc_unalloc(nandc);
-- return ret;
--}
--
- /* one time setup of a few nand controller registers */
- static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
- {
- u32 nand_ctrl;
-
-+ nand_controller_init(nandc->controller);
-+ nandc->controller->ops = &qcom_nandc_ops;
-+
- /* kill onenand */
- if (!nandc->props->nandc_part_of_qpic)
- nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_regis
- chip->legacy.block_bad = qcom_nandc_block_bad;
- chip->legacy.block_markbad = qcom_nandc_block_markbad;
-
-- chip->controller = &nandc->controller;
-+ chip->controller = nandc->controller;
- chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
- NAND_SKIP_BBTSCAN;
-
-@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct pl
- static int qcom_nandc_probe(struct platform_device *pdev)
- {
- struct qcom_nand_controller *nandc;
-+ struct nand_controller *controller;
- const void *dev_data;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int ret;
-
-- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
-+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
-+ GFP_KERNEL);
- if (!nandc)
- return -ENOMEM;
-+ controller = (struct nand_controller *)&nandc[1];
-
- platform_set_drvdata(pdev, nandc);
- nandc->dev = dev;
-+ nandc->controller = controller;
-
- dev_data = of_device_get_match_data(dev);
- if (!dev_data) {
---- /dev/null
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -0,0 +1,468 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * QCOM QPIC common APIs header file
-+ *
-+ * Copyright (c) 2023 Qualcomm Inc.
-+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com>
-+ *
-+ */
-+#ifndef __MTD_NAND_QPIC_COMMON_H__
-+#define __MTD_NAND_QPIC_COMMON_H__
-+
-+/* NANDc reg offsets */
-+#define NAND_FLASH_CMD 0x00
-+#define NAND_ADDR0 0x04
-+#define NAND_ADDR1 0x08
-+#define NAND_FLASH_CHIP_SELECT 0x0c
-+#define NAND_EXEC_CMD 0x10
-+#define NAND_FLASH_STATUS 0x14
-+#define NAND_BUFFER_STATUS 0x18
-+#define NAND_DEV0_CFG0 0x20
-+#define NAND_DEV0_CFG1 0x24
-+#define NAND_DEV0_ECC_CFG 0x28
-+#define NAND_AUTO_STATUS_EN 0x2c
-+#define NAND_DEV1_CFG0 0x30
-+#define NAND_DEV1_CFG1 0x34
-+#define NAND_READ_ID 0x40
-+#define NAND_READ_STATUS 0x44
-+#define NAND_DEV_CMD0 0xa0
-+#define NAND_DEV_CMD1 0xa4
-+#define NAND_DEV_CMD2 0xa8
-+#define NAND_DEV_CMD_VLD 0xac
-+#define SFLASHC_BURST_CFG 0xe0
-+#define NAND_ERASED_CW_DETECT_CFG 0xe8
-+#define NAND_ERASED_CW_DETECT_STATUS 0xec
-+#define NAND_EBI2_ECC_BUF_CFG 0xf0
-+#define FLASH_BUF_ACC 0x100
-+
-+#define NAND_CTRL 0xf00
-+#define NAND_VERSION 0xf08
-+#define NAND_READ_LOCATION_0 0xf20
-+#define NAND_READ_LOCATION_1 0xf24
-+#define NAND_READ_LOCATION_2 0xf28
-+#define NAND_READ_LOCATION_3 0xf2c
-+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
-+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
-+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
-+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-+
-+/* dummy register offsets, used by qcom_write_reg_dma */
-+#define NAND_DEV_CMD1_RESTORE 0xdead
-+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-+
-+/* NAND_FLASH_CMD bits */
-+#define PAGE_ACC BIT(4)
-+#define LAST_PAGE BIT(5)
-+
-+/* NAND_FLASH_CHIP_SELECT bits */
-+#define NAND_DEV_SEL 0
-+#define DM_EN BIT(2)
-+
-+/* NAND_FLASH_STATUS bits */
-+#define FS_OP_ERR BIT(4)
-+#define FS_READY_BSY_N BIT(5)
-+#define FS_MPU_ERR BIT(8)
-+#define FS_DEVICE_STS_ERR BIT(16)
-+#define FS_DEVICE_WP BIT(23)
-+
-+/* NAND_BUFFER_STATUS bits */
-+#define BS_UNCORRECTABLE_BIT BIT(8)
-+#define BS_CORRECTABLE_ERR_MSK 0x1f
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define DISABLE_STATUS_AFTER_WRITE 4
-+#define CW_PER_PAGE 6
-+#define UD_SIZE_BYTES 9
-+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-+#define ECC_PARITY_SIZE_BYTES_RS 19
-+#define SPARE_SIZE_BYTES 23
-+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
-+#define NUM_ADDR_CYCLES 27
-+#define STATUS_BFR_READ 30
-+#define SET_RD_MODE_AFTER_STATUS 31
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define DEV0_CFG1_ECC_DISABLE 0
-+#define WIDE_FLASH 1
-+#define NAND_RECOVERY_CYCLES 2
-+#define CS_ACTIVE_BSY 5
-+#define BAD_BLOCK_BYTE_NUM 6
-+#define BAD_BLOCK_IN_SPARE_AREA 16
-+#define WR_RD_BSY_GAP 17
-+#define ENABLE_BCH_ECC 27
-+
-+/* NAND_DEV0_ECC_CFG bits */
-+#define ECC_CFG_ECC_DISABLE 0
-+#define ECC_SW_RESET 1
-+#define ECC_MODE 4
-+#define ECC_PARITY_SIZE_BYTES_BCH 8
-+#define ECC_NUM_DATA_BYTES 16
-+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-+#define ECC_FORCE_CLK_OPEN 30
-+
-+/* NAND_DEV_CMD1 bits */
-+#define READ_ADDR 0
-+
-+/* NAND_DEV_CMD_VLD bits */
-+#define READ_START_VLD BIT(0)
-+#define READ_STOP_VLD BIT(1)
-+#define WRITE_START_VLD BIT(2)
-+#define ERASE_START_VLD BIT(3)
-+#define SEQ_READ_START_VLD BIT(4)
-+
-+/* NAND_EBI2_ECC_BUF_CFG bits */
-+#define NUM_STEPS 0
-+
-+/* NAND_ERASED_CW_DETECT_CFG bits */
-+#define ERASED_CW_ECC_MASK 1
-+#define AUTO_DETECT_RES 0
-+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
-+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
-+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
-+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
-+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
-+
-+/* NAND_ERASED_CW_DETECT_STATUS bits */
-+#define PAGE_ALL_ERASED BIT(7)
-+#define CODEWORD_ALL_ERASED BIT(6)
-+#define PAGE_ERASED BIT(5)
-+#define CODEWORD_ERASED BIT(4)
-+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
-+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-+
-+/* NAND_READ_LOCATION_n bits */
-+#define READ_LOCATION_OFFSET 0
-+#define READ_LOCATION_SIZE 16
-+#define READ_LOCATION_LAST 31
-+
-+/* Version Mask */
-+#define NAND_VERSION_MAJOR_MASK 0xf0000000
-+#define NAND_VERSION_MAJOR_SHIFT 28
-+#define NAND_VERSION_MINOR_MASK 0x0fff0000
-+#define NAND_VERSION_MINOR_SHIFT 16
-+
-+/* NAND OP_CMDs */
-+#define OP_PAGE_READ 0x2
-+#define OP_PAGE_READ_WITH_ECC 0x3
-+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
-+#define OP_PAGE_READ_ONFI_READ 0x5
-+#define OP_PROGRAM_PAGE 0x6
-+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
-+#define OP_PROGRAM_PAGE_SPARE 0x9
-+#define OP_BLOCK_ERASE 0xa
-+#define OP_CHECK_STATUS 0xc
-+#define OP_FETCH_ID 0xb
-+#define OP_RESET_DEVICE 0xd
-+
-+/* Default Value for NAND_DEV_CMD_VLD */
-+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
-+ ERASE_START_VLD | SEQ_READ_START_VLD)
-+
-+/* NAND_CTRL bits */
-+#define BAM_MODE_EN BIT(0)
-+
-+/*
-+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-+ * the driver calls the chunks 'step' or 'codeword' interchangeably
-+ */
-+#define NANDC_STEP_SIZE 512
-+
-+/*
-+ * the largest page size we support is 8K, this will have 16 steps/codewords
-+ * of 512 bytes each
-+ */
-+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
-+
-+/* we read at most 3 registers per codeword scan */
-+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
-+
-+/* ECC modes supported by the controller */
-+#define ECC_NONE BIT(0)
-+#define ECC_RS_4BIT BIT(1)
-+#define ECC_BCH_4BIT BIT(2)
-+#define ECC_BCH_8BIT BIT(3)
-+
-+/*
-+ * Returns the actual register address for all NAND_DEV_ registers
-+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-+ */
-+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-+
-+/* Returns the NAND register physical address */
-+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-+
-+/* Returns the dma address for reg read buffer */
-+#define reg_buf_dma_addr(chip, vaddr) \
-+ ((chip)->reg_read_dma + \
-+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-+
-+#define QPIC_PER_CW_CMD_ELEMENTS 32
-+#define QPIC_PER_CW_CMD_SGL 32
-+#define QPIC_PER_CW_DATA_SGL 8
-+
-+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-+
-+/*
-+ * Flags used in DMA descriptor preparation helper functions
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-+ */
-+/* Don't set the EOT in current tx BAM sgl */
-+#define NAND_BAM_NO_EOT BIT(0)
-+/* Set the NWD flag in current BAM sgl */
-+#define NAND_BAM_NWD BIT(1)
-+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-+#define NAND_BAM_NEXT_SGL BIT(2)
-+/*
-+ * Erased codeword status is being used two times in single transfer so this
-+ * flag will determine the current value of erased codeword status register
-+ */
-+#define NAND_ERASED_CW_SET BIT(4)
-+
-+#define MAX_ADDRESS_CYCLE 5
-+
-+/*
-+ * This data type corresponds to the BAM transaction which will be used for all
-+ * NAND transfers.
-+ * @bam_ce - the array of BAM command elements
-+ * @cmd_sgl - sgl for NAND BAM command pipe
-+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
-+ * @last_data_desc - last DMA desc in data channel (tx/rx).
-+ * @last_cmd_desc - last DMA desc in command channel.
-+ * @txn_done - completion for NAND transfer.
-+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
-+ * @bam_ce_start - the index in bam_ce which marks the start position ce
-+ * for current sgl. It will be used for size calculation
-+ * for current sgl
-+ * @cmd_sgl_pos - current index in command sgl.
-+ * @cmd_sgl_start - start index in command sgl.
-+ * @tx_sgl_pos - current index in data sgl for tx.
-+ * @tx_sgl_start - start index in data sgl for tx.
-+ * @rx_sgl_pos - current index in data sgl for rx.
-+ * @rx_sgl_start - start index in data sgl for rx.
-+ */
-+struct bam_transaction {
-+ struct bam_cmd_element *bam_ce;
-+ struct scatterlist *cmd_sgl;
-+ struct scatterlist *data_sgl;
-+ struct dma_async_tx_descriptor *last_data_desc;
-+ struct dma_async_tx_descriptor *last_cmd_desc;
-+ struct completion txn_done;
-+ u32 bam_ce_pos;
-+ u32 bam_ce_start;
-+ u32 cmd_sgl_pos;
-+ u32 cmd_sgl_start;
-+ u32 tx_sgl_pos;
-+ u32 tx_sgl_start;
-+ u32 rx_sgl_pos;
-+ u32 rx_sgl_start;
-+};
-+
-+/*
-+ * This data type corresponds to the nand dma descriptor
-+ * @dma_desc - low level DMA engine descriptor
-+ * @list - list for desc_info
-+ *
-+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-+ * ADM
-+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-+ * @dir - DMA transfer direction
-+ */
-+struct desc_info {
-+ struct dma_async_tx_descriptor *dma_desc;
-+ struct list_head node;
-+
-+ union {
-+ struct scatterlist adm_sgl;
-+ struct {
-+ struct scatterlist *bam_sgl;
-+ int sgl_cnt;
-+ };
-+ };
-+ enum dma_data_direction dir;
-+};
-+
-+/*
-+ * holds the current register values that we want to write. acts as a contiguous
-+ * chunk of memory which we use to write the controller registers through DMA.
-+ */
-+struct nandc_regs {
-+ __le32 cmd;
-+ __le32 addr0;
-+ __le32 addr1;
-+ __le32 chip_sel;
-+ __le32 exec;
-+
-+ __le32 cfg0;
-+ __le32 cfg1;
-+ __le32 ecc_bch_cfg;
-+
-+ __le32 clrflashstatus;
-+ __le32 clrreadstatus;
-+
-+ __le32 cmd1;
-+ __le32 vld;
-+
-+ __le32 orig_cmd1;
-+ __le32 orig_vld;
-+
-+ __le32 ecc_buf_cfg;
-+ __le32 read_location0;
-+ __le32 read_location1;
-+ __le32 read_location2;
-+ __le32 read_location3;
-+ __le32 read_location_last0;
-+ __le32 read_location_last1;
-+ __le32 read_location_last2;
-+ __le32 read_location_last3;
-+
-+ __le32 erased_cw_detect_cfg_clr;
-+ __le32 erased_cw_detect_cfg_set;
-+};
-+
-+/*
-+ * NAND controller data struct
-+ *
-+ * @dev: parent device
-+ *
-+ * @base: MMIO base
-+ *
-+ * @core_clk: controller clock
-+ * @aon_clk: another controller clock
-+ *
-+ * @regs: a contiguous chunk of memory for DMA register
-+ * writes. contains the register values to be
-+ * written to controller
-+ *
-+ * @props: properties of current NAND controller,
-+ * initialized via DT match data
-+ *
-+ * @controller: base controller structure
-+ * @host_list: list containing all the chips attached to the
-+ * controller
-+ *
-+ * @chan: dma channel
-+ * @cmd_crci: ADM DMA CRCI for command flow control
-+ * @data_crci: ADM DMA CRCI for data flow control
-+ *
-+ * @desc_list: DMA descriptor list (list of desc_infos)
-+ *
-+ * @data_buffer: our local DMA buffer for page read/writes,
-+ * used when we can't use the buffer provided
-+ * by upper layers directly
-+ * @reg_read_buf: local buffer for reading back registers via DMA
-+ *
-+ * @base_phys: physical base address of controller registers
-+ * @base_dma: dma base address of controller registers
-+ * @reg_read_dma: contains dma address for register read buffer
-+ *
-+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
-+ * functions
-+ * @max_cwperpage: maximum QPIC codewords required. calculated
-+ * from all connected NAND devices pagesize
-+ *
-+ * @reg_read_pos: marker for data read in reg_read_buf
-+ *
-+ * @cmd1/vld: some fixed controller register values
-+ *
-+ * @exec_opwrite: flag to select correct number of code word
-+ * while reading status
-+ */
-+struct qcom_nand_controller {
-+ struct device *dev;
-+
-+ void __iomem *base;
-+
-+ struct clk *core_clk;
-+ struct clk *aon_clk;
-+
-+ struct nandc_regs *regs;
-+ struct bam_transaction *bam_txn;
-+
-+ const struct qcom_nandc_props *props;
-+
-+ struct nand_controller *controller;
-+ struct list_head host_list;
-+
-+ union {
-+ /* will be used only by QPIC for BAM DMA */
-+ struct {
-+ struct dma_chan *tx_chan;
-+ struct dma_chan *rx_chan;
-+ struct dma_chan *cmd_chan;
-+ };
-+
-+ /* will be used only by EBI2 for ADM DMA */
-+ struct {
-+ struct dma_chan *chan;
-+ unsigned int cmd_crci;
-+ unsigned int data_crci;
-+ };
-+ };
-+
-+ struct list_head desc_list;
-+
-+ u8 *data_buffer;
-+ __le32 *reg_read_buf;
-+
-+ phys_addr_t base_phys;
-+ dma_addr_t base_dma;
-+ dma_addr_t reg_read_dma;
-+
-+ int buf_size;
-+ int buf_count;
-+ int buf_start;
-+ unsigned int max_cwperpage;
-+
-+ int reg_read_pos;
-+
-+ u32 cmd1, vld;
-+ bool exec_opwrite;
-+};
-+
-+/*
-+ * This data type corresponds to the NAND controller properties which varies
-+ * among different NAND controllers.
-+ * @ecc_modes - ecc mode for NAND
-+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
-+ */
-+struct qcom_nandc_props {
-+ u32 ecc_modes;
-+ u32 dev_cmd_reg_start;
-+ bool supports_bam;
-+ bool nandc_part_of_qpic;
-+ bool qpic_version2;
-+ bool use_codeword_fixup;
-+};
-+
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
-+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_qpic_bam_dma_done(void *data);
-+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+ struct dma_chan *chan, unsigned long flags);
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+ int reg_off, const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+ const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
-+ const void *vaddr, int size, bool flow_control);
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
-+ unsigned int flags);
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
-+ int num_regs, unsigned int flags);
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+ int size, unsigned int flags);
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+ int size, unsigned int flags);
-+int qcom_submit_descs(struct qcom_nand_controller *nandc);
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
-+#endif
-+
+++ /dev/null
-From 9c5b6453db27706f090ab06987394aabaaf24e1b Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Wed, 11 Sep 2024 12:50:42 +0530
-Subject: [PATCH v10 5/8] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
-
-Use the bitfield macro FIELD_PREP, and GENMASK to
-do the shift and mask in one go. This makes the code
-more readable.
-
-Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
----
-
-Change in [v10]
-
-* No change
-
-Change in [v9]
-
-* In update_rw_regs() api added cpu_to_le32() macro to fix compilation
- issue reported by kernel test bot
-* In qcom_param_page_type_exec() api added cpu_to_le32() macro to fix
- compilation issue reported by kernel test bot
-
-Change in [v8]
-
-* No change
-
-Change in [v7]
-
-* No change
-
-Change in [v6]
-
-* Added FIELD_PREP() and GENMASK() macro
-
-Change in [v5]
-
-* This patch was not included in [v1]
-
-Change in [v4]
-
-* This patch was not included in [v4]
-
-Change in [v3]
-
-* This patch was not included in [v3]
-
-Change in [v2]
-
-* This patch was not included in [v2]
-
-Change in [v1]
-
-* This patch was not included in [v1]
-
- drivers/mtd/nand/raw/qcom_nandc.c | 97 ++++++++++++++--------------
- include/linux/mtd/nand-qpic-common.h | 31 +++++----
- 2 files changed, 67 insertions(+), 61 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
- (num_cw - 1) << CW_PER_PAGE);
-
- cfg1 = cpu_to_le32(host->cfg1_raw);
-- ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
- }
-
- nandc->regs->cmd = cmd;
-@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
- host->cw_size = host->cw_data + ecc->bytes;
- bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
-
-- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
-- | host->cw_data << UD_SIZE_BYTES
-- | 0 << DISABLE_STATUS_AFTER_WRITE
-- | 5 << NUM_ADDR_CYCLES
-- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
-- | 0 << STATUS_BFR_READ
-- | 1 << SET_RD_MODE_AFTER_STATUS
-- | host->spare_bytes << SPARE_SIZE_BYTES;
--
-- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
-- | 0 << CS_ACTIVE_BSY
-- | bad_block_byte << BAD_BLOCK_BYTE_NUM
-- | 0 << BAD_BLOCK_IN_SPARE_AREA
-- | 2 << WR_RD_BSY_GAP
-- | wide_bus << WIDE_FLASH
-- | host->bch_enabled << ENABLE_BCH_ECC;
--
-- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
-- | host->cw_size << UD_SIZE_BYTES
-- | 5 << NUM_ADDR_CYCLES
-- | 0 << SPARE_SIZE_BYTES;
--
-- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
-- | 0 << CS_ACTIVE_BSY
-- | 17 << BAD_BLOCK_BYTE_NUM
-- | 1 << BAD_BLOCK_IN_SPARE_AREA
-- | 2 << WR_RD_BSY_GAP
-- | wide_bus << WIDE_FLASH
-- | 1 << DEV0_CFG1_ECC_DISABLE;
--
-- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
-- | 0 << ECC_SW_RESET
-- | host->cw_data << ECC_NUM_DATA_BYTES
-- | 1 << ECC_FORCE_CLK_OPEN
-- | ecc_mode << ECC_MODE
-- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
-+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
-+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
-+ FIELD_PREP(STATUS_BFR_READ, 0) |
-+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
-+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
-+
-+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
-+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
-+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+ FIELD_PREP(WIDE_FLASH, wide_bus) |
-+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
-+
-+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
-+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+ FIELD_PREP(WIDE_FLASH, wide_bus) |
-+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+
-+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
-+ FIELD_PREP(ECC_SW_RESET, 0) |
-+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
-+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
-+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
-+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
-
- if (!nandc->props->qpic_version2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
- nandc->regs->addr0 = 0;
- nandc->regs->addr1 = 0;
-
-- nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE
-- | 512 << UD_SIZE_BYTES
-- | 5 << NUM_ADDR_CYCLES
-- | 0 << SPARE_SIZE_BYTES);
--
-- nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES
-- | 0 << CS_ACTIVE_BSY
-- | 17 << BAD_BLOCK_BYTE_NUM
-- | 1 << BAD_BLOCK_IN_SPARE_AREA
-- | 2 << WR_RD_BSY_GAP
-- | 0 << WIDE_FLASH
-- | 1 << DEV0_CFG1_ECC_DISABLE);
-+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
-+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
-+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+ FIELD_PREP(WIDE_FLASH, 0) |
-+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-
- if (!nandc->props->qpic_version2)
-- nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
-
- /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_version2) {
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -70,35 +70,42 @@
- #define BS_CORRECTABLE_ERR_MSK 0x1f
-
- /* NAND_DEVn_CFG0 bits */
--#define DISABLE_STATUS_AFTER_WRITE 4
-+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
- #define CW_PER_PAGE 6
-+#define CW_PER_PAGE_MASK GENMASK(8, 6)
- #define UD_SIZE_BYTES 9
- #define UD_SIZE_BYTES_MASK GENMASK(18, 9)
--#define ECC_PARITY_SIZE_BYTES_RS 19
-+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
- #define SPARE_SIZE_BYTES 23
- #define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
- #define NUM_ADDR_CYCLES 27
--#define STATUS_BFR_READ 30
--#define SET_RD_MODE_AFTER_STATUS 31
-+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
-+#define STATUS_BFR_READ BIT(30)
-+#define SET_RD_MODE_AFTER_STATUS BIT(31)
-
- /* NAND_DEVn_CFG0 bits */
--#define DEV0_CFG1_ECC_DISABLE 0
--#define WIDE_FLASH 1
-+#define DEV0_CFG1_ECC_DISABLE BIT(0)
-+#define WIDE_FLASH BIT(1)
- #define NAND_RECOVERY_CYCLES 2
--#define CS_ACTIVE_BSY 5
-+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
-+#define CS_ACTIVE_BSY BIT(5)
- #define BAD_BLOCK_BYTE_NUM 6
--#define BAD_BLOCK_IN_SPARE_AREA 16
-+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
-+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
- #define WR_RD_BSY_GAP 17
--#define ENABLE_BCH_ECC 27
-+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
-+#define ENABLE_BCH_ECC BIT(27)
-
- /* NAND_DEV0_ECC_CFG bits */
--#define ECC_CFG_ECC_DISABLE 0
--#define ECC_SW_RESET 1
-+#define ECC_CFG_ECC_DISABLE BIT(0)
-+#define ECC_SW_RESET BIT(1)
- #define ECC_MODE 4
-+#define ECC_MODE_MASK GENMASK(5, 4)
- #define ECC_PARITY_SIZE_BYTES_BCH 8
-+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
- #define ECC_NUM_DATA_BYTES 16
- #define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
--#define ECC_FORCE_CLK_OPEN 30
-+#define ECC_FORCE_CLK_OPEN BIT(30)
-
- /* NAND_DEV_CMD1 bits */
- #define READ_ADDR 0
-From dc12953941ed3b8bc9eb8d47f8c7e74f54b47049 Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
-Date: Mon, 19 Aug 2024 11:05:18 +0530
-Subject: [PATCH v10 6/8] spi: spi-qpic: add driver for QCOM SPI NAND flash
- Interface
+To: <broonie@kernel.org>, <robh@kernel.org>, <krzk+dt@kernel.org>,
+ <conor+dt@kernel.org>, <andersson@kernel.org>,
+ <konradybcio@kernel.org>, <miquel.raynal@bootlin.com>,
+ <richard@nod.at>, <vigneshr@ti.com>,
+ <manivannan.sadhasivam@linaro.org>,
+ <linux-arm-msm@vger.kernel.org>, <linux-spi@vger.kernel.org>,
+ <devicetree@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
+ <linux-mtd@lists.infradead.org>
+Cc: <quic_srichara@quicinc.com>, <quic_varada@quicinc.com>,
+ <quic_mdalam@quicinc.com>
+Subject: [PATCH v14 6/8] spi: spi-qpic: add driver for QCOM SPI NAND flash Interface
+Date: Wed, 20 Nov 2024 14:45:04 +0530 [thread overview]
+Message-ID: <20241120091507.1404368-7-quic_mdalam@quicinc.com> (raw)
+In-Reply-To: <20241120091507.1404368-1-quic_mdalam@quicinc.com>
This driver implements support for the SPI-NAND mode of QCOM NAND Flash
Interface as a SPI-MEM controller with pipelined ECC capability.
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
---
+Change in [v14]
+
+* No Change
+
+Change in [v13]
+
+* Changed return type of qcom_spi_cmd_mapping() from u32 to
+ int to fix the kernel test bot warning
+* Changed type of variable cmd in qcom_spi_write_page() from u32
+ to int
+* Removed unused variable s_op from qcom_spi_write_page()
+* Updated return value variable type from u32 to int in
+ qcom_spi_send_cmdaddr()
+
+Change in [v12]
+
+* Added obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o in Makefile
+ to build qpic_common.c based on CONFIG_SPI_QPIC_SNAND
+
+Change in [v11]
+
+* Fixed build error reported by kernel test bot
+* Changed "depends on MTD" to "select MTD" in
+ drivers/spi/Kconfig file
+
Change in [v10]
* Fixed compilation warnings reported by kernel test robot.
* Added RFC patch for design review
- drivers/mtd/nand/Makefile | 5 +-
+ drivers/mtd/nand/Makefile | 4 +
drivers/spi/Kconfig | 9 +
drivers/spi/Makefile | 1 +
- drivers/spi/spi-qpic-snand.c | 1634 ++++++++++++++++++++++++++
+ drivers/spi/spi-qpic-snand.c | 1633 ++++++++++++++++++++++++++
include/linux/mtd/nand-qpic-common.h | 7 +
- 5 files changed, 1655 insertions(+), 1 deletion(-)
+ 5 files changed, 1654 insertions(+)
create mode 100644 drivers/spi/spi-qpic-snand.c
+diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
+index da1586a36574..db516a45f0c5 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
-@@ -7,8 +7,11 @@ obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bm
-
- ifeq ($(CONFIG_MTD_NAND_QCOM),y)
- obj-y += qpic_common.o
-+else
+@@ -3,7 +3,11 @@
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
+ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
-+obj-y += qpic_common.o
++obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
++else
+ obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+endif
- endif
--
obj-y += onenand/
obj-y += raw/
obj-y += spi/
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index f51f9466e518..1aaf93964429 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
-@@ -870,6 +870,15 @@ config SPI_QCOM_QSPI
+@@ -920,6 +920,15 @@ config SPI_QCOM_QSPI
help
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+config SPI_QPIC_SNAND
+ bool "QPIC SNAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
-+ depends on MTD
++ select MTD
+ help
+ QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
+ QPIC controller supports both parallel nand and serial nand.
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || COMPILE_TEST
+diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
+index aea5e54de195..3309b7bb2445 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
-@@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-
+@@ -115,6 +115,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
+diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
+new file mode 100644
+index 000000000000..1ba562a9369e
--- /dev/null
+++ b/drivers/spi/spi-qpic-snand.c
-@@ -0,0 +1,1634 @@
+@@ -0,0 +1,1633 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ return 0;
+}
+
-+static u32 qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode)
++static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode)
+{
-+ u32 cmd = 0x0;
++ int cmd = 0x0;
+
+ switch (opcode) {
+ case SPINAND_RESET:
+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
-+ struct qpic_snand_op s_op = {};
-+ u32 cmd;
++ int cmd;
+
+ cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
+ if (cmd < 0)
+ return cmd;
+
-+ s_op.cmd_reg = cmd;
-+
+ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
+ snandc->qspi->data_buf = (u8 *)op->data.buf.out;
+
+ u32 cmd;
+ int ret, opcode;
+
-+ cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
-+ if (cmd < 0)
-+ return cmd;
++ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
++ if (ret < 0)
++ return ret;
++
++ cmd = ret;
+
+ s_op.cmd_reg = cmd;
+ s_op.addr1_reg = op->addr.val;
+
+static const struct of_device_id qcom_snandc_of_match[] = {
+ {
-+ .compatible = "qcom,spi-qpic-snand",
++ .compatible = "qcom,ipq9574-snand",
+ .data = &ipq9574_snandc_props,
+ },
+ {}
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_LICENSE("GPL");
+
+diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
+index e79c79775eb8..7dba89654d6c 100644
--- a/include/linux/mtd/nand-qpic-common.h
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -322,6 +322,10 @@ struct nandc_regs {
struct list_head host_list;
union {
+--
+2.34.1
+++ /dev/null
-From be826ce36477e94539f5d2dfe292126dbb39b3a4 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 7 Nov 2024 17:50:22 +0800
-Subject: [PATCH 1/5] dt-bindings: clock: qcom: Add CMN PLL clock controller
- for IPQ SoC
-
-The CMN PLL controller provides clocks to networking hardware blocks
-and to GCC on Qualcomm IPQ9574 SoC. It receives input clock from the
-on-chip Wi-Fi, and produces output clocks at fixed rates. These output
-rates are predetermined, and are unrelated to the input clock rate.
-The primary purpose of CMN PLL is to supply clocks to the networking
-hardware such as PPE (packet process engine), PCS and the externally
-connected switch or PHY device. The CMN PLL block also outputs fixed
-rate clocks to GCC, such as 24 MHZ as XO clock and 32 KHZ as sleep
-clock supplied to GCC.
-
-Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../bindings/clock/qcom,ipq9574-cmn-pll.yaml | 85 +++++++++++++++++++
- include/dt-bindings/clock/qcom,ipq-cmn-pll.h | 22 +++++
- 2 files changed, 107 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
- create mode 100644 include/dt-bindings/clock/qcom,ipq-cmn-pll.h
-
-diff --git a/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
-new file mode 100644
-index 000000000000..db8a3ee56067
---- /dev/null
-+++ b/Documentation/devicetree/bindings/clock/qcom,ipq9574-cmn-pll.yaml
-@@ -0,0 +1,85 @@
-+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/clock/qcom,ipq9574-cmn-pll.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Qualcomm CMN PLL Clock Controller on IPQ SoC
-+
-+maintainers:
-+ - Bjorn Andersson <andersson@kernel.org>
-+ - Luo Jie <quic_luoj@quicinc.com>
-+
-+description:
-+ The CMN (or common) PLL clock controller expects a reference
-+ input clock. This reference clock is from the on-board Wi-Fi.
-+ The CMN PLL supplies a number of fixed rate output clocks to
-+ the devices providing networking functions and to GCC. These
-+ networking hardware include PPE (packet process engine), PCS
-+ and the externally connected switch or PHY devices. The CMN
-+ PLL block also outputs fixed rate clocks to GCC. The PLL's
-+ primary function is to enable fixed rate output clocks for
-+ networking hardware functions used with the IPQ SoC.
-+
-+properties:
-+ compatible:
-+ enum:
-+ - qcom,ipq9574-cmn-pll
-+
-+ reg:
-+ maxItems: 1
-+
-+ clocks:
-+ items:
-+ - description: The reference clock. The supported clock rates include
-+ 25000000, 31250000, 40000000, 48000000, 50000000 and 96000000 HZ.
-+ - description: The AHB clock
-+ - description: The SYS clock
-+ description:
-+ The reference clock is the source clock of CMN PLL, which is from the
-+ Wi-Fi. The AHB and SYS clocks must be enabled to access CMN PLL
-+ clock registers.
-+
-+ clock-names:
-+ items:
-+ - const: ref
-+ - const: ahb
-+ - const: sys
-+
-+ "#clock-cells":
-+ const: 1
-+
-+ assigned-clocks:
-+ maxItems: 1
-+
-+ assigned-clock-rates-u64:
-+ maxItems: 1
-+
-+required:
-+ - compatible
-+ - reg
-+ - clocks
-+ - clock-names
-+ - "#clock-cells"
-+ - assigned-clocks
-+ - assigned-clock-rates-u64
-+
-+additionalProperties: false
-+
-+examples:
-+ - |
-+ #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
-+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
-+
-+ cmn_pll: clock-controller@9b000 {
-+ compatible = "qcom,ipq9574-cmn-pll";
-+ reg = <0x0009b000 0x800>;
-+ clocks = <&cmn_pll_ref_clk>,
-+ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
-+ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
-+ clock-names = "ref", "ahb", "sys";
-+ #clock-cells = <1>;
-+ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
-+ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
-+ };
-+...
-diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
-new file mode 100644
-index 000000000000..936e92b3b62c
---- /dev/null
-+++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
-@@ -0,0 +1,22 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
-+#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
-+
-+/* CMN PLL core clock. */
-+#define CMN_PLL_CLK 0
-+
-+/* The output clocks from CMN PLL of IPQ9574. */
-+#define XO_24MHZ_CLK 1
-+#define SLEEP_32KHZ_CLK 2
-+#define PCS_31P25MHZ_CLK 3
-+#define NSS_1200MHZ_CLK 4
-+#define PPE_353MHZ_CLK 5
-+#define ETH0_50MHZ_CLK 6
-+#define ETH1_50MHZ_CLK 7
-+#define ETH2_50MHZ_CLK 8
-+#define ETH_25MHZ_CLK 9
-+#endif
---
-2.45.2
-
+++ /dev/null
-From f9ecde8dc380769d1477f01416d2e3a65c4fd881 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 7 Nov 2024 17:50:23 +0800
-Subject: [PATCH 2/5] clk: qcom: Add CMN PLL clock controller driver for IPQ
- SoC
-
-The CMN PLL clock controller supplies clocks to the hardware
-blocks that together make up the Ethernet function on Qualcomm
-IPQ SoCs and to GCC. The driver is initially supported for
-IPQ9574 SoC.
-
-The CMN PLL clock controller expects a reference input clock
-from the on-board Wi-Fi block acting as clock source. The input
-reference clock needs to be configured to one of the supported
-clock rates.
-
-The controller supplies a number of fixed-rate output clocks.
-For the IPQ9574, there is one output clock of 353 MHZ to PPE
-(Packet Process Engine) hardware block, three 50 MHZ output
-clocks and an additional 25 MHZ output clock supplied to the
-connected Ethernet devices. The PLL also supplies a 24 MHZ
-clock as XO and a 32 KHZ sleep clock to GCC, and one 31.25
-MHZ clock to PCS.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- drivers/clk/qcom/Kconfig | 9 +
- drivers/clk/qcom/Makefile | 1 +
- drivers/clk/qcom/ipq-cmn-pll.c | 436 +++++++++++++++++++++++++++++++++
- 3 files changed, 446 insertions(+)
- create mode 100644 drivers/clk/qcom/ipq-cmn-pll.c
-
-diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
-index b9a5cc9fd8c8..3cc7156f881d 100644
---- a/drivers/clk/qcom/Kconfig
-+++ b/drivers/clk/qcom/Kconfig
-@@ -190,6 +190,15 @@ config IPQ_APSS_6018
- Say Y if you want to support CPU frequency scaling on
- ipq based devices.
-
-+config IPQ_CMN_PLL
-+ tristate "IPQ CMN PLL Clock Controller"
-+ help
-+ Support for CMN PLL clock controller on IPQ platform. The
-+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
-+ the output clocks to the networking hardware and GCC blocks.
-+ Say Y or M if you want to support CMN PLL clock on the IPQ
-+ based devices.
-+
- config IPQ_GCC_4019
- tristate "IPQ4019 Global Clock Controller"
- help
-diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
-index 65b825a54c45..d12ed80a3021 100644
---- a/drivers/clk/qcom/Makefile
-+++ b/drivers/clk/qcom/Makefile
-@@ -29,6 +29,7 @@ obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
- obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
- obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
- obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
-+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
- obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
- obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
- obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
-diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
-new file mode 100644
-index 000000000000..1da8a4a9a8d5
---- /dev/null
-+++ b/drivers/clk/qcom/ipq-cmn-pll.c
-@@ -0,0 +1,436 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
-+ */
-+
-+/*
-+ * CMN PLL block expects the reference clock from on-board Wi-Fi block,
-+ * and supplies fixed rate clocks as output to the networking hardware
-+ * blocks and to GCC. The networking related blocks include PPE (packet
-+ * process engine), the externally connected PHY or switch devices, and
-+ * the PCS.
-+ *
-+ * On the IPQ9574 SoC, there are three clocks with 50 MHZ and one clock
-+ * with 25 MHZ which are output from the CMN PLL to Ethernet PHY (or switch),
-+ * and one clock with 353 MHZ to PPE. The other fixed rate output clocks
-+ * are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
-+ * with 31.25 MHZ.
-+ *
-+ * +---------+
-+ * | GCC |
-+ * +--+---+--+
-+ * AHB CLK| |SYS CLK
-+ * V V
-+ * +-------+---+------+
-+ * | +-------------> eth0-50mhz
-+ * REF CLK | IPQ9574 |
-+ * -------->+ +-------------> eth1-50mhz
-+ * | CMN PLL block |
-+ * | +-------------> eth2-50mhz
-+ * | |
-+ * +----+----+----+---+-------------> eth-25mhz
-+ * | | |
-+ * V V V
-+ * GCC PCS NSS/PPE
-+ */
-+
-+#include <linux/bitfield.h>
-+#include <linux/clk-provider.h>
-+#include <linux/delay.h>
-+#include <linux/err.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/module.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_clock.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/regmap.h>
-+
-+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
-+
-+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
-+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
-+
-+#define CMN_PLL_LOCKED 0x64
-+#define CMN_PLL_CLKS_LOCKED BIT(8)
-+
-+#define CMN_PLL_POWER_ON_AND_RESET 0x780
-+#define CMN_ANA_EN_SW_RSTN BIT(6)
-+
-+#define CMN_PLL_REFCLK_CONFIG 0x784
-+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
-+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
-+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
-+
-+#define CMN_PLL_CTRL 0x78c
-+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
-+
-+#define CMN_PLL_DIVIDER_CTRL 0x794
-+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
-+
-+/**
-+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
-+ * @id: Clock specifier to be supplied
-+ * @name: Clock name to be registered
-+ * @rate: Clock rate
-+ */
-+struct cmn_pll_fixed_output_clk {
-+ unsigned int id;
-+ const char *name;
-+ unsigned long rate;
-+};
-+
-+/**
-+ * struct clk_cmn_pll - CMN PLL hardware specific data
-+ * @regmap: hardware regmap.
-+ * @hw: handle between common and hardware-specific interfaces
-+ */
-+struct clk_cmn_pll {
-+ struct regmap *regmap;
-+ struct clk_hw hw;
-+};
-+
-+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
-+ .id = _id, \
-+ .name = _name, \
-+ .rate = _rate, \
-+}
-+
-+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
-+
-+static const struct regmap_config ipq_cmn_pll_regmap_config = {
-+ .reg_bits = 32,
-+ .reg_stride = 4,
-+ .val_bits = 32,
-+ .max_register = 0x7fc,
-+ .fast_io = true,
-+};
-+
-+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
-+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
-+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
-+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
-+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
-+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
-+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
-+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
-+};
-+
-+/*
-+ * CMN PLL has the single parent clock, which supports the several
-+ * possible parent clock rates, each parent clock rate is reflected
-+ * by the specific reference index value in the hardware.
-+ */
-+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
-+{
-+ int index = -EINVAL;
-+
-+ switch (parent_rate) {
-+ case 25000000:
-+ index = 3;
-+ break;
-+ case 31250000:
-+ index = 4;
-+ break;
-+ case 40000000:
-+ index = 6;
-+ break;
-+ case 48000000:
-+ case 96000000:
-+ /*
-+ * Parent clock rate 48 MHZ and 96 MHZ take the same value
-+ * of reference clock index. 96 MHZ needs the source clock
-+ * divider to be programmed as 2.
-+ */
-+ index = 7;
-+ break;
-+ case 50000000:
-+ index = 8;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return index;
-+}
-+
-+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
-+ unsigned long parent_rate)
-+{
-+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
-+ u32 val, factor;
-+
-+ /*
-+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
-+ * by HW according to the parent clock rate.
-+ */
-+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
-+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
-+
-+ return parent_rate * 2 * factor;
-+}
-+
-+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
-+ struct clk_rate_request *req)
-+{
-+ int ret;
-+
-+ /* Validate the rate of the single parent clock. */
-+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
-+
-+ return ret < 0 ? ret : 0;
-+}
-+
-+/*
-+ * This function is used to initialize the CMN PLL to enable the fixed
-+ * rate output clocks. It is expected to be configured once.
-+ */
-+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-+ unsigned long parent_rate)
-+{
-+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
-+ int ret, index;
-+ u32 val;
-+
-+ /*
-+ * Configure the reference input clock selection as per the given
-+ * parent clock. The output clock rates are always of fixed value.
-+ */
-+ index = ipq_cmn_pll_find_freq_index(parent_rate);
-+ if (index < 0)
-+ return index;
-+
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
-+ CMN_PLL_REFCLK_INDEX,
-+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Update the source clock rate selection and source clock
-+ * divider as 2 when the parent clock rate is 96 MHZ.
-+ */
-+ if (parent_rate == 96000000) {
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
-+ CMN_PLL_REFCLK_DIV,
-+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
-+ if (ret)
-+ return ret;
-+
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
-+ CMN_PLL_REFCLK_SRC_DIV,
-+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
-+ if (ret)
-+ return ret;
-+ }
-+
-+ /* Enable PLL locked detect. */
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_CTRL,
-+ CMN_PLL_CTRL_LOCK_DETECT_EN,
-+ CMN_PLL_CTRL_LOCK_DETECT_EN);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Reset the CMN PLL block to ensure the updated configurations
-+ * take effect.
-+ */
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
-+ CMN_ANA_EN_SW_RSTN, 0);
-+ if (ret)
-+ return ret;
-+
-+ usleep_range(1000, 1200);
-+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
-+ CMN_ANA_EN_SW_RSTN, CMN_ANA_EN_SW_RSTN);
-+ if (ret)
-+ return ret;
-+
-+ /* Stability check of CMN PLL output clocks. */
-+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
-+ (val & CMN_PLL_CLKS_LOCKED),
-+ 100, 100 * USEC_PER_MSEC);
-+}
-+
-+static const struct clk_ops clk_cmn_pll_ops = {
-+ .recalc_rate = clk_cmn_pll_recalc_rate,
-+ .determine_rate = clk_cmn_pll_determine_rate,
-+ .set_rate = clk_cmn_pll_set_rate,
-+};
-+
-+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
-+{
-+ struct clk_parent_data pdata = { .index = 0 };
-+ struct device *dev = &pdev->dev;
-+ struct clk_init_data init = {};
-+ struct clk_cmn_pll *cmn_pll;
-+ struct regmap *regmap;
-+ void __iomem *base;
-+ int ret;
-+
-+ base = devm_platform_ioremap_resource(pdev, 0);
-+ if (IS_ERR(base))
-+ return ERR_CAST(base);
-+
-+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
-+ if (IS_ERR(regmap))
-+ return ERR_CAST(regmap);
-+
-+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
-+ if (!cmn_pll)
-+ return ERR_PTR(-ENOMEM);
-+
-+ init.name = "cmn_pll";
-+ init.parent_data = &pdata;
-+ init.num_parents = 1;
-+ init.ops = &clk_cmn_pll_ops;
-+
-+ cmn_pll->hw.init = &init;
-+ cmn_pll->regmap = regmap;
-+
-+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
-+ if (ret)
-+ return ERR_PTR(ret);
-+
-+ return &cmn_pll->hw;
-+}
-+
-+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
-+{
-+ const struct cmn_pll_fixed_output_clk *fixed_clk;
-+ struct clk_hw_onecell_data *hw_data;
-+ struct device *dev = &pdev->dev;
-+ struct clk_hw *cmn_pll_hw;
-+ unsigned int num_clks;
-+ struct clk_hw *hw;
-+ int ret, i;
-+
-+ fixed_clk = ipq9574_output_clks;
-+ num_clks = ARRAY_SIZE(ipq9574_output_clks);
-+
-+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
-+ GFP_KERNEL);
-+ if (!hw_data)
-+ return -ENOMEM;
-+
-+ /*
-+ * Register the CMN PLL clock, which is the parent clock of
-+ * the fixed rate output clocks.
-+ */
-+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
-+ if (IS_ERR(cmn_pll_hw))
-+ return PTR_ERR(cmn_pll_hw);
-+
-+ /* Register the fixed rate output clocks. */
-+ for (i = 0; i < num_clks; i++) {
-+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
-+ cmn_pll_hw, 0,
-+ fixed_clk[i].rate);
-+ if (IS_ERR(hw)) {
-+ ret = PTR_ERR(hw);
-+ goto unregister_fixed_clk;
-+ }
-+
-+ hw_data->hws[fixed_clk[i].id] = hw;
-+ }
-+
-+ /*
-+ * Provide the CMN PLL clock. The clock rate of CMN PLL
-+ * is configured to 12 GHZ by DT property assigned-clock-rates-u64.
-+ */
-+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
-+ hw_data->num = num_clks + 1;
-+
-+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
-+ if (ret)
-+ goto unregister_fixed_clk;
-+
-+ platform_set_drvdata(pdev, hw_data);
-+
-+ return 0;
-+
-+unregister_fixed_clk:
-+ while (i > 0)
-+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
-+
-+ return ret;
-+}
-+
-+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ int ret;
-+
-+ ret = devm_pm_runtime_enable(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = devm_pm_clk_create(dev);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * To access the CMN PLL registers, the GCC AHB & SYSY clocks
-+ * of CMN PLL block need to be enabled.
-+ */
-+ ret = pm_clk_add(dev, "ahb");
-+ if (ret)
-+ return dev_err_probe(dev, ret, "Fail to add AHB clock\n");
-+
-+ ret = pm_clk_add(dev, "sys");
-+ if (ret)
-+ return dev_err_probe(dev, ret, "Fail to add SYS clock\n");
-+
-+ ret = pm_runtime_resume_and_get(dev);
-+ if (ret)
-+ return ret;
-+
-+ /* Register CMN PLL clock and fixed rate output clocks. */
-+ ret = ipq_cmn_pll_register_clks(pdev);
-+ pm_runtime_put(dev);
-+ if (ret)
-+ return dev_err_probe(dev, ret,
-+ "Fail to register CMN PLL clocks\n");
-+
-+ return 0;
-+}
-+
-+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
-+{
-+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
-+ int i;
-+
-+ /*
-+ * The clock with index CMN_PLL_CLK is unregistered by
-+ * device management.
-+ */
-+ for (i = 0; i < hw_data->num; i++) {
-+ if (i != CMN_PLL_CLK)
-+ clk_hw_unregister(hw_data->hws[i]);
-+ }
-+}
-+
-+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
-+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
-+};
-+
-+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
-+ { .compatible = "qcom,ipq9574-cmn-pll", },
-+ { }
-+};
-+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
-+
-+static struct platform_driver ipq_cmn_pll_clk_driver = {
-+ .probe = ipq_cmn_pll_clk_probe,
-+ .remove_new = ipq_cmn_pll_clk_remove,
-+ .driver = {
-+ .name = "ipq_cmn_pll",
-+ .of_match_table = ipq_cmn_pll_clk_ids,
-+ .pm = &ipq_cmn_pll_pm_ops,
-+ },
-+};
-+module_platform_driver(ipq_cmn_pll_clk_driver);
-+
-+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
-+MODULE_LICENSE("GPL");
---
-2.45.2
-
+++ /dev/null
-From ed97b7b7c657baf9c9d8e9dfebd9f7703c870593 Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 7 Nov 2024 17:50:25 +0800
-Subject: [PATCH 4/5] arm64: dts: qcom: Add CMN PLL node for IPQ9574 SoC
-
-The CMN PLL clock controller allows selection of an input clock rate
-from a defined set of input clock rates. It in-turn supplies fixed
-rate output clocks to the hardware blocks that provide the ethernet
-functions such as PPE (Packet Process Engine) and connected switch or
-PHY, and to GCC.
-
-The reference clock of CMN PLL is routed from XO to the CMN PLL through
-the internal WiFi block.
-.XO (48 MHZ or 96 MHZ)-->WiFi (multiplier/divider)-->48 MHZ to CMN PLL.
-
-The reference input clock from WiFi to CMN PLL is fully controlled by
-the bootstrap pins which select the XO frequency (48 MHZ or 96 MHZ).
-Based on this frequency, the divider in the internal Wi-Fi block is
-automatically configured by hardware (1 for 48 MHZ, 2 for 96 MHZ), to
-ensure output clock to CMN PLL is 48 MHZ.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- .../boot/dts/qcom/ipq9574-rdp-common.dtsi | 16 +++++++++++-
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 26 ++++++++++++++++++-
- 2 files changed, 40 insertions(+), 2 deletions(-)
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-index 91e104b0f865..78f6a2e053d5 100644
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -3,7 +3,7 @@
- * IPQ9574 RDP board common device tree source
- *
- * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
- /dts-v1/;
-@@ -164,6 +164,20 @@ &usb3 {
- status = "okay";
- };
-
-+/*
-+ * The bootstrap pins for the board select the XO clock frequency,
-+ * which automatically enables the right dividers to ensure the
-+ * reference clock output from WiFi is 48 MHZ.
-+ */
-+&ref_48mhz_clk {
-+ clock-div = <1>;
-+ clock-mult = <1>;
-+};
-+
- &xo_board_clk {
- clock-frequency = <24000000>;
- };
-+
-+&xo_clk {
-+ clock-frequency = <48000000>;
-+};
-diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-index c113fff22f73..dc4965abff58 100644
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -3,10 +3,11 @@
- * IPQ9574 SoC device tree source
- *
- * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
- #include <dt-bindings/clock/qcom,apss-ipq.h>
-+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
- #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
- #include <dt-bindings/interconnect/qcom,ipq9574.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
-@@ -21,6 +22,12 @@ / {
- #size-cells = <2>;
-
- clocks {
-+ ref_48mhz_clk: ref-48mhz-clk {
-+ compatible = "fixed-factor-clock";
-+ clocks = <&xo_clk>;
-+ #clock-cells = <0>;
-+ };
-+
- sleep_clk: sleep-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
-@@ -30,6 +37,11 @@ xo_board_clk: xo-board-clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- };
-+
-+ xo_clk: xo-clk {
-+ compatible = "fixed-clock";
-+ #clock-cells = <0>;
-+ };
- };
-
- cpus {
-@@ -245,6 +257,18 @@ mdio: mdio@90000 {
- status = "disabled";
- };
-
-+ cmn_pll: clock-controller@9b000 {
-+ compatible = "qcom,ipq9574-cmn-pll";
-+ reg = <0x0009b000 0x800>;
-+ clocks = <&ref_48mhz_clk>,
-+ <&gcc GCC_CMN_12GPLL_AHB_CLK>,
-+ <&gcc GCC_CMN_12GPLL_SYS_CLK>;
-+ clock-names = "ref", "ahb", "sys";
-+ #clock-cells = <1>;
-+ assigned-clocks = <&cmn_pll CMN_PLL_CLK>;
-+ assigned-clock-rates-u64 = /bits/ 64 <12000000000>;
-+ };
-+
- qfprom: efuse@a4000 {
- compatible = "qcom,ipq9574-qfprom", "qcom,qfprom";
- reg = <0x000a4000 0x5a1>;
---
-2.45.2
-
+++ /dev/null
-From dcb1e63fbc695c3971d7207238a78f66355a2f9a Mon Sep 17 00:00:00 2001
-From: Luo Jie <quic_luoj@quicinc.com>
-Date: Thu, 7 Nov 2024 17:50:26 +0800
-Subject: [PATCH 5/5] arm64: dts: qcom: Update IPQ9574 xo_board_clk to use
- fixed factor clock
-
-xo_board_clk is fixed to 24 MHZ, which is routed from WiFi output clock
-48 MHZ (also being the reference clock of CMN PLL) divided 2 by analog
-block routing channel.
-
-Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
----
- arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi | 7 ++++++-
- arch/arm64/boot/dts/qcom/ipq9574.dtsi | 3 ++-
- 2 files changed, 8 insertions(+), 2 deletions(-)
-
-diff --git a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-index 78f6a2e053d5..9a8692377176 100644
---- a/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574-rdp-common.dtsi
-@@ -174,8 +174,13 @@ &ref_48mhz_clk {
- clock-mult = <1>;
- };
-
-+/*
-+ * The frequency of xo_board_clk is fixed to 24 MHZ, which is routed
-+ * from WiFi output clock 48 MHZ divided by 2.
-+ */
- &xo_board_clk {
-- clock-frequency = <24000000>;
-+ clock-div = <2>;
-+ clock-mult = <1>;
- };
-
- &xo_clk {
-diff --git a/arch/arm64/boot/dts/qcom/ipq9574.dtsi b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-index dc4965abff58..376b75976524 100644
---- a/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-+++ b/arch/arm64/boot/dts/qcom/ipq9574.dtsi
-@@ -34,7 +34,8 @@ sleep_clk: sleep-clk {
- };
-
- xo_board_clk: xo-board-clk {
-- compatible = "fixed-clock";
-+ compatible = "fixed-factor-clock";
-+ clocks = <&ref_48mhz_clk>;
- #clock-cells = <0>;
- };
-
---
-2.45.2
-
+++ /dev/null
-From 4305650c92eef5921cc140c999eccbb6de1ab4b8 Mon Sep 17 00:00:00 2001
-From: Devi Priya <quic_devipriy@quicinc.com>
-Date: Fri, 25 Oct 2024 09:25:14 +0530
-Subject: [PATCH 1/7] clk: qcom: clk-alpha-pll: Add NSS HUAYRA ALPHA PLL
- support for ipq9574
-
-Add support for NSS Huayra alpha pll found on ipq9574 SoCs.
-Programming sequence is the same as that of Huayra type Alpha PLL,
-so we can re-use the same.
-
-Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-Signed-off-by: Devi Priya <quic_devipriy@quicinc.com>
-Signed-off-by: Manikanta Mylavarapu <quic_mmanikan@quicinc.com>
----
- drivers/clk/qcom/clk-alpha-pll.c | 11 +++++++++++
- drivers/clk/qcom/clk-alpha-pll.h | 1 +
- 2 files changed, 12 insertions(+)
-
-diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
-index f9105443d7db..c2e56e9403ff 100644
---- a/drivers/clk/qcom/clk-alpha-pll.c
-+++ b/drivers/clk/qcom/clk-alpha-pll.c
-@@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
- [PLL_OFF_OPMODE] = 0x30,
- [PLL_OFF_STATUS] = 0x3c,
- },
-+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
-+ [PLL_OFF_L_VAL] = 0x04,
-+ [PLL_OFF_ALPHA_VAL] = 0x08,
-+ [PLL_OFF_TEST_CTL] = 0x0c,
-+ [PLL_OFF_TEST_CTL_U] = 0x10,
-+ [PLL_OFF_USER_CTL] = 0x14,
-+ [PLL_OFF_CONFIG_CTL] = 0x18,
-+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
-+ [PLL_OFF_STATUS] = 0x20,
-+ },
-+
- };
- EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
-
-diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
-index 55eca04b23a1..c6d1b8429f95 100644
---- a/drivers/clk/qcom/clk-alpha-pll.h
-+++ b/drivers/clk/qcom/clk-alpha-pll.h
-@@ -32,6 +32,7 @@ enum {
- CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
- CLK_ALPHA_PLL_TYPE_STROMER,
- CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
-+ CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
- CLK_ALPHA_PLL_TYPE_MAX,
- };
-
---
-2.45.2
-
+ partition@4d0000 {
+ label = "0:rpm";
+ reg = <0x4d0000 0x20000>;
-+ read-only;
++ // read-only;
+ };
+
+ partition@4f0000 {